diff --git a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart index 1fa0adde..1a3b57db 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_assistant_request.dart @@ -35,6 +35,23 @@ class CreateAssistantRequest with _$CreateAssistantRequest { /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or temperature but not both. + @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, + + /// Specifies the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + @_CreateAssistantRequestResponseFormatConverter() + @JsonKey(name: 'response_format', includeIfNull: false) + CreateAssistantRequestResponseFormat? responseFormat, }) = _CreateAssistantRequest; /// Object construction from a JSON representation @@ -49,13 +66,22 @@ class CreateAssistantRequest with _$CreateAssistantRequest { 'instructions', 'tools', 'file_ids', - 'metadata' + 'metadata', + 'temperature', + 'top_p', + 'response_format' ]; /// Validation constants static const nameMaxLengthValue = 256; static const descriptionMaxLengthValue = 512; static const instructionsMaxLengthValue = 256000; + static const temperatureDefaultValue = 1.0; + static const temperatureMinValue = 0.0; + static const temperatureMaxValue = 2.0; + static const topPDefaultValue = 1.0; + static const topPMinValue = 0.0; + static const topPMaxValue = 1.0; /// Perform validations on the schema property values String? validateSchema() { @@ -70,6 +96,18 @@ class CreateAssistantRequest with _$CreateAssistantRequest { instructions!.length > instructionsMaxLengthValue) { return "The length of 'instructions' cannot be > $instructionsMaxLengthValue characters"; } + if (temperature != null && temperature! 
< temperatureMinValue) { + return "The value of 'temperature' cannot be < $temperatureMinValue"; + } + if (temperature != null && temperature! > temperatureMaxValue) { + return "The value of 'temperature' cannot be > $temperatureMaxValue"; + } + if (topP != null && topP! < topPMinValue) { + return "The value of 'topP' cannot be < $topPMinValue"; + } + if (topP != null && topP! > topPMaxValue) { + return "The value of 'topP' cannot be > $topPMaxValue"; + } return null; } @@ -83,6 +121,9 @@ class CreateAssistantRequest with _$CreateAssistantRequest { 'tools': tools, 'file_ids': fileIds, 'metadata': metadata, + 'temperature': temperature, + 'top_p': topP, + 'response_format': responseFormat, }; } } @@ -185,3 +226,91 @@ class _AssistantModelConverter }; } } + +// ========================================== +// ENUM: CreateAssistantResponseFormatMode +// ========================================== + +/// `auto` is the default value +enum CreateAssistantResponseFormatMode { + @JsonValue('none') + none, + @JsonValue('auto') + auto, +} + +// ========================================== +// CLASS: CreateAssistantRequestResponseFormat +// ========================================== + +/// Specifies the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. +/// +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. +@freezed +sealed class CreateAssistantRequestResponseFormat + with _$CreateAssistantRequestResponseFormat { + const CreateAssistantRequestResponseFormat._(); + + /// `auto` is the default value + const factory CreateAssistantRequestResponseFormat.mode( + CreateAssistantResponseFormatMode value, + ) = CreateAssistantRequestResponseFormatEnumeration; + + /// No Description + const factory CreateAssistantRequestResponseFormat.format( + AssistantsResponseFormat value, + ) = CreateAssistantRequestResponseFormatAssistantsResponseFormat; + + /// Object construction from a JSON representation + factory CreateAssistantRequestResponseFormat.fromJson( + Map json) => + _$CreateAssistantRequestResponseFormatFromJson(json); +} + +/// Custom JSON converter for [CreateAssistantRequestResponseFormat] +class _CreateAssistantRequestResponseFormatConverter + implements JsonConverter { + const _CreateAssistantRequestResponseFormatConverter(); + + @override + CreateAssistantRequestResponseFormat? fromJson(Object? 
data) { + if (data == null) { + return null; + } + if (data is String && + _$CreateAssistantResponseFormatModeEnumMap.values.contains(data)) { + return CreateAssistantRequestResponseFormatEnumeration( + _$CreateAssistantResponseFormatModeEnumMap.keys.elementAt( + _$CreateAssistantResponseFormatModeEnumMap.values + .toList() + .indexOf(data), + ), + ); + } + if (data is Map) { + try { + return CreateAssistantRequestResponseFormatAssistantsResponseFormat( + AssistantsResponseFormat.fromJson(data), + ); + } catch (e) {} + } + throw Exception( + 'Unexpected value for CreateAssistantRequestResponseFormat: $data', + ); + } + + @override + Object? toJson(CreateAssistantRequestResponseFormat? data) { + return switch (data) { + CreateAssistantRequestResponseFormatEnumeration(value: final v) => + _$CreateAssistantResponseFormatModeEnumMap[v]!, + CreateAssistantRequestResponseFormatAssistantsResponseFormat( + value: final v + ) => + v.toJson(), + null => null, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart index a3923b76..14929898 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart @@ -21,7 +21,7 @@ class CreateFineTuningJobRequest with _$CreateFineTuningJobRequest { /// The ID of an uploaded file that contains training data. /// - /// See [upload file](https://platform.openai.com/docs/api-reference/files/upload) for how to upload a file. + /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. /// diff --git a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart index afaad329..093ed107 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_run_request.dart @@ -43,6 +43,11 @@ class CreateRunRequest with _$CreateRunRequest { /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or temperature but not both. + @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, + /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `complete`. See `incomplete_details` for more info. @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? 
maxPromptTokens, @@ -90,6 +95,7 @@ class CreateRunRequest with _$CreateRunRequest { 'tools', 'metadata', 'temperature', + 'top_p', 'max_prompt_tokens', 'max_completion_tokens', 'truncation_strategy', @@ -102,6 +108,9 @@ class CreateRunRequest with _$CreateRunRequest { static const temperatureDefaultValue = 1.0; static const temperatureMinValue = 0.0; static const temperatureMaxValue = 2.0; + static const topPDefaultValue = 1.0; + static const topPMinValue = 0.0; + static const topPMaxValue = 1.0; static const maxPromptTokensMinValue = 256; static const maxCompletionTokensMinValue = 256; @@ -113,6 +122,12 @@ class CreateRunRequest with _$CreateRunRequest { if (temperature != null && temperature! > temperatureMaxValue) { return "The value of 'temperature' cannot be > $temperatureMaxValue"; } + if (topP != null && topP! < topPMinValue) { + return "The value of 'topP' cannot be < $topPMinValue"; + } + if (topP != null && topP! > topPMaxValue) { + return "The value of 'topP' cannot be > $topPMaxValue"; + } if (maxPromptTokens != null && maxPromptTokens! < maxPromptTokensMinValue) { return "The value of 'maxPromptTokens' cannot be < $maxPromptTokensMinValue"; } @@ -134,6 +149,7 @@ class CreateRunRequest with _$CreateRunRequest { 'tools': tools, 'metadata': metadata, 'temperature': temperature, + 'top_p': topP, 'max_prompt_tokens': maxPromptTokens, 'max_completion_tokens': maxCompletionTokens, 'truncation_strategy': truncationStrategy, diff --git a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart index 04b67a22..f5a67784 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_thread_and_run_request.dart @@ -38,6 +38,11 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or temperature but not both. + @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, + /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `complete`. See `incomplete_details` for more info. @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? 
maxPromptTokens, @@ -84,6 +89,7 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { 'tools', 'metadata', 'temperature', + 'top_p', 'max_prompt_tokens', 'max_completion_tokens', 'truncation_strategy', @@ -96,6 +102,9 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { static const temperatureDefaultValue = 1.0; static const temperatureMinValue = 0.0; static const temperatureMaxValue = 2.0; + static const topPDefaultValue = 1.0; + static const topPMinValue = 0.0; + static const topPMaxValue = 1.0; static const maxPromptTokensMinValue = 256; static const maxCompletionTokensMinValue = 256; @@ -107,6 +116,12 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { if (temperature != null && temperature! > temperatureMaxValue) { return "The value of 'temperature' cannot be > $temperatureMaxValue"; } + if (topP != null && topP! < topPMinValue) { + return "The value of 'topP' cannot be < $topPMinValue"; + } + if (topP != null && topP! > topPMaxValue) { + return "The value of 'topP' cannot be > $topPMaxValue"; + } if (maxPromptTokens != null && maxPromptTokens! < maxPromptTokensMinValue) { return "The value of 'maxPromptTokens' cannot be < $maxPromptTokensMinValue"; } @@ -127,6 +142,7 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest { 'tools': tools, 'metadata': metadata, 'temperature': temperature, + 'top_p': topP, 'max_prompt_tokens': maxPromptTokens, 'max_completion_tokens': maxCompletionTokens, 'truncation_strategy': truncationStrategy, diff --git a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart index 1f53920e..2d88d5e7 100644 --- a/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/modify_assistant_request.dart @@ -35,6 +35,23 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @JsonKey(includeIfNull: false) Map? metadata, + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or temperature but not both. + @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, + + /// Specifies the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. 
Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + @_ModifyAssistantRequestResponseFormatConverter() + @JsonKey(name: 'response_format', includeIfNull: false) + ModifyAssistantRequestResponseFormat? responseFormat, }) = _ModifyAssistantRequest; /// Object construction from a JSON representation @@ -49,13 +66,22 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { 'instructions', 'tools', 'file_ids', - 'metadata' + 'metadata', + 'temperature', + 'top_p', + 'response_format' ]; /// Validation constants static const nameMaxLengthValue = 256; static const descriptionMaxLengthValue = 512; static const instructionsMaxLengthValue = 256000; + static const temperatureDefaultValue = 1.0; + static const temperatureMinValue = 0.0; + static const temperatureMaxValue = 2.0; + static const topPDefaultValue = 1.0; + static const topPMinValue = 0.0; + static const topPMaxValue = 1.0; /// Perform validations on the schema property values String? validateSchema() { @@ -70,6 +96,18 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { instructions!.length > instructionsMaxLengthValue) { return "The length of 'instructions' cannot be > $instructionsMaxLengthValue characters"; } + if (temperature != null && temperature! < temperatureMinValue) { + return "The value of 'temperature' cannot be < $temperatureMinValue"; + } + if (temperature != null && temperature! > temperatureMaxValue) { + return "The value of 'temperature' cannot be > $temperatureMaxValue"; + } + if (topP != null && topP! < topPMinValue) { + return "The value of 'topP' cannot be < $topPMinValue"; + } + if (topP != null && topP! > topPMaxValue) { + return "The value of 'topP' cannot be > $topPMaxValue"; + } return null; } @@ -83,6 +121,97 @@ class ModifyAssistantRequest with _$ModifyAssistantRequest { 'tools': tools, 'file_ids': fileIds, 'metadata': metadata, + 'temperature': temperature, + 'top_p': topP, + 'response_format': responseFormat, + }; + } +} + +// ========================================== +// ENUM: ModifyAssistantResponseFormatMode +// ========================================== + +/// `auto` is the default value +enum ModifyAssistantResponseFormatMode { + @JsonValue('none') + none, + @JsonValue('auto') + auto, +} + +// ========================================== +// CLASS: ModifyAssistantRequestResponseFormat +// ========================================== + +/// Specifies the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. +/// +/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. +/// +/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. 
+@freezed +sealed class ModifyAssistantRequestResponseFormat + with _$ModifyAssistantRequestResponseFormat { + const ModifyAssistantRequestResponseFormat._(); + + /// `auto` is the default value + const factory ModifyAssistantRequestResponseFormat.mode( + ModifyAssistantResponseFormatMode value, + ) = ModifyAssistantRequestResponseFormatEnumeration; + + /// No Description + const factory ModifyAssistantRequestResponseFormat.format( + AssistantsResponseFormat value, + ) = ModifyAssistantRequestResponseFormatAssistantsResponseFormat; + + /// Object construction from a JSON representation + factory ModifyAssistantRequestResponseFormat.fromJson( + Map json) => + _$ModifyAssistantRequestResponseFormatFromJson(json); +} + +/// Custom JSON converter for [ModifyAssistantRequestResponseFormat] +class _ModifyAssistantRequestResponseFormatConverter + implements JsonConverter { + const _ModifyAssistantRequestResponseFormatConverter(); + + @override + ModifyAssistantRequestResponseFormat? fromJson(Object? data) { + if (data == null) { + return null; + } + if (data is String && + _$ModifyAssistantResponseFormatModeEnumMap.values.contains(data)) { + return ModifyAssistantRequestResponseFormatEnumeration( + _$ModifyAssistantResponseFormatModeEnumMap.keys.elementAt( + _$ModifyAssistantResponseFormatModeEnumMap.values + .toList() + .indexOf(data), + ), + ); + } + if (data is Map) { + try { + return ModifyAssistantRequestResponseFormatAssistantsResponseFormat( + AssistantsResponseFormat.fromJson(data), + ); + } catch (e) {} + } + throw Exception( + 'Unexpected value for ModifyAssistantRequestResponseFormat: $data', + ); + } + + @override + Object? toJson(ModifyAssistantRequestResponseFormat? data) { + return switch (data) { + ModifyAssistantRequestResponseFormatEnumeration(value: final v) => + _$ModifyAssistantResponseFormatModeEnumMap[v]!, + ModifyAssistantRequestResponseFormatAssistantsResponseFormat( + value: final v + ) => + v.toJson(), + null => null, }; } } diff --git a/packages/openai_dart/lib/src/generated/schema/run_object.dart b/packages/openai_dart/lib/src/generated/schema/run_object.dart index 7064b95c..5200f48a 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_object.dart @@ -80,6 +80,9 @@ class RunObject with _$RunObject { /// The sampling temperature used for this run. If not set, defaults to 1. @JsonKey(includeIfNull: false) double? temperature, + /// The nucleus sampling value used for this run. If not set, defaults to 1. + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + /// The maximum number of prompt tokens specified to have been used over the course of the run. @JsonKey(name: 'max_prompt_tokens') required int? 
maxPromptTokens, @@ -135,6 +138,7 @@ class RunObject with _$RunObject { 'metadata', 'usage', 'temperature', + 'top_p', 'max_prompt_tokens', 'max_completion_tokens', 'truncation_strategy', @@ -182,6 +186,7 @@ class RunObject with _$RunObject { 'metadata': metadata, 'usage': usage, 'temperature': temperature, + 'top_p': topP, 'max_prompt_tokens': maxPromptTokens, 'max_completion_tokens': maxCompletionTokens, 'truncation_strategy': truncationStrategy, diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index da5dcd14..f1ba81e2 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -13100,7 +13100,7 @@ mixin _$CreateFineTuningJobRequest { /// The ID of an uploaded file that contains training data. /// - /// See [upload file](https://platform.openai.com/docs/api-reference/files/upload) for how to upload a file. + /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. /// @@ -13354,7 +13354,7 @@ class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { /// The ID of an uploaded file that contains training data. /// - /// See [upload file](https://platform.openai.com/docs/api-reference/files/upload) for how to upload a file. + /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. /// @@ -13487,7 +13487,7 @@ abstract class _CreateFineTuningJobRequest extends CreateFineTuningJobRequest { /// The ID of an uploaded file that contains training data. /// - /// See [upload file](https://platform.openai.com/docs/api-reference/files/upload) for how to upload a file. + /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. /// @@ -22120,6 +22120,26 @@ mixin _$CreateAssistantRequest { @JsonKey(includeIfNull: false) Map? get metadata => throw _privateConstructorUsedError; + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + @JsonKey(includeIfNull: false) + double? get temperature => throw _privateConstructorUsedError; + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or temperature but not both. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP => throw _privateConstructorUsedError; + + /// Specifies the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
+ /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + @_CreateAssistantRequestResponseFormatConverter() + @JsonKey(name: 'response_format', includeIfNull: false) + CreateAssistantRequestResponseFormat? get responseFormat => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; @JsonKey(ignore: true) $CreateAssistantRequestCopyWith get copyWith => @@ -22139,9 +22159,15 @@ abstract class $CreateAssistantRequestCopyWith<$Res> { @JsonKey(includeIfNull: false) String? instructions, List tools, @JsonKey(name: 'file_ids') List fileIds, - @JsonKey(includeIfNull: false) Map? metadata}); + @JsonKey(includeIfNull: false) Map? metadata, + @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + @_CreateAssistantRequestResponseFormatConverter() + @JsonKey(name: 'response_format', includeIfNull: false) + CreateAssistantRequestResponseFormat? responseFormat}); $AssistantModelCopyWith<$Res> get model; + $CreateAssistantRequestResponseFormatCopyWith<$Res>? get responseFormat; } /// @nodoc @@ -22165,6 +22191,9 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, Object? tools = null, Object? fileIds = null, Object? metadata = freezed, + Object? temperature = freezed, + Object? topP = freezed, + Object? responseFormat = freezed, }) { return _then(_value.copyWith( model: null == model @@ -22195,6 +22224,18 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, ? _value.metadata : metadata // ignore: cast_nullable_to_non_nullable as Map?, + temperature: freezed == temperature + ? _value.temperature + : temperature // ignore: cast_nullable_to_non_nullable + as double?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, + responseFormat: freezed == responseFormat + ? _value.responseFormat + : responseFormat // ignore: cast_nullable_to_non_nullable + as CreateAssistantRequestResponseFormat?, ) as $Val); } @@ -22205,6 +22246,19 @@ class _$CreateAssistantRequestCopyWithImpl<$Res, return _then(_value.copyWith(model: value) as $Val); }); } + + @override + @pragma('vm:prefer-inline') + $CreateAssistantRequestResponseFormatCopyWith<$Res>? get responseFormat { + if (_value.responseFormat == null) { + return null; + } + + return $CreateAssistantRequestResponseFormatCopyWith<$Res>( + _value.responseFormat!, (value) { + return _then(_value.copyWith(responseFormat: value) as $Val); + }); + } } /// @nodoc @@ -22223,10 +22277,17 @@ abstract class _$$CreateAssistantRequestImplCopyWith<$Res> @JsonKey(includeIfNull: false) String? instructions, List tools, @JsonKey(name: 'file_ids') List fileIds, - @JsonKey(includeIfNull: false) Map? metadata}); + @JsonKey(includeIfNull: false) Map? metadata, + @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + @_CreateAssistantRequestResponseFormatConverter() + @JsonKey(name: 'response_format', includeIfNull: false) + CreateAssistantRequestResponseFormat? 
responseFormat}); @override $AssistantModelCopyWith<$Res> get model; + @override + $CreateAssistantRequestResponseFormatCopyWith<$Res>? get responseFormat; } /// @nodoc @@ -22249,6 +22310,9 @@ class __$$CreateAssistantRequestImplCopyWithImpl<$Res> Object? tools = null, Object? fileIds = null, Object? metadata = freezed, + Object? temperature = freezed, + Object? topP = freezed, + Object? responseFormat = freezed, }) { return _then(_$CreateAssistantRequestImpl( model: null == model @@ -22279,6 +22343,18 @@ class __$$CreateAssistantRequestImplCopyWithImpl<$Res> ? _value._metadata : metadata // ignore: cast_nullable_to_non_nullable as Map?, + temperature: freezed == temperature + ? _value.temperature + : temperature // ignore: cast_nullable_to_non_nullable + as double?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, + responseFormat: freezed == responseFormat + ? _value.responseFormat + : responseFormat // ignore: cast_nullable_to_non_nullable + as CreateAssistantRequestResponseFormat?, )); } } @@ -22293,7 +22369,12 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { @JsonKey(includeIfNull: false) this.instructions, final List tools = const [], @JsonKey(name: 'file_ids') final List fileIds = const [], - @JsonKey(includeIfNull: false) final Map? metadata}) + @JsonKey(includeIfNull: false) final Map? metadata, + @JsonKey(includeIfNull: false) this.temperature = 1.0, + @JsonKey(name: 'top_p', includeIfNull: false) this.topP = 1.0, + @_CreateAssistantRequestResponseFormatConverter() + @JsonKey(name: 'response_format', includeIfNull: false) + this.responseFormat}) : _tools = tools, _fileIds = fileIds, _metadata = metadata, @@ -22360,9 +22441,31 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { return EqualUnmodifiableMapView(value); } + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + @override + @JsonKey(includeIfNull: false) + final double? temperature; + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or temperature but not both. + @override + @JsonKey(name: 'top_p', includeIfNull: false) + final double? topP; + + /// Specifies the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override + @_CreateAssistantRequestResponseFormatConverter() + @JsonKey(name: 'response_format', includeIfNull: false) + final CreateAssistantRequestResponseFormat? 
responseFormat; + @override String toString() { - return 'CreateAssistantRequest(model: $model, name: $name, description: $description, instructions: $instructions, tools: $tools, fileIds: $fileIds, metadata: $metadata)'; + return 'CreateAssistantRequest(model: $model, name: $name, description: $description, instructions: $instructions, tools: $tools, fileIds: $fileIds, metadata: $metadata, temperature: $temperature, topP: $topP, responseFormat: $responseFormat)'; } @override @@ -22378,7 +22481,12 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { other.instructions == instructions) && const DeepCollectionEquality().equals(other._tools, _tools) && const DeepCollectionEquality().equals(other._fileIds, _fileIds) && - const DeepCollectionEquality().equals(other._metadata, _metadata)); + const DeepCollectionEquality().equals(other._metadata, _metadata) && + (identical(other.temperature, temperature) || + other.temperature == temperature) && + (identical(other.topP, topP) || other.topP == topP) && + (identical(other.responseFormat, responseFormat) || + other.responseFormat == responseFormat)); } @JsonKey(ignore: true) @@ -22391,7 +22499,10 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { instructions, const DeepCollectionEquality().hash(_tools), const DeepCollectionEquality().hash(_fileIds), - const DeepCollectionEquality().hash(_metadata)); + const DeepCollectionEquality().hash(_metadata), + temperature, + topP, + responseFormat); @JsonKey(ignore: true) @override @@ -22410,14 +22521,19 @@ class _$CreateAssistantRequestImpl extends _CreateAssistantRequest { abstract class _CreateAssistantRequest extends CreateAssistantRequest { const factory _CreateAssistantRequest( - {@_AssistantModelConverter() required final AssistantModel model, - @JsonKey(includeIfNull: false) final String? name, - @JsonKey(includeIfNull: false) final String? description, - @JsonKey(includeIfNull: false) final String? instructions, - final List tools, - @JsonKey(name: 'file_ids') final List fileIds, - @JsonKey(includeIfNull: false) - final Map? metadata}) = _$CreateAssistantRequestImpl; + {@_AssistantModelConverter() required final AssistantModel model, + @JsonKey(includeIfNull: false) final String? name, + @JsonKey(includeIfNull: false) final String? description, + @JsonKey(includeIfNull: false) final String? instructions, + final List tools, + @JsonKey(name: 'file_ids') final List fileIds, + @JsonKey(includeIfNull: false) final Map? metadata, + @JsonKey(includeIfNull: false) final double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) final double? topP, + @_CreateAssistantRequestResponseFormatConverter() + @JsonKey(name: 'response_format', includeIfNull: false) + final CreateAssistantRequestResponseFormat? responseFormat}) = + _$CreateAssistantRequestImpl; const _CreateAssistantRequest._() : super._(); factory _CreateAssistantRequest.fromJson(Map json) = @@ -22458,6 +22574,28 @@ abstract class _CreateAssistantRequest extends CreateAssistantRequest { @JsonKey(includeIfNull: false) Map? get metadata; @override + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + @JsonKey(includeIfNull: false) + double? get temperature; + @override + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or temperature but not both. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP; + @override + + /// Specifies the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + @_CreateAssistantRequestResponseFormatConverter() + @JsonKey(name: 'response_format', includeIfNull: false) + CreateAssistantRequestResponseFormat? get responseFormat; + @override @JsonKey(ignore: true) _$$CreateAssistantRequestImplCopyWith<_$CreateAssistantRequestImpl> get copyWith => throw _privateConstructorUsedError; @@ -22853,86 +22991,596 @@ abstract class AssistantModelString extends AssistantModel { get copyWith => throw _privateConstructorUsedError; } -ModifyAssistantRequest _$ModifyAssistantRequestFromJson( - Map json) { - return _ModifyAssistantRequest.fromJson(json); +CreateAssistantRequestResponseFormat + _$CreateAssistantRequestResponseFormatFromJson(Map json) { + switch (json['runtimeType']) { + case 'mode': + return CreateAssistantRequestResponseFormatEnumeration.fromJson(json); + case 'format': + return CreateAssistantRequestResponseFormatAssistantsResponseFormat + .fromJson(json); + + default: + throw CheckedFromJsonException( + json, + 'runtimeType', + 'CreateAssistantRequestResponseFormat', + 'Invalid union type "${json['runtimeType']}"!'); + } } /// @nodoc -mixin _$ModifyAssistantRequest { - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. - @JsonKey(includeIfNull: false) - String? get model => throw _privateConstructorUsedError; - - /// The name of the assistant. The maximum length is 256 characters. - @JsonKey(includeIfNull: false) - String? get name => throw _privateConstructorUsedError; - - /// The description of the assistant. The maximum length is 512 characters. - @JsonKey(includeIfNull: false) - String? get description => throw _privateConstructorUsedError; - - /// The system instructions that the assistant uses. The maximum length is 256,000 characters. - @JsonKey(includeIfNull: false) - String? get instructions => throw _privateConstructorUsedError; - - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. - List get tools => throw _privateConstructorUsedError; - - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. 
Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. - @JsonKey(name: 'file_ids') - List get fileIds => throw _privateConstructorUsedError; - - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - @JsonKey(includeIfNull: false) - Map? get metadata => throw _privateConstructorUsedError; - - Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) - $ModifyAssistantRequestCopyWith get copyWith => +mixin _$CreateAssistantRequestResponseFormat { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(CreateAssistantResponseFormatMode value) mode, + required TResult Function(AssistantsResponseFormat value) format, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(CreateAssistantResponseFormatMode value)? mode, + TResult? Function(AssistantsResponseFormat value)? format, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(CreateAssistantResponseFormatMode value)? mode, + TResult Function(AssistantsResponseFormat value)? format, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function( + CreateAssistantRequestResponseFormatEnumeration value) + mode, + required TResult Function( + CreateAssistantRequestResponseFormatAssistantsResponseFormat value) + format, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(CreateAssistantRequestResponseFormatEnumeration value)? + mode, + TResult? Function( + CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, + }) => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(CreateAssistantRequestResponseFormatEnumeration value)? + mode, + TResult Function( + CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; } /// @nodoc -abstract class $ModifyAssistantRequestCopyWith<$Res> { - factory $ModifyAssistantRequestCopyWith(ModifyAssistantRequest value, - $Res Function(ModifyAssistantRequest) then) = - _$ModifyAssistantRequestCopyWithImpl<$Res, ModifyAssistantRequest>; - @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? model, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(includeIfNull: false) String? description, - @JsonKey(includeIfNull: false) String? instructions, - List tools, - @JsonKey(name: 'file_ids') List fileIds, - @JsonKey(includeIfNull: false) Map? 
metadata}); +abstract class $CreateAssistantRequestResponseFormatCopyWith<$Res> { + factory $CreateAssistantRequestResponseFormatCopyWith( + CreateAssistantRequestResponseFormat value, + $Res Function(CreateAssistantRequestResponseFormat) then) = + _$CreateAssistantRequestResponseFormatCopyWithImpl<$Res, + CreateAssistantRequestResponseFormat>; } /// @nodoc -class _$ModifyAssistantRequestCopyWithImpl<$Res, - $Val extends ModifyAssistantRequest> - implements $ModifyAssistantRequestCopyWith<$Res> { - _$ModifyAssistantRequestCopyWithImpl(this._value, this._then); +class _$CreateAssistantRequestResponseFormatCopyWithImpl<$Res, + $Val extends CreateAssistantRequestResponseFormat> + implements $CreateAssistantRequestResponseFormatCopyWith<$Res> { + _$CreateAssistantRequestResponseFormatCopyWithImpl(this._value, this._then); // ignore: unused_field final $Val _value; // ignore: unused_field final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$CreateAssistantRequestResponseFormatEnumerationImplCopyWith< + $Res> { + factory _$$CreateAssistantRequestResponseFormatEnumerationImplCopyWith( + _$CreateAssistantRequestResponseFormatEnumerationImpl value, + $Res Function(_$CreateAssistantRequestResponseFormatEnumerationImpl) + then) = + __$$CreateAssistantRequestResponseFormatEnumerationImplCopyWithImpl<$Res>; + @useResult + $Res call({CreateAssistantResponseFormatMode value}); +} + +/// @nodoc +class __$$CreateAssistantRequestResponseFormatEnumerationImplCopyWithImpl<$Res> + extends _$CreateAssistantRequestResponseFormatCopyWithImpl<$Res, + _$CreateAssistantRequestResponseFormatEnumerationImpl> + implements + _$$CreateAssistantRequestResponseFormatEnumerationImplCopyWith<$Res> { + __$$CreateAssistantRequestResponseFormatEnumerationImplCopyWithImpl( + _$CreateAssistantRequestResponseFormatEnumerationImpl _value, + $Res Function(_$CreateAssistantRequestResponseFormatEnumerationImpl) + _then) + : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? model = freezed, - Object? name = freezed, - Object? description = freezed, - Object? instructions = freezed, - Object? tools = null, - Object? fileIds = null, - Object? metadata = freezed, + Object? value = null, }) { - return _then(_value.copyWith( - model: freezed == model + return _then(_$CreateAssistantRequestResponseFormatEnumerationImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as CreateAssistantResponseFormatMode, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateAssistantRequestResponseFormatEnumerationImpl + extends CreateAssistantRequestResponseFormatEnumeration { + const _$CreateAssistantRequestResponseFormatEnumerationImpl(this.value, + {final String? $type}) + : $type = $type ?? 
'mode', + super._(); + + factory _$CreateAssistantRequestResponseFormatEnumerationImpl.fromJson( + Map json) => + _$$CreateAssistantRequestResponseFormatEnumerationImplFromJson(json); + + @override + final CreateAssistantResponseFormatMode value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'CreateAssistantRequestResponseFormat.mode(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateAssistantRequestResponseFormatEnumerationImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateAssistantRequestResponseFormatEnumerationImplCopyWith< + _$CreateAssistantRequestResponseFormatEnumerationImpl> + get copyWith => + __$$CreateAssistantRequestResponseFormatEnumerationImplCopyWithImpl< + _$CreateAssistantRequestResponseFormatEnumerationImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(CreateAssistantResponseFormatMode value) mode, + required TResult Function(AssistantsResponseFormat value) format, + }) { + return mode(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(CreateAssistantResponseFormatMode value)? mode, + TResult? Function(AssistantsResponseFormat value)? format, + }) { + return mode?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(CreateAssistantResponseFormatMode value)? mode, + TResult Function(AssistantsResponseFormat value)? format, + required TResult orElse(), + }) { + if (mode != null) { + return mode(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function( + CreateAssistantRequestResponseFormatEnumeration value) + mode, + required TResult Function( + CreateAssistantRequestResponseFormatAssistantsResponseFormat value) + format, + }) { + return mode(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(CreateAssistantRequestResponseFormatEnumeration value)? + mode, + TResult? Function( + CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, + }) { + return mode?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(CreateAssistantRequestResponseFormatEnumeration value)? + mode, + TResult Function( + CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? 
+ format, + required TResult orElse(), + }) { + if (mode != null) { + return mode(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$CreateAssistantRequestResponseFormatEnumerationImplToJson( + this, + ); + } +} + +abstract class CreateAssistantRequestResponseFormatEnumeration + extends CreateAssistantRequestResponseFormat { + const factory CreateAssistantRequestResponseFormatEnumeration( + final CreateAssistantResponseFormatMode value) = + _$CreateAssistantRequestResponseFormatEnumerationImpl; + const CreateAssistantRequestResponseFormatEnumeration._() : super._(); + + factory CreateAssistantRequestResponseFormatEnumeration.fromJson( + Map json) = + _$CreateAssistantRequestResponseFormatEnumerationImpl.fromJson; + + @override + CreateAssistantResponseFormatMode get value; + @JsonKey(ignore: true) + _$$CreateAssistantRequestResponseFormatEnumerationImplCopyWith< + _$CreateAssistantRequestResponseFormatEnumerationImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + $Res> { + factory _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith( + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl value, + $Res Function( + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl) + then) = + __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + $Res>; + @useResult + $Res call({AssistantsResponseFormat value}); + + $AssistantsResponseFormatCopyWith<$Res> get value; +} + +/// @nodoc +class __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + $Res> + extends _$CreateAssistantRequestResponseFormatCopyWithImpl<$Res, + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> + implements + _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + $Res> { + __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl _value, + $Res Function( + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl) + _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then( + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as AssistantsResponseFormat, + )); + } + + @override + @pragma('vm:prefer-inline') + $AssistantsResponseFormatCopyWith<$Res> get value { + return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + return _then(_value.copyWith(value: value)); + }); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl + extends CreateAssistantRequestResponseFormatAssistantsResponseFormat { + const _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl( + this.value, + {final String? $type}) + : $type = $type ?? 
'format', + super._(); + + factory _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl.fromJson( + Map json) => + _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( + json); + + @override + final AssistantsResponseFormat value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'CreateAssistantRequestResponseFormat.format(value: $value)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other + is _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> + get copyWith => + __$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl>( + this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(CreateAssistantResponseFormatMode value) mode, + required TResult Function(AssistantsResponseFormat value) format, + }) { + return format(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(CreateAssistantResponseFormatMode value)? mode, + TResult? Function(AssistantsResponseFormat value)? format, + }) { + return format?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(CreateAssistantResponseFormatMode value)? mode, + TResult Function(AssistantsResponseFormat value)? format, + required TResult orElse(), + }) { + if (format != null) { + return format(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function( + CreateAssistantRequestResponseFormatEnumeration value) + mode, + required TResult Function( + CreateAssistantRequestResponseFormatAssistantsResponseFormat value) + format, + }) { + return format(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(CreateAssistantRequestResponseFormatEnumeration value)? + mode, + TResult? Function( + CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, + }) { + return format?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(CreateAssistantRequestResponseFormatEnumeration value)? + mode, + TResult Function( + CreateAssistantRequestResponseFormatAssistantsResponseFormat value)? 
+ format, + required TResult orElse(), + }) { + if (format != null) { + return format(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( + this, + ); + } +} + +abstract class CreateAssistantRequestResponseFormatAssistantsResponseFormat + extends CreateAssistantRequestResponseFormat { + const factory CreateAssistantRequestResponseFormatAssistantsResponseFormat( + final AssistantsResponseFormat value) = + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl; + const CreateAssistantRequestResponseFormatAssistantsResponseFormat._() + : super._(); + + factory CreateAssistantRequestResponseFormatAssistantsResponseFormat.fromJson( + Map json) = + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl + .fromJson; + + @override + AssistantsResponseFormat get value; + @JsonKey(ignore: true) + _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ModifyAssistantRequest _$ModifyAssistantRequestFromJson( + Map json) { + return _ModifyAssistantRequest.fromJson(json); +} + +/// @nodoc +mixin _$ModifyAssistantRequest { + /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. + @JsonKey(includeIfNull: false) + String? get model => throw _privateConstructorUsedError; + + /// The name of the assistant. The maximum length is 256 characters. + @JsonKey(includeIfNull: false) + String? get name => throw _privateConstructorUsedError; + + /// The description of the assistant. The maximum length is 512 characters. + @JsonKey(includeIfNull: false) + String? get description => throw _privateConstructorUsedError; + + /// The system instructions that the assistant uses. The maximum length is 256,000 characters. + @JsonKey(includeIfNull: false) + String? get instructions => throw _privateConstructorUsedError; + + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. + List get tools => throw _privateConstructorUsedError; + + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. + @JsonKey(name: 'file_ids') + List get fileIds => throw _privateConstructorUsedError; + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + @JsonKey(includeIfNull: false) + Map? get metadata => throw _privateConstructorUsedError; + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + @JsonKey(includeIfNull: false) + double? 
get temperature => throw _privateConstructorUsedError; + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or temperature but not both. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP => throw _privateConstructorUsedError; + + /// Specifies the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + @_ModifyAssistantRequestResponseFormatConverter() + @JsonKey(name: 'response_format', includeIfNull: false) + ModifyAssistantRequestResponseFormat? get responseFormat => + throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ModifyAssistantRequestCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ModifyAssistantRequestCopyWith<$Res> { + factory $ModifyAssistantRequestCopyWith(ModifyAssistantRequest value, + $Res Function(ModifyAssistantRequest) then) = + _$ModifyAssistantRequestCopyWithImpl<$Res, ModifyAssistantRequest>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? model, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(includeIfNull: false) String? description, + @JsonKey(includeIfNull: false) String? instructions, + List tools, + @JsonKey(name: 'file_ids') List fileIds, + @JsonKey(includeIfNull: false) Map? metadata, + @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + @_ModifyAssistantRequestResponseFormatConverter() + @JsonKey(name: 'response_format', includeIfNull: false) + ModifyAssistantRequestResponseFormat? responseFormat}); + + $ModifyAssistantRequestResponseFormatCopyWith<$Res>? get responseFormat; +} + +/// @nodoc +class _$ModifyAssistantRequestCopyWithImpl<$Res, + $Val extends ModifyAssistantRequest> + implements $ModifyAssistantRequestCopyWith<$Res> { + _$ModifyAssistantRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = freezed, + Object? name = freezed, + Object? description = freezed, + Object? instructions = freezed, + Object? tools = null, + Object? fileIds = null, + Object? metadata = freezed, + Object? temperature = freezed, + Object? topP = freezed, + Object? responseFormat = freezed, + }) { + return _then(_value.copyWith( + model: freezed == model ? 
_value.model : model // ignore: cast_nullable_to_non_nullable as String?, @@ -22960,260 +23608,850 @@ class _$ModifyAssistantRequestCopyWithImpl<$Res, ? _value.metadata : metadata // ignore: cast_nullable_to_non_nullable as Map?, + temperature: freezed == temperature + ? _value.temperature + : temperature // ignore: cast_nullable_to_non_nullable + as double?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, + responseFormat: freezed == responseFormat + ? _value.responseFormat + : responseFormat // ignore: cast_nullable_to_non_nullable + as ModifyAssistantRequestResponseFormat?, ) as $Val); } + + @override + @pragma('vm:prefer-inline') + $ModifyAssistantRequestResponseFormatCopyWith<$Res>? get responseFormat { + if (_value.responseFormat == null) { + return null; + } + + return $ModifyAssistantRequestResponseFormatCopyWith<$Res>( + _value.responseFormat!, (value) { + return _then(_value.copyWith(responseFormat: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$ModifyAssistantRequestImplCopyWith<$Res> + implements $ModifyAssistantRequestCopyWith<$Res> { + factory _$$ModifyAssistantRequestImplCopyWith( + _$ModifyAssistantRequestImpl value, + $Res Function(_$ModifyAssistantRequestImpl) then) = + __$$ModifyAssistantRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? model, + @JsonKey(includeIfNull: false) String? name, + @JsonKey(includeIfNull: false) String? description, + @JsonKey(includeIfNull: false) String? instructions, + List tools, + @JsonKey(name: 'file_ids') List fileIds, + @JsonKey(includeIfNull: false) Map? metadata, + @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + @_ModifyAssistantRequestResponseFormatConverter() + @JsonKey(name: 'response_format', includeIfNull: false) + ModifyAssistantRequestResponseFormat? responseFormat}); + + @override + $ModifyAssistantRequestResponseFormatCopyWith<$Res>? get responseFormat; +} + +/// @nodoc +class __$$ModifyAssistantRequestImplCopyWithImpl<$Res> + extends _$ModifyAssistantRequestCopyWithImpl<$Res, + _$ModifyAssistantRequestImpl> + implements _$$ModifyAssistantRequestImplCopyWith<$Res> { + __$$ModifyAssistantRequestImplCopyWithImpl( + _$ModifyAssistantRequestImpl _value, + $Res Function(_$ModifyAssistantRequestImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = freezed, + Object? name = freezed, + Object? description = freezed, + Object? instructions = freezed, + Object? tools = null, + Object? fileIds = null, + Object? metadata = freezed, + Object? temperature = freezed, + Object? topP = freezed, + Object? responseFormat = freezed, + }) { + return _then(_$ModifyAssistantRequestImpl( + model: freezed == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String?, + name: freezed == name + ? _value.name + : name // ignore: cast_nullable_to_non_nullable + as String?, + description: freezed == description + ? _value.description + : description // ignore: cast_nullable_to_non_nullable + as String?, + instructions: freezed == instructions + ? _value.instructions + : instructions // ignore: cast_nullable_to_non_nullable + as String?, + tools: null == tools + ? _value._tools + : tools // ignore: cast_nullable_to_non_nullable + as List, + fileIds: null == fileIds + ? 
_value._fileIds + : fileIds // ignore: cast_nullable_to_non_nullable + as List, + metadata: freezed == metadata + ? _value._metadata + : metadata // ignore: cast_nullable_to_non_nullable + as Map?, + temperature: freezed == temperature + ? _value.temperature + : temperature // ignore: cast_nullable_to_non_nullable + as double?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, + responseFormat: freezed == responseFormat + ? _value.responseFormat + : responseFormat // ignore: cast_nullable_to_non_nullable + as ModifyAssistantRequestResponseFormat?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { + const _$ModifyAssistantRequestImpl( + {@JsonKey(includeIfNull: false) this.model, + @JsonKey(includeIfNull: false) this.name, + @JsonKey(includeIfNull: false) this.description, + @JsonKey(includeIfNull: false) this.instructions, + final List tools = const [], + @JsonKey(name: 'file_ids') final List fileIds = const [], + @JsonKey(includeIfNull: false) final Map? metadata, + @JsonKey(includeIfNull: false) this.temperature = 1.0, + @JsonKey(name: 'top_p', includeIfNull: false) this.topP = 1.0, + @_ModifyAssistantRequestResponseFormatConverter() + @JsonKey(name: 'response_format', includeIfNull: false) + this.responseFormat}) + : _tools = tools, + _fileIds = fileIds, + _metadata = metadata, + super._(); + + factory _$ModifyAssistantRequestImpl.fromJson(Map json) => + _$$ModifyAssistantRequestImplFromJson(json); + + /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. + @override + @JsonKey(includeIfNull: false) + final String? model; + + /// The name of the assistant. The maximum length is 256 characters. + @override + @JsonKey(includeIfNull: false) + final String? name; + + /// The description of the assistant. The maximum length is 512 characters. + @override + @JsonKey(includeIfNull: false) + final String? description; + + /// The system instructions that the assistant uses. The maximum length is 256,000 characters. + @override + @JsonKey(includeIfNull: false) + final String? instructions; + + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. + final List _tools; + + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. + @override + @JsonKey() + List get tools { + if (_tools is EqualUnmodifiableListView) return _tools; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_tools); + } + + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. + final List _fileIds; + + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. 
If a file was previously attached to the list but does not show up in the list, it will be deleted from the assistant. + @override + @JsonKey(name: 'file_ids') + List get fileIds { + if (_fileIds is EqualUnmodifiableListView) return _fileIds; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_fileIds); + } + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. + final Map? _metadata; + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. + @override + @JsonKey(includeIfNull: false) + Map? get metadata { + final value = _metadata; + if (value == null) return null; + if (_metadata is EqualUnmodifiableMapView) return _metadata; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(value); + } + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + @override + @JsonKey(includeIfNull: false) + final double? temperature; + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or temperature but not both. + @override + @JsonKey(name: 'top_p', includeIfNull: false) + final double? topP; + + /// Specifies the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + @override + @_ModifyAssistantRequestResponseFormatConverter() + @JsonKey(name: 'response_format', includeIfNull: false) + final ModifyAssistantRequestResponseFormat?
responseFormat; + + @override + String toString() { + return 'ModifyAssistantRequest(model: $model, name: $name, description: $description, instructions: $instructions, tools: $tools, fileIds: $fileIds, metadata: $metadata, temperature: $temperature, topP: $topP, responseFormat: $responseFormat)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModifyAssistantRequestImpl && + (identical(other.model, model) || other.model == model) && + (identical(other.name, name) || other.name == name) && + (identical(other.description, description) || + other.description == description) && + (identical(other.instructions, instructions) || + other.instructions == instructions) && + const DeepCollectionEquality().equals(other._tools, _tools) && + const DeepCollectionEquality().equals(other._fileIds, _fileIds) && + const DeepCollectionEquality().equals(other._metadata, _metadata) && + (identical(other.temperature, temperature) || + other.temperature == temperature) && + (identical(other.topP, topP) || other.topP == topP) && + (identical(other.responseFormat, responseFormat) || + other.responseFormat == responseFormat)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + model, + name, + description, + instructions, + const DeepCollectionEquality().hash(_tools), + const DeepCollectionEquality().hash(_fileIds), + const DeepCollectionEquality().hash(_metadata), + temperature, + topP, + responseFormat); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ModifyAssistantRequestImplCopyWith<_$ModifyAssistantRequestImpl> + get copyWith => __$$ModifyAssistantRequestImplCopyWithImpl< + _$ModifyAssistantRequestImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ModifyAssistantRequestImplToJson( + this, + ); + } +} + +abstract class _ModifyAssistantRequest extends ModifyAssistantRequest { + const factory _ModifyAssistantRequest( + {@JsonKey(includeIfNull: false) final String? model, + @JsonKey(includeIfNull: false) final String? name, + @JsonKey(includeIfNull: false) final String? description, + @JsonKey(includeIfNull: false) final String? instructions, + final List tools, + @JsonKey(name: 'file_ids') final List fileIds, + @JsonKey(includeIfNull: false) final Map? metadata, + @JsonKey(includeIfNull: false) final double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) final double? topP, + @_ModifyAssistantRequestResponseFormatConverter() + @JsonKey(name: 'response_format', includeIfNull: false) + final ModifyAssistantRequestResponseFormat? responseFormat}) = + _$ModifyAssistantRequestImpl; + const _ModifyAssistantRequest._() : super._(); + + factory _ModifyAssistantRequest.fromJson(Map json) = + _$ModifyAssistantRequestImpl.fromJson; + + @override + + /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. + @JsonKey(includeIfNull: false) + String? get model; + @override + + /// The name of the assistant. The maximum length is 256 characters. + @JsonKey(includeIfNull: false) + String? get name; + @override + + /// The description of the assistant. The maximum length is 512 characters. + @JsonKey(includeIfNull: false) + String? get description; + @override + + /// The system instructions that the assistant uses. 
The maximum length is 256,000 characters. + @JsonKey(includeIfNull: false) + String? get instructions; + @override + + /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. + List get tools; + @override + + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previously attached to the list but does not show up in the list, it will be deleted from the assistant. + @JsonKey(name: 'file_ids') + List get fileIds; + @override + + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. + @JsonKey(includeIfNull: false) + Map? get metadata; + @override + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + @JsonKey(includeIfNull: false) + double? get temperature; + @override + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or temperature but not both. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP; + @override + + /// Specifies the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + /// + /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + /// + /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + @_ModifyAssistantRequestResponseFormatConverter() + @JsonKey(name: 'response_format', includeIfNull: false) + ModifyAssistantRequestResponseFormat?
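// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the generated output): how the new
// ModifyAssistantRequest fields added in this change might be populated. The
// constructor parameters, the ModifyAssistantResponseFormatMode enum and
// toJson() are taken from this file; the concrete values (0.2, auto) are
// example choices only, and the surrounding client call is omitted because
// its exact API is not shown here.
//
//   final request = ModifyAssistantRequest(
//     temperature: 0.2, // 0 to 2; generally adjust this or topP, not both
//     responseFormat: ModifyAssistantRequestResponseFormat.mode(
//       ModifyAssistantResponseFormatMode.auto,
//     ),
//   );
//   final json = request.toJson(); // serializes 'temperature' and
//                                  // 'response_format'; fields left null are
//                                  // omitted (includeIfNull: false)
// ---------------------------------------------------------------------------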
get responseFormat; + @override + @JsonKey(ignore: true) + _$$ModifyAssistantRequestImplCopyWith<_$ModifyAssistantRequestImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ModifyAssistantRequestResponseFormat + _$ModifyAssistantRequestResponseFormatFromJson(Map json) { + switch (json['runtimeType']) { + case 'mode': + return ModifyAssistantRequestResponseFormatEnumeration.fromJson(json); + case 'format': + return ModifyAssistantRequestResponseFormatAssistantsResponseFormat + .fromJson(json); + + default: + throw CheckedFromJsonException( + json, + 'runtimeType', + 'ModifyAssistantRequestResponseFormat', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$ModifyAssistantRequestResponseFormat { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(ModifyAssistantResponseFormatMode value) mode, + required TResult Function(AssistantsResponseFormat value) format, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ModifyAssistantResponseFormatMode value)? mode, + TResult? Function(AssistantsResponseFormat value)? format, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ModifyAssistantResponseFormatMode value)? mode, + TResult Function(AssistantsResponseFormat value)? format, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function( + ModifyAssistantRequestResponseFormatEnumeration value) + mode, + required TResult Function( + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value) + format, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ModifyAssistantRequestResponseFormatEnumeration value)? + mode, + TResult? Function( + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ModifyAssistantRequestResponseFormatEnumeration value)? + mode, + TResult Function( + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? 
+ format, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; } /// @nodoc -abstract class _$$ModifyAssistantRequestImplCopyWith<$Res> - implements $ModifyAssistantRequestCopyWith<$Res> { - factory _$$ModifyAssistantRequestImplCopyWith( - _$ModifyAssistantRequestImpl value, - $Res Function(_$ModifyAssistantRequestImpl) then) = - __$$ModifyAssistantRequestImplCopyWithImpl<$Res>; - @override +abstract class $ModifyAssistantRequestResponseFormatCopyWith<$Res> { + factory $ModifyAssistantRequestResponseFormatCopyWith( + ModifyAssistantRequestResponseFormat value, + $Res Function(ModifyAssistantRequestResponseFormat) then) = + _$ModifyAssistantRequestResponseFormatCopyWithImpl<$Res, + ModifyAssistantRequestResponseFormat>; +} + +/// @nodoc +class _$ModifyAssistantRequestResponseFormatCopyWithImpl<$Res, + $Val extends ModifyAssistantRequestResponseFormat> + implements $ModifyAssistantRequestResponseFormatCopyWith<$Res> { + _$ModifyAssistantRequestResponseFormatCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWith< + $Res> { + factory _$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWith( + _$ModifyAssistantRequestResponseFormatEnumerationImpl value, + $Res Function(_$ModifyAssistantRequestResponseFormatEnumerationImpl) + then) = + __$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWithImpl<$Res>; @useResult - $Res call( - {@JsonKey(includeIfNull: false) String? model, - @JsonKey(includeIfNull: false) String? name, - @JsonKey(includeIfNull: false) String? description, - @JsonKey(includeIfNull: false) String? instructions, - List tools, - @JsonKey(name: 'file_ids') List fileIds, - @JsonKey(includeIfNull: false) Map? metadata}); + $Res call({ModifyAssistantResponseFormatMode value}); } /// @nodoc -class __$$ModifyAssistantRequestImplCopyWithImpl<$Res> - extends _$ModifyAssistantRequestCopyWithImpl<$Res, - _$ModifyAssistantRequestImpl> - implements _$$ModifyAssistantRequestImplCopyWith<$Res> { - __$$ModifyAssistantRequestImplCopyWithImpl( - _$ModifyAssistantRequestImpl _value, - $Res Function(_$ModifyAssistantRequestImpl) _then) +class __$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWithImpl<$Res> + extends _$ModifyAssistantRequestResponseFormatCopyWithImpl<$Res, + _$ModifyAssistantRequestResponseFormatEnumerationImpl> + implements + _$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWith<$Res> { + __$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWithImpl( + _$ModifyAssistantRequestResponseFormatEnumerationImpl _value, + $Res Function(_$ModifyAssistantRequestResponseFormatEnumerationImpl) + _then) : super(_value, _then); @pragma('vm:prefer-inline') @override $Res call({ - Object? model = freezed, - Object? name = freezed, - Object? description = freezed, - Object? instructions = freezed, - Object? tools = null, - Object? fileIds = null, - Object? metadata = freezed, + Object? value = null, }) { - return _then(_$ModifyAssistantRequestImpl( - model: freezed == model - ? _value.model - : model // ignore: cast_nullable_to_non_nullable - as String?, - name: freezed == name - ? _value.name - : name // ignore: cast_nullable_to_non_nullable - as String?, - description: freezed == description - ? 
_value.description - : description // ignore: cast_nullable_to_non_nullable - as String?, - instructions: freezed == instructions - ? _value.instructions - : instructions // ignore: cast_nullable_to_non_nullable - as String?, - tools: null == tools - ? _value._tools - : tools // ignore: cast_nullable_to_non_nullable - as List, - fileIds: null == fileIds - ? _value._fileIds - : fileIds // ignore: cast_nullable_to_non_nullable - as List, - metadata: freezed == metadata - ? _value._metadata - : metadata // ignore: cast_nullable_to_non_nullable - as Map?, + return _then(_$ModifyAssistantRequestResponseFormatEnumerationImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as ModifyAssistantResponseFormatMode, )); } } /// @nodoc @JsonSerializable() -class _$ModifyAssistantRequestImpl extends _ModifyAssistantRequest { - const _$ModifyAssistantRequestImpl( - {@JsonKey(includeIfNull: false) this.model, - @JsonKey(includeIfNull: false) this.name, - @JsonKey(includeIfNull: false) this.description, - @JsonKey(includeIfNull: false) this.instructions, - final List tools = const [], - @JsonKey(name: 'file_ids') final List fileIds = const [], - @JsonKey(includeIfNull: false) final Map? metadata}) - : _tools = tools, - _fileIds = fileIds, - _metadata = metadata, +class _$ModifyAssistantRequestResponseFormatEnumerationImpl + extends ModifyAssistantRequestResponseFormatEnumeration { + const _$ModifyAssistantRequestResponseFormatEnumerationImpl(this.value, + {final String? $type}) + : $type = $type ?? 'mode', super._(); - factory _$ModifyAssistantRequestImpl.fromJson(Map json) => - _$$ModifyAssistantRequestImplFromJson(json); + factory _$ModifyAssistantRequestResponseFormatEnumerationImpl.fromJson( + Map json) => + _$$ModifyAssistantRequestResponseFormatEnumerationImplFromJson(json); - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. @override - @JsonKey(includeIfNull: false) - final String? model; + final ModifyAssistantResponseFormatMode value; + + @JsonKey(name: 'runtimeType') + final String $type; - /// The name of the assistant. The maximum length is 256 characters. @override - @JsonKey(includeIfNull: false) - final String? name; + String toString() { + return 'ModifyAssistantRequestResponseFormat.mode(value: $value)'; + } - /// The description of the assistant. The maximum length is 512 characters. @override - @JsonKey(includeIfNull: false) - final String? description; + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModifyAssistantRequestResponseFormatEnumerationImpl && + (identical(other.value, value) || other.value == value)); + } - /// The system instructions that the assistant uses. The maximum length is 256,000 characters. + @JsonKey(ignore: true) @override - @JsonKey(includeIfNull: false) - final String? instructions; + int get hashCode => Object.hash(runtimeType, value); - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. 
- final List _tools; + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWith< + _$ModifyAssistantRequestResponseFormatEnumerationImpl> + get copyWith => + __$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWithImpl< + _$ModifyAssistantRequestResponseFormatEnumerationImpl>( + this, _$identity); - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. @override - @JsonKey() - List get tools { - if (_tools is EqualUnmodifiableListView) return _tools; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(_tools); + @optionalTypeArgs + TResult when({ + required TResult Function(ModifyAssistantResponseFormatMode value) mode, + required TResult Function(AssistantsResponseFormat value) format, + }) { + return mode(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ModifyAssistantResponseFormatMode value)? mode, + TResult? Function(AssistantsResponseFormat value)? format, + }) { + return mode?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ModifyAssistantResponseFormatMode value)? mode, + TResult Function(AssistantsResponseFormat value)? format, + required TResult orElse(), + }) { + if (mode != null) { + return mode(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function( + ModifyAssistantRequestResponseFormatEnumeration value) + mode, + required TResult Function( + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value) + format, + }) { + return mode(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ModifyAssistantRequestResponseFormatEnumeration value)? + mode, + TResult? Function( + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, + }) { + return mode?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ModifyAssistantRequestResponseFormatEnumeration value)? + mode, + TResult Function( + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? 
+ format, + required TResult orElse(), + }) { + if (mode != null) { + return mode(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$ModifyAssistantRequestResponseFormatEnumerationImplToJson( + this, + ); + } +} + +abstract class ModifyAssistantRequestResponseFormatEnumeration + extends ModifyAssistantRequestResponseFormat { + const factory ModifyAssistantRequestResponseFormatEnumeration( + final ModifyAssistantResponseFormatMode value) = + _$ModifyAssistantRequestResponseFormatEnumerationImpl; + const ModifyAssistantRequestResponseFormatEnumeration._() : super._(); + + factory ModifyAssistantRequestResponseFormatEnumeration.fromJson( + Map json) = + _$ModifyAssistantRequestResponseFormatEnumerationImpl.fromJson; + + @override + ModifyAssistantResponseFormatMode get value; + @JsonKey(ignore: true) + _$$ModifyAssistantRequestResponseFormatEnumerationImplCopyWith< + _$ModifyAssistantRequestResponseFormatEnumerationImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + $Res> { + factory _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith( + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl value, + $Res Function( + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl) + then) = + __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + $Res>; + @useResult + $Res call({AssistantsResponseFormat value}); + + $AssistantsResponseFormatCopyWith<$Res> get value; +} + +/// @nodoc +class __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + $Res> + extends _$ModifyAssistantRequestResponseFormatCopyWithImpl<$Res, + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> + implements + _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + $Res> { + __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl( + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl _value, + $Res Function( + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl) + _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then( + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as AssistantsResponseFormat, + )); } - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. - final List _fileIds; - - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. 
@override - @JsonKey(name: 'file_ids') - List get fileIds { - if (_fileIds is EqualUnmodifiableListView) return _fileIds; - // ignore: implicit_dynamic_type - return EqualUnmodifiableListView(_fileIds); + @pragma('vm:prefer-inline') + $AssistantsResponseFormatCopyWith<$Res> get value { + return $AssistantsResponseFormatCopyWith<$Res>(_value.value, (value) { + return _then(_value.copyWith(value: value)); + }); } +} - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - final Map? _metadata; +/// @nodoc +@JsonSerializable() +class _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl + extends ModifyAssistantRequestResponseFormatAssistantsResponseFormat { + const _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl( + this.value, + {final String? $type}) + : $type = $type ?? 'format', + super._(); + + factory _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl.fromJson( + Map json) => + _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( + json); - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. @override - @JsonKey(includeIfNull: false) - Map? get metadata { - final value = _metadata; - if (value == null) return null; - if (_metadata is EqualUnmodifiableMapView) return _metadata; - // ignore: implicit_dynamic_type - return EqualUnmodifiableMapView(value); - } + final AssistantsResponseFormat value; + + @JsonKey(name: 'runtimeType') + final String $type; @override String toString() { - return 'ModifyAssistantRequest(model: $model, name: $name, description: $description, instructions: $instructions, tools: $tools, fileIds: $fileIds, metadata: $metadata)'; + return 'ModifyAssistantRequestResponseFormat.format(value: $value)'; } @override bool operator ==(Object other) { return identical(this, other) || (other.runtimeType == runtimeType && - other is _$ModifyAssistantRequestImpl && - (identical(other.model, model) || other.model == model) && - (identical(other.name, name) || other.name == name) && - (identical(other.description, description) || - other.description == description) && - (identical(other.instructions, instructions) || - other.instructions == instructions) && - const DeepCollectionEquality().equals(other._tools, _tools) && - const DeepCollectionEquality().equals(other._fileIds, _fileIds) && - const DeepCollectionEquality().equals(other._metadata, _metadata)); + other + is _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl && + (identical(other.value, value) || other.value == value)); } @JsonKey(ignore: true) @override - int get hashCode => Object.hash( - runtimeType, - model, - name, - description, - instructions, - const DeepCollectionEquality().hash(_tools), - const DeepCollectionEquality().hash(_fileIds), - const DeepCollectionEquality().hash(_metadata)); + int get hashCode => Object.hash(runtimeType, value); @JsonKey(ignore: true) @override @pragma('vm:prefer-inline') - _$$ModifyAssistantRequestImplCopyWith<_$ModifyAssistantRequestImpl> - get copyWith => __$$ModifyAssistantRequestImplCopyWithImpl< - _$ModifyAssistantRequestImpl>(this, _$identity); + 
_$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> + get copyWith => + __$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWithImpl< + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl>( + this, _$identity); @override - Map toJson() { - return _$$ModifyAssistantRequestImplToJson( - this, - ); + @optionalTypeArgs + TResult when({ + required TResult Function(ModifyAssistantResponseFormatMode value) mode, + required TResult Function(AssistantsResponseFormat value) format, + }) { + return format(value); } -} - -abstract class _ModifyAssistantRequest extends ModifyAssistantRequest { - const factory _ModifyAssistantRequest( - {@JsonKey(includeIfNull: false) final String? model, - @JsonKey(includeIfNull: false) final String? name, - @JsonKey(includeIfNull: false) final String? description, - @JsonKey(includeIfNull: false) final String? instructions, - final List tools, - @JsonKey(name: 'file_ids') final List fileIds, - @JsonKey(includeIfNull: false) - final Map? metadata}) = _$ModifyAssistantRequestImpl; - const _ModifyAssistantRequest._() : super._(); - - factory _ModifyAssistantRequest.fromJson(Map json) = - _$ModifyAssistantRequestImpl.fromJson; @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(ModifyAssistantResponseFormatMode value)? mode, + TResult? Function(AssistantsResponseFormat value)? format, + }) { + return format?.call(value); + } - /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. - @JsonKey(includeIfNull: false) - String? get model; @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(ModifyAssistantResponseFormatMode value)? mode, + TResult Function(AssistantsResponseFormat value)? format, + required TResult orElse(), + }) { + if (format != null) { + return format(value); + } + return orElse(); + } - /// The name of the assistant. The maximum length is 256 characters. - @JsonKey(includeIfNull: false) - String? get name; @override + @optionalTypeArgs + TResult map({ + required TResult Function( + ModifyAssistantRequestResponseFormatEnumeration value) + mode, + required TResult Function( + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value) + format, + }) { + return format(this); + } - /// The description of the assistant. The maximum length is 512 characters. - @JsonKey(includeIfNull: false) - String? get description; @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(ModifyAssistantRequestResponseFormatEnumeration value)? + mode, + TResult? Function( + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, + }) { + return format?.call(this); + } - /// The system instructions that the assistant uses. The maximum length is 256,000 characters. - @JsonKey(includeIfNull: false) - String? get instructions; @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(ModifyAssistantRequestResponseFormatEnumeration value)? + mode, + TResult Function( + ModifyAssistantRequestResponseFormatAssistantsResponseFormat value)? + format, + required TResult orElse(), + }) { + if (format != null) { + return format(this); + } + return orElse(); + } - /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. 
Tools can be of types `code_interpreter`, `retrieval`, or `function`. - List get tools; @override + Map toJson() { + return _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( + this, + ); + } +} - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. - @JsonKey(name: 'file_ids') - List get fileIds; - @override +abstract class ModifyAssistantRequestResponseFormatAssistantsResponseFormat + extends ModifyAssistantRequestResponseFormat { + const factory ModifyAssistantRequestResponseFormatAssistantsResponseFormat( + final AssistantsResponseFormat value) = + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl; + const ModifyAssistantRequestResponseFormatAssistantsResponseFormat._() + : super._(); + + factory ModifyAssistantRequestResponseFormatAssistantsResponseFormat.fromJson( + Map json) = + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl + .fromJson; - /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. - @JsonKey(includeIfNull: false) - Map? get metadata; @override + AssistantsResponseFormat get value; @JsonKey(ignore: true) - _$$ModifyAssistantRequestImplCopyWith<_$ModifyAssistantRequestImpl> + _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplCopyWith< + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl> get copyWith => throw _privateConstructorUsedError; } @@ -24459,6 +25697,10 @@ mixin _$RunObject { @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; + /// The nucleus sampling value used for this run. If not set, defaults to 1. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP => throw _privateConstructorUsedError; + /// The maximum number of prompt tokens specified to have been used over the course of the run. @JsonKey(name: 'max_prompt_tokens') int? get maxPromptTokens => throw _privateConstructorUsedError; @@ -24524,6 +25766,7 @@ abstract class $RunObjectCopyWith<$Res> { Map? metadata, RunCompletionUsage? usage, @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, @JsonKey(name: 'max_prompt_tokens') int? maxPromptTokens, @JsonKey(name: 'max_completion_tokens') int? maxCompletionTokens, @JsonKey(name: 'truncation_strategy') @@ -24578,6 +25821,7 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> Object? metadata = freezed, Object? usage = freezed, Object? temperature = freezed, + Object? topP = freezed, Object? maxPromptTokens = freezed, Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, @@ -24669,6 +25913,10 @@ class _$RunObjectCopyWithImpl<$Res, $Val extends RunObject> ? _value.temperature : temperature // ignore: cast_nullable_to_non_nullable as double?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, maxPromptTokens: freezed == maxPromptTokens ? 
_value.maxPromptTokens : maxPromptTokens // ignore: cast_nullable_to_non_nullable @@ -24806,6 +26054,7 @@ abstract class _$$RunObjectImplCopyWith<$Res> Map? metadata, RunCompletionUsage? usage, @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, @JsonKey(name: 'max_prompt_tokens') int? maxPromptTokens, @JsonKey(name: 'max_completion_tokens') int? maxCompletionTokens, @JsonKey(name: 'truncation_strategy') @@ -24865,6 +26114,7 @@ class __$$RunObjectImplCopyWithImpl<$Res> Object? metadata = freezed, Object? usage = freezed, Object? temperature = freezed, + Object? topP = freezed, Object? maxPromptTokens = freezed, Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, @@ -24956,6 +26206,10 @@ class __$$RunObjectImplCopyWithImpl<$Res> ? _value.temperature : temperature // ignore: cast_nullable_to_non_nullable as double?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, maxPromptTokens: freezed == maxPromptTokens ? _value.maxPromptTokens : maxPromptTokens // ignore: cast_nullable_to_non_nullable @@ -25005,6 +26259,7 @@ class _$RunObjectImpl extends _RunObject { required final Map? metadata, required this.usage, @JsonKey(includeIfNull: false) this.temperature, + @JsonKey(name: 'top_p', includeIfNull: false) this.topP, @JsonKey(name: 'max_prompt_tokens') required this.maxPromptTokens, @JsonKey(name: 'max_completion_tokens') required this.maxCompletionTokens, @JsonKey(name: 'truncation_strategy') required this.truncationStrategy, @@ -25142,6 +26397,11 @@ class _$RunObjectImpl extends _RunObject { @JsonKey(includeIfNull: false) final double? temperature; + /// The nucleus sampling value used for this run. If not set, defaults to 1. + @override + @JsonKey(name: 'top_p', includeIfNull: false) + final double? topP; + /// The maximum number of prompt tokens specified to have been used over the course of the run. 
@override @JsonKey(name: 'max_prompt_tokens') @@ -25178,7 +26438,7 @@ class _$RunObjectImpl extends _RunObject { @override String toString() { - return 'RunObject(id: $id, object: $object, createdAt: $createdAt, threadId: $threadId, assistantId: $assistantId, status: $status, requiredAction: $requiredAction, lastError: $lastError, expiresAt: $expiresAt, startedAt: $startedAt, cancelledAt: $cancelledAt, failedAt: $failedAt, completedAt: $completedAt, incompleteDetails: $incompleteDetails, model: $model, instructions: $instructions, tools: $tools, fileIds: $fileIds, metadata: $metadata, usage: $usage, temperature: $temperature, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat)'; + return 'RunObject(id: $id, object: $object, createdAt: $createdAt, threadId: $threadId, assistantId: $assistantId, status: $status, requiredAction: $requiredAction, lastError: $lastError, expiresAt: $expiresAt, startedAt: $startedAt, cancelledAt: $cancelledAt, failedAt: $failedAt, completedAt: $completedAt, incompleteDetails: $incompleteDetails, model: $model, instructions: $instructions, tools: $tools, fileIds: $fileIds, metadata: $metadata, usage: $usage, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat)'; } @override @@ -25220,6 +26480,7 @@ class _$RunObjectImpl extends _RunObject { (identical(other.usage, usage) || other.usage == usage) && (identical(other.temperature, temperature) || other.temperature == temperature) && + (identical(other.topP, topP) || other.topP == topP) && (identical(other.maxPromptTokens, maxPromptTokens) || other.maxPromptTokens == maxPromptTokens) && (identical(other.maxCompletionTokens, maxCompletionTokens) || @@ -25257,6 +26518,7 @@ class _$RunObjectImpl extends _RunObject { const DeepCollectionEquality().hash(_metadata), usage, temperature, + topP, maxPromptTokens, maxCompletionTokens, truncationStrategy, @@ -25303,6 +26565,7 @@ abstract class _RunObject extends RunObject { required final Map? metadata, required final RunCompletionUsage? usage, @JsonKey(includeIfNull: false) final double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) final double? topP, @JsonKey(name: 'max_prompt_tokens') required final int? maxPromptTokens, @JsonKey(name: 'max_completion_tokens') required final int? maxCompletionTokens, @@ -25418,6 +26681,11 @@ abstract class _RunObject extends RunObject { double? get temperature; @override + /// The nucleus sampling value used for this run. If not set, defaults to 1. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP; + @override + /// The maximum number of prompt tokens specified to have been used over the course of the run. @JsonKey(name: 'max_prompt_tokens') int? get maxPromptTokens; @@ -27277,6 +28545,12 @@ mixin _$CreateRunRequest { @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or temperature but not both. + @JsonKey(name: 'top_p', includeIfNull: false) + double? 
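// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the generated output): the per-run
// 'top_p' override added to CreateRunRequest in this change. Parameter names
// and JSON keys come from this file; the assistant id is a placeholder, and
// the surrounding client call is omitted because its exact API is not shown
// here.
//
//   final runRequest = CreateRunRequest(
//     assistantId: 'asst_abc123', // placeholder id
//     topP: 0.1, // nucleus sampling; serialized as 'top_p'
//                // (the docs above suggest altering this or temperature,
//                // but not both)
//   );
//   runRequest.toJson(); // 'top_p' is included only when non-null
//                        // (includeIfNull: false)
// ---------------------------------------------------------------------------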
get topP => throw _privateConstructorUsedError; + /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `complete`. See `incomplete_details` for more info. @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? get maxPromptTokens => throw _privateConstructorUsedError; @@ -27338,6 +28612,7 @@ abstract class $CreateRunRequestCopyWith<$Res> { @JsonKey(includeIfNull: false) List? tools, @JsonKey(includeIfNull: false) Map? metadata, @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? maxPromptTokens, @JsonKey(name: 'max_completion_tokens', includeIfNull: false) @@ -27379,6 +28654,7 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> Object? tools = freezed, Object? metadata = freezed, Object? temperature = freezed, + Object? topP = freezed, Object? maxPromptTokens = freezed, Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, @@ -27419,6 +28695,10 @@ class _$CreateRunRequestCopyWithImpl<$Res, $Val extends CreateRunRequest> ? _value.temperature : temperature // ignore: cast_nullable_to_non_nullable as double?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, maxPromptTokens: freezed == maxPromptTokens ? _value.maxPromptTokens : maxPromptTokens // ignore: cast_nullable_to_non_nullable @@ -27518,6 +28798,7 @@ abstract class _$$CreateRunRequestImplCopyWith<$Res> @JsonKey(includeIfNull: false) List? tools, @JsonKey(includeIfNull: false) Map? metadata, @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? maxPromptTokens, @JsonKey(name: 'max_completion_tokens', includeIfNull: false) @@ -27561,6 +28842,7 @@ class __$$CreateRunRequestImplCopyWithImpl<$Res> Object? tools = freezed, Object? metadata = freezed, Object? temperature = freezed, + Object? topP = freezed, Object? maxPromptTokens = freezed, Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, @@ -27601,6 +28883,10 @@ class __$$CreateRunRequestImplCopyWithImpl<$Res> ? _value.temperature : temperature // ignore: cast_nullable_to_non_nullable as double?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, maxPromptTokens: freezed == maxPromptTokens ? _value.maxPromptTokens : maxPromptTokens // ignore: cast_nullable_to_non_nullable @@ -27645,6 +28931,7 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @JsonKey(includeIfNull: false) final List? tools, @JsonKey(includeIfNull: false) final Map? metadata, @JsonKey(includeIfNull: false) this.temperature = 1.0, + @JsonKey(name: 'top_p', includeIfNull: false) this.topP = 1.0, @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) this.maxPromptTokens, @JsonKey(name: 'max_completion_tokens', includeIfNull: false) @@ -27735,6 +29022,13 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @JsonKey(includeIfNull: false) final double? temperature; + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or temperature but not both. + @override + @JsonKey(name: 'top_p', includeIfNull: false) + final double? topP; + /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `complete`. See `incomplete_details` for more info. @override @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) @@ -27776,7 +29070,7 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { @override String toString() { - return 'CreateRunRequest(assistantId: $assistantId, model: $model, instructions: $instructions, additionalInstructions: $additionalInstructions, additionalMessages: $additionalMessages, tools: $tools, metadata: $metadata, temperature: $temperature, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat, stream: $stream)'; + return 'CreateRunRequest(assistantId: $assistantId, model: $model, instructions: $instructions, additionalInstructions: $additionalInstructions, additionalMessages: $additionalMessages, tools: $tools, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat, stream: $stream)'; } @override @@ -27797,6 +29091,7 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { const DeepCollectionEquality().equals(other._metadata, _metadata) && (identical(other.temperature, temperature) || other.temperature == temperature) && + (identical(other.topP, topP) || other.topP == topP) && (identical(other.maxPromptTokens, maxPromptTokens) || other.maxPromptTokens == maxPromptTokens) && (identical(other.maxCompletionTokens, maxCompletionTokens) || @@ -27822,6 +29117,7 @@ class _$CreateRunRequestImpl extends _CreateRunRequest { const DeepCollectionEquality().hash(_tools), const DeepCollectionEquality().hash(_metadata), temperature, + topP, maxPromptTokens, maxCompletionTokens, truncationStrategy, @@ -27858,6 +29154,7 @@ abstract class _CreateRunRequest extends CreateRunRequest { @JsonKey(includeIfNull: false) final List? tools, @JsonKey(includeIfNull: false) final Map? metadata, @JsonKey(includeIfNull: false) final double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) final double? topP, @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) final int? maxPromptTokens, @JsonKey(name: 'max_completion_tokens', includeIfNull: false) @@ -27920,6 +29217,13 @@ abstract class _CreateRunRequest extends CreateRunRequest { double? get temperature; @override + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or temperature but not both. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP; + @override + /// The maximum number of prompt tokens that may be used over the course of the run. 
The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `complete`. See `incomplete_details` for more info. @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? get maxPromptTokens; @@ -30509,6 +31813,12 @@ mixin _$CreateThreadAndRunRequest { @JsonKey(includeIfNull: false) double? get temperature => throw _privateConstructorUsedError; + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or temperature but not both. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP => throw _privateConstructorUsedError; + /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `complete`. See `incomplete_details` for more info. @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? get maxPromptTokens => throw _privateConstructorUsedError; @@ -30567,6 +31877,7 @@ abstract class $CreateThreadAndRunRequestCopyWith<$Res> { @JsonKey(includeIfNull: false) List? tools, @JsonKey(includeIfNull: false) Map? metadata, @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? maxPromptTokens, @JsonKey(name: 'max_completion_tokens', includeIfNull: false) @@ -30609,6 +31920,7 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, Object? tools = freezed, Object? metadata = freezed, Object? temperature = freezed, + Object? topP = freezed, Object? maxPromptTokens = freezed, Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, @@ -30645,6 +31957,10 @@ class _$CreateThreadAndRunRequestCopyWithImpl<$Res, ? _value.temperature : temperature // ignore: cast_nullable_to_non_nullable as double?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, maxPromptTokens: freezed == maxPromptTokens ? _value.maxPromptTokens : maxPromptTokens // ignore: cast_nullable_to_non_nullable @@ -30754,6 +32070,7 @@ abstract class _$$CreateThreadAndRunRequestImplCopyWith<$Res> @JsonKey(includeIfNull: false) List? tools, @JsonKey(includeIfNull: false) Map? metadata, @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? maxPromptTokens, @JsonKey(name: 'max_completion_tokens', includeIfNull: false) @@ -30800,6 +32117,7 @@ class __$$CreateThreadAndRunRequestImplCopyWithImpl<$Res> Object? tools = freezed, Object? metadata = freezed, Object? temperature = freezed, + Object? topP = freezed, Object? maxPromptTokens = freezed, Object? maxCompletionTokens = freezed, Object? truncationStrategy = freezed, @@ -30836,6 +32154,10 @@ class __$$CreateThreadAndRunRequestImplCopyWithImpl<$Res> ? _value.temperature : temperature // ignore: cast_nullable_to_non_nullable as double?, + topP: freezed == topP + ? 
_value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, maxPromptTokens: freezed == maxPromptTokens ? _value.maxPromptTokens : maxPromptTokens // ignore: cast_nullable_to_non_nullable @@ -30875,6 +32197,7 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @JsonKey(includeIfNull: false) final List? tools, @JsonKey(includeIfNull: false) final Map? metadata, @JsonKey(includeIfNull: false) this.temperature = 1.0, + @JsonKey(name: 'top_p', includeIfNull: false) this.topP = 1.0, @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) this.maxPromptTokens, @JsonKey(name: 'max_completion_tokens', includeIfNull: false) @@ -30949,6 +32272,13 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @JsonKey(includeIfNull: false) final double? temperature; + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or temperature but not both. + @override + @JsonKey(name: 'top_p', includeIfNull: false) + final double? topP; + /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `complete`. See `incomplete_details` for more info. @override @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) @@ -30990,7 +32320,7 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { @override String toString() { - return 'CreateThreadAndRunRequest(assistantId: $assistantId, thread: $thread, model: $model, instructions: $instructions, tools: $tools, metadata: $metadata, temperature: $temperature, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat, stream: $stream)'; + return 'CreateThreadAndRunRequest(assistantId: $assistantId, thread: $thread, model: $model, instructions: $instructions, tools: $tools, metadata: $metadata, temperature: $temperature, topP: $topP, maxPromptTokens: $maxPromptTokens, maxCompletionTokens: $maxCompletionTokens, truncationStrategy: $truncationStrategy, toolChoice: $toolChoice, responseFormat: $responseFormat, stream: $stream)'; } @override @@ -31008,6 +32338,7 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { const DeepCollectionEquality().equals(other._metadata, _metadata) && (identical(other.temperature, temperature) || other.temperature == temperature) && + (identical(other.topP, topP) || other.topP == topP) && (identical(other.maxPromptTokens, maxPromptTokens) || other.maxPromptTokens == maxPromptTokens) && (identical(other.maxCompletionTokens, maxCompletionTokens) || @@ -31032,6 +32363,7 @@ class _$CreateThreadAndRunRequestImpl extends _CreateThreadAndRunRequest { const DeepCollectionEquality().hash(_tools), const DeepCollectionEquality().hash(_metadata), temperature, + topP, maxPromptTokens, maxCompletionTokens, truncationStrategy, @@ -31065,6 +32397,7 @@ abstract class _CreateThreadAndRunRequest extends CreateThreadAndRunRequest { @JsonKey(includeIfNull: false) final List? tools, @JsonKey(includeIfNull: false) final Map? metadata, @JsonKey(includeIfNull: false) final double? 
temperature, + @JsonKey(name: 'top_p', includeIfNull: false) final double? topP, @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) final int? maxPromptTokens, @JsonKey(name: 'max_completion_tokens', includeIfNull: false) @@ -31122,6 +32455,13 @@ abstract class _CreateThreadAndRunRequest extends CreateThreadAndRunRequest { double? get temperature; @override + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or temperature but not both. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP; + @override + /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `complete`. See `incomplete_details` for more info. @JsonKey(name: 'max_prompt_tokens', includeIfNull: false) int? get maxPromptTokens; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index c13aab6e..3926404b 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -2093,6 +2093,10 @@ _$CreateAssistantRequestImpl _$$CreateAssistantRequestImplFromJson( .toList() ?? const [], metadata: json['metadata'] as Map?, + temperature: (json['temperature'] as num?)?.toDouble() ?? 1.0, + topP: (json['top_p'] as num?)?.toDouble() ?? 1.0, + responseFormat: const _CreateAssistantRequestResponseFormatConverter() + .fromJson(json['response_format']), ); Map _$$CreateAssistantRequestImplToJson( @@ -2113,6 +2117,12 @@ Map _$$CreateAssistantRequestImplToJson( val['tools'] = instance.tools.map((e) => e.toJson()).toList(); val['file_ids'] = instance.fileIds; writeNotNull('metadata', instance.metadata); + writeNotNull('temperature', instance.temperature); + writeNotNull('top_p', instance.topP); + writeNotNull( + 'response_format', + const _CreateAssistantRequestResponseFormatConverter() + .toJson(instance.responseFormat)); return val; } @@ -2165,6 +2175,46 @@ Map _$$AssistantModelStringImplToJson( 'runtimeType': instance.$type, }; +_$CreateAssistantRequestResponseFormatEnumerationImpl + _$$CreateAssistantRequestResponseFormatEnumerationImplFromJson( + Map json) => + _$CreateAssistantRequestResponseFormatEnumerationImpl( + $enumDecode( + _$CreateAssistantResponseFormatModeEnumMap, json['value']), + $type: json['runtimeType'] as String?, + ); + +Map + _$$CreateAssistantRequestResponseFormatEnumerationImplToJson( + _$CreateAssistantRequestResponseFormatEnumerationImpl instance) => + { + 'value': _$CreateAssistantResponseFormatModeEnumMap[instance.value]!, + 'runtimeType': instance.$type, + }; + +const _$CreateAssistantResponseFormatModeEnumMap = { + CreateAssistantResponseFormatMode.none: 'none', + CreateAssistantResponseFormatMode.auto: 'auto', +}; + +_$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl + _$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( + Map json) => + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl( + AssistantsResponseFormat.fromJson( + json['value'] as Map), + $type: json['runtimeType'] as String?, + ); + +Map + 
_$$CreateAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( + _$CreateAssistantRequestResponseFormatAssistantsResponseFormatImpl + instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; + _$ModifyAssistantRequestImpl _$$ModifyAssistantRequestImplFromJson( Map json) => _$ModifyAssistantRequestImpl( @@ -2181,6 +2231,10 @@ _$ModifyAssistantRequestImpl _$$ModifyAssistantRequestImplFromJson( .toList() ?? const [], metadata: json['metadata'] as Map?, + temperature: (json['temperature'] as num?)?.toDouble() ?? 1.0, + topP: (json['top_p'] as num?)?.toDouble() ?? 1.0, + responseFormat: const _ModifyAssistantRequestResponseFormatConverter() + .fromJson(json['response_format']), ); Map _$$ModifyAssistantRequestImplToJson( @@ -2200,9 +2254,55 @@ Map _$$ModifyAssistantRequestImplToJson( val['tools'] = instance.tools.map((e) => e.toJson()).toList(); val['file_ids'] = instance.fileIds; writeNotNull('metadata', instance.metadata); + writeNotNull('temperature', instance.temperature); + writeNotNull('top_p', instance.topP); + writeNotNull( + 'response_format', + const _ModifyAssistantRequestResponseFormatConverter() + .toJson(instance.responseFormat)); return val; } +_$ModifyAssistantRequestResponseFormatEnumerationImpl + _$$ModifyAssistantRequestResponseFormatEnumerationImplFromJson( + Map json) => + _$ModifyAssistantRequestResponseFormatEnumerationImpl( + $enumDecode( + _$ModifyAssistantResponseFormatModeEnumMap, json['value']), + $type: json['runtimeType'] as String?, + ); + +Map + _$$ModifyAssistantRequestResponseFormatEnumerationImplToJson( + _$ModifyAssistantRequestResponseFormatEnumerationImpl instance) => + { + 'value': _$ModifyAssistantResponseFormatModeEnumMap[instance.value]!, + 'runtimeType': instance.$type, + }; + +const _$ModifyAssistantResponseFormatModeEnumMap = { + ModifyAssistantResponseFormatMode.none: 'none', + ModifyAssistantResponseFormatMode.auto: 'auto', +}; + +_$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl + _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplFromJson( + Map json) => + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl( + AssistantsResponseFormat.fromJson( + json['value'] as Map), + $type: json['runtimeType'] as String?, + ); + +Map + _$$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImplToJson( + _$ModifyAssistantRequestResponseFormatAssistantsResponseFormatImpl + instance) => + { + 'value': instance.value.toJson(), + 'runtimeType': instance.$type, + }; + _$DeleteAssistantResponseImpl _$$DeleteAssistantResponseImplFromJson( Map json) => _$DeleteAssistantResponseImpl( @@ -2373,6 +2473,7 @@ _$RunObjectImpl _$$RunObjectImplFromJson(Map json) => ? 
null : RunCompletionUsage.fromJson(json['usage'] as Map), temperature: (json['temperature'] as num?)?.toDouble(), + topP: (json['top_p'] as num?)?.toDouble(), maxPromptTokens: json['max_prompt_tokens'] as int?, maxCompletionTokens: json['max_completion_tokens'] as int?, truncationStrategy: json['truncation_strategy'] == null @@ -2416,6 +2517,7 @@ Map _$$RunObjectImplToJson(_$RunObjectImpl instance) { } writeNotNull('temperature', instance.temperature); + writeNotNull('top_p', instance.topP); val['max_prompt_tokens'] = instance.maxPromptTokens; val['max_completion_tokens'] = instance.maxCompletionTokens; val['truncation_strategy'] = instance.truncationStrategy?.toJson(); @@ -2623,6 +2725,7 @@ _$CreateRunRequestImpl _$$CreateRunRequestImplFromJson( .toList(), metadata: json['metadata'] as Map?, temperature: (json['temperature'] as num?)?.toDouble() ?? 1.0, + topP: (json['top_p'] as num?)?.toDouble() ?? 1.0, maxPromptTokens: json['max_prompt_tokens'] as int?, maxCompletionTokens: json['max_completion_tokens'] as int?, truncationStrategy: json['truncation_strategy'] == null @@ -2657,6 +2760,7 @@ Map _$$CreateRunRequestImplToJson( writeNotNull('tools', instance.tools?.map((e) => e.toJson()).toList()); writeNotNull('metadata', instance.metadata); writeNotNull('temperature', instance.temperature); + writeNotNull('top_p', instance.topP); writeNotNull('max_prompt_tokens', instance.maxPromptTokens); writeNotNull('max_completion_tokens', instance.maxCompletionTokens); writeNotNull('truncation_strategy', instance.truncationStrategy?.toJson()); @@ -2935,6 +3039,7 @@ _$CreateThreadAndRunRequestImpl _$$CreateThreadAndRunRequestImplFromJson( .toList(), metadata: json['metadata'] as Map?, temperature: (json['temperature'] as num?)?.toDouble() ?? 1.0, + topP: (json['top_p'] as num?)?.toDouble() ?? 1.0, maxPromptTokens: json['max_prompt_tokens'] as int?, maxCompletionTokens: json['max_completion_tokens'] as int?, truncationStrategy: json['truncation_strategy'] == null @@ -2967,6 +3072,7 @@ Map _$$CreateThreadAndRunRequestImplToJson( writeNotNull('tools', instance.tools?.map((e) => e.toJson()).toList()); writeNotNull('metadata', instance.metadata); writeNotNull('temperature', instance.temperature); + writeNotNull('top_p', instance.topP); writeNotNull('max_prompt_tokens', instance.maxPromptTokens); writeNotNull('max_completion_tokens', instance.maxCompletionTokens); writeNotNull('truncation_strategy', instance.truncationStrategy?.toJson()); diff --git a/packages/openai_dart/oas/main.dart b/packages/openai_dart/oas/main.dart index 600fec42..a269440e 100644 --- a/packages/openai_dart/oas/main.dart +++ b/packages/openai_dart/oas/main.dart @@ -106,10 +106,14 @@ String? 
_onSchemaUnionFactoryName( 'CreateRunRequestResponseFormatEnumeration' => 'mode', 'CreateThreadAndRunRequestResponseFormatEnumeration' => 'mode', 'RunObjectResponseFormatEnumeration' => 'mode', + 'CreateAssistantRequestResponseFormatEnumeration' => 'mode', + 'ModifyAssistantRequestResponseFormatEnumeration' => 'mode', 'CreateRunRequestResponseFormatAssistantsResponseFormat' => 'format', 'CreateThreadAndRunRequestResponseFormatAssistantsResponseFormat' => 'format', 'RunObjectResponseFormatAssistantsResponseFormat' => 'format', + 'CreateAssistantRequestResponseFormatAssistantsResponseFormat' => 'format', + 'ModifyAssistantRequestResponseFormatAssistantsResponseFormat' => 'format', 'CreateRunRequestToolChoiceEnumeration' => 'mode', 'CreateThreadAndRunRequestToolChoiceEnumeration' => 'mode', 'RunObjectToolChoiceEnumeration' => 'mode', diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 298e1992..4f627e89 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -2408,7 +2408,7 @@ components: description: | The ID of an uploaded file that contains training data. - See [upload file](https://platform.openai.com/docs/api-reference/files/upload) for how to upload a file. + See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. @@ -3240,6 +3240,40 @@ components: type: object additionalProperties: true nullable: true + temperature: + description: &run_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + response_format: + description: | + Specifies the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. 
+ oneOf: + - type: string + title: CreateAssistantResponseFormatMode + description: > + `auto` is the default value + enum: [none, auto] + - $ref: "#/components/schemas/AssistantsResponseFormat" required: - model ModifyAssistantRequest: @@ -3285,6 +3319,40 @@ components: type: object additionalProperties: true nullable: true + temperature: + description: &run_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + response_format: + description: | + Specifies the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + oneOf: + - type: string + title: ModifyAssistantResponseFormatMode + description: > + `auto` is the default value + enum: [none, auto] + - $ref: "#/components/schemas/AssistantsResponseFormat" DeleteAssistantResponse: type: object description: Represents a deleted response returned by the Delete assistant endpoint. @@ -3555,6 +3623,10 @@ components: description: The sampling temperature used for this run. If not set, defaults to 1. type: number nullable: true + top_p: + description: The nucleus sampling value used for this run. If not set, defaults to 1. + type: number + nullable: true max_prompt_tokens: type: integer nullable: true @@ -3716,8 +3788,18 @@ components: default: 1 example: 1 nullable: true - description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + description: *run_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. 
max_prompt_tokens: type: integer nullable: true @@ -3933,6 +4015,14 @@ components: example: 1 nullable: true description: *run_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description max_prompt_tokens: type: integer nullable: true diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 340cadaf..b6bb0001 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -2888,7 +2888,10 @@ paths: "instructions": "You are a helpful assistant designed to make me better at coding!", "tools": [], "file_ids": [], - "metadata": {} + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" }, { "id": "asst_abc456", @@ -2900,7 +2903,10 @@ paths: "instructions": "You are a helpful assistant designed to make me better at coding!", "tools": [], "file_ids": [], - "metadata": {} + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" }, { "id": "asst_abc789", @@ -2912,7 +2918,10 @@ paths: "instructions": null, "tools": [], "file_ids": [], - "metadata": {} + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" } ], "first_id": "asst_abc123", @@ -3001,7 +3010,10 @@ paths: } ], "file_ids": [], - "metadata": {} + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" } - title: Files request: @@ -3064,7 +3076,10 @@ paths: "file_ids": [ "file-abc123" ], - "metadata": {} + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" } /assistants/{assistant_id}: @@ -3238,7 +3253,10 @@ paths: "file-abc123", "file-abc456" ], - "metadata": {} + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" } delete: operationId: deleteAssistant @@ -4501,7 +4519,8 @@ paths: "completion_tokens": 456, "total_tokens": 579 }, - "temperature": 1, + "temperature": 1.0, + "top_p": 1.0, "max_prompt_tokens": 1000, "max_completion_tokens": 1000, "truncation_strategy": { @@ -4542,7 +4561,8 @@ paths: "completion_tokens": 456, "total_tokens": 579 }, - "temperature": 1, + "temperature": 1.0, + "top_p": 1.0, "max_prompt_tokens": 1000, "max_completion_tokens": 1000, "truncation_strategy": { @@ -4651,7 +4671,8 @@ paths: ], "metadata": {}, "usage": null, - "temperature": 1, + "temperature": 1.0, + "top_p": 1.0, "max_prompt_tokens": 1000, "max_completion_tokens": 1000, "truncation_strategy": { @@ -4996,7 +5017,8 @@ paths: "completion_tokens": 456, "total_tokens": 579 }, - "temperature": 1, + "temperature": 1.0, + "top_p": 1.0, "max_prompt_tokens": 1000, "max_completion_tokens": 1000, "truncation_strategy": { @@ -5119,7 +5141,8 @@ paths: "completion_tokens": 456, "total_tokens": 579 }, - "temperature": 1, + "temperature": 1.0, + "top_p": 1.0, "max_prompt_tokens": 1000, "max_completion_tokens": 1000, "truncation_strategy": { @@ -5266,7 +5289,8 @@ paths: "file_ids": [], "metadata": {}, "usage": null, - "temperature": 1, + "temperature": 1.0, + "top_p": 1.0, "max_prompt_tokens": 1000, "max_completion_tokens": 1000, "truncation_strategy": { @@ -5474,7 +5498,8 @@ paths: "file_ids": [], "metadata": {}, "usage": null, - "temperature": 1 + "temperature": 1.0, + "top_p": 1.0, } /threads/{thread_id}/runs/{run_id}/steps: @@ -7775,7 +7800,7 @@ components: description: | The ID of an uploaded file that contains training data. 
- See [upload file](/docs/api-reference/files/upload) for how to upload a file. + See [upload file](/docs/api-reference/files/create) for how to upload a file. Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. @@ -8770,6 +8795,33 @@ components: - total_tokens nullable: true + AssistantsApiResponseFormatOption: + description: | + Specifies the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + oneOf: + - type: string + description: > + `auto` is the default value + enum: [none, auto] + - $ref: "#/components/schemas/AssistantsApiResponseFormat" + x-oaiExpandable: true + + AssistantsApiResponseFormat: + type: object + description: | + An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. + properties: + type: + type: string + enum: ["text", "json_object"] + example: "json_object" + default: "text" + description: Must be one of `text` or `json_object`. + AssistantObject: type: object title: Assistant @@ -8918,6 +8970,29 @@ components: type: object x-oaiTypeLabel: map nullable: true + temperature: + description: &run_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true required: - model @@ -8968,6 +9043,29 @@ components: type: object x-oaiTypeLabel: map nullable: true + temperature: + description: &run_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true DeleteAssistantResponse: type: object @@ -9101,33 +9199,6 @@ components: required: - type - AssistantsApiResponseFormatOption: - description: | - Specifies the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. - oneOf: - - type: string - description: > - `auto` is the default value - enum: [none, auto] - - $ref: "#/components/schemas/AssistantsApiResponseFormat" - x-oaiExpandable: true - - AssistantsApiResponseFormat: - type: object - description: | - An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. If `text` the model can return text or any value needed. - properties: - type: - type: string - enum: ["text", "json_object"] - example: "json_object" - default: "text" - description: Must be one of `text` or `json_object`. - RunObject: type: object title: A run on a thread @@ -9264,6 +9335,10 @@ components: description: The sampling temperature used for this run. If not set, defaults to 1. type: number nullable: true + top_p: + description: The nucleus sampling value used for this run. If not set, defaults to 1. + type: number + nullable: true max_prompt_tokens: type: integer nullable: true @@ -9339,7 +9414,8 @@ components: "completion_tokens": 456, "total_tokens": 579 }, - "temperature": 1, + "temperature": 1.0, + "top_p": 1.0, "max_prompt_tokens": 1000, "max_completion_tokens": 1000, "truncation_strategy": { @@ -9422,8 +9498,18 @@ components: default: 1 example: 1 nullable: true - description: &run_temperature_description | - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + description: *run_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. stream: type: boolean nullable: true @@ -9606,6 +9692,14 @@ components: example: 1 nullable: true description: *run_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description stream: type: boolean nullable: true
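The diff above threads the new `temperature`, `top_p`, and `response_format` properties through the curated spec, the generated `CreateAssistantRequest`/`ModifyAssistantRequest` classes, and their JSON serializers. Below is a minimal sketch of how that surfaces on the Dart side; it assumes the package's usual converter behaviour (a plain string accepted for `model`, and the bare `'auto'` string accepted for `response_format`, matching the spec examples), and the model id and instruction text are illustrative only.

```dart
import 'package:openai_dart/openai_dart.dart';

void main() {
  // Wire-format payload using the keys added in this change.
  final request = CreateAssistantRequest.fromJson({
    'model': 'gpt-4-turbo', // illustrative model id
    'instructions': 'You are a helpful assistant. Always reply in JSON.',
    'temperature': 0.3,
    'top_p': 1.0,
    'response_format': 'auto', // enum ('mode') variant of the response-format union
  });

  // The new validation constants bound temperature to [0, 2] and top_p to
  // [0, 1]; validateSchema() returns null when the request is in range.
  print(request.validateSchema()); // null
  print(request.topP); // 1.0
  print(request.toJson()['top_p']); // 1.0 — snake_case name on the wire
}
```

To opt into JSON mode rather than `auto`, the object variant of the union would be used instead, e.g. `CreateAssistantRequestResponseFormat.format(AssistantsResponseFormat(type: AssistantsResponseFormatType.jsonObject))` — the `type`/`jsonObject` names follow the `AssistantsResponseFormat` schema but are assumed here, not shown in this diff. Remember the spec's caveat that, in JSON mode, the prompt itself must also ask for JSON output.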
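The same pair of sampling knobs is added to `CreateRunRequest` and `CreateThreadAndRunRequest`, and `RunObject` now reads `top_p` back from the API. A hedged sketch of building a run request with the new `topP` field follows; the assistant id is a placeholder, and it assumes the freezed factory exposes these fields as named parameters, as the generated copyWith signatures in the diff suggest.

```dart
import 'package:openai_dart/openai_dart.dart';

void main() {
  // Placeholder assistant id for illustration.
  final request = CreateRunRequest(
    assistantId: 'asst_abc123',
    // Tune either temperature or nucleus sampling; the field docs recommend
    // altering one of the two, not both, so temperature is left at its
    // default (1.0) here.
    topP: 0.2,
    maxPromptTokens: 1000,
    maxCompletionTokens: 1000,
  );

  // `includeIfNull: false` keeps unset fields out of the payload, while the
  // non-null defaults (temperature = 1.0, and top_p = 1.0 unless overridden)
  // are serialized under their snake_case names.
  final body = request.toJson();
  print(body['top_p']); // 0.2
  print(body.containsKey('truncation_strategy')); // false — left null
}
```

When the run is read back, the corresponding `RunObject.topP` getter exposes the nucleus sampling value the API actually used (nullable; the spec notes it defaults to 1 when not set).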