diff --git a/packages/openai_dart/lib/src/generated/client.dart b/packages/openai_dart/lib/src/generated/client.dart index f12c884e..d4005e4c 100644 --- a/packages/openai_dart/lib/src/generated/client.dart +++ b/packages/openai_dart/lib/src/generated/client.dart @@ -663,7 +663,7 @@ class OpenAIClient { // METHOD: createModeration // ------------------------------------------ - /// Classifies if text violates OpenAI's Content Policy. + /// Classifies if text is potentially harmful. /// /// `request`: Request object for the Create moderation endpoint. /// diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_token_logprob.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_token_logprob.dart index edcbed96..27bd0aae 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_token_logprob.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_token_logprob.dart @@ -18,7 +18,7 @@ class ChatCompletionTokenLogprob with _$ChatCompletionTokenLogprob { /// The token. required String token, - /// The log probability of this token. + /// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. required double logprob, /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_token_top_logprob.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_token_top_logprob.dart index 40c2b0f0..c817c88b 100644 --- a/packages/openai_dart/lib/src/generated/schema/chat_completion_token_top_logprob.dart +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_token_top_logprob.dart @@ -18,7 +18,7 @@ class ChatCompletionTokenTopLogprob with _$ChatCompletionTokenTopLogprob { /// The token. required String token, - /// The log probability of this token. + /// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. required double logprob, /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart index d8fc2ba2..a7757db7 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart @@ -28,9 +28,6 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { @Default(0.0) double? frequencyPenalty, - /// An unique identifier to a custom instance to execute the request. The requesting organization is required to have access to the instance. - @JsonKey(name: 'instance_id', includeIfNull: false) String? instanceId, - /// Modify the likelihood of specified tokens appearing in the completion. 
/// /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. @@ -40,7 +37,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. This option is currently not available on the `gpt-4-vision-preview` model. @JsonKey(includeIfNull: false) bool? logprobs, - /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @JsonKey(name: 'top_logprobs', includeIfNull: false) int? topLogprobs, /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. @@ -132,7 +129,6 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'model', 'messages', 'frequency_penalty', - 'instance_id', 'logit_bias', 'logprobs', 'top_logprobs', @@ -157,7 +153,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { static const frequencyPenaltyMinValue = -2.0; static const frequencyPenaltyMaxValue = 2.0; static const topLogprobsMinValue = 0; - static const topLogprobsMaxValue = 5; + static const topLogprobsMaxValue = 20; static const nDefaultValue = 1; static const nMinValue = 1; static const nMaxValue = 128; @@ -220,7 +216,6 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest { 'model': model, 'messages': messages, 'frequency_penalty': frequencyPenalty, - 'instance_id': instanceId, 'logit_bias': logitBias, 'logprobs': logprobs, 'top_logprobs': topLogprobs, diff --git a/packages/openai_dart/lib/src/generated/schema/create_image_request.dart b/packages/openai_dart/lib/src/generated/schema/create_image_request.dart index 6ba48b38..dad24bde 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_image_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_image_request.dart @@ -32,7 +32,7 @@ class CreateImageRequest with _$CreateImageRequest { /// The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. @Default(ImageQuality.standard) ImageQuality quality, - /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. @JsonKey( name: 'response_format', includeIfNull: false, @@ -194,7 +194,7 @@ enum ImageQuality { // ENUM: ImageResponseFormat // ========================================== -/// The format in which the generated images are returned. Must be one of `url` or `b64_json`. +/// The format in which the generated images are returned. 
Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. enum ImageResponseFormat { @JsonValue('url') url, diff --git a/packages/openai_dart/lib/src/generated/schema/create_moderation_response.dart b/packages/openai_dart/lib/src/generated/schema/create_moderation_response.dart index eae24da3..16f837bc 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_moderation_response.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_moderation_response.dart @@ -8,7 +8,7 @@ part of open_a_i_schema; // CLASS: CreateModerationResponse // ========================================== -/// Represents policy compliance report by OpenAI's content moderation model against a given input. +/// Represents if a given text input is potentially harmful. @freezed class CreateModerationResponse with _$CreateModerationResponse { const CreateModerationResponse._(); diff --git a/packages/openai_dart/lib/src/generated/schema/moderation.dart b/packages/openai_dart/lib/src/generated/schema/moderation.dart index 9162fb03..eb1de3bd 100644 --- a/packages/openai_dart/lib/src/generated/schema/moderation.dart +++ b/packages/openai_dart/lib/src/generated/schema/moderation.dart @@ -15,7 +15,7 @@ class Moderation with _$Moderation { /// Factory constructor for Moderation const factory Moderation({ - /// Whether the content violates [OpenAI's usage policies](https://platform.openai.com/policies/usage-policies). + /// Whether any of the below categories are flagged. required bool flagged, /// A list of the categories, and whether they are flagged or not. diff --git a/packages/openai_dart/lib/src/generated/schema/run_object.dart b/packages/openai_dart/lib/src/generated/schema/run_object.dart index 540c32c6..7c0c04fc 100644 --- a/packages/openai_dart/lib/src/generated/schema/run_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/run_object.dart @@ -217,7 +217,7 @@ class RunLastError with _$RunLastError { /// Factory constructor for RunLastError const factory RunLastError({ - /// One of `server_error` or `rate_limit_exceeded`. + /// One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. required RunLastErrorCode code, /// A human-readable description of the error. @@ -294,10 +294,12 @@ class RunSubmitToolOutputs with _$RunSubmitToolOutputs { // ENUM: RunLastErrorCode // ========================================== -/// One of `server_error` or `rate_limit_exceeded`. +/// One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. enum RunLastErrorCode { @JsonValue('server_error') serverError, @JsonValue('rate_limit_exceeded') rateLimitExceeded, + @JsonValue('invalid_prompt') + invalidPrompt, } diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 60f7f00b..1a8ba075 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -3356,10 +3356,6 @@ mixin _$CreateChatCompletionRequest { @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? get frequencyPenalty => throw _privateConstructorUsedError; - /// An unique identifier to a custom instance to execute the request. The requesting organization is required to have access to the instance. - @JsonKey(name: 'instance_id', includeIfNull: false) - String? get instanceId => throw _privateConstructorUsedError; - /// Modify the likelihood of specified tokens appearing in the completion. 
/// /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. @@ -3370,7 +3366,7 @@ mixin _$CreateChatCompletionRequest { @JsonKey(includeIfNull: false) bool? get logprobs => throw _privateConstructorUsedError; - /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @JsonKey(name: 'top_logprobs', includeIfNull: false) int? get topLogprobs => throw _privateConstructorUsedError; @@ -3483,7 +3479,6 @@ abstract class $CreateChatCompletionRequestCopyWith<$Res> { List messages, @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? frequencyPenalty, - @JsonKey(name: 'instance_id', includeIfNull: false) String? instanceId, @JsonKey(name: 'logit_bias', includeIfNull: false) Map? logitBias, @JsonKey(includeIfNull: false) bool? logprobs, @@ -3535,7 +3530,6 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, Object? model = null, Object? messages = null, Object? frequencyPenalty = freezed, - Object? instanceId = freezed, Object? logitBias = freezed, Object? logprobs = freezed, Object? topLogprobs = freezed, @@ -3567,10 +3561,6 @@ class _$CreateChatCompletionRequestCopyWithImpl<$Res, ? _value.frequencyPenalty : frequencyPenalty // ignore: cast_nullable_to_non_nullable as double?, - instanceId: freezed == instanceId - ? _value.instanceId - : instanceId // ignore: cast_nullable_to_non_nullable - as String?, logitBias: freezed == logitBias ? _value.logitBias : logitBias // ignore: cast_nullable_to_non_nullable @@ -3716,7 +3706,6 @@ abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> List messages, @JsonKey(name: 'frequency_penalty', includeIfNull: false) double? frequencyPenalty, - @JsonKey(name: 'instance_id', includeIfNull: false) String? instanceId, @JsonKey(name: 'logit_bias', includeIfNull: false) Map? logitBias, @JsonKey(includeIfNull: false) bool? logprobs, @@ -3772,7 +3761,6 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> Object? model = null, Object? messages = null, Object? frequencyPenalty = freezed, - Object? instanceId = freezed, Object? logitBias = freezed, Object? logprobs = freezed, Object? topLogprobs = freezed, @@ -3804,10 +3792,6 @@ class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> ? _value.frequencyPenalty : frequencyPenalty // ignore: cast_nullable_to_non_nullable as double?, - instanceId: freezed == instanceId - ? _value.instanceId - : instanceId // ignore: cast_nullable_to_non_nullable - as String?, logitBias: freezed == logitBias ? 
_value._logitBias : logitBias // ignore: cast_nullable_to_non_nullable @@ -3888,7 +3872,6 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { required final List messages, @JsonKey(name: 'frequency_penalty', includeIfNull: false) this.frequencyPenalty = 0.0, - @JsonKey(name: 'instance_id', includeIfNull: false) this.instanceId, @JsonKey(name: 'logit_bias', includeIfNull: false) final Map? logitBias, @JsonKey(includeIfNull: false) this.logprobs, @@ -3946,11 +3929,6 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(name: 'frequency_penalty', includeIfNull: false) final double? frequencyPenalty; - /// An unique identifier to a custom instance to execute the request. The requesting organization is required to have access to the instance. - @override - @JsonKey(name: 'instance_id', includeIfNull: false) - final String? instanceId; - /// Modify the likelihood of specified tokens appearing in the completion. /// /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. @@ -3974,7 +3952,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @JsonKey(includeIfNull: false) final bool? logprobs; - /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @override @JsonKey(name: 'top_logprobs', includeIfNull: false) final int? 
topLogprobs; @@ -4102,7 +4080,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { @override String toString() { - return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, instanceId: $instanceId, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, stop: $stop, stream: $stream, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, user: $user, functionCall: $functionCall, functions: $functions)'; + return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, topLogprobs: $topLogprobs, maxTokens: $maxTokens, n: $n, presencePenalty: $presencePenalty, responseFormat: $responseFormat, seed: $seed, stop: $stop, stream: $stream, temperature: $temperature, topP: $topP, tools: $tools, toolChoice: $toolChoice, user: $user, functionCall: $functionCall, functions: $functions)'; } @override @@ -4114,8 +4092,6 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { const DeepCollectionEquality().equals(other._messages, _messages) && (identical(other.frequencyPenalty, frequencyPenalty) || other.frequencyPenalty == frequencyPenalty) && - (identical(other.instanceId, instanceId) || - other.instanceId == instanceId) && const DeepCollectionEquality() .equals(other._logitBias, _logitBias) && (identical(other.logprobs, logprobs) || @@ -4152,7 +4128,6 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { model, const DeepCollectionEquality().hash(_messages), frequencyPenalty, - instanceId, const DeepCollectionEquality().hash(_logitBias), logprobs, topLogprobs, @@ -4195,8 +4170,6 @@ abstract class _CreateChatCompletionRequest required final List messages, @JsonKey(name: 'frequency_penalty', includeIfNull: false) final double? frequencyPenalty, - @JsonKey(name: 'instance_id', includeIfNull: false) - final String? instanceId, @JsonKey(name: 'logit_bias', includeIfNull: false) final Map? logitBias, @JsonKey(includeIfNull: false) final bool? logprobs, @@ -4249,11 +4222,6 @@ abstract class _CreateChatCompletionRequest double? get frequencyPenalty; @override - /// An unique identifier to a custom instance to execute the request. The requesting organization is required to have access to the instance. - @JsonKey(name: 'instance_id', includeIfNull: false) - String? get instanceId; - @override - /// Modify the likelihood of specified tokens appearing in the completion. /// /// Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. @@ -4266,7 +4234,7 @@ abstract class _CreateChatCompletionRequest bool? get logprobs; @override - /// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
+ /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @JsonKey(name: 'top_logprobs', includeIfNull: false) int? get topLogprobs; @override @@ -8274,7 +8242,7 @@ mixin _$ChatCompletionTokenLogprob { /// The token. String get token => throw _privateConstructorUsedError; - /// The log probability of this token. + /// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. double get logprob => throw _privateConstructorUsedError; /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. @@ -8423,7 +8391,7 @@ class _$ChatCompletionTokenLogprobImpl extends _ChatCompletionTokenLogprob { @override final String token; - /// The log probability of this token. + /// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. @override final double logprob; @@ -8512,7 +8480,7 @@ abstract class _ChatCompletionTokenLogprob extends ChatCompletionTokenLogprob { String get token; @override - /// The log probability of this token. + /// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. double get logprob; @override @@ -8539,7 +8507,7 @@ mixin _$ChatCompletionTokenTopLogprob { /// The token. String get token => throw _privateConstructorUsedError; - /// The log probability of this token. + /// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. double get logprob => throw _privateConstructorUsedError; /// A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. @@ -8662,7 +8630,7 @@ class _$ChatCompletionTokenTopLogprobImpl @override final String token; - /// The log probability of this token. + /// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. @override final double logprob; @@ -8732,7 +8700,7 @@ abstract class _ChatCompletionTokenTopLogprob String get token; @override - /// The log probability of this token. + /// The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. double get logprob; @override @@ -15918,7 +15886,7 @@ mixin _$CreateImageRequest { /// The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. ImageQuality get quality => throw _privateConstructorUsedError; - /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. 
+ /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. @JsonKey( name: 'response_format', includeIfNull: false, @@ -16187,7 +16155,7 @@ class _$CreateImageRequestImpl extends _CreateImageRequest { @JsonKey() final ImageQuality quality; - /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. @override @JsonKey( name: 'response_format', @@ -16302,7 +16270,7 @@ abstract class _CreateImageRequest extends CreateImageRequest { ImageQuality get quality; @override - /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. @JsonKey( name: 'response_format', includeIfNull: false, @@ -18950,7 +18918,7 @@ Moderation _$ModerationFromJson(Map json) { /// @nodoc mixin _$Moderation { - /// Whether the content violates [OpenAI's usage policies](https://platform.openai.com/policies/usage-policies). + /// Whether any of the below categories are flagged. bool get flagged => throw _privateConstructorUsedError; /// A list of the categories, and whether they are flagged or not. @@ -19098,7 +19066,7 @@ class _$ModerationImpl extends _Moderation { factory _$ModerationImpl.fromJson(Map json) => _$$ModerationImplFromJson(json); - /// Whether the content violates [OpenAI's usage policies](https://platform.openai.com/policies/usage-policies). + /// Whether any of the below categories are flagged. @override final bool flagged; @@ -19161,7 +19129,7 @@ abstract class _Moderation extends Moderation { @override - /// Whether the content violates [OpenAI's usage policies](https://platform.openai.com/policies/usage-policies). + /// Whether any of the below categories are flagged. bool get flagged; @override @@ -22933,7 +22901,7 @@ RunLastError _$RunLastErrorFromJson(Map json) { /// @nodoc mixin _$RunLastError { - /// One of `server_error` or `rate_limit_exceeded`. + /// One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. RunLastErrorCode get code => throw _privateConstructorUsedError; /// A human-readable description of the error. @@ -23030,7 +22998,7 @@ class _$RunLastErrorImpl extends _RunLastError { factory _$RunLastErrorImpl.fromJson(Map json) => _$$RunLastErrorImplFromJson(json); - /// One of `server_error` or `rate_limit_exceeded`. + /// One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. @override final RunLastErrorCode code; @@ -23081,7 +23049,7 @@ abstract class _RunLastError extends RunLastError { @override - /// One of `server_error` or `rate_limit_exceeded`. + /// One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. 
RunLastErrorCode get code; @override diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 90f10a1d..cb0ff1a8 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -304,7 +304,6 @@ _$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( .map((e) => ChatCompletionMessage.fromJson(e as Map)) .toList(), frequencyPenalty: (json['frequency_penalty'] as num?)?.toDouble() ?? 0.0, - instanceId: json['instance_id'] as String?, logitBias: (json['logit_bias'] as Map?)?.map( (k, e) => MapEntry(k, e as int), ), @@ -349,7 +348,6 @@ Map _$$CreateChatCompletionRequestImplToJson( } writeNotNull('frequency_penalty', instance.frequencyPenalty); - writeNotNull('instance_id', instance.instanceId); writeNotNull('logit_bias', instance.logitBias); writeNotNull('logprobs', instance.logprobs); writeNotNull('top_logprobs', instance.topLogprobs); @@ -2141,6 +2139,7 @@ Map _$$RunLastErrorImplToJson(_$RunLastErrorImpl instance) => const _$RunLastErrorCodeEnumMap = { RunLastErrorCode.serverError: 'server_error', RunLastErrorCode.rateLimitExceeded: 'rate_limit_exceeded', + RunLastErrorCode.invalidPrompt: 'invalid_prompt', }; _$RunSubmitToolOutputsImpl _$$RunSubmitToolOutputsImplFromJson( diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 185c46fe..65028db8 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -32,7 +32,7 @@ tags: - name: Models description: List and describe the various models available in the API. - name: Moderations - description: Given a input text, outputs if the model classifies it as violating OpenAI's content policy. + description: Given an input text, outputs if the model classifies it as potentially harmful. paths: /chat/completions: @@ -306,7 +306,7 @@ paths: operationId: createModeration tags: - Moderations - summary: Classifies if text violates OpenAI's Content Policy. + summary: Classifies if text is potentially harmful. requestBody: required: true content: @@ -1497,11 +1497,6 @@ components: maximum: 2 nullable: true description: *completions_frequency_penalty_description - instance_id: - type: string - default: null - nullable: true - description: An unique identifier to a custom instance to execute the request. The requesting organization is required to have access to the instance. logit_bias: type: object default: null @@ -1517,10 +1512,10 @@ components: type: boolean nullable: true top_logprobs: - description: An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. type: integer minimum: 0 - maximum: 5 + maximum: 20 nullable: true max_tokens: description: | @@ -2011,7 +2006,7 @@ components: description: The token. type: string logprob: &chat_completion_response_logprobs_token_logprob - description: The log probability of this token. + description: The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. 
type: number bytes: &chat_completion_response_logprobs_bytes description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. @@ -2568,7 +2563,7 @@ components: default: "url" example: "url" nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. size: &images_size title: ImageSize type: string @@ -2709,7 +2704,7 @@ components: - input CreateModerationResponse: type: object - description: Represents policy compliance report by OpenAI's content moderation model against a given input. + description: Represents if a given text input is potentially harmful. properties: id: type: string @@ -2732,7 +2727,7 @@ components: properties: flagged: type: boolean - description: Whether the content violates [OpenAI's usage policies](https://platform.openai.com/policies/usage-policies). + description: Whether any of the below categories are flagged. categories: $ref: "#/components/schemas/ModerationCategories" category_scores: @@ -3150,8 +3145,8 @@ components: properties: code: type: string - description: One of `server_error` or `rate_limit_exceeded`. - enum: [ "server_error", "rate_limit_exceeded" ] + description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. + enum: ["server_error", "rate_limit_exceeded", "invalid_prompt"] message: type: string description: A human-readable description of the error. diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 4e74cf94..a370a155 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -32,7 +32,7 @@ tags: - name: Models description: List and describe the various models available in the API. - name: Moderations - description: Given a input text, outputs if the model classifies it as violating OpenAI's content policy. + description: Given an input text, outputs if the model classifies it as potentially harmful. 
paths: # Note: When adding an endpoint, make sure you also add it in the `groups` section, in the end of this file, # under the appropriate group @@ -115,7 +115,7 @@ paths: "id": "chatcmpl-123", "object": "chat.completion", "created": 1677652288, - "model": "gpt-3.5-turbo-0613", + "model": "gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices": [{ "index": 0, @@ -212,7 +212,7 @@ paths: "id": "chatcmpl-123", "object": "chat.completion", "created": 1677652288, - "model": "gpt-3.5-turbo-0613", + "model": "gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices": [{ "index": 0, @@ -287,19 +287,13 @@ paths: main(); response: &chat_completion_chunk_example | - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} - - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]} + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} .... 
- {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":" today"},"logprobs":null,"finish_reason":null}]} - - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}]} - - {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} - title: Functions request: curl: | @@ -416,7 +410,7 @@ paths: "id": "chatcmpl-abc123", "object": "chat.completion", "created": 1699896916, - "model": "gpt-3.5-turbo-0613", + "model": "gpt-3.5-turbo-0125", "choices": [ { "index": 0, @@ -498,7 +492,7 @@ paths: "id": "chatcmpl-123", "object": "chat.completion", "created": 1702685778, - "model": "gpt-3.5-turbo-0613", + "model": "gpt-3.5-turbo-0125", "choices": [ { "index": 0, @@ -1201,47 +1195,174 @@ paths: content: application/json: schema: - $ref: "#/components/schemas/CreateTranscriptionResponse" + oneOf: + - $ref: "#/components/schemas/CreateTranscriptionResponseJson" + - $ref: "#/components/schemas/CreateTranscriptionResponseVerboseJson" x-oaiMeta: name: Create transcription group: audio - returns: The transcribed text. + returns: The [transcription object](/docs/api-reference/audio/json-object) or a [verbose transcription object](/docs/api-reference/audio/verbose-json-object). examples: - request: - curl: | - curl https://api.openai.com/v1/audio/transcriptions \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -H "Content-Type: multipart/form-data" \ - -F file="@/path/to/file/audio.mp3" \ - -F model="whisper-1" - python: | - from openai import OpenAI - client = OpenAI() + - title: Default + request: + curl: | + curl https://api.openai.com/v1/audio/transcriptions \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/audio.mp3" \ + -F model="whisper-1" + python: | + from openai import OpenAI + client = OpenAI() - audio_file = open("speech.mp3", "rb") - transcript = client.audio.transcriptions.create( - model="whisper-1", - file=audio_file - ) - node: | - import fs from "fs"; - import OpenAI from "openai"; + audio_file = open("speech.mp3", "rb") + transcript = client.audio.transcriptions.create( + model="whisper-1", + file=audio_file + ) + node: | + import fs from "fs"; + import OpenAI from "openai"; - const openai = new OpenAI(); + const openai = new OpenAI(); - async function main() { - const transcription = await openai.audio.transcriptions.create({ - file: fs.createReadStream("audio.mp3"), - model: "whisper-1", - }); + async function main() { + const transcription = await openai.audio.transcriptions.create({ + file: fs.createReadStream("audio.mp3"), + model: "whisper-1", + }); - console.log(transcription.text); + console.log(transcription.text); + } + main(); + response: &basic_transcription_response_example | + { + "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. 
This is a place where you can get to do that." + } + - title: Word timestamps + request: + curl: | + curl https://api.openai.com/v1/audio/transcriptions \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/audio.mp3" \ + -F "timestamp_granularities[]=word" \ + -F model="whisper-1" \ + -F response_format="verbose_json" + python: | + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.transcriptions.create( + file=audio_file, + model="whisper-1", + response_format="verbose_json", + timestamp_granularities=["word"] + ) + + print(transcript.words) + node: | + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const transcription = await openai.audio.transcriptions.create({ + file: fs.createReadStream("audio.mp3"), + model: "whisper-1", + response_format: "verbose_json", + timestamp_granularities: ["word"] + }); + + console.log(transcription.text); + } + main(); + response: | + { + "task": "transcribe", + "language": "english", + "duration": 8.470000267028809, + "text": "The beach was a popular spot on a hot summer day. People were swimming in the ocean, building sandcastles, and playing beach volleyball.", + "words": [ + { + "word": "The", + "start": 0.0, + "end": 0.23999999463558197 + }, + ... + { + "word": "volleyball", + "start": 7.400000095367432, + "end": 7.900000095367432 + } + ] + } + - title: Segment timestamps + request: + curl: | + curl https://api.openai.com/v1/audio/transcriptions \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/audio.mp3" \ + -F "timestamp_granularities[]=segment" \ + -F model="whisper-1" \ + -F response_format="verbose_json" + python: | + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.transcriptions.create( + file=audio_file, + model="whisper-1", + response_format="verbose_json", + timestamp_granularities=["segment"] + ) + + print(transcript.segments) + node: | + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const transcription = await openai.audio.transcriptions.create({ + file: fs.createReadStream("audio.mp3"), + model: "whisper-1", + response_format: "verbose_json", + timestamp_granularities: ["segment"] + }); + + console.log(transcription.text); + } + main(); + response: &verbose_transcription_response_example | + { + "task": "transcribe", + "language": "english", + "duration": 8.470000267028809, + "text": "The beach was a popular spot on a hot summer day. People were swimming in the ocean, building sandcastles, and playing beach volleyball.", + "segments": [ + { + "id": 0, + "seek": 0, + "start": 0.0, + "end": 3.319999933242798, + "text": " The beach was a popular spot on a hot summer day.", + "tokens": [ + 50364, 440, 7534, 390, 257, 3743, 4008, 322, 257, 2368, 4266, 786, 13, 50530 + ], + "temperature": 0.0, + "avg_logprob": -0.2860786020755768, + "compression_ratio": 1.2363636493682861, + "no_speech_prob": 0.00985979475080967 + }, + ... + ] } - main(); - response: | - { - "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. 
- } /audio/translations: post: operationId: createTranslation @@ -1260,7 +1381,9 @@ paths: content: application/json: schema: - $ref: "#/components/schemas/CreateTranslationResponse" + oneOf: + - $ref: "#/components/schemas/CreateTranslationResponseJson" + - $ref: "#/components/schemas/CreateTranslationResponseVerboseJson" x-oaiMeta: name: Create translation group: audio @@ -1658,7 +1781,7 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0613", + "model": "gpt-3.5-turbo-0125", "created_at": 1614807352, "fine_tuned_model": null, "organization_id": "org-123", @@ -1711,7 +1834,7 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0613", + "model": "gpt-3.5-turbo-0125", "created_at": 1614807352, "fine_tuned_model": null, "organization_id": "org-123", @@ -1760,7 +1883,7 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0613", + "model": "gpt-3.5-turbo-0125", "created_at": 1614807352, "fine_tuned_model": null, "organization_id": "org-123", @@ -2056,7 +2179,7 @@ paths: { "object": "fine_tuning.job", "id": "ftjob-abc123", - "model": "gpt-3.5-turbo-0613", + "model": "gpt-3.5-turbo-0125", "created_at": 1689376978, "fine_tuned_model": null, "organization_id": "org-123", @@ -2247,7 +2370,7 @@ paths: operationId: createModeration tags: - Moderations - summary: Classifies if text violates OpenAI's Content Policy + summary: Classifies if text is potentially harmful. requestBody: required: true content: @@ -2278,7 +2401,8 @@ paths: from openai import OpenAI client = OpenAI() - client.moderations.create(input="I want to kill them.") + moderation = client.moderations.create(input="I want to kill them.") + print(moderation) node.js: | import OpenAI from "openai"; @@ -5817,11 +5941,6 @@ components: maximum: 2 nullable: true description: *completions_frequency_penalty_description - instance_id: - type: string - default: null - nullable: true - description: An unique identifier to a custom instance to execute the request. The requesting organization is required to have access to the instance. logit_bias: type: object x-oaiTypeLabel: map @@ -5839,10 +5958,10 @@ components: default: false nullable: true top_logprobs: - description: An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. type: integer minimum: 0 - maximum: 5 + maximum: 20 nullable: true max_tokens: description: | @@ -6117,7 +6236,7 @@ components: description: The token. type: string logprob: &chat_completion_response_logprobs_token_logprob - description: The log probability of this token. + description: The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. type: number bytes: &chat_completion_response_logprobs_bytes description: A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. 
@@ -6267,7 +6386,7 @@ components: default: "url" example: "url" nullable: true - description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. size: &images_size type: string enum: ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"] @@ -6421,7 +6540,7 @@ components: CreateModerationResponse: type: object - description: Represents policy compliance report by OpenAI's content moderation model against a given input. + description: Represents if a given text input is potentially harmful. properties: id: type: string @@ -6437,7 +6556,7 @@ components: properties: flagged: type: boolean - description: Whether the content violates [OpenAI's usage policies](/policies/usage-policies). + description: Whether any of the below categories are flagged. categories: type: object description: A list of the categories, and whether they are flagged or not. @@ -6814,7 +6933,7 @@ components: format: binary model: description: | - ID of the model to use. Only `whisper-1` is currently available. + ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. example: whisper-1 anyOf: - type: string @@ -6847,7 +6966,7 @@ components: default: 0 timestamp_granularities[]: description: | - The timestamp granularities to populate for this transcription. Any of these options: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. + The timestamp granularities to populate for this transcription. `response_format` must be set to `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word` or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. type: array items: type: string @@ -6860,13 +6979,117 @@ components: - model # Note: This does not currently support the non-default response format types. - CreateTranscriptionResponse: + CreateTranscriptionResponseJson: type: object + description: Represents a transcription response returned by the model, based on the provided input. properties: text: type: string + description: The transcribed text. + required: + - text + x-oaiMeta: + name: The transcription object + group: audio + example: *basic_transcription_response_example + + TranscriptionSegment: + type: object + properties: + id: + type: integer + description: Unique identifier of the segment. + seek: + type: integer + description: Seek offset of the segment. + start: + type: number + format: float + description: Start time of the segment in seconds. + end: + type: number + format: float + description: End time of the segment in seconds. + text: + type: string + description: Text content of the segment. + tokens: + type: array + items: + type: integer + description: Array of token IDs for the text content. + temperature: + type: number + format: float + description: Temperature parameter used for generating the segment. + avg_logprob: + type: number + format: float + description: Average logprob of the segment. If the value is lower than -1, consider the logprobs failed. + compression_ratio: + type: number + format: float + description: Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed. 
+ no_speech_prob: + type: number + format: float + description: Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent. required: + - id + - seek + - start + - end - text + - tokens + - temperature + - avg_logprob + - compression_ratio + - no_speech_prob + + TranscriptionWord: + type: object + properties: + word: + type: string + description: The text content of the word. + start: + type: number + format: float + description: Start time of the word in seconds. + end: + type: number + format: float + description: End time of the word in seconds. + required: [word, start, end] + + CreateTranscriptionResponseVerboseJson: + type: object + description: Represents a verbose json transcription response returned by the model, based on the provided input. + properties: + language: + type: string + description: The language of the input audio. + duration: + type: string + description: The duration of the input audio. + text: + type: string + description: The transcribed text. + words: + type: array + description: Extracted words and their corresponding timestamps. + items: + $ref: '#/components/schemas/TranscriptionWord' + segments: + type: array + description: Segments of the transcribed text and their corresponding details. + items: + $ref: '#/components/schemas/TranscriptionSegment' + required: [language, duration, text] + x-oaiMeta: + name: The transcription object + group: audio + example: *verbose_transcription_response_example CreateTranslationRequest: type: object @@ -6880,7 +7103,7 @@ components: format: binary model: description: | - ID of the model to use. Only `whisper-1` is currently available. + ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. example: whisper-1 anyOf: - type: string @@ -6906,7 +7129,7 @@ components: - model # Note: This does not currently support the non-default response format types. - CreateTranslationResponse: + CreateTranslationResponseJson: type: object properties: text: @@ -6914,6 +7137,25 @@ components: required: - text + CreateTranslationResponseVerboseJson: + type: object + properties: + language: + type: string + description: The language of the output translation (always `english`). + duration: + type: string + description: The duration of the input audio. + text: + type: string + description: The translated text. + segments: + type: array + description: Segments of the translated text and their corresponding details. + items: + $ref: '#/components/schemas/TranscriptionSegment' + required: [language, duration, text] + + CreateSpeechRequest: type: object additionalProperties: false @@ -6935,10 +7177,10 @@ components: type: string enum: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"] response_format: - description: "The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`." + description: "The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`." default: "mp3" type: string - enum: ["mp3", "opus", "aac", "flac"] + enum: ["mp3", "opus", "aac", "flac", "wav", "pcm"] speed: description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." type: number @@ -7595,8 +7837,8 @@ components: properties: code: type: string - description: One of `server_error` or `rate_limit_exceeded`. - enum: ["server_error", "rate_limit_exceeded"] + description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. 
+ enum: ["server_error", "rate_limit_exceeded", "invalid_prompt"] message: type: string description: A human-readable description of the error. @@ -8726,6 +8968,12 @@ x-oaiMeta: - type: endpoint key: createTranslation path: createTranslation + - type: object + key: CreateTranscriptionResponseJson + path: json-object + - type: object + key: CreateTranscriptionResponseVerboseJson + path: verbose-json-object - id: chat title: Chat description: | @@ -8845,7 +9093,7 @@ x-oaiMeta: - id: moderations title: Moderations description: | - Given a input text, outputs if the model classifies it as violating OpenAI's content policy. + Given some input text, outputs if the model classifies it as potentially harmful across several categories. Related guide: [Moderations](/docs/guides/moderation) sections: @@ -8996,7 +9244,7 @@ x-oaiMeta: - id: completions title: Completions legacy: true description: | - Given a prompt, the model will return one or more predicted completions along with the probabilities of alternative tokens at each position. Most developer should use our [Chat Completions API](/docs/guides/text-generation/text-generation-models) to leverage our best and newest models. Most models that support the legacy Completions endpoint [will be shut off on January 4th, 2024](/docs/deprecations/2023-07-06-gpt-and-embeddings). + Given a prompt, the model will return one or more predicted completions along with the probabilities of alternative tokens at each position. Most developers should use our [Chat Completions API](/docs/guides/text-generation/text-generation-models) to leverage our best and newest models. sections: - type: endpoint key: createCompletion
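For the Dart surface of these changes, a minimal usage sketch follows. It assumes the README-style `openai_dart` client calls (client construction, `createChatCompletion`, `endSession`) and uses a placeholder API key, model ID, and prompt; only the raised `topLogprobs` ceiling and the `-9999.0` sentinel come from this diff.

```dart
import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  // Placeholder key; supply your own.
  final client = OpenAIClient(apiKey: 'OPENAI_API_KEY');

  final res = await client.createChatCompletion(
    request: CreateChatCompletionRequest(
      model: ChatCompletionModel.modelId('gpt-3.5-turbo'),
      messages: [
        ChatCompletionMessage.user(
          content: ChatCompletionUserMessageContent.string('Say hello!'),
        ),
      ],
      logprobs: true,
      // Previously capped at 5; this version raises the maximum to 20.
      topLogprobs: 20,
    ),
  );

  for (final t in res.choices.first.logprobs?.content ?? const []) {
    // Tokens outside the top 20 are reported with the sentinel -9999.0.
    final note = t.logprob <= -9999.0 ? ' (very unlikely)' : '';
    print('${t.token}: ${t.logprob}$note');
  }

  client.endSession();
}
```

On the Assistants side, a run whose prompt is rejected now reports `RunLastErrorCode.invalidPrompt` through the run's `lastError`, alongside the existing `serverError` and `rateLimitExceeded` values.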