diff --git a/packages/openai_dart/README.md b/packages/openai_dart/README.md index f020d128..76dcd335 100644 --- a/packages/openai_dart/README.md +++ b/packages/openai_dart/README.md @@ -16,7 +16,7 @@ Unofficial Dart client for [OpenAI](https://platform.openai.com/docs/api-referen - Custom base URL, headers and query params support (e.g. HTTP proxies) - Custom HTTP client support (e.g. SOCKS5 proxies or advanced use cases) - Partial Azure OpenAI API support -- It can be used to consume OpenAI-compatible APIs like [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), etc. +- It can be used to consume OpenAI-compatible APIs like [TogetherAI](https://www.together.ai/), [Anyscale](https://www.anyscale.com/), [OpenRouter](https://openrouter.ai), [One API](https://github.com/songquanpeng/one-api), [Groq](https://groq.com/), [Llamafile](https://llamafile.ai/), [GPT4All](https://gpt4all.io/), [FastChat](https://github.com/lm-sys/FastChat), etc. **Supported endpoints:** diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart index 18cab5fa..724f4066 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart @@ -24,7 +24,7 @@ class CreateChatCompletionStreamResponse required List choices, /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - required int created, + @JsonKey(includeIfNull: false) int? created, /// The model to generate the completion. @JsonKey(includeIfNull: false) String? model, @@ -36,7 +36,7 @@ class CreateChatCompletionStreamResponse String? systemFingerprint, /// The object type, which is always `chat.completion.chunk`. - required String object, + @JsonKey(includeIfNull: false) String? object, /// Usage statistics for the completion request. @JsonKey(includeIfNull: false) CompletionUsage? usage, diff --git a/packages/openai_dart/lib/src/generated/schema/function_object.dart b/packages/openai_dart/lib/src/generated/schema/function_object.dart index 8049253e..647b4e0a 100644 --- a/packages/openai_dart/lib/src/generated/schema/function_object.dart +++ b/packages/openai_dart/lib/src/generated/schema/function_object.dart @@ -21,7 +21,7 @@ class FunctionObject with _$FunctionObject { /// A description of what the function does, used by the model to choose when and how to call the function. @JsonKey(includeIfNull: false) String? description, - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. 
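The README addition above is the motivation for the schema changes in this diff: FastChat exposes an OpenAI-compatible endpoint, so the client only needs a custom base URL to reach it. Below is a minimal sketch of that setup, assuming the `OpenAIClient(apiKey:, baseUrl:)` constructor and request builders shown in the package README; the localhost URL, API key, and model id are placeholders for a local FastChat deployment.

```dart
import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  // Any OpenAI-compatible server can be targeted by overriding the base URL.
  final client = OpenAIClient(
    apiKey: 'not-needed-for-local-servers', // placeholder
    baseUrl: 'http://localhost:8000/v1', // placeholder FastChat endpoint
  );

  final res = await client.createChatCompletion(
    request: CreateChatCompletionRequest(
      model: ChatCompletionModel.modelId('vicuna-7b-v1.5'), // placeholder model
      messages: [
        ChatCompletionMessage.system(content: 'You are a helpful assistant.'),
        ChatCompletionMessage.user(
          content: ChatCompletionUserMessageContent.string('Hello!'),
        ),
      ],
    ),
  );

  print(res.choices.first.message.content);
}
```

TogetherAI, Anyscale, OpenRouter, One API, Groq, Llamafile, and GPT4All can be targeted the same way, differing only in base URL, API key, and model id.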
@JsonKey(includeIfNull: false) FunctionParameters? parameters, diff --git a/packages/openai_dart/lib/src/generated/schema/function_parameters.dart b/packages/openai_dart/lib/src/generated/schema/function_parameters.dart index abd11036..2429f8ba 100644 --- a/packages/openai_dart/lib/src/generated/schema/function_parameters.dart +++ b/packages/openai_dart/lib/src/generated/schema/function_parameters.dart @@ -8,7 +8,7 @@ part of open_a_i_schema; // TYPE: FunctionParameters // ========================================== -/// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. +/// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. typedef FunctionParameters = Map; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 1395bc5a..16efa483 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -6695,7 +6695,7 @@ mixin _$FunctionObject { @JsonKey(includeIfNull: false) String? get description => throw _privateConstructorUsedError; - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @JsonKey(includeIfNull: false) @@ -6821,12 +6821,12 @@ class _$FunctionObjectImpl extends _FunctionObject { @JsonKey(includeIfNull: false) final String? description; - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. final Map? _parameters; - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. 
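Since `FunctionParameters` is a plain map holding a JSON Schema object, declaring a function tool looks roughly like the sketch below. The `FunctionObject`, `ChatCompletionTool`, and `ChatCompletionToolType` names follow this package's generated schema; the weather schema itself mirrors the example used later in the OpenAPI spec.

```dart
import 'package:openai_dart/openai_dart.dart';

// JSON Schema description of the function's arguments. FunctionParameters is
// just a Map<String, dynamic>, so any valid JSON Schema object can be used.
final weatherFunction = FunctionObject(
  name: 'get_current_weather',
  description: 'Get the current weather in a given location',
  parameters: {
    'type': 'object',
    'properties': {
      'location': {
        'type': 'string',
        'description': 'The city and state, e.g. San Francisco, CA',
      },
      'unit': {
        'type': 'string',
        'enum': ['celsius', 'fahrenheit'],
      },
    },
    'required': ['location'],
  },
);

// The function is passed to the model as a tool on the request.
final request = CreateChatCompletionRequest(
  model: ChatCompletionModel.modelId('gpt-4-turbo'),
  messages: [
    ChatCompletionMessage.user(
      content: ChatCompletionUserMessageContent.string(
        "What's the weather like in Boston today?",
      ),
    ),
  ],
  tools: [
    ChatCompletionTool(
      type: ChatCompletionToolType.function,
      function: weatherFunction,
    ),
  ],
);
```

Omitting `parameters` entirely defines a function with an empty parameter list, as the doc comment notes.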
+ /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @override @@ -6898,7 +6898,7 @@ abstract class _FunctionObject extends FunctionObject { String? get description; @override - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. /// /// Omitting `parameters` defines a function with an empty parameter list. @JsonKey(includeIfNull: false) @@ -9004,7 +9004,8 @@ mixin _$CreateChatCompletionStreamResponse { throw _privateConstructorUsedError; /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - int get created => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + int? get created => throw _privateConstructorUsedError; /// The model to generate the completion. @JsonKey(includeIfNull: false) @@ -9017,7 +9018,8 @@ mixin _$CreateChatCompletionStreamResponse { String? get systemFingerprint => throw _privateConstructorUsedError; /// The object type, which is always `chat.completion.chunk`. - String get object => throw _privateConstructorUsedError; + @JsonKey(includeIfNull: false) + String? get object => throw _privateConstructorUsedError; /// Usage statistics for the completion request. @JsonKey(includeIfNull: false) @@ -9041,11 +9043,11 @@ abstract class $CreateChatCompletionStreamResponseCopyWith<$Res> { $Res call( {@JsonKey(includeIfNull: false) String? id, List choices, - int created, + @JsonKey(includeIfNull: false) int? created, @JsonKey(includeIfNull: false) String? model, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, - String object, + @JsonKey(includeIfNull: false) String? object, @JsonKey(includeIfNull: false) CompletionUsage? usage}); $CompletionUsageCopyWith<$Res>? get usage; @@ -9067,10 +9069,10 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, $Res call({ Object? id = freezed, Object? choices = null, - Object? created = null, + Object? created = freezed, Object? model = freezed, Object? systemFingerprint = freezed, - Object? object = null, + Object? object = freezed, Object? usage = freezed, }) { return _then(_value.copyWith( @@ -9082,10 +9084,10 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, ? _value.choices : choices // ignore: cast_nullable_to_non_nullable as List, - created: null == created + created: freezed == created ? _value.created : created // ignore: cast_nullable_to_non_nullable - as int, + as int?, model: freezed == model ? _value.model : model // ignore: cast_nullable_to_non_nullable @@ -9094,10 +9096,10 @@ class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, ? 
_value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable as String?, - object: null == object + object: freezed == object ? _value.object : object // ignore: cast_nullable_to_non_nullable - as String, + as String?, usage: freezed == usage ? _value.usage : usage // ignore: cast_nullable_to_non_nullable @@ -9130,11 +9132,11 @@ abstract class _$$CreateChatCompletionStreamResponseImplCopyWith<$Res> $Res call( {@JsonKey(includeIfNull: false) String? id, List choices, - int created, + @JsonKey(includeIfNull: false) int? created, @JsonKey(includeIfNull: false) String? model, @JsonKey(name: 'system_fingerprint', includeIfNull: false) String? systemFingerprint, - String object, + @JsonKey(includeIfNull: false) String? object, @JsonKey(includeIfNull: false) CompletionUsage? usage}); @override @@ -9156,10 +9158,10 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> $Res call({ Object? id = freezed, Object? choices = null, - Object? created = null, + Object? created = freezed, Object? model = freezed, Object? systemFingerprint = freezed, - Object? object = null, + Object? object = freezed, Object? usage = freezed, }) { return _then(_$CreateChatCompletionStreamResponseImpl( @@ -9171,10 +9173,10 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> ? _value._choices : choices // ignore: cast_nullable_to_non_nullable as List, - created: null == created + created: freezed == created ? _value.created : created // ignore: cast_nullable_to_non_nullable - as int, + as int?, model: freezed == model ? _value.model : model // ignore: cast_nullable_to_non_nullable @@ -9183,10 +9185,10 @@ class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> ? _value.systemFingerprint : systemFingerprint // ignore: cast_nullable_to_non_nullable as String?, - object: null == object + object: freezed == object ? _value.object : object // ignore: cast_nullable_to_non_nullable - as String, + as String?, usage: freezed == usage ? _value.usage : usage // ignore: cast_nullable_to_non_nullable @@ -9202,11 +9204,11 @@ class _$CreateChatCompletionStreamResponseImpl const _$CreateChatCompletionStreamResponseImpl( {@JsonKey(includeIfNull: false) this.id, required final List choices, - required this.created, + @JsonKey(includeIfNull: false) this.created, @JsonKey(includeIfNull: false) this.model, @JsonKey(name: 'system_fingerprint', includeIfNull: false) this.systemFingerprint, - required this.object, + @JsonKey(includeIfNull: false) this.object, @JsonKey(includeIfNull: false) this.usage}) : _choices = choices, super._(); @@ -9235,7 +9237,8 @@ class _$CreateChatCompletionStreamResponseImpl /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. @override - final int created; + @JsonKey(includeIfNull: false) + final int? created; /// The model to generate the completion. @override @@ -9251,7 +9254,8 @@ class _$CreateChatCompletionStreamResponseImpl /// The object type, which is always `chat.completion.chunk`. @override - final String object; + @JsonKey(includeIfNull: false) + final String? object; /// Usage statistics for the completion request. @override @@ -9311,11 +9315,11 @@ abstract class _CreateChatCompletionStreamResponse const factory _CreateChatCompletionStreamResponse( {@JsonKey(includeIfNull: false) final String? id, required final List choices, - required final int created, + @JsonKey(includeIfNull: false) final int? created, @JsonKey(includeIfNull: false) final String? 
model, @JsonKey(name: 'system_fingerprint', includeIfNull: false) final String? systemFingerprint, - required final String object, + @JsonKey(includeIfNull: false) final String? object, @JsonKey(includeIfNull: false) final CompletionUsage? usage}) = _$CreateChatCompletionStreamResponseImpl; const _CreateChatCompletionStreamResponse._() : super._(); @@ -9337,7 +9341,8 @@ abstract class _CreateChatCompletionStreamResponse @override /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. - int get created; + @JsonKey(includeIfNull: false) + int? get created; @override /// The model to generate the completion. @@ -9353,7 +9358,8 @@ abstract class _CreateChatCompletionStreamResponse @override /// The object type, which is always `chat.completion.chunk`. - String get object; + @JsonKey(includeIfNull: false) + String? get object; @override /// Usage statistics for the completion request. diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 4062dc95..8b4963d6 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -835,10 +835,10 @@ _$CreateChatCompletionStreamResponseImpl .map((e) => ChatCompletionStreamResponseChoice.fromJson( e as Map)) .toList(), - created: json['created'] as int, + created: json['created'] as int?, model: json['model'] as String?, systemFingerprint: json['system_fingerprint'] as String?, - object: json['object'] as String, + object: json['object'] as String?, usage: json['usage'] == null ? null : CompletionUsage.fromJson(json['usage'] as Map), @@ -856,10 +856,10 @@ Map _$$CreateChatCompletionStreamResponseImplToJson( writeNotNull('id', instance.id); val['choices'] = instance.choices.map((e) => e.toJson()).toList(); - val['created'] = instance.created; + writeNotNull('created', instance.created); writeNotNull('model', instance.model); writeNotNull('system_fingerprint', instance.systemFingerprint); - val['object'] = instance.object; + writeNotNull('object', instance.object); writeNotNull('usage', instance.usage?.toJson()); return val; } diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index f3eb8a26..07b38bb8 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -2212,7 +2212,7 @@ components: - name FunctionParameters: type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." 
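With `created` and `object` now nullable on `CreateChatCompletionStreamResponse`, callers streaming from servers that omit those fields (FastChat, per the curated-spec comments below) should guard before using them. A hedged sketch, assuming `createChatCompletionStream` behaves as in the package README; the base URL and model id are placeholders.

```dart
import 'dart:io';

import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  final client = OpenAIClient(baseUrl: 'http://localhost:8000/v1'); // placeholder

  final stream = client.createChatCompletionStream(
    request: CreateChatCompletionRequest(
      model: ChatCompletionModel.modelId('vicuna-7b-v1.5'), // placeholder model
      messages: [
        ChatCompletionMessage.user(
          content: ChatCompletionUserMessageContent.string('Hello!'),
        ),
      ],
    ),
  );

  await for (final chunk in stream) {
    // `created` and `object` are nullable: some compatible servers omit them
    // on certain models, so guard (or use null-aware operators) before use.
    final created = chunk.created;
    if (created != null) {
      final createdAt = DateTime.fromMillisecondsSinceEpoch(created * 1000);
      stderr.writeln('[${chunk.object ?? 'chat.completion.chunk'}] $createdAt');
    }
    if (chunk.choices.isNotEmpty) {
      stdout.write(chunk.choices.first.delta.content ?? '');
    }
  }
}
```

The `includeIfNull: false` annotations keep serialized chunks identical to the previous output whenever the fields are present; only chunks that genuinely lack `created` or `object` change shape.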
additionalProperties: true ChatCompletionTool: type: object @@ -2426,10 +2426,10 @@ components: $ref: "#/components/schemas/CompletionUsage" required: - choices - - created + # - created # Made nullable to support FastChat API which doesn't return this field with some models # - id # Made nullable to support OpenRouter API which doesn't return this field with some models # - model # Made nullable to support TogetherAI API which doesn't return this field with some models - - object + # - object # Made nullable to support FastChat API which doesn't return this field with some models ChatCompletionStreamResponseChoice: type: object description: A choice the model generated for the input prompt. @@ -6128,7 +6128,12 @@ components: nullable: true BatchEndpoint: type: string - enum: [ "/v1/chat/completions", "/v1/embeddings", "/v1/completions" ] + enum: + [ + "/v1/chat/completions", + "/v1/embeddings", + "/v1/completions", + ] description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. BatchCompletionWindow: type: string diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index 2f18ad09..395d6481 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -87,7 +87,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + completion = client.chat.completions.create( model="VAR_model_id", messages=[ @@ -95,22 +95,22 @@ paths: {"role": "user", "content": "Hello!"} ] ) - + print(completion.choices[0].message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.chat.completions.create({ messages: [{ role: "system", content: "You are a helpful assistant." 
}], model: "VAR_model_id", }); - + console.log(completion.choices[0]); } - + main(); response: &chat_completion_example | { @@ -163,9 +163,9 @@ paths: }' python: | from openai import OpenAI - + client = OpenAI() - + response = client.chat.completions.create( model="gpt-4-turbo", messages=[ @@ -182,13 +182,13 @@ paths: ], max_tokens=300, ) - + print(response.choices[0]) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const response = await openai.chat.completions.create({ model: "gpt-4-turbo", @@ -254,7 +254,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + completion = client.chat.completions.create( model="VAR_model_id", messages=[ @@ -263,15 +263,15 @@ paths: ], stream=True ) - + for chunk in completion: print(chunk.choices[0].delta) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.chat.completions.create({ model: "VAR_model_id", @@ -281,20 +281,20 @@ paths: ], stream: true, }); - + for await (const chunk of completion) { console.log(chunk.choices[0].delta.content); } } - + main(); response: &chat_completion_chunk_example | {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} - + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} - + .... - + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} - title: Functions request: @@ -338,7 +338,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + tools = [ { "type": "function", @@ -366,13 +366,13 @@ paths: tools=tools, tool_choice="auto" ) - + print(completion) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]; const tools = [ @@ -395,17 +395,17 @@ paths: } } ]; - + const response = await openai.chat.completions.create({ model: "gpt-4-turbo", messages: messages, tools: tools, tool_choice: "auto", }); - + console.log(response); } - + main(); response: &chat_completion_function_example | { @@ -460,7 +460,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + completion = client.chat.completions.create( model="VAR_model_id", messages=[ @@ -469,14 +469,14 @@ paths: logprobs=True, top_logprobs=2 ) - + print(completion.choices[0].message) print(completion.choices[0].logprobs) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.chat.completions.create({ messages: [{ role: "user", content: "Hello!" 
}], @@ -484,10 +484,10 @@ paths: logprobs: true, top_logprobs: 2, }); - + console.log(completion.choices[0]); } - + main(); response: | { @@ -716,7 +716,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.completions.create( model="VAR_model_id", prompt="Say this is a test", @@ -725,9 +725,9 @@ paths: ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const completion = await openai.completions.create({ model: "VAR_model_id", @@ -735,7 +735,7 @@ paths: max_tokens: 7, temperature: 0, }); - + console.log(completion); } main(); @@ -776,7 +776,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + for chunk in client.completions.create( model="VAR_model_id", prompt="Say this is a test", @@ -787,16 +787,16 @@ paths: print(chunk.choices[0].text) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.completions.create({ model: "VAR_model_id", prompt: "Say this is a test.", stream: true, }); - + for await (const chunk of stream) { console.log(chunk.choices[0].text) } @@ -857,7 +857,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.images.generate( model="dall-e-3", prompt="A cute baby sea otter", @@ -866,12 +866,12 @@ paths: ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const image = await openai.images.generate({ model: "dall-e-3", prompt: "A cute baby sea otter" }); - + console.log(image.data); } main(); @@ -923,7 +923,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.images.edit( image=open("otter.png", "rb"), mask=open("mask.png", "rb"), @@ -934,16 +934,16 @@ paths: node.js: |- import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const image = await openai.images.edit({ image: fs.createReadStream("otter.png"), mask: fs.createReadStream("mask.png"), prompt: "A cute baby sea otter wearing a beret", }); - + console.log(image.data); } main(); @@ -993,7 +993,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + response = client.images.create_variation( image=open("image_edit_original.png", "rb"), n=2, @@ -1002,14 +1002,14 @@ paths: node.js: |- import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const image = await openai.images.createVariation({ image: fs.createReadStream("otter.png"), }); - + console.log(image.data); } main(); @@ -1063,7 +1063,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.embeddings.create( model="text-embedding-ada-002", input="The food was delicious and the waiter...", @@ -1071,19 +1071,19 @@ paths: ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const embedding = await openai.embeddings.create({ model: "text-embedding-ada-002", input: "The quick brown fox jumped over the lazy dog", encoding_format: "float", }); - + console.log(embedding); } - + main(); response: | { @@ -1151,7 +1151,7 @@ paths: python: | from pathlib import Path import openai - + speech_file_path = Path(__file__).parent / "speech.mp3" response = openai.audio.speech.create( model="tts-1", @@ -1163,11 +1163,11 @@ paths: import fs from "fs"; import path from "path"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + const speechFile = path.resolve("./speech.mp3"); - + async function main() { const mp3 = 
await openai.audio.speech.create({ model: "tts-1", @@ -1216,7 +1216,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.transcriptions.create( model="whisper-1", @@ -1225,15 +1225,15 @@ paths: node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const transcription = await openai.audio.transcriptions.create({ file: fs.createReadStream("audio.mp3"), model: "whisper-1", }); - + console.log(transcription.text); } main(); @@ -1254,7 +1254,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.transcriptions.create( file=audio_file, @@ -1262,14 +1262,14 @@ paths: response_format="verbose_json", timestamp_granularities=["word"] ) - + print(transcript.words) node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const transcription = await openai.audio.transcriptions.create({ file: fs.createReadStream("audio.mp3"), @@ -1277,7 +1277,7 @@ paths: response_format: "verbose_json", timestamp_granularities: ["word"] }); - + console.log(transcription.text); } main(); @@ -1314,7 +1314,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.transcriptions.create( file=audio_file, @@ -1322,14 +1322,14 @@ paths: response_format="verbose_json", timestamp_granularities=["segment"] ) - + print(transcript.words) node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const transcription = await openai.audio.transcriptions.create({ file: fs.createReadStream("audio.mp3"), @@ -1337,7 +1337,7 @@ paths: response_format: "verbose_json", timestamp_granularities: ["segment"] }); - + console.log(transcription.text); } main(); @@ -1401,7 +1401,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + audio_file = open("speech.mp3", "rb") transcript = client.audio.translations.create( model="whisper-1", @@ -1410,15 +1410,15 @@ paths: node: | import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const translation = await openai.audio.translations.create({ file: fs.createReadStream("speech.mp3"), model: "whisper-1", }); - + console.log(translation.text); } main(); @@ -1459,21 +1459,21 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.list() node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.files.list(); - + for await (const file of list) { console.log(file); } } - + main(); response: | { @@ -1503,13 +1503,13 @@ paths: - Files summary: | Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB. - + The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](/docs/assistants/tools) for details. - + The Fine-tuning API only supports `.jsonl` files. - + The Batch API only supports `.jsonl` files up to 100 MB in size. - + Please [contact us](https://help.openai.com/) if you need to increase these storage limits. 
requestBody: required: true @@ -1538,7 +1538,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.create( file=open("mydata.jsonl", "rb"), purpose="fine-tune" @@ -1546,18 +1546,18 @@ paths: node.js: |- import fs from "fs"; import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.create({ file: fs.createReadStream("mydata.jsonl"), purpose: "fine-tune", }); - + console.log(file); } - + main(); response: | { @@ -1601,19 +1601,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.delete("file-abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.del("file-abc123"); - + console.log(file); } - + main(); response: | { @@ -1652,19 +1652,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.files.retrieve("file-abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.retrieve("file-abc123"); - + console.log(file); } - + main(); response: | { @@ -1707,19 +1707,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + content = client.files.content("file-abc123") node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const file = await openai.files.content("file-abc123"); - + console.log(file); } - + main(); /fine_tuning/jobs: @@ -1729,9 +1729,9 @@ paths: - Fine-tuning summary: | Creates a fine-tuning job which begins the process of creating a new model from a given dataset. - + Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
- + [Learn more about fine-tuning](/docs/guides/fine-tuning) requestBody: required: true @@ -1764,24 +1764,24 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.create( training_file="file-abc123", model="gpt-3.5-turbo" ) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123" }); - + console.log(fineTune); } - + main(); response: | { @@ -1812,7 +1812,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.create( training_file="file-abc123", model="gpt-3.5-turbo", @@ -1822,19 +1822,19 @@ paths: ) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123", model: "gpt-3.5-turbo", hyperparameters: { n_epochs: 2 } }); - + console.log(fineTune); } - + main(); response: | { @@ -1864,7 +1864,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.create( training_file="file-abc123", validation_file="file-def456", @@ -1872,18 +1872,18 @@ paths: ) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.create({ training_file: "file-abc123", validation_file: "file-abc123" }); - + console.log(fineTune); } - + main(); response: | { @@ -1983,21 +1983,21 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.list() node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.fineTuning.jobs.list(); - + for await (const fineTune of list) { console.log(fineTune); } } - + main(); response: | { @@ -2023,7 +2023,7 @@ paths: - Fine-tuning summary: | Get info about a fine-tuning job. 
- + [Learn more about fine-tuning](/docs/guides/fine-tuning) parameters: - in: path @@ -2053,19 +2053,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.retrieve("ftjob-abc123") node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.retrieve("ftjob-abc123"); - + console.log(fineTune); } - + main(); response: &fine_tuning_example | { @@ -2140,24 +2140,24 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.list_events( fine_tuning_job_id="ftjob-abc123", limit=2 ) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.fineTuning.list_events(id="ftjob-abc123", limit=2); - + for await (const fineTune of list) { console.log(fineTune); } } - + main(); response: | { @@ -2219,16 +2219,16 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.fine_tuning.jobs.cancel("ftjob-abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const fineTune = await openai.fineTuning.jobs.cancel("ftjob-abc123"); - + console.log(fineTune); } main(); @@ -2352,16 +2352,16 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.models.list() node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.models.list(); - + for await (const model of list) { console.log(model); } @@ -2426,19 +2426,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.models.retrieve("VAR_model_id") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const model = await openai.models.retrieve("VAR_model_id"); - + console.log(model); } - + main(); response: &retrieve_model_response | { @@ -2480,16 +2480,16 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.models.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123") node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const model = await openai.models.del("ft:gpt-3.5-turbo:acemeco:suffix:abc123"); - + console.log(model); } main(); @@ -2535,17 +2535,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + moderation = client.moderations.create(input="I want to kill them.") print(moderation) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const moderation = await openai.moderations.create({ input: "I want to kill them." }); - + console.log(moderation); } main(); @@ -2643,7 +2643,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_assistants = client.beta.assistants.list( order="desc", limit="20", @@ -2651,18 +2651,18 @@ paths: print(my_assistants.data) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistants = await openai.beta.assistants.list({ order: "desc", limit: "20", }); - + console.log(myAssistants.data); } - + main(); response: &list_assistants_example | { @@ -2759,7 +2759,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_assistant = client.beta.assistants.create( instructions="You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.", name="Math Tutor", @@ -2769,9 +2769,9 @@ paths: print(my_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistant = await openai.beta.assistants.create({ instructions: @@ -2780,10 +2780,10 @@ paths: tools: [{ type: "code_interpreter" }], model: "gpt-4-turbo", }); - + console.log(myAssistant); } - + main(); response: &create_assistants_example | { @@ -2820,7 +2820,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_assistant = client.beta.assistants.create( instructions="You are an HR bot, and you have access to files to answer employee questions about company policies.", name="HR Helper", @@ -2831,9 +2831,9 @@ paths: print(my_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistant = await openai.beta.assistants.create({ instructions: @@ -2847,10 +2847,10 @@ paths: }, model: "gpt-4-turbo" }); - + console.log(myAssistant); } - + main(); response: | { @@ -2912,22 +2912,22 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_assistant = client.beta.assistants.retrieve("asst_abc123") print(my_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myAssistant = await openai.beta.assistants.retrieve( "asst_abc123" ); - + console.log(myAssistant); } - + main(); response: | { @@ -2993,7 +2993,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_updated_assistant = client.beta.assistants.update( "asst_abc123", instructions="You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.", @@ -3001,13 +3001,13 @@ paths: tools=[{"type": "file_search"}], model="gpt-4-turbo" ) - + print(my_updated_assistant) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myUpdatedAssistant = await openai.beta.assistants.update( "asst_abc123", @@ -3019,10 +3019,10 @@ paths: model: "gpt-4-turbo" } ); - + console.log(myUpdatedAssistant); } - + main(); response: | { @@ -3083,17 +3083,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + response = client.beta.assistants.delete("asst_abc123") print(response) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const response = await openai.beta.assistants.del("asst_abc123"); - + console.log(response); } main(); @@ -3139,20 +3139,20 @@ paths: python: | from openai import OpenAI client = OpenAI() - + empty_thread = client.beta.threads.create() print(empty_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const emptyThread = await openai.beta.threads.create(); - + console.log(emptyThread); } - + main(); response: | { @@ -3181,7 +3181,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + message_thread = client.beta.threads.create( messages=[ { @@ -3194,13 +3194,13 @@ paths: }, ] ) - + print(message_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const messageThread = await openai.beta.threads.create({ messages: [ @@ -3214,10 +3214,10 @@ paths: }, ], }); - + console.log(messageThread); } - + main(); response: | { @@ -3263,22 +3263,22 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_thread = client.beta.threads.retrieve("thread_abc123") print(my_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const myThread = await openai.beta.threads.retrieve( "thread_abc123" ); - + console.log(myThread); } - + main(); response: | { @@ -3338,7 +3338,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + my_updated_thread = client.beta.threads.update( "thread_abc123", metadata={ @@ -3349,9 +3349,9 @@ paths: print(my_updated_thread) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const updatedThread = await openai.beta.threads.update( "thread_abc123", @@ -3359,10 +3359,10 @@ paths: metadata: { modified: "true", user: "abc123" }, } ); - + console.log(updatedThread); } - + main(); response: | { @@ -3410,17 +3410,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + response = client.beta.threads.delete("thread_abc123") print(response) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const response = await openai.beta.threads.del("thread_abc123"); - + console.log(response); } main(); @@ -3496,22 +3496,22 @@ paths: python: | from openai import OpenAI client = OpenAI() - + thread_messages = client.beta.threads.messages.list("thread_abc123") print(thread_messages.data) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const threadMessages = await openai.beta.threads.messages.list( "thread_abc123" ); - + console.log(threadMessages.data); } - + main(); response: | { @@ -3606,7 +3606,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + thread_message = 
client.beta.threads.messages.create( "thread_abc123", role="user", @@ -3615,18 +3615,18 @@ paths: print(thread_message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const threadMessages = await openai.beta.threads.messages.create( "thread_abc123", { role: "user", content: "How does AI work? Explain it in simple terms." } ); - + console.log(threadMessages); } - + main(); response: | { @@ -3691,7 +3691,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + message = client.beta.threads.messages.retrieve( message_id="msg_abc123", thread_id="thread_abc123", @@ -3699,18 +3699,18 @@ paths: print(message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const message = await openai.beta.threads.messages.retrieve( "thread_abc123", "msg_abc123" ); - + console.log(message); } - + main(); response: | { @@ -3785,7 +3785,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + message = client.beta.threads.messages.update( message_id="msg_abc12", thread_id="thread_abc123", @@ -3797,9 +3797,9 @@ paths: print(message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const message = await openai.beta.threads.messages.update( "thread_abc123", @@ -3875,7 +3875,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + deleted_message = client.beta.threads.messages.delete( message_id="msg_abc12", thread_id="thread_abc123", @@ -3883,15 +3883,15 @@ paths: print(deleted_message) node.js: |- import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const deletedMessage = await openai.beta.threads.messages.del( "thread_abc123", "msg_abc123" ); - + console.log(deletedMessage); } response: | @@ -3901,7 +3901,6 @@ paths: "deleted": true } - /threads/runs: post: operationId: createThreadAndRun @@ -3945,7 +3944,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.create_and_run( assistant_id="asst_abc123", thread={ @@ -3954,13 +3953,13 @@ paths: ] } ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.createAndRun({ assistant_id: "asst_abc123", @@ -3970,10 +3969,10 @@ paths: ], }, }); - + console.log(run); } - + main(); response: | { @@ -4028,7 +4027,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + stream = client.beta.threads.create_and_run( assistant_id="asst_123", thread={ @@ -4038,14 +4037,14 @@ paths: }, stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.beta.threads.createAndRun({ assistant_id: "asst_123", @@ -4056,58 +4055,58 @@ paths: }, stream: true }); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.created data: {"id":"thread_123","object":"thread","created_at":1710348075,"metadata":{}} - + event: thread.run.created data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} - + event: thread.run.queued data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} - + event: thread.run.in_progress data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} - + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.message.created data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[], "metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[], "metadata":{}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} - + ... 
- + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} - + event: thread.message.completed data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! How can I assist you today?","annotations":[]}}], "metadata":{}} - + event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} - + event: thread.run.completed {"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto"} - + event: done data: [DONE] @@ -4153,7 +4152,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + tools = [ { "type": "function", @@ -4174,7 +4173,7 @@ paths: } } ] - + stream = client.beta.threads.create_and_run( thread={ "messages": [ @@ -4185,14 +4184,14 @@ paths: tools=tools, stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + const tools = [ { "type": "function", @@ -4213,7 +4212,7 @@ paths: } } ]; - + async function main() { const stream = await openai.beta.threads.createAndRun({ assistant_id: "asst_123", @@ -4225,52 +4224,52 @@ paths: tools: tools, stream: true }); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.created data: {"id":"thread_123","object":"thread","created_at":1710351818,"metadata":{}} - + event: thread.run.created data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.queued data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.in_progress data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"","output":null}}]}}} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"{\""}}]}}} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"location"}}]}}} - + ... - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"ahrenheit"}}]}}} - + event: thread.run.step.delta data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\"}"}}]}}} - + event: thread.run.requires_action data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto"}} - + event: done data: [DONE] @@ -4333,25 +4332,25 @@ paths: python: | from openai import OpenAI client = OpenAI() - + runs = client.beta.threads.runs.list( "thread_abc123" ) - + print(runs) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const runs = await openai.beta.threads.runs.list( "thread_abc123" ); - + console.log(runs); } - + main(); response: | { @@ -4498,27 +4497,27 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.create( thread_id="thread_abc123", assistant_id="asst_abc123" ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.create( "thread_abc123", { assistant_id: "asst_abc123" } ); - + console.log(run); } - + main(); response: &run_object_example | { @@ -4569,74 +4568,74 @@ paths: python: | from openai import OpenAI client = OpenAI() - + stream = client.beta.threads.runs.create( thread_id="thread_123", assistant_id="asst_123", stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.beta.threads.runs.create( "thread_123", { assistant_id: "asst_123", stream: true } ); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.run.created data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.queued data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.in_progress data: 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.message.created data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} - + ... - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} - + event: thread.message.completed data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710330642,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! 
How can I assist you today?","annotations":[]}}],"metadata":{}} - + event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710330642,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} - + event: thread.run.completed data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} - + event: done data: [DONE] @@ -4677,7 +4676,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + tools = [ { "type": "function", @@ -4698,21 +4697,21 @@ paths: } } ] - + stream = client.beta.threads.runs.create( thread_id="thread_abc123", assistant_id="asst_abc123", tools=tools, stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + const tools = [ { "type": "function", @@ -4733,7 +4732,7 @@ paths: } } ]; - + async function main() { const stream = await openai.beta.threads.runs.create( "thread_abc123", @@ -4743,55 +4742,55 @@ paths: stream: true } ); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.run.created data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.queued data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.in_progress data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.step.created data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - + event: thread.message.created data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} - + ... - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} - + event: thread.message.delta data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} - + event: thread.message.completed data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! 
How can I assist you today?","annotations":[]}}],"metadata":{}} - + event: thread.run.step.completed data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} - + event: thread.run.completed data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} - + event: done data: [DONE] @@ -4835,27 +4834,27 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.retrieve( thread_id="thread_abc123", run_id="run_abc123" ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.retrieve( "thread_abc123", "run_abc123" ); - + console.log(run); } - + main(); response: | { @@ -4947,19 +4946,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.update( thread_id="thread_abc123", run_id="run_abc123", metadata={"user_id": "user_abc123"}, ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.update( "thread_abc123", @@ -4970,10 +4969,10 @@ paths: }, } ); - + console.log(run); } - + main(); response: | { @@ -5082,7 +5081,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.submit_tool_outputs( thread_id="thread_123", run_id="run_123", @@ -5093,13 +5092,13 @@ paths: } ] ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.submitToolOutputs( "thread_123", @@ -5113,10 +5112,10 @@ paths: ], } ); - + console.log(run); } - + main(); response: | { @@ -5190,7 +5189,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + stream = client.beta.threads.runs.submit_tool_outputs( thread_id="thread_123", run_id="run_123", @@ -5202,14 +5201,14 @@ paths: ], stream=True ) - + for event in stream: print(event) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const stream = await openai.beta.threads.runs.submitToolOutputs( "thread_123", @@ -5223,61 +5222,61 @@ paths: ], } ); - + for await (const event of stream) { console.log(event); } } - + main(); response: | event: thread.run.step.completed data: 
{"id":"step_001","object":"thread.run.step","created_at":1710352449,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1710352475,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_iWr0kQ2EaYMaxNdl0v3KYkx7","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}","output":"70 degrees and sunny."}}]},"usage":{"prompt_tokens":291,"completion_tokens":24,"total_tokens":315}} - + event: thread.run.queued data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.in_progress data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} - + event: thread.run.step.created data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} - + event: thread.run.step.in_progress data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} - + event: thread.message.created data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.in_progress data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"The","annotations":[]}}]}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" current"}}]}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" weather"}}]}} - + ... 
- + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" sunny"}}]}} - + event: thread.message.delta data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"."}}]}} - + event: thread.message.completed data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710352477,"role":"assistant","content":[{"type":"text","text":{"value":"The current weather in San Francisco, CA is 70 degrees Fahrenheit and sunny.","annotations":[]}}],"metadata":{}} - + event: thread.run.step.completed data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710352477,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":{"prompt_tokens":329,"completion_tokens":18,"total_tokens":347}} - + event: thread.run.completed data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} - + event: done data: [DONE] @@ -5322,27 +5321,27 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run = client.beta.threads.runs.cancel( thread_id="thread_abc123", run_id="run_abc123" ) - + print(run) node.js: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const run = await openai.beta.threads.runs.cancel( "thread_abc123", "run_abc123" ); - + console.log(run); } - + main(); response: | { @@ -5442,17 +5441,17 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run_steps = client.beta.threads.runs.steps.list( thread_id="thread_abc123", run_id="run_abc123" ) - + print(run_steps) node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const runStep = await openai.beta.threads.runs.steps.list( "thread_abc123", @@ -5460,7 +5459,7 @@ paths: ); console.log(runStep); } - + main(); response: | { @@ -5545,18 +5544,18 @@ paths: python: | from openai import OpenAI client = OpenAI() - + run_step = client.beta.threads.runs.steps.retrieve( thread_id="thread_abc123", run_id="run_abc123", step_id="step_abc123" ) - + print(run_step) node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const runStep = await openai.beta.threads.runs.steps.retrieve( "thread_abc123", @@ -5565,7 +5564,7 @@ paths: ); console.log(runStep); } - + main(); response: &run_step_object_example | { @@ -5648,18 +5647,18 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_stores = client.beta.vector_stores.list() print(vector_stores) node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStores = await openai.beta.vectorStores.list(); console.log(vectorStores); } - + main(); response: | { @@ -5734,7 +5733,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store = client.beta.vector_stores.create( name="Support FAQ" ) @@ -5742,14 +5741,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStore = await openai.beta.vectorStores.create({ name: "Support FAQ" }); console.log(vectorStore); } - + main(); response: | { @@ -5802,7 +5801,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store = client.beta.vector_stores.retrieve( vector_store_id="vs_abc123" ) @@ -5810,14 +5809,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStore = await openai.beta.vectorStores.retrieve( "vs_abc123" ); console.log(vectorStore); } - + main(); response: | { @@ -5868,7 +5867,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store = client.beta.vector_stores.update( vector_store_id="vs_abc123", name="Support FAQ" @@ -5877,7 +5876,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStore = await openai.beta.vectorStores.update( "vs_abc123", @@ -5887,7 +5886,7 @@ paths: ); console.log(vectorStore); } - + main(); response: | { @@ -5940,7 +5939,7 @@ 
paths: python: | from openai import OpenAI client = OpenAI() - + deleted_vector_store = client.beta.vector_stores.delete( vector_store_id="vs_abc123" ) @@ -5948,14 +5947,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const deletedVectorStore = await openai.beta.vectorStores.del( "vs_abc123" ); console.log(deletedVectorStore); } - + main(); response: | { @@ -6029,7 +6028,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_files = client.beta.vector_stores.files.list( vector_store_id="vs_abc123" ) @@ -6037,14 +6036,14 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFiles = await openai.beta.vectorStores.files.list( "vs_abc123" ); console.log(vectorStoreFiles); } - + main(); response: | { @@ -6112,7 +6111,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file = client.beta.vector_stores.files.create( vector_store_id="vs_abc123", file_id="file-abc123" @@ -6121,7 +6120,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const myVectorStoreFile = await openai.beta.vectorStores.files.create( "vs_abc123", @@ -6131,7 +6130,7 @@ paths: ); console.log(myVectorStoreFile); } - + main(); response: | { @@ -6187,7 +6186,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file = client.beta.vector_stores.files.retrieve( vector_store_id="vs_abc123", file_id="file-abc123" @@ -6196,7 +6195,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFile = await openai.beta.vectorStores.files.retrieve( "vs_abc123", @@ -6204,7 +6203,7 @@ paths: ); console.log(vectorStoreFile); } - + main(); response: | { @@ -6256,7 +6255,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + deleted_vector_store_file = client.beta.vector_stores.files.delete( vector_store_id="vs_abc123", file_id="file-abc123" @@ -6265,7 +6264,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const deletedVectorStoreFile = await openai.beta.vectorStores.files.del( "vs_abc123", @@ -6273,7 +6272,7 @@ paths: ); console.log(deletedVectorStoreFile); } - + main(); response: | { @@ -6328,7 +6327,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file_batch = client.beta.vector_stores.file_batches.create( vector_store_id="vs_abc123", file_ids=["file-abc123", "file-abc456"] @@ -6337,7 +6336,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const myVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.create( "vs_abc123", @@ -6347,7 +6346,7 @@ paths: ); console.log(myVectorStoreFileBatch); } - + main(); response: | { @@ -6408,7 +6407,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_file_batch = client.beta.vector_stores.file_batches.retrieve( vector_store_id="vs_abc123", batch_id="vsfb_abc123" @@ -6417,7 +6416,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.retrieve( "vs_abc123", @@ -6425,7 +6424,7 @@ paths: ); console.log(vectorStoreFileBatch); } - + main(); response: | { @@ -6485,7 +6484,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + 
deleted_vector_store_file_batch = client.beta.vector_stores.file_batches.cancel( vector_store_id="vs_abc123", file_batch_id="vsfb_abc123" @@ -6494,7 +6493,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const deletedVectorStoreFileBatch = await openai.vector_stores.fileBatches.cancel( "vs_abc123", @@ -6502,7 +6501,7 @@ paths: ); console.log(deletedVectorStoreFileBatch); } - + main(); response: | { @@ -6591,7 +6590,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + vector_store_files = client.beta.vector_stores.file_batches.list_files( vector_store_id="vs_abc123", batch_id="vsfb_abc123" @@ -6600,7 +6599,7 @@ paths: node.js: | import OpenAI from "openai"; const openai = new OpenAI(); - + async function main() { const vectorStoreFiles = await openai.beta.vectorStores.fileBatches.listFiles( "vs_abc123", @@ -6608,7 +6607,7 @@ paths: ); console.log(vectorStoreFiles); } - + main(); response: | { @@ -6653,13 +6652,18 @@ paths: type: string description: | The ID of an uploaded file that contains requests for the new batch. - + See [upload file](/docs/api-reference/files/create) for how to upload a file. - + Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/requestInput), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. endpoint: type: string - enum: [ "/v1/chat/completions", "/v1/embeddings", "/v1/completions" ] + enum: + [ + "/v1/chat/completions", + "/v1/embeddings", + "/v1/completions", + ] description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. 
completion_window: type: string @@ -6696,7 +6700,7 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.create( input_file_id="file-abc123", endpoint="/v1/chat/completions", @@ -6704,19 +6708,19 @@ paths: ) node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const batch = await openai.batches.create({ input_file_id: "file-abc123", endpoint: "/v1/chat/completions", completion_window: "24h" }); - + console.log(batch); } - + main(); response: | { @@ -6787,21 +6791,21 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.list() node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const list = await openai.batches.list(); - + for await (const batch of list) { console.log(batch); } } - + main(); response: | { @@ -6876,19 +6880,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.retrieve("batch_abc123") node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const batch = await openai.batches.retrieve("batch_abc123"); - + console.log(batch); } - + main(); response: &batch_object | { @@ -6955,19 +6959,19 @@ paths: python: | from openai import OpenAI client = OpenAI() - + client.batches.cancel("batch_abc123") node: | import OpenAI from "openai"; - + const openai = new OpenAI(); - + async function main() { const batch = await openai.batches.cancel("batch_abc123"); - + console.log(batch); } - + main(); response: | { @@ -7076,7 +7080,7 @@ components: prompt: description: &completions_prompt_description | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. - + Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. default: "<|endoftext|>" nullable: true @@ -7110,9 +7114,9 @@ components: nullable: true description: &completions_best_of_description | Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. - + When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. - + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. echo: type: boolean @@ -7128,7 +7132,7 @@ components: nullable: true description: &completions_frequency_penalty_description | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) logit_bias: &completions_logit_bias type: object @@ -7139,9 +7143,9 @@ components: type: integer description: &completions_logit_bias_description | Modify the likelihood of specified tokens appearing in the completion. - + Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - + As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. logprobs: &completions_logprobs_configuration type: integer @@ -7151,7 +7155,7 @@ components: nullable: true description: &completions_logprobs_description | Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. - + The maximum value for `logprobs` is 5. max_tokens: type: integer @@ -7161,7 +7165,7 @@ components: nullable: true description: &completions_max_tokens_description | The maximum number of [tokens](/tokenizer) that can be generated in the completion. - + The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: type: integer @@ -7172,7 +7176,7 @@ components: nullable: true description: &completions_completions_description | How many completions to generate for each prompt. - + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. presence_penalty: type: number @@ -7182,7 +7186,7 @@ components: nullable: true description: &completions_presence_penalty_description | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) seed: &completions_seed_param type: integer @@ -7191,7 +7195,7 @@ components: nullable: true description: | If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. - + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. stop: description: &completions_stop_description > @@ -7221,7 +7225,7 @@ components: suffix: description: | The suffix that comes after a completion of inserted text. - + This parameter is only supported for `gpt-3.5-turbo-instruct`. default: null nullable: true @@ -7236,7 +7240,7 @@ components: nullable: true description: &completions_temperature_description | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - + We generally recommend altering this or `top_p` but not both. top_p: type: number @@ -7247,7 +7251,7 @@ components: nullable: true description: &completions_top_p_description | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - + We generally recommend altering this or `temperature` but not both. 
user: &end_user_param_configuration type: string @@ -7320,7 +7324,7 @@ components: type: string description: | This fingerprint represents the backend configuration that the model runs with. - + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. object: type: string @@ -7483,6 +7487,7 @@ components: type: object deprecated: true description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + nullable: true properties: arguments: type: string @@ -7538,7 +7543,7 @@ components: FunctionParameters: type: object - description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list." additionalProperties: true ChatCompletionFunctions: @@ -7601,7 +7606,7 @@ components: `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. - + `none` is the default when no tools are present. `auto` is the default if tools are present. oneOf: - type: string @@ -7831,7 +7836,7 @@ components: type: integer description: | Modify the likelihood of specified tokens appearing in the completion. - + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. logprobs: description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. @@ -7847,7 +7852,7 @@ components: max_tokens: description: | The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. - + The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. type: integer nullable: true @@ -7870,9 +7875,9 @@ components: type: object description: | An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. 
Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. properties: type: @@ -7943,12 +7948,12 @@ components: deprecated: true description: | Deprecated in favor of `tool_choice`. - + Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. - + `none` is the default when no functions are present. `auto` is the default if functions are present. oneOf: - type: string @@ -7962,7 +7967,7 @@ components: deprecated: true description: | Deprecated in favor of `tools`. - + A list of functions the model may generate JSON inputs for. type: array minItems: 1 @@ -8035,7 +8040,7 @@ components: type: string description: | This fingerprint represents the backend configuration that the model runs with. - + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. object: type: string @@ -8077,7 +8082,8 @@ components: description: &chat_completion_function_finish_reason_description | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. - enum: [ "stop", "length", "function_call", "content_filter" ] + enum: + [ "stop", "length", "function_call", "content_filter" ] index: type: integer description: The index of the choice in the list of choices. @@ -8093,7 +8099,7 @@ components: type: string description: | This fingerprint represents the backend configuration that the model runs with. - + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. object: type: string @@ -8429,7 +8435,7 @@ components: model: description: | Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. - + The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. nullable: false default: "text-moderation-latest" @@ -8597,10 +8603,10 @@ components: purpose: description: | The intended purpose of the uploaded file. - + Use "assistants" for [Assistants](/docs/api-reference/assistants) and [Message](/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](/docs/guides/batch), and "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning). 
type: string - enum: [ "assistants", "batch", "fine-tune" ] + enum: [ "assistants", "batch", "fine-tune", "vision" ] required: - file - purpose @@ -8636,11 +8642,11 @@ components: training_file: description: | The ID of an uploaded file that contains training data. - + See [upload file](/docs/api-reference/files/create) for how to upload a file. - + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. - + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. type: string example: "file-abc123" @@ -8684,7 +8690,7 @@ components: suffix: description: | A string of up to 18 characters that will be added to your fine-tuned model name. - + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. type: string minLength: 1 @@ -8694,14 +8700,14 @@ components: validation_file: description: | The ID of an uploaded file that contains validation data. - + If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should not be present in both train and validation files. - + Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. - + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. type: string nullable: true @@ -8980,7 +8986,7 @@ components: required: - text x-oaiMeta: - name: The transcription object + name: The transcription object (JSON) group: audio example: *basic_transcription_response_example @@ -9078,7 +9084,7 @@ components: $ref: "#/components/schemas/TranscriptionSegment" required: [ language, duration, text ] x-oaiMeta: - name: The transcription object + name: The transcription object (Verbose JSON) group: audio example: *verbose_transcription_response_example @@ -9240,7 +9246,7 @@ components: "batch_output", "fine-tune", "fine-tune-results", - "vision" + "vision", ] status: type: string @@ -9647,9 +9653,9 @@ components: AssistantsApiResponseFormatOption: description: | Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. - + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. oneOf: - type: string @@ -9770,7 +9776,7 @@ components: nullable: true description: &run_top_p_description | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - + We generally recommend altering this or temperature but not both. 
response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" @@ -9923,7 +9929,7 @@ components: nullable: true description: &run_top_p_description | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - + We generally recommend altering this or temperature but not both. response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" @@ -10014,7 +10020,7 @@ components: nullable: true description: &run_top_p_description | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - + We generally recommend altering this or temperature but not both. response_format: $ref: "#/components/schemas/AssistantsApiResponseFormatOption" @@ -10222,7 +10228,8 @@ components: code: type: string description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. - enum: [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] + enum: + [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] message: type: string description: A human-readable description of the error. @@ -10459,7 +10466,7 @@ components: nullable: true description: &run_top_p_description | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - + We generally recommend altering this or temperature but not both. stream: type: boolean @@ -12438,21 +12445,21 @@ components: AssistantStreamEvent: description: | Represents an event emitted when streaming a Run. - + Each event in a server-sent events stream has an `event` and `data` property: - + ``` event: thread.created data: {"id": "thread_123", "object": "thread", ...} ``` - + We emit events whenever a new object is created, transitions to a new state, or is being streamed in parts (deltas). For example, we emit `thread.run.created` when a new run is created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses to create a message during a run, we emit a `thread.message.created event`, a `thread.message.in_progress` event, many `thread.message.delta` events, and finally a `thread.message.completed` event. - + We may add additional events over time, so we recommend handling unknown events gracefully in your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to integrate the Assistants API with streaming. @@ -12550,6 +12557,19 @@ components: description: Occurs when a [run](/docs/api-reference/runs/object) is completed. x-oaiMeta: dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: [ "thread.run.incomplete" ] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) ends with status `incomplete`. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" - type: object properties: event: @@ -13037,7 +13057,7 @@ x-oaiMeta: title: Audio description: | Learn how to turn audio into text or text into audio. 
- + Related guide: [Speech to text](/docs/guides/speech-to-text) navigationGroup: endpoints sections: @@ -13060,7 +13080,7 @@ x-oaiMeta: title: Chat description: | Given a list of messages comprising a conversation, the model will return a response. - + Related guide: [Chat Completions](/docs/guides/text-generation) navigationGroup: endpoints sections: @@ -13077,7 +13097,7 @@ x-oaiMeta: title: Embeddings description: | Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. - + Related guide: [Embeddings](/docs/guides/embeddings) navigationGroup: endpoints sections: @@ -13091,7 +13111,7 @@ x-oaiMeta: title: Fine-tuning description: | Manage fine-tuning jobs to tailor a model to your specific training data. - + Related guide: [Fine-tune models](/docs/guides/fine-tuning) navigationGroup: endpoints sections: @@ -13126,7 +13146,7 @@ x-oaiMeta: title: Batch description: | Create large batches of API requests for asynchronous processing. The Batch API returns completions within 24 hours for a 50% discount. - + Related guide: [Batch](/docs/guides/batch) navigationGroup: endpoints sections: @@ -13179,7 +13199,7 @@ x-oaiMeta: title: Images description: | Given a prompt and/or an input image, the model will generate a new image. - + Related guide: [Image generation](/docs/guides/images) navigationGroup: endpoints sections: @@ -13217,7 +13237,7 @@ x-oaiMeta: title: Moderations description: | Given some input text, outputs if the model classifies it as potentially harmful across several categories. - + Related guide: [Moderations](/docs/guides/moderation) navigationGroup: endpoints sections: @@ -13232,7 +13252,7 @@ x-oaiMeta: beta: true description: | Build assistants that can call models and use tools to perform tasks. - + [Get started with the Assistants API](/docs/assistants) navigationGroup: assistants sections: @@ -13259,7 +13279,7 @@ x-oaiMeta: beta: true description: | Create threads that assistants can interact with. - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -13283,7 +13303,7 @@ x-oaiMeta: beta: true description: | Create messages within threads - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -13310,7 +13330,7 @@ x-oaiMeta: beta: true description: | Represents an execution run on a thread. - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -13343,7 +13363,7 @@ x-oaiMeta: beta: true description: | Represents the steps (model and tool calls) taken during the run. - + Related guide: [Assistants](/docs/assistants/overview) navigationGroup: assistants sections: @@ -13361,7 +13381,7 @@ x-oaiMeta: beta: true description: | Vector stores are used to store files for use by the `file_search` tool. - + Related guide: [File Search](/docs/assistants/tools/file-search) navigationGroup: assistants sections: @@ -13388,7 +13408,7 @@ x-oaiMeta: beta: true description: | Vector store files represent files inside a vector store. - + Related guide: [File Search](/docs/assistants/tools/file-search) navigationGroup: assistants sections: @@ -13412,7 +13432,7 @@ x-oaiMeta: beta: true description: | Vector store file batches represent operations to add multiple files to a vector store. 
- + Related guide: [File Search](/docs/assistants/tools/file-search) navigationGroup: assistants sections: @@ -13436,11 +13456,11 @@ x-oaiMeta: beta: true description: | Stream the result of executing a Run or resuming a Run after submitting tool outputs. - + You can stream events from the [Create Thread and Run](/docs/api-reference/runs/createThreadAndRun), [Create Run](/docs/api-reference/runs/createRun), and [Submit Tool Outputs](/docs/api-reference/runs/submitToolOutputs) endpoints by passing `"stream": true`. The response will be a [Server-Sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events) stream. - + Our Node and Python SDKs provide helpful utilities to make streaming easy. Reference the [Assistants API quickstart](/docs/assistants/overview) to learn more. navigationGroup: assistants