From c0b48acbd497f62fe7811903e17c71c0e5f15cac Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Fri, 12 Apr 2024 23:46:34 +0200 Subject: [PATCH] feat: Add support for checkpoints in fine-tuning jobs in openai_dart (#376) --- .../openai_dart/lib/src/generated/client.dart | 33 + .../schema/fine_tuning_job_checkpoint.dart | 154 + ..._fine_tuning_job_checkpoints_response.dart | 74 + .../lib/src/generated/schema/schema.dart | 2 + .../src/generated/schema/schema.freezed.dart | 1010 +++++++ .../lib/src/generated/schema/schema.g.dart | 107 + packages/openai_dart/oas/openapi_curated.yaml | 123 + .../openai_dart/oas/openapi_official.yaml | 2581 +++++++++++++++-- .../test/openai_client_fine_tuning_test.dart | 9 + 9 files changed, 3773 insertions(+), 320 deletions(-) create mode 100644 packages/openai_dart/lib/src/generated/schema/fine_tuning_job_checkpoint.dart create mode 100644 packages/openai_dart/lib/src/generated/schema/list_fine_tuning_job_checkpoints_response.dart diff --git a/packages/openai_dart/lib/src/generated/client.dart b/packages/openai_dart/lib/src/generated/client.dart index d4005e4c..58c67d15 100644 --- a/packages/openai_dart/lib/src/generated/client.dart +++ b/packages/openai_dart/lib/src/generated/client.dart @@ -570,6 +570,39 @@ class OpenAIClient { return FineTuningJob.fromJson(_jsonDecode(r)); } + // ------------------------------------------ + // METHOD: listFineTuningJobCheckpoints + // ------------------------------------------ + + /// List checkpoints for a fine-tuning job. + /// + /// `fineTuningJobId`: The ID of the fine-tuning job to get checkpoints for. + /// + /// `after`: Identifier for the last checkpoint ID from the previous pagination request. + /// + /// `limit`: Number of checkpoints to retrieve. + /// + /// `GET` `https://api.openai.com/v1/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints` + Future listFineTuningJobCheckpoints({ + required String fineTuningJobId, + String? 
after, + int limit = 10, + }) async { + final r = await makeRequest( + baseUrl: 'https://api.openai.com/v1', + path: '/fine_tuning/jobs/$fineTuningJobId/checkpoints', + method: HttpMethod.get, + isMultipart: false, + requestType: '', + responseType: 'application/json', + queryParams: { + if (after != null) 'after': after, + 'limit': limit, + }, + ); + return ListFineTuningJobCheckpointsResponse.fromJson(_jsonDecode(r)); + } + // ------------------------------------------ // METHOD: createImage // ------------------------------------------ diff --git a/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_checkpoint.dart b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_checkpoint.dart new file mode 100644 index 00000000..3d542a60 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_checkpoint.dart @@ -0,0 +1,154 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: FineTuningJobCheckpoint +// ========================================== + +/// The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. +@freezed +class FineTuningJobCheckpoint with _$FineTuningJobCheckpoint { + const FineTuningJobCheckpoint._(); + + /// Factory constructor for FineTuningJobCheckpoint + const factory FineTuningJobCheckpoint({ + /// The checkpoint identifier, which can be referenced in the API endpoints. + required String id, + + /// The Unix timestamp (in seconds) for when the checkpoint was created. + @JsonKey(name: 'created_at') required int createdAt, + + /// The name of the fine-tuned checkpoint model that is created. + @JsonKey(name: 'fine_tuned_model_checkpoint') + required String fineTunedModelCheckpoint, + + /// The step number that the checkpoint was created at. 
+ @JsonKey(name: 'step_number') required int stepNumber, + + /// Metrics at the step number during the fine-tuning job. + required FineTuningJobCheckpointMetrics metrics, + + /// The name of the fine-tuning job that this checkpoint was created from. + @JsonKey(name: 'fine_tuning_job_id') required String fineTuningJobId, + + /// The object type, which is always "fine_tuning.job.checkpoint". + required FineTuningJobCheckpointObject object, + }) = _FineTuningJobCheckpoint; + + /// Object construction from a JSON representation + factory FineTuningJobCheckpoint.fromJson(Map json) => + _$FineTuningJobCheckpointFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'id', + 'created_at', + 'fine_tuned_model_checkpoint', + 'step_number', + 'metrics', + 'fine_tuning_job_id', + 'object' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'id': id, + 'created_at': createdAt, + 'fine_tuned_model_checkpoint': fineTunedModelCheckpoint, + 'step_number': stepNumber, + 'metrics': metrics, + 'fine_tuning_job_id': fineTuningJobId, + 'object': object, + }; + } +} + +// ========================================== +// CLASS: FineTuningJobCheckpointMetrics +// ========================================== + +/// Metrics at the step number during the fine-tuning job. +@freezed +class FineTuningJobCheckpointMetrics with _$FineTuningJobCheckpointMetrics { + const FineTuningJobCheckpointMetrics._(); + + /// Factory constructor for FineTuningJobCheckpointMetrics + const factory FineTuningJobCheckpointMetrics({ + /// The step number that the metrics were recorded at. + @JsonKey(includeIfNull: false) double? step, + + /// The training loss at the step number. + @JsonKey(name: 'train_loss', includeIfNull: false) double? trainLoss, + + /// The training mean token accuracy at the step number. 
+ @JsonKey(name: 'train_mean_token_accuracy', includeIfNull: false) + double? trainMeanTokenAccuracy, + + /// The validation loss at the step number. + @JsonKey(name: 'valid_loss', includeIfNull: false) double? validLoss, + + /// The validation mean token accuracy at the step number. + @JsonKey(name: 'valid_mean_token_accuracy', includeIfNull: false) + double? validMeanTokenAccuracy, + + /// The full validation loss at the step number. + @JsonKey(name: 'full_valid_loss', includeIfNull: false) + double? fullValidLoss, + + /// The full validation mean token accuracy at the step number. + @JsonKey(name: 'full_valid_mean_token_accuracy', includeIfNull: false) + double? fullValidMeanTokenAccuracy, + }) = _FineTuningJobCheckpointMetrics; + + /// Object construction from a JSON representation + factory FineTuningJobCheckpointMetrics.fromJson(Map json) => + _$FineTuningJobCheckpointMetricsFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'step', + 'train_loss', + 'train_mean_token_accuracy', + 'valid_loss', + 'valid_mean_token_accuracy', + 'full_valid_loss', + 'full_valid_mean_token_accuracy' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'step': step, + 'train_loss': trainLoss, + 'train_mean_token_accuracy': trainMeanTokenAccuracy, + 'valid_loss': validLoss, + 'valid_mean_token_accuracy': validMeanTokenAccuracy, + 'full_valid_loss': fullValidLoss, + 'full_valid_mean_token_accuracy': fullValidMeanTokenAccuracy, + }; + } +} + +// ========================================== +// ENUM: FineTuningJobCheckpointObject +// ========================================== + +/// The object type, which is always "fine_tuning.job.checkpoint". 
+enum FineTuningJobCheckpointObject { + @JsonValue('fine_tuning.job.checkpoint') + fineTuningJobCheckpoint, +} diff --git a/packages/openai_dart/lib/src/generated/schema/list_fine_tuning_job_checkpoints_response.dart b/packages/openai_dart/lib/src/generated/schema/list_fine_tuning_job_checkpoints_response.dart new file mode 100644 index 00000000..0aa1698d --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/list_fine_tuning_job_checkpoints_response.dart @@ -0,0 +1,74 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: ListFineTuningJobCheckpointsResponse +// ========================================== + +/// Represents a list of fine-tuning job checkpoints. +@freezed +class ListFineTuningJobCheckpointsResponse + with _$ListFineTuningJobCheckpointsResponse { + const ListFineTuningJobCheckpointsResponse._(); + + /// Factory constructor for ListFineTuningJobCheckpointsResponse + const factory ListFineTuningJobCheckpointsResponse({ + /// The list of fine-tuning job checkpoints. + required List data, + + /// The object type, which is always "list". + required ListFineTuningJobCheckpointsResponseObject object, + + /// The ID of the first checkpoint in the list. + @JsonKey(name: 'first_id', includeIfNull: false) String? firstId, + + /// The ID of the last checkpoint in the list. + @JsonKey(name: 'last_id', includeIfNull: false) String? lastId, + + /// Whether there are more checkpoints to retrieve. 
+ @JsonKey(name: 'has_more') required bool hasMore, + }) = _ListFineTuningJobCheckpointsResponse; + + /// Object construction from a JSON representation + factory ListFineTuningJobCheckpointsResponse.fromJson( + Map json) => + _$ListFineTuningJobCheckpointsResponseFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'data', + 'object', + 'first_id', + 'last_id', + 'has_more' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'data': data, + 'object': object, + 'first_id': firstId, + 'last_id': lastId, + 'has_more': hasMore, + }; + } +} + +// ========================================== +// ENUM: ListFineTuningJobCheckpointsResponseObject +// ========================================== + +/// The object type, which is always "list". +enum ListFineTuningJobCheckpointsResponseObject { + @JsonValue('list') + list, +} diff --git a/packages/openai_dart/lib/src/generated/schema/schema.dart b/packages/openai_dart/lib/src/generated/schema/schema.dart index c582bdf2..c2f43e56 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.dart @@ -49,7 +49,9 @@ part 'fine_tuning_job_error.dart'; part 'fine_tuning_job_hyperparameters.dart'; part 'list_paginated_fine_tuning_jobs_response.dart'; part 'list_fine_tuning_job_events_response.dart'; +part 'list_fine_tuning_job_checkpoints_response.dart'; part 'fine_tuning_job_event.dart'; +part 'fine_tuning_job_checkpoint.dart'; part 'create_image_request.dart'; part 'images_response.dart'; part 'image.dart'; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index 1a8ba075..3cf108ce 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ 
b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -15608,6 +15608,297 @@ abstract class _ListFineTuningJobEventsResponse get copyWith => throw _privateConstructorUsedError; } +ListFineTuningJobCheckpointsResponse + _$ListFineTuningJobCheckpointsResponseFromJson(Map json) { + return _ListFineTuningJobCheckpointsResponse.fromJson(json); +} + +/// @nodoc +mixin _$ListFineTuningJobCheckpointsResponse { + /// The list of fine-tuning job checkpoints. + List get data => throw _privateConstructorUsedError; + + /// The object type, which is always "list". + ListFineTuningJobCheckpointsResponseObject get object => + throw _privateConstructorUsedError; + + /// The ID of the first checkpoint in the list. + @JsonKey(name: 'first_id', includeIfNull: false) + String? get firstId => throw _privateConstructorUsedError; + + /// The ID of the last checkpoint in the list. + @JsonKey(name: 'last_id', includeIfNull: false) + String? get lastId => throw _privateConstructorUsedError; + + /// Whether there are more checkpoints to retrieve. + @JsonKey(name: 'has_more') + bool get hasMore => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ListFineTuningJobCheckpointsResponseCopyWith< + ListFineTuningJobCheckpointsResponse> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ListFineTuningJobCheckpointsResponseCopyWith<$Res> { + factory $ListFineTuningJobCheckpointsResponseCopyWith( + ListFineTuningJobCheckpointsResponse value, + $Res Function(ListFineTuningJobCheckpointsResponse) then) = + _$ListFineTuningJobCheckpointsResponseCopyWithImpl<$Res, + ListFineTuningJobCheckpointsResponse>; + @useResult + $Res call( + {List data, + ListFineTuningJobCheckpointsResponseObject object, + @JsonKey(name: 'first_id', includeIfNull: false) String? firstId, + @JsonKey(name: 'last_id', includeIfNull: false) String? 
lastId, + @JsonKey(name: 'has_more') bool hasMore}); +} + +/// @nodoc +class _$ListFineTuningJobCheckpointsResponseCopyWithImpl<$Res, + $Val extends ListFineTuningJobCheckpointsResponse> + implements $ListFineTuningJobCheckpointsResponseCopyWith<$Res> { + _$ListFineTuningJobCheckpointsResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? object = null, + Object? firstId = freezed, + Object? lastId = freezed, + Object? hasMore = null, + }) { + return _then(_value.copyWith( + data: null == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as List, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as ListFineTuningJobCheckpointsResponseObject, + firstId: freezed == firstId + ? _value.firstId + : firstId // ignore: cast_nullable_to_non_nullable + as String?, + lastId: freezed == lastId + ? _value.lastId + : lastId // ignore: cast_nullable_to_non_nullable + as String?, + hasMore: null == hasMore + ? _value.hasMore + : hasMore // ignore: cast_nullable_to_non_nullable + as bool, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ListFineTuningJobCheckpointsResponseImplCopyWith<$Res> + implements $ListFineTuningJobCheckpointsResponseCopyWith<$Res> { + factory _$$ListFineTuningJobCheckpointsResponseImplCopyWith( + _$ListFineTuningJobCheckpointsResponseImpl value, + $Res Function(_$ListFineTuningJobCheckpointsResponseImpl) then) = + __$$ListFineTuningJobCheckpointsResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {List data, + ListFineTuningJobCheckpointsResponseObject object, + @JsonKey(name: 'first_id', includeIfNull: false) String? firstId, + @JsonKey(name: 'last_id', includeIfNull: false) String? 
lastId, + @JsonKey(name: 'has_more') bool hasMore}); +} + +/// @nodoc +class __$$ListFineTuningJobCheckpointsResponseImplCopyWithImpl<$Res> + extends _$ListFineTuningJobCheckpointsResponseCopyWithImpl<$Res, + _$ListFineTuningJobCheckpointsResponseImpl> + implements _$$ListFineTuningJobCheckpointsResponseImplCopyWith<$Res> { + __$$ListFineTuningJobCheckpointsResponseImplCopyWithImpl( + _$ListFineTuningJobCheckpointsResponseImpl _value, + $Res Function(_$ListFineTuningJobCheckpointsResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? object = null, + Object? firstId = freezed, + Object? lastId = freezed, + Object? hasMore = null, + }) { + return _then(_$ListFineTuningJobCheckpointsResponseImpl( + data: null == data + ? _value._data + : data // ignore: cast_nullable_to_non_nullable + as List, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as ListFineTuningJobCheckpointsResponseObject, + firstId: freezed == firstId + ? _value.firstId + : firstId // ignore: cast_nullable_to_non_nullable + as String?, + lastId: freezed == lastId + ? _value.lastId + : lastId // ignore: cast_nullable_to_non_nullable + as String?, + hasMore: null == hasMore + ? 
_value.hasMore + : hasMore // ignore: cast_nullable_to_non_nullable + as bool, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ListFineTuningJobCheckpointsResponseImpl + extends _ListFineTuningJobCheckpointsResponse { + const _$ListFineTuningJobCheckpointsResponseImpl( + {required final List data, + required this.object, + @JsonKey(name: 'first_id', includeIfNull: false) this.firstId, + @JsonKey(name: 'last_id', includeIfNull: false) this.lastId, + @JsonKey(name: 'has_more') required this.hasMore}) + : _data = data, + super._(); + + factory _$ListFineTuningJobCheckpointsResponseImpl.fromJson( + Map json) => + _$$ListFineTuningJobCheckpointsResponseImplFromJson(json); + + /// The list of fine-tuning job checkpoints. + final List _data; + + /// The list of fine-tuning job checkpoints. + @override + List get data { + if (_data is EqualUnmodifiableListView) return _data; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_data); + } + + /// The object type, which is always "list". + @override + final ListFineTuningJobCheckpointsResponseObject object; + + /// The ID of the first checkpoint in the list. + @override + @JsonKey(name: 'first_id', includeIfNull: false) + final String? firstId; + + /// The ID of the last checkpoint in the list. + @override + @JsonKey(name: 'last_id', includeIfNull: false) + final String? lastId; + + /// Whether there are more checkpoints to retrieve. 
+ @override + @JsonKey(name: 'has_more') + final bool hasMore; + + @override + String toString() { + return 'ListFineTuningJobCheckpointsResponse(data: $data, object: $object, firstId: $firstId, lastId: $lastId, hasMore: $hasMore)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ListFineTuningJobCheckpointsResponseImpl && + const DeepCollectionEquality().equals(other._data, _data) && + (identical(other.object, object) || other.object == object) && + (identical(other.firstId, firstId) || other.firstId == firstId) && + (identical(other.lastId, lastId) || other.lastId == lastId) && + (identical(other.hasMore, hasMore) || other.hasMore == hasMore)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + const DeepCollectionEquality().hash(_data), + object, + firstId, + lastId, + hasMore); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ListFineTuningJobCheckpointsResponseImplCopyWith< + _$ListFineTuningJobCheckpointsResponseImpl> + get copyWith => __$$ListFineTuningJobCheckpointsResponseImplCopyWithImpl< + _$ListFineTuningJobCheckpointsResponseImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ListFineTuningJobCheckpointsResponseImplToJson( + this, + ); + } +} + +abstract class _ListFineTuningJobCheckpointsResponse + extends ListFineTuningJobCheckpointsResponse { + const factory _ListFineTuningJobCheckpointsResponse( + {required final List data, + required final ListFineTuningJobCheckpointsResponseObject object, + @JsonKey(name: 'first_id', includeIfNull: false) final String? firstId, + @JsonKey(name: 'last_id', includeIfNull: false) final String? 
lastId, + @JsonKey(name: 'has_more') + required final bool + hasMore}) = _$ListFineTuningJobCheckpointsResponseImpl; + const _ListFineTuningJobCheckpointsResponse._() : super._(); + + factory _ListFineTuningJobCheckpointsResponse.fromJson( + Map json) = + _$ListFineTuningJobCheckpointsResponseImpl.fromJson; + + @override + + /// The list of fine-tuning job checkpoints. + List get data; + @override + + /// The object type, which is always "list". + ListFineTuningJobCheckpointsResponseObject get object; + @override + + /// The ID of the first checkpoint in the list. + @JsonKey(name: 'first_id', includeIfNull: false) + String? get firstId; + @override + + /// The ID of the last checkpoint in the list. + @JsonKey(name: 'last_id', includeIfNull: false) + String? get lastId; + @override + + /// Whether there are more checkpoints to retrieve. + @JsonKey(name: 'has_more') + bool get hasMore; + @override + @JsonKey(ignore: true) + _$$ListFineTuningJobCheckpointsResponseImplCopyWith< + _$ListFineTuningJobCheckpointsResponseImpl> + get copyWith => throw _privateConstructorUsedError; +} + FineTuningJobEvent _$FineTuningJobEventFromJson(Map json) { return _FineTuningJobEvent.fromJson(json); } @@ -15865,6 +16156,725 @@ abstract class _FineTuningJobEvent extends FineTuningJobEvent { throw _privateConstructorUsedError; } +FineTuningJobCheckpoint _$FineTuningJobCheckpointFromJson( + Map json) { + return _FineTuningJobCheckpoint.fromJson(json); +} + +/// @nodoc +mixin _$FineTuningJobCheckpoint { + /// The checkpoint identifier, which can be referenced in the API endpoints. + String get id => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the checkpoint was created. + @JsonKey(name: 'created_at') + int get createdAt => throw _privateConstructorUsedError; + + /// The name of the fine-tuned checkpoint model that is created. 
+ @JsonKey(name: 'fine_tuned_model_checkpoint') + String get fineTunedModelCheckpoint => throw _privateConstructorUsedError; + + /// The step number that the checkpoint was created at. + @JsonKey(name: 'step_number') + int get stepNumber => throw _privateConstructorUsedError; + + /// Metrics at the step number during the fine-tuning job. + FineTuningJobCheckpointMetrics get metrics => + throw _privateConstructorUsedError; + + /// The name of the fine-tuning job that this checkpoint was created from. + @JsonKey(name: 'fine_tuning_job_id') + String get fineTuningJobId => throw _privateConstructorUsedError; + + /// The object type, which is always "fine_tuning.job.checkpoint". + FineTuningJobCheckpointObject get object => + throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $FineTuningJobCheckpointCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $FineTuningJobCheckpointCopyWith<$Res> { + factory $FineTuningJobCheckpointCopyWith(FineTuningJobCheckpoint value, + $Res Function(FineTuningJobCheckpoint) then) = + _$FineTuningJobCheckpointCopyWithImpl<$Res, FineTuningJobCheckpoint>; + @useResult + $Res call( + {String id, + @JsonKey(name: 'created_at') int createdAt, + @JsonKey(name: 'fine_tuned_model_checkpoint') + String fineTunedModelCheckpoint, + @JsonKey(name: 'step_number') int stepNumber, + FineTuningJobCheckpointMetrics metrics, + @JsonKey(name: 'fine_tuning_job_id') String fineTuningJobId, + FineTuningJobCheckpointObject object}); + + $FineTuningJobCheckpointMetricsCopyWith<$Res> get metrics; +} + +/// @nodoc +class _$FineTuningJobCheckpointCopyWithImpl<$Res, + $Val extends FineTuningJobCheckpoint> + implements $FineTuningJobCheckpointCopyWith<$Res> { + _$FineTuningJobCheckpointCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + 
@pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? createdAt = null, + Object? fineTunedModelCheckpoint = null, + Object? stepNumber = null, + Object? metrics = null, + Object? fineTuningJobId = null, + Object? object = null, + }) { + return _then(_value.copyWith( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + createdAt: null == createdAt + ? _value.createdAt + : createdAt // ignore: cast_nullable_to_non_nullable + as int, + fineTunedModelCheckpoint: null == fineTunedModelCheckpoint + ? _value.fineTunedModelCheckpoint + : fineTunedModelCheckpoint // ignore: cast_nullable_to_non_nullable + as String, + stepNumber: null == stepNumber + ? _value.stepNumber + : stepNumber // ignore: cast_nullable_to_non_nullable + as int, + metrics: null == metrics + ? _value.metrics + : metrics // ignore: cast_nullable_to_non_nullable + as FineTuningJobCheckpointMetrics, + fineTuningJobId: null == fineTuningJobId + ? _value.fineTuningJobId + : fineTuningJobId // ignore: cast_nullable_to_non_nullable + as String, + object: null == object + ? 
_value.object + : object // ignore: cast_nullable_to_non_nullable + as FineTuningJobCheckpointObject, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $FineTuningJobCheckpointMetricsCopyWith<$Res> get metrics { + return $FineTuningJobCheckpointMetricsCopyWith<$Res>(_value.metrics, + (value) { + return _then(_value.copyWith(metrics: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$FineTuningJobCheckpointImplCopyWith<$Res> + implements $FineTuningJobCheckpointCopyWith<$Res> { + factory _$$FineTuningJobCheckpointImplCopyWith( + _$FineTuningJobCheckpointImpl value, + $Res Function(_$FineTuningJobCheckpointImpl) then) = + __$$FineTuningJobCheckpointImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String id, + @JsonKey(name: 'created_at') int createdAt, + @JsonKey(name: 'fine_tuned_model_checkpoint') + String fineTunedModelCheckpoint, + @JsonKey(name: 'step_number') int stepNumber, + FineTuningJobCheckpointMetrics metrics, + @JsonKey(name: 'fine_tuning_job_id') String fineTuningJobId, + FineTuningJobCheckpointObject object}); + + @override + $FineTuningJobCheckpointMetricsCopyWith<$Res> get metrics; +} + +/// @nodoc +class __$$FineTuningJobCheckpointImplCopyWithImpl<$Res> + extends _$FineTuningJobCheckpointCopyWithImpl<$Res, + _$FineTuningJobCheckpointImpl> + implements _$$FineTuningJobCheckpointImplCopyWith<$Res> { + __$$FineTuningJobCheckpointImplCopyWithImpl( + _$FineTuningJobCheckpointImpl _value, + $Res Function(_$FineTuningJobCheckpointImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? createdAt = null, + Object? fineTunedModelCheckpoint = null, + Object? stepNumber = null, + Object? metrics = null, + Object? fineTuningJobId = null, + Object? object = null, + }) { + return _then(_$FineTuningJobCheckpointImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + createdAt: null == createdAt + ? 
_value.createdAt + : createdAt // ignore: cast_nullable_to_non_nullable + as int, + fineTunedModelCheckpoint: null == fineTunedModelCheckpoint + ? _value.fineTunedModelCheckpoint + : fineTunedModelCheckpoint // ignore: cast_nullable_to_non_nullable + as String, + stepNumber: null == stepNumber + ? _value.stepNumber + : stepNumber // ignore: cast_nullable_to_non_nullable + as int, + metrics: null == metrics + ? _value.metrics + : metrics // ignore: cast_nullable_to_non_nullable + as FineTuningJobCheckpointMetrics, + fineTuningJobId: null == fineTuningJobId + ? _value.fineTuningJobId + : fineTuningJobId // ignore: cast_nullable_to_non_nullable + as String, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as FineTuningJobCheckpointObject, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$FineTuningJobCheckpointImpl extends _FineTuningJobCheckpoint { + const _$FineTuningJobCheckpointImpl( + {required this.id, + @JsonKey(name: 'created_at') required this.createdAt, + @JsonKey(name: 'fine_tuned_model_checkpoint') + required this.fineTunedModelCheckpoint, + @JsonKey(name: 'step_number') required this.stepNumber, + required this.metrics, + @JsonKey(name: 'fine_tuning_job_id') required this.fineTuningJobId, + required this.object}) + : super._(); + + factory _$FineTuningJobCheckpointImpl.fromJson(Map json) => + _$$FineTuningJobCheckpointImplFromJson(json); + + /// The checkpoint identifier, which can be referenced in the API endpoints. + @override + final String id; + + /// The Unix timestamp (in seconds) for when the checkpoint was created. + @override + @JsonKey(name: 'created_at') + final int createdAt; + + /// The name of the fine-tuned checkpoint model that is created. + @override + @JsonKey(name: 'fine_tuned_model_checkpoint') + final String fineTunedModelCheckpoint; + + /// The step number that the checkpoint was created at. 
+ @override + @JsonKey(name: 'step_number') + final int stepNumber; + + /// Metrics at the step number during the fine-tuning job. + @override + final FineTuningJobCheckpointMetrics metrics; + + /// The name of the fine-tuning job that this checkpoint was created from. + @override + @JsonKey(name: 'fine_tuning_job_id') + final String fineTuningJobId; + + /// The object type, which is always "fine_tuning.job.checkpoint". + @override + final FineTuningJobCheckpointObject object; + + @override + String toString() { + return 'FineTuningJobCheckpoint(id: $id, createdAt: $createdAt, fineTunedModelCheckpoint: $fineTunedModelCheckpoint, stepNumber: $stepNumber, metrics: $metrics, fineTuningJobId: $fineTuningJobId, object: $object)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$FineTuningJobCheckpointImpl && + (identical(other.id, id) || other.id == id) && + (identical(other.createdAt, createdAt) || + other.createdAt == createdAt) && + (identical( + other.fineTunedModelCheckpoint, fineTunedModelCheckpoint) || + other.fineTunedModelCheckpoint == fineTunedModelCheckpoint) && + (identical(other.stepNumber, stepNumber) || + other.stepNumber == stepNumber) && + (identical(other.metrics, metrics) || other.metrics == metrics) && + (identical(other.fineTuningJobId, fineTuningJobId) || + other.fineTuningJobId == fineTuningJobId) && + (identical(other.object, object) || other.object == object)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, id, createdAt, + fineTunedModelCheckpoint, stepNumber, metrics, fineTuningJobId, object); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$FineTuningJobCheckpointImplCopyWith<_$FineTuningJobCheckpointImpl> + get copyWith => __$$FineTuningJobCheckpointImplCopyWithImpl< + _$FineTuningJobCheckpointImpl>(this, _$identity); + + @override + Map toJson() { + return 
_$$FineTuningJobCheckpointImplToJson( + this, + ); + } +} + +abstract class _FineTuningJobCheckpoint extends FineTuningJobCheckpoint { + const factory _FineTuningJobCheckpoint( + {required final String id, + @JsonKey(name: 'created_at') required final int createdAt, + @JsonKey(name: 'fine_tuned_model_checkpoint') + required final String fineTunedModelCheckpoint, + @JsonKey(name: 'step_number') required final int stepNumber, + required final FineTuningJobCheckpointMetrics metrics, + @JsonKey(name: 'fine_tuning_job_id') + required final String fineTuningJobId, + required final FineTuningJobCheckpointObject object}) = + _$FineTuningJobCheckpointImpl; + const _FineTuningJobCheckpoint._() : super._(); + + factory _FineTuningJobCheckpoint.fromJson(Map json) = + _$FineTuningJobCheckpointImpl.fromJson; + + @override + + /// The checkpoint identifier, which can be referenced in the API endpoints. + String get id; + @override + + /// The Unix timestamp (in seconds) for when the checkpoint was created. + @JsonKey(name: 'created_at') + int get createdAt; + @override + + /// The name of the fine-tuned checkpoint model that is created. + @JsonKey(name: 'fine_tuned_model_checkpoint') + String get fineTunedModelCheckpoint; + @override + + /// The step number that the checkpoint was created at. + @JsonKey(name: 'step_number') + int get stepNumber; + @override + + /// Metrics at the step number during the fine-tuning job. + FineTuningJobCheckpointMetrics get metrics; + @override + + /// The name of the fine-tuning job that this checkpoint was created from. + @JsonKey(name: 'fine_tuning_job_id') + String get fineTuningJobId; + @override + + /// The object type, which is always "fine_tuning.job.checkpoint". 
+ FineTuningJobCheckpointObject get object; + @override + @JsonKey(ignore: true) + _$$FineTuningJobCheckpointImplCopyWith<_$FineTuningJobCheckpointImpl> + get copyWith => throw _privateConstructorUsedError; +} + +FineTuningJobCheckpointMetrics _$FineTuningJobCheckpointMetricsFromJson( + Map json) { + return _FineTuningJobCheckpointMetrics.fromJson(json); +} + +/// @nodoc +mixin _$FineTuningJobCheckpointMetrics { + /// The step number that the metrics were recorded at. + @JsonKey(includeIfNull: false) + double? get step => throw _privateConstructorUsedError; + + /// The training loss at the step number. + @JsonKey(name: 'train_loss', includeIfNull: false) + double? get trainLoss => throw _privateConstructorUsedError; + + /// The training mean token accuracy at the step number. + @JsonKey(name: 'train_mean_token_accuracy', includeIfNull: false) + double? get trainMeanTokenAccuracy => throw _privateConstructorUsedError; + + /// The validation loss at the step number. + @JsonKey(name: 'valid_loss', includeIfNull: false) + double? get validLoss => throw _privateConstructorUsedError; + + /// The validation mean token accuracy at the step number. + @JsonKey(name: 'valid_mean_token_accuracy', includeIfNull: false) + double? get validMeanTokenAccuracy => throw _privateConstructorUsedError; + + /// The full validation loss at the step number. + @JsonKey(name: 'full_valid_loss', includeIfNull: false) + double? get fullValidLoss => throw _privateConstructorUsedError; + + /// The full validation mean token accuracy at the step number. + @JsonKey(name: 'full_valid_mean_token_accuracy', includeIfNull: false) + double? 
get fullValidMeanTokenAccuracy => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $FineTuningJobCheckpointMetricsCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $FineTuningJobCheckpointMetricsCopyWith<$Res> { + factory $FineTuningJobCheckpointMetricsCopyWith( + FineTuningJobCheckpointMetrics value, + $Res Function(FineTuningJobCheckpointMetrics) then) = + _$FineTuningJobCheckpointMetricsCopyWithImpl<$Res, + FineTuningJobCheckpointMetrics>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) double? step, + @JsonKey(name: 'train_loss', includeIfNull: false) double? trainLoss, + @JsonKey(name: 'train_mean_token_accuracy', includeIfNull: false) + double? trainMeanTokenAccuracy, + @JsonKey(name: 'valid_loss', includeIfNull: false) double? validLoss, + @JsonKey(name: 'valid_mean_token_accuracy', includeIfNull: false) + double? validMeanTokenAccuracy, + @JsonKey(name: 'full_valid_loss', includeIfNull: false) + double? fullValidLoss, + @JsonKey(name: 'full_valid_mean_token_accuracy', includeIfNull: false) + double? fullValidMeanTokenAccuracy}); +} + +/// @nodoc +class _$FineTuningJobCheckpointMetricsCopyWithImpl<$Res, + $Val extends FineTuningJobCheckpointMetrics> + implements $FineTuningJobCheckpointMetricsCopyWith<$Res> { + _$FineTuningJobCheckpointMetricsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? step = freezed, + Object? trainLoss = freezed, + Object? trainMeanTokenAccuracy = freezed, + Object? validLoss = freezed, + Object? validMeanTokenAccuracy = freezed, + Object? fullValidLoss = freezed, + Object? fullValidMeanTokenAccuracy = freezed, + }) { + return _then(_value.copyWith( + step: freezed == step + ? 
_value.step + : step // ignore: cast_nullable_to_non_nullable + as double?, + trainLoss: freezed == trainLoss + ? _value.trainLoss + : trainLoss // ignore: cast_nullable_to_non_nullable + as double?, + trainMeanTokenAccuracy: freezed == trainMeanTokenAccuracy + ? _value.trainMeanTokenAccuracy + : trainMeanTokenAccuracy // ignore: cast_nullable_to_non_nullable + as double?, + validLoss: freezed == validLoss + ? _value.validLoss + : validLoss // ignore: cast_nullable_to_non_nullable + as double?, + validMeanTokenAccuracy: freezed == validMeanTokenAccuracy + ? _value.validMeanTokenAccuracy + : validMeanTokenAccuracy // ignore: cast_nullable_to_non_nullable + as double?, + fullValidLoss: freezed == fullValidLoss + ? _value.fullValidLoss + : fullValidLoss // ignore: cast_nullable_to_non_nullable + as double?, + fullValidMeanTokenAccuracy: freezed == fullValidMeanTokenAccuracy + ? _value.fullValidMeanTokenAccuracy + : fullValidMeanTokenAccuracy // ignore: cast_nullable_to_non_nullable + as double?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$FineTuningJobCheckpointMetricsImplCopyWith<$Res> + implements $FineTuningJobCheckpointMetricsCopyWith<$Res> { + factory _$$FineTuningJobCheckpointMetricsImplCopyWith( + _$FineTuningJobCheckpointMetricsImpl value, + $Res Function(_$FineTuningJobCheckpointMetricsImpl) then) = + __$$FineTuningJobCheckpointMetricsImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(includeIfNull: false) double? step, + @JsonKey(name: 'train_loss', includeIfNull: false) double? trainLoss, + @JsonKey(name: 'train_mean_token_accuracy', includeIfNull: false) + double? trainMeanTokenAccuracy, + @JsonKey(name: 'valid_loss', includeIfNull: false) double? validLoss, + @JsonKey(name: 'valid_mean_token_accuracy', includeIfNull: false) + double? validMeanTokenAccuracy, + @JsonKey(name: 'full_valid_loss', includeIfNull: false) + double? fullValidLoss, + @JsonKey(name: 'full_valid_mean_token_accuracy', includeIfNull: false) + double? 
fullValidMeanTokenAccuracy}); +} + +/// @nodoc +class __$$FineTuningJobCheckpointMetricsImplCopyWithImpl<$Res> + extends _$FineTuningJobCheckpointMetricsCopyWithImpl<$Res, + _$FineTuningJobCheckpointMetricsImpl> + implements _$$FineTuningJobCheckpointMetricsImplCopyWith<$Res> { + __$$FineTuningJobCheckpointMetricsImplCopyWithImpl( + _$FineTuningJobCheckpointMetricsImpl _value, + $Res Function(_$FineTuningJobCheckpointMetricsImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? step = freezed, + Object? trainLoss = freezed, + Object? trainMeanTokenAccuracy = freezed, + Object? validLoss = freezed, + Object? validMeanTokenAccuracy = freezed, + Object? fullValidLoss = freezed, + Object? fullValidMeanTokenAccuracy = freezed, + }) { + return _then(_$FineTuningJobCheckpointMetricsImpl( + step: freezed == step + ? _value.step + : step // ignore: cast_nullable_to_non_nullable + as double?, + trainLoss: freezed == trainLoss + ? _value.trainLoss + : trainLoss // ignore: cast_nullable_to_non_nullable + as double?, + trainMeanTokenAccuracy: freezed == trainMeanTokenAccuracy + ? _value.trainMeanTokenAccuracy + : trainMeanTokenAccuracy // ignore: cast_nullable_to_non_nullable + as double?, + validLoss: freezed == validLoss + ? _value.validLoss + : validLoss // ignore: cast_nullable_to_non_nullable + as double?, + validMeanTokenAccuracy: freezed == validMeanTokenAccuracy + ? _value.validMeanTokenAccuracy + : validMeanTokenAccuracy // ignore: cast_nullable_to_non_nullable + as double?, + fullValidLoss: freezed == fullValidLoss + ? _value.fullValidLoss + : fullValidLoss // ignore: cast_nullable_to_non_nullable + as double?, + fullValidMeanTokenAccuracy: freezed == fullValidMeanTokenAccuracy + ? 
_value.fullValidMeanTokenAccuracy + : fullValidMeanTokenAccuracy // ignore: cast_nullable_to_non_nullable + as double?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$FineTuningJobCheckpointMetricsImpl + extends _FineTuningJobCheckpointMetrics { + const _$FineTuningJobCheckpointMetricsImpl( + {@JsonKey(includeIfNull: false) this.step, + @JsonKey(name: 'train_loss', includeIfNull: false) this.trainLoss, + @JsonKey(name: 'train_mean_token_accuracy', includeIfNull: false) + this.trainMeanTokenAccuracy, + @JsonKey(name: 'valid_loss', includeIfNull: false) this.validLoss, + @JsonKey(name: 'valid_mean_token_accuracy', includeIfNull: false) + this.validMeanTokenAccuracy, + @JsonKey(name: 'full_valid_loss', includeIfNull: false) + this.fullValidLoss, + @JsonKey(name: 'full_valid_mean_token_accuracy', includeIfNull: false) + this.fullValidMeanTokenAccuracy}) + : super._(); + + factory _$FineTuningJobCheckpointMetricsImpl.fromJson( + Map json) => + _$$FineTuningJobCheckpointMetricsImplFromJson(json); + + /// The step number that the metrics were recorded at. + @override + @JsonKey(includeIfNull: false) + final double? step; + + /// The training loss at the step number. + @override + @JsonKey(name: 'train_loss', includeIfNull: false) + final double? trainLoss; + + /// The training mean token accuracy at the step number. + @override + @JsonKey(name: 'train_mean_token_accuracy', includeIfNull: false) + final double? trainMeanTokenAccuracy; + + /// The validation loss at the step number. + @override + @JsonKey(name: 'valid_loss', includeIfNull: false) + final double? validLoss; + + /// The validation mean token accuracy at the step number. + @override + @JsonKey(name: 'valid_mean_token_accuracy', includeIfNull: false) + final double? validMeanTokenAccuracy; + + /// The full validation loss at the step number. + @override + @JsonKey(name: 'full_valid_loss', includeIfNull: false) + final double? 
fullValidLoss; + + /// The full validation mean token accuracy at the step number. + @override + @JsonKey(name: 'full_valid_mean_token_accuracy', includeIfNull: false) + final double? fullValidMeanTokenAccuracy; + + @override + String toString() { + return 'FineTuningJobCheckpointMetrics(step: $step, trainLoss: $trainLoss, trainMeanTokenAccuracy: $trainMeanTokenAccuracy, validLoss: $validLoss, validMeanTokenAccuracy: $validMeanTokenAccuracy, fullValidLoss: $fullValidLoss, fullValidMeanTokenAccuracy: $fullValidMeanTokenAccuracy)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$FineTuningJobCheckpointMetricsImpl && + (identical(other.step, step) || other.step == step) && + (identical(other.trainLoss, trainLoss) || + other.trainLoss == trainLoss) && + (identical(other.trainMeanTokenAccuracy, trainMeanTokenAccuracy) || + other.trainMeanTokenAccuracy == trainMeanTokenAccuracy) && + (identical(other.validLoss, validLoss) || + other.validLoss == validLoss) && + (identical(other.validMeanTokenAccuracy, validMeanTokenAccuracy) || + other.validMeanTokenAccuracy == validMeanTokenAccuracy) && + (identical(other.fullValidLoss, fullValidLoss) || + other.fullValidLoss == fullValidLoss) && + (identical(other.fullValidMeanTokenAccuracy, + fullValidMeanTokenAccuracy) || + other.fullValidMeanTokenAccuracy == + fullValidMeanTokenAccuracy)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + step, + trainLoss, + trainMeanTokenAccuracy, + validLoss, + validMeanTokenAccuracy, + fullValidLoss, + fullValidMeanTokenAccuracy); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$FineTuningJobCheckpointMetricsImplCopyWith< + _$FineTuningJobCheckpointMetricsImpl> + get copyWith => __$$FineTuningJobCheckpointMetricsImplCopyWithImpl< + _$FineTuningJobCheckpointMetricsImpl>(this, _$identity); + + @override + Map toJson() { + return 
_$$FineTuningJobCheckpointMetricsImplToJson( + this, + ); + } +} + +abstract class _FineTuningJobCheckpointMetrics + extends FineTuningJobCheckpointMetrics { + const factory _FineTuningJobCheckpointMetrics( + {@JsonKey(includeIfNull: false) final double? step, + @JsonKey(name: 'train_loss', includeIfNull: false) + final double? trainLoss, + @JsonKey(name: 'train_mean_token_accuracy', includeIfNull: false) + final double? trainMeanTokenAccuracy, + @JsonKey(name: 'valid_loss', includeIfNull: false) + final double? validLoss, + @JsonKey(name: 'valid_mean_token_accuracy', includeIfNull: false) + final double? validMeanTokenAccuracy, + @JsonKey(name: 'full_valid_loss', includeIfNull: false) + final double? fullValidLoss, + @JsonKey(name: 'full_valid_mean_token_accuracy', includeIfNull: false) + final double? fullValidMeanTokenAccuracy}) = + _$FineTuningJobCheckpointMetricsImpl; + const _FineTuningJobCheckpointMetrics._() : super._(); + + factory _FineTuningJobCheckpointMetrics.fromJson(Map json) = + _$FineTuningJobCheckpointMetricsImpl.fromJson; + + @override + + /// The step number that the metrics were recorded at. + @JsonKey(includeIfNull: false) + double? get step; + @override + + /// The training loss at the step number. + @JsonKey(name: 'train_loss', includeIfNull: false) + double? get trainLoss; + @override + + /// The training mean token accuracy at the step number. + @JsonKey(name: 'train_mean_token_accuracy', includeIfNull: false) + double? get trainMeanTokenAccuracy; + @override + + /// The validation loss at the step number. + @JsonKey(name: 'valid_loss', includeIfNull: false) + double? get validLoss; + @override + + /// The validation mean token accuracy at the step number. + @JsonKey(name: 'valid_mean_token_accuracy', includeIfNull: false) + double? get validMeanTokenAccuracy; + @override + + /// The full validation loss at the step number. + @JsonKey(name: 'full_valid_loss', includeIfNull: false) + double? 
get fullValidLoss; + @override + + /// The full validation mean token accuracy at the step number. + @JsonKey(name: 'full_valid_mean_token_accuracy', includeIfNull: false) + double? get fullValidMeanTokenAccuracy; + @override + @JsonKey(ignore: true) + _$$FineTuningJobCheckpointMetricsImplCopyWith< + _$FineTuningJobCheckpointMetricsImpl> + get copyWith => throw _privateConstructorUsedError; +} + CreateImageRequest _$CreateImageRequestFromJson(Map json) { return _CreateImageRequest.fromJson(json); } diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index cb0ff1a8..c548b1a4 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -1453,6 +1453,46 @@ const _$ListFineTuningJobEventsResponseObjectEnumMap = { ListFineTuningJobEventsResponseObject.list: 'list', }; +_$ListFineTuningJobCheckpointsResponseImpl + _$$ListFineTuningJobCheckpointsResponseImplFromJson( + Map json) => + _$ListFineTuningJobCheckpointsResponseImpl( + data: (json['data'] as List) + .map((e) => + FineTuningJobCheckpoint.fromJson(e as Map)) + .toList(), + object: $enumDecode( + _$ListFineTuningJobCheckpointsResponseObjectEnumMap, + json['object']), + firstId: json['first_id'] as String?, + lastId: json['last_id'] as String?, + hasMore: json['has_more'] as bool, + ); + +Map _$$ListFineTuningJobCheckpointsResponseImplToJson( + _$ListFineTuningJobCheckpointsResponseImpl instance) { + final val = { + 'data': instance.data.map((e) => e.toJson()).toList(), + 'object': + _$ListFineTuningJobCheckpointsResponseObjectEnumMap[instance.object]!, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('first_id', instance.firstId); + writeNotNull('last_id', instance.lastId); + val['has_more'] = instance.hasMore; + return val; +} + +const 
_$ListFineTuningJobCheckpointsResponseObjectEnumMap = { + ListFineTuningJobCheckpointsResponseObject.list: 'list', +}; + _$FineTuningJobEventImpl _$$FineTuningJobEventImplFromJson( Map json) => _$FineTuningJobEventImpl( @@ -1483,6 +1523,73 @@ const _$FineTuningJobEventObjectEnumMap = { FineTuningJobEventObject.fineTuningJobEvent: 'fine_tuning.job.event', }; +_$FineTuningJobCheckpointImpl _$$FineTuningJobCheckpointImplFromJson( + Map json) => + _$FineTuningJobCheckpointImpl( + id: json['id'] as String, + createdAt: json['created_at'] as int, + fineTunedModelCheckpoint: json['fine_tuned_model_checkpoint'] as String, + stepNumber: json['step_number'] as int, + metrics: FineTuningJobCheckpointMetrics.fromJson( + json['metrics'] as Map), + fineTuningJobId: json['fine_tuning_job_id'] as String, + object: + $enumDecode(_$FineTuningJobCheckpointObjectEnumMap, json['object']), + ); + +Map _$$FineTuningJobCheckpointImplToJson( + _$FineTuningJobCheckpointImpl instance) => + { + 'id': instance.id, + 'created_at': instance.createdAt, + 'fine_tuned_model_checkpoint': instance.fineTunedModelCheckpoint, + 'step_number': instance.stepNumber, + 'metrics': instance.metrics.toJson(), + 'fine_tuning_job_id': instance.fineTuningJobId, + 'object': _$FineTuningJobCheckpointObjectEnumMap[instance.object]!, + }; + +const _$FineTuningJobCheckpointObjectEnumMap = { + FineTuningJobCheckpointObject.fineTuningJobCheckpoint: + 'fine_tuning.job.checkpoint', +}; + +_$FineTuningJobCheckpointMetricsImpl + _$$FineTuningJobCheckpointMetricsImplFromJson(Map json) => + _$FineTuningJobCheckpointMetricsImpl( + step: (json['step'] as num?)?.toDouble(), + trainLoss: (json['train_loss'] as num?)?.toDouble(), + trainMeanTokenAccuracy: + (json['train_mean_token_accuracy'] as num?)?.toDouble(), + validLoss: (json['valid_loss'] as num?)?.toDouble(), + validMeanTokenAccuracy: + (json['valid_mean_token_accuracy'] as num?)?.toDouble(), + fullValidLoss: (json['full_valid_loss'] as num?)?.toDouble(), + 
fullValidMeanTokenAccuracy: + (json['full_valid_mean_token_accuracy'] as num?)?.toDouble(), + ); + +Map _$$FineTuningJobCheckpointMetricsImplToJson( + _$FineTuningJobCheckpointMetricsImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('step', instance.step); + writeNotNull('train_loss', instance.trainLoss); + writeNotNull('train_mean_token_accuracy', instance.trainMeanTokenAccuracy); + writeNotNull('valid_loss', instance.validLoss); + writeNotNull('valid_mean_token_accuracy', instance.validMeanTokenAccuracy); + writeNotNull('full_valid_loss', instance.fullValidLoss); + writeNotNull( + 'full_valid_mean_token_accuracy', instance.fullValidMeanTokenAccuracy); + return val; +} + _$CreateImageRequestImpl _$$CreateImageRequestImplFromJson( Map json) => _$CreateImageRequestImpl( diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 65028db8..d10f2d45 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -227,6 +227,42 @@ paths: application/json: schema: $ref: "#/components/schemas/FineTuningJob" + /fine_tuning/jobs/{fine_tuning_job_id}/checkpoints: + get: + operationId: listFineTuningJobCheckpoints + tags: + - Fine-tuning + summary: | + List checkpoints for a fine-tuning job. + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job to get checkpoints for. + - name: after + in: query + description: Identifier for the last checkpoint ID from the previous pagination request. + required: false + schema: + type: string + - name: limit + in: query + description: Number of checkpoints to retrieve. 
+ required: false + schema: + type: integer + default: 10 + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListFineTuningJobCheckpointsResponse" /images/generations: post: operationId: createImage @@ -2493,6 +2529,34 @@ components: required: - object - data + ListFineTuningJobCheckpointsResponse: + type: object + description: Represents a list of fine-tuning job checkpoints. + properties: + data: + type: array + description: The list of fine-tuning job checkpoints. + items: + $ref: "#/components/schemas/FineTuningJobCheckpoint" + object: + type: string + description: The object type, which is always "list". + enum: [list] + first_id: + type: string + description: The ID of the first checkpoint in the list. + nullable: true + last_id: + type: string + description: The ID of the last checkpoint in the list. + nullable: true + has_more: + description: Whether there are more checkpoints to retrieve. + type: boolean + required: + - object + - data + - has_more FineTuningJobEvent: type: object description: Fine-tuning job event object. @@ -2520,6 +2584,65 @@ components: - created_at - level - message + FineTuningJobCheckpoint: + id: FineTuningJobCheckpoint + type: object + description: | + The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. + properties: + id: + type: string + description: The checkpoint identifier, which can be referenced in the API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the checkpoint was created. + fine_tuned_model_checkpoint: + type: string + description: The name of the fine-tuned checkpoint model that is created. + step_number: + type: integer + description: The step number that the checkpoint was created at. + metrics: + id: FineTuningJobCheckpointMetrics + type: object + description: Metrics at the step number during the fine-tuning job. 
+ properties: + step: + type: number + description: The step number that the metrics were recorded at. + train_loss: + type: number + description: The training loss at the step number. + train_mean_token_accuracy: + type: number + description: The training mean token accuracy at the step number. + valid_loss: + type: number + description: The validation loss at the step number. + valid_mean_token_accuracy: + type: number + description: The validation mean token accuracy at the step number. + full_valid_loss: + type: number + description: The full validation loss at the step number. + full_valid_mean_token_accuracy: + type: number + description: The full validation mean token accuracy at the step number. + fine_tuning_job_id: + type: string + description: The name of the fine-tuning job that this checkpoint was created from. + object: + type: string + description: The object type, which is always "fine_tuning.job.checkpoint". + enum: [fine_tuning.job.checkpoint] + required: + - created_at + - fine_tuning_job_id + - fine_tuned_model_checkpoint + - id + - metrics + - object + - step_number CreateImageRequest: type: object description: Request object for the Create image endpoint. diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml index a370a155..f284e65e 100644 --- a/packages/openai_dart/oas/openapi_official.yaml +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -139,14 +139,14 @@ paths: -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ - "model": "gpt-4-vision-preview", + "model": "gpt-4-turbo", "messages": [ { "role": "user", "content": [ { "type": "text", - "text": "What’s in this image?" + "text": "What'\''s in this image?" 
}, { "type": "image_url", @@ -165,12 +165,12 @@ paths: client = OpenAI() response = client.chat.completions.create( - model="gpt-4-vision-preview", + model="gpt-4-turbo", messages=[ { "role": "user", "content": [ - {"type": "text", "text": "What’s in this image?"}, + {"type": "text", "text": "What's in this image?"}, { "type": "image_url", "image_url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", @@ -189,12 +189,12 @@ paths: async function main() { const response = await openai.chat.completions.create({ - model: "gpt-4-vision-preview", + model: "gpt-4-turbo", messages: [ { role: "user", content: [ - { type: "text", text: "What’s in this image?" }, + { type: "text", text: "What's in this image?" }, { type: "image_url", image_url: @@ -218,7 +218,7 @@ paths: "index": 0, "message": { "role": "assistant", - "content": "\n\nHello there, how may I assist you today?", + "content": "\n\nThis image shows a wooden boardwalk extending through a lush green marshland.", }, "logprobs": null, "finish_reason": "stop" @@ -301,11 +301,11 @@ paths: -H "Content-Type: application/json" \ -H "Authorization: Bearer $OPENAI_API_KEY" \ -d '{ - "model": "gpt-3.5-turbo", + "model": "gpt-4-turbo", "messages": [ { "role": "user", - "content": "What is the weather like in Boston?" + "content": "What'\''s the weather like in Boston today?" 
} ], "tools": [ @@ -395,7 +395,7 @@ paths: ]; const response = await openai.chat.completions.create({ - model: "gpt-3.5-turbo", + model: "gpt-4-turbo", messages: messages, tools: tools, tool_choice: "auto", @@ -1260,7 +1260,7 @@ paths: response_format="verbose_json", timestamp_granularities=["word"] ) - + print(transcript.words) node: | import fs from "fs"; @@ -1320,7 +1320,7 @@ paths: response_format="verbose_json", timestamp_granularities=["segment"] ) - + print(transcript.words) node: | import fs from "fs"; @@ -1892,6 +1892,52 @@ paths: "validation_file": "file-abc123", "training_file": "file-abc123", } + - title: W&B Integration + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "validation_file": "file-abc123", + "model": "gpt-3.5-turbo", + "integrations": [ + { + "type": "wandb", + "wandb": { + "project": "my-wandb-project", + "name": "ft-run-display-name" + "tags": [ + "first-experiment", "v2" + ] + } + } + ] + }' + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-3.5-turbo-0125", + "created_at": 1614807352, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": "file-abc123", + "training_file": "file-abc123", + "integrations": [ + { + "type": "wandb", + "wandb": { + "project": "my-wandb-project", + "entity": None, + "run_id": "ftjob-abc123" + } + } + ] + } get: operationId: listPaginatedFineTuningJobs tags: @@ -2032,8 +2078,12 @@ paths: "training_file": "file-abc123", "hyperparameters": { "n_epochs": 4, + "batch_size": 1, + "learning_rate_multiplier": 1.0 }, - "trained_tokens": 5768 + "trained_tokens": 5768, + "integrations": [], + "seed": 0 } /fine_tuning/jobs/{fine_tuning_job_id}/events: get: @@ -2191,6 +2241,84 @@ paths: "validation_file": "file-abc123", "training_file": "file-abc123" } + 
/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints: + get: + operationId: listFineTuningJobCheckpoints + tags: + - Fine-tuning + summary: | + List checkpoints for a fine-tuning job. + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job to get checkpoints for. + - name: after + in: query + description: Identifier for the last checkpoint ID from the previous pagination request. + required: false + schema: + type: string + - name: limit + in: query + description: Number of checkpoints to retrieve. + required: false + schema: + type: integer + default: 10 + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListFineTuningJobCheckpointsResponse" + x-oaiMeta: + name: List fine-tuning checkpoints + group: fine-tuning + returns: A list of fine-tuning [checkpoint objects](/docs/api-reference/fine-tuning/checkpoint-object) for a fine-tuning job. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/checkpoints \ + -H "Authorization: Bearer $OPENAI_API_KEY" + response: | + { + "object": "list" + "data": [ + { + "object": "fine_tuning.job.checkpoint", + "id": "ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB", + "created_at": 1519129973, + "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom-suffix:96olL566:ckpt-step-2000", + "metrics": { + "full_valid_loss": 0.134, + "full_valid_mean_token_accuracy": 0.874 + }, + "fine_tuning_job_id": "ftjob-abc123", + "step_number": 2000, + }, + { + "object": "fine_tuning.job.checkpoint", + "id": "ftckpt_enQCFmOTGj3syEpYVhBRLTSy", + "created_at": 1519129833, + "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000", + "metrics": { + "full_valid_loss": 0.167, + "full_valid_mean_token_accuracy": 0.781 + }, + "fine_tuning_job_id": "ftjob-abc123", + "step_number": 1000, + }, + ], + "first_id": "ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB", + "last_id": "ftckpt_enQCFmOTGj3syEpYVhBRLTSy", + "has_more": true + } /models: get: @@ -2299,7 +2427,7 @@ paths: const openai = new OpenAI(); async function main() { - const model = await openai.models.retrieve("gpt-3.5-turbo"); + const model = await openai.models.retrieve("VAR_model_id"); console.log(model); } @@ -2539,7 +2667,7 @@ paths: "created_at": 1698982736, "name": "Coding Tutor", "description": null, - "model": "gpt-4", + "model": "gpt-4-turbo", "instructions": "You are a helpful assistant designed to make me better at coding!", "tools": [], "file_ids": [], @@ -2551,7 +2679,7 @@ paths: "created_at": 1698982718, "name": "My Assistant", "description": null, - "model": "gpt-4", + "model": "gpt-4-turbo", "instructions": "You are a helpful assistant designed to make me better at coding!", "tools": [], "file_ids": [], @@ -2563,7 +2691,7 @@ paths: "created_at": 1698982643, "name": null, "description": null, - "model": "gpt-4", + "model": "gpt-4-turbo", "instructions": 
null, "tools": [], "file_ids": [], @@ -2609,7 +2737,7 @@ paths: "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", "name": "Math Tutor", "tools": [{"type": "code_interpreter"}], - "model": "gpt-4" + "model": "gpt-4-turbo" }' python: | @@ -2620,7 +2748,7 @@ paths: instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.", name="Math Tutor", tools=[{"type": "code_interpreter"}], - model="gpt-4", + model="gpt-4-turbo", ) print(my_assistant) node.js: |- @@ -2634,7 +2762,7 @@ paths: "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", name: "Math Tutor", tools: [{ type: "code_interpreter" }], - model: "gpt-4", + model: "gpt-4-turbo", }); console.log(myAssistant); @@ -2648,7 +2776,7 @@ paths: "created_at": 1698984975, "name": "Math Tutor", "description": null, - "model": "gpt-4", + "model": "gpt-4-turbo", "instructions": "You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.", "tools": [ { @@ -2668,7 +2796,7 @@ paths: -d '{ "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [{"type": "retrieval"}], - "model": "gpt-4", + "model": "gpt-4-turbo", "file_ids": ["file-abc123"] }' python: | @@ -2679,7 +2807,7 @@ paths: instructions="You are an HR bot, and you have access to files to answer employee questions about company policies.", name="HR Helper", tools=[{"type": "retrieval"}], - model="gpt-4", + model="gpt-4-turbo", file_ids=["file-abc123"], ) print(my_assistant) @@ -2694,7 +2822,7 @@ paths: "You are an HR bot, and you have access to files to answer employee questions about company policies.", name: "HR Helper", tools: [{ type: "retrieval" }], - model: "gpt-4", + model: "gpt-4-turbo", file_ids: ["file-abc123"], }); @@ -2709,7 +2837,7 @@ paths: "created_at": 1699009403, "name": "HR Helper", "description": null, - "model": "gpt-4", + "model": "gpt-4-turbo", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [ { @@ -2781,7 +2909,7 @@ paths: "created_at": 1699009709, "name": "HR Helper", "description": null, - "model": "gpt-4", + "model": "gpt-4-turbo", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", "tools": [ { @@ -2833,7 +2961,7 @@ paths: -d '{ "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", "tools": [{"type": "retrieval"}], - "model": "gpt-4", + "model": "gpt-4-turbo", "file_ids": ["file-abc123", "file-abc456"] }' python: | @@ -2845,7 +2973,7 @@ paths: instructions="You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.", name="HR Helper", tools=[{"type": "retrieval"}], - model="gpt-4", + model="gpt-4-turbo", file_ids=["file-abc123", "file-abc456"], ) @@ -2863,7 +2991,7 @@ paths: "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", name: "HR Helper", tools: [{ type: "retrieval" }], - model: "gpt-4", + model: "gpt-4-turbo", file_ids: [ "file-abc123", "file-abc456", @@ -2882,7 +3010,7 @@ paths: "created_at": 1699009709, "name": "HR Helper", "description": null, - "model": "gpt-4", + "model": "gpt-4-turbo", "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", "tools": [ { @@ -3310,6 +3438,12 @@ paths: description: *pagination_before_param_description schema: type: string + - name: run_id + in: query + description: | + Filter messages by the run ID that generated them. + schema: + type: string responses: "200": description: OK @@ -3699,136 +3833,407 @@ paths: beta: true returns: A [run](/docs/api-reference/runs/object) object. 
examples: - request: - curl: | - curl https://api.openai.com/v1/threads/runs \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -H "Content-Type: application/json" \ - -H "OpenAI-Beta: assistants=v1" \ - -d '{ - "assistant_id": "asst_abc123", + - title: Default + request: + curl: | + curl https://api.openai.com/v1/threads/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "assistant_id": "asst_abc123", + "thread": { + "messages": [ + {"role": "user", "content": "Explain deep learning to a 5 year old."} + ] + } + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.create_and_run( + assistant_id="asst_abc123", + thread={ + "messages": [ + {"role": "user", "content": "Explain deep learning to a 5 year old."} + ] + } + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.createAndRun({ + assistant_id: "asst_abc123", + thread: { + messages: [ + { role: "user", content: "Explain deep learning to a 5 year old." 
}, + ], + }, + }); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699076792, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "queued", + "started_at": null, + "expires_at": 1699077392, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "last_error": null, + "model": "gpt-4-turbo", + "instructions": "You are a helpful assistant.", + "tools": [], + "file_ids": [], + "metadata": {}, + "usage": null, + "temperature": 1 + } + + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/threads/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "assistant_id": "asst_123", "thread": { "messages": [ - {"role": "user", "content": "Explain deep learning to a 5 year old."} + {"role": "user", "content": "Hello"} ] - } + }, + "stream": true }' - python: | - from openai import OpenAI - client = OpenAI() + python: | + from openai import OpenAI + client = OpenAI() - run = client.beta.threads.create_and_run( - assistant_id="asst_abc123", - thread={ - "messages": [ - {"role": "user", "content": "Explain deep learning to a 5 year old."} - ] + stream = client.beta.threads.create_and_run( + assistant_id="asst_123", + thread={ + "messages": [ + {"role": "user", "content": "Hello"} + ] + }, + stream=True + ) + + for event in stream: + print(event) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const stream = await openai.beta.threads.createAndRun({ + assistant_id: "asst_123", + thread: { + messages: [ + { role: "user", content: "Hello" }, + ], + }, + stream: true + }); + + for await (const event of stream) { + console.log(event); + } } - ) - node.js: | - import OpenAI from "openai"; - const openai = new OpenAI(); + main(); + response: | + event: thread.created + data: 
{"id":"thread_123","object":"thread","created_at":1710348075,"metadata":{}} - async function main() { - const run = await openai.beta.threads.createAndRun({ - assistant_id: "asst_abc123", - thread: { - messages: [ - { role: "user", content: "Explain deep learning to a 5 year old." }, - ], - }, - }); + event: thread.run.created + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"file_ids":[],"metadata":{},"usage":null} - console.log(run); - } + event: thread.run.queued + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"file_ids":[],"metadata":{},"usage":null} - main(); - response: | - { - "id": "run_abc123", - "object": "thread.run", - "created_at": 1699076792, - "assistant_id": "asst_abc123", - "thread_id": "thread_abc123", - "status": "queued", - "started_at": null, - "expires_at": 1699077392, - "cancelled_at": null, - "failed_at": null, - "completed_at": null, - "last_error": null, - "model": "gpt-4", - "instructions": "You are a helpful assistant.", - "tools": [], - "file_ids": [], - "metadata": {}, - "usage": null - } + event: thread.run.in_progress + data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"file_ids":[],"metadata":{},"usage":null} - /threads/{thread_id}/runs: - get: - operationId: listRuns - tags: - - Assistants - summary: Returns a list of runs belonging to a thread. - parameters: - - name: thread_id - in: path - required: true - schema: - type: string - description: The ID of the thread the run belongs to. - - name: limit - in: query - description: *pagination_limit_param_description - required: false - schema: - type: integer - default: 20 - - name: order - in: query - description: *pagination_order_param_description - schema: - type: string - default: desc - enum: ["asc", "desc"] - - name: after - in: query - description: *pagination_after_param_description - schema: - type: string - - name: before - in: query - description: *pagination_before_param_description - schema: - type: string - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: "#/components/schemas/ListRunsResponse" - x-oaiMeta: - name: List runs - group: threads - beta: true - returns: A list of [run](/docs/api-reference/runs/object) objects. 
- examples: - request: - curl: | - curl https://api.openai.com/v1/threads/thread_abc123/runs \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -H "Content-Type: application/json" \ - -H "OpenAI-Beta: assistants=v1" - python: | - from openai import OpenAI - client = OpenAI() + event: thread.run.step.created + data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} - runs = client.beta.threads.runs.list( - "thread_abc123" + event: thread.run.step.in_progress + data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + event: thread.message.created + data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"file_ids":[],"metadata":{}} + + event: thread.message.in_progress + data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"file_ids":[],"metadata":{}} + + event: thread.message.delta + data: 
{"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} + + ... + + event: thread.message.delta + data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} + + event: thread.message.delta + data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} + + event: thread.message.completed + data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! How can I assist you today?","annotations":[]}}],"file_ids":[],"metadata":{}} + + event: thread.run.step.completed + data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} + + event: thread.run.completed + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"file_ids":[],"metadata":{},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} + + event: done + data: [DONE] + + - title: Streaming with Functions + request: + curl: | + curl https://api.openai.com/v1/threads/runs \ + -H 
"Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "assistant_id": "asst_abc123", + "thread": { + "messages": [ + {"role": "user", "content": "What is the weather like in San Francisco?"} + ] + }, + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + } + ], + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ] + + stream = client.beta.threads.create_and_run( + thread={ + "messages": [ + {"role": "user", "content": "What is the weather like in San Francisco?"} + ] + }, + assistant_id="asst_abc123", + tools=tools, + stream=True + ) + + for event in stream: + print(event) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + const tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ]; + + async function main() { + const stream = await openai.beta.threads.createAndRun({ + assistant_id: "asst_123", + thread: { + messages: [ + { role: "user", content: "What is the weather like in San Francisco?" }, + ], + }, + tools: tools, + stream: true + }); + + for await (const event of stream) { + console.log(event); + } + } + + main(); + response: | + event: thread.created + data: {"id":"thread_123","object":"thread","created_at":1710351818,"metadata":{}} + + event: thread.run.created + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"file_ids":[],"metadata":{},"usage":null} + + event: thread.run.queued + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"file_ids":[],"metadata":{},"usage":null} + + event: thread.run.in_progress + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"file_ids":[],"metadata":{},"usage":null} + + event: thread.run.step.created + data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} + + event: thread.run.step.in_progress + data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} + + event: thread.run.step.delta + data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"","output":null}}]}}} + + event: 
thread.run.step.delta + data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"{\""}}]}}} + + event: thread.run.step.delta + data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"location"}}]}}} + + ... + + event: thread.run.step.delta + data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"ahrenheit"}}]}}} + + event: thread.run.step.delta + data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\"}"}}]}}} + + event: thread.run.requires_action + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"file_ids":[],"metadata":{},"usage":null} + + event: done + data: [DONE] + + + /threads/{thread_id}/runs: + get: + operationId: listRuns + tags: + - Assistants + summary: Returns a list of runs belonging to a thread. + parameters: + - name: thread_id + in: path + required: true + schema: + type: string + description: The ID of the thread the run belongs to. + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: *pagination_order_param_description + schema: + type: string + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListRunsResponse" + x-oaiMeta: + name: List runs + group: threads + beta: true + returns: A list of [run](/docs/api-reference/runs/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" + python: | + from openai import OpenAI + client = OpenAI() + + runs = client.beta.threads.runs.list( + "thread_abc123" ) + print(runs) node.js: | import OpenAI from "openai"; @@ -3861,7 +4266,7 @@ paths: "failed_at": null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-3.5-turbo", + "model": "gpt-4-turbo", "instructions": null, "tools": [ { @@ -3877,7 +4282,8 @@ paths: "prompt_tokens": 123, "completion_tokens": 456, "total_tokens": 579 - } + }, + "temperature": 1 }, { "id": "run_abc456", @@ -3892,7 +4298,7 @@ paths: "failed_at": null, "completed_at": 1699063291, "last_error": null, - "model": "gpt-3.5-turbo", + "model": "gpt-4-turbo", "instructions": null, "tools": [ { @@ -3908,7 +4314,8 @@ paths: "prompt_tokens": 123, "completion_tokens": 456, "total_tokens": 579 - } + }, + "temperature": 1 } ], "first_id": "run_abc123", @@ -3946,67 +4353,310 @@ paths: beta: true returns: A [run](/docs/api-reference/runs/object) object. 
examples: - request: - curl: | - curl https://api.openai.com/v1/threads/thread_abc123/runs \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -H "Content-Type: application/json" \ - -H "OpenAI-Beta: assistants=v1" \ - -d '{ - "assistant_id": "asst_abc123" - }' - python: | - from openai import OpenAI - client = OpenAI() + - title: Default + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "assistant_id": "asst_abc123" + }' + python: | + from openai import OpenAI + client = OpenAI() - run = client.beta.threads.runs.create( - thread_id="thread_abc123", - assistant_id="asst_abc123" - ) - print(run) - node.js: | - import OpenAI from "openai"; + run = client.beta.threads.runs.create( + thread_id="thread_abc123", + assistant_id="asst_abc123" + ) - const openai = new OpenAI(); + print(run) + node.js: | + import OpenAI from "openai"; - async function main() { - const run = await openai.beta.threads.runs.create( - "thread_abc123", - { assistant_id: "asst_abc123" } - ); + const openai = new OpenAI(); - console.log(run); + async function main() { + const run = await openai.beta.threads.runs.create( + "thread_abc123", + { assistant_id: "asst_abc123" } + ); + + console.log(run); + } + + main(); + response: &run_object_example | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699063290, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "queued", + "started_at": 1699063290, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699063291, + "last_error": null, + "model": "gpt-4-turbo", + "instructions": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "file_ids": [ + "file-abc123", + "file-abc456" + ], + "metadata": {}, + "usage": null, + "temperature": 1 } + - title: Streaming + request: + curl: | + curl 
https://api.openai.com/v1/threads/thread_123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "assistant_id": "asst_123", + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() - main(); - response: &run_object_example | - { - "id": "run_abc123", - "object": "thread.run", - "created_at": 1699063290, - "assistant_id": "asst_abc123", - "thread_id": "thread_abc123", - "status": "queued", - "started_at": 1699063290, - "expires_at": null, - "cancelled_at": null, - "failed_at": null, - "completed_at": 1699063291, - "last_error": null, - "model": "gpt-4", - "instructions": null, - "tools": [ - { - "type": "code_interpreter" + stream = client.beta.threads.runs.create( + thread_id="thread_123", + assistant_id="asst_123", + stream=True + ) + + for event in stream: + print(event) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const stream = await openai.beta.threads.runs.create( + "thread_123", + { assistant_id: "asst_123", stream: true } + ); + + for await (const event of stream) { + console.log(event); + } } - ], - "file_ids": [ - "file-abc123", - "file-abc456" - ], - "metadata": {}, - "usage": null - } + + main(); + response: | + event: thread.run.created + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"file_ids":[],"metadata":{},"usage":null} + + event: thread.run.queued + data: 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"file_ids":[],"metadata":{},"usage":null} + + event: thread.run.in_progress + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"file_ids":[],"metadata":{},"usage":null} + + event: thread.run.step.created + data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + event: thread.run.step.in_progress + data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + event: thread.message.created + data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"file_ids":[],"metadata":{}} + + event: 
thread.message.in_progress + data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"file_ids":[],"metadata":{}} + + event: thread.message.delta + data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} + + ... + + event: thread.message.delta + data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} + + event: thread.message.delta + data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} + + event: thread.message.completed + data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710330642,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! 
How can I assist you today?","annotations":[]}}],"file_ids":[],"metadata":{}} + + event: thread.run.step.completed + data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710330642,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} + + event: thread.run.completed + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"file_ids":[],"metadata":{},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} + + event: done + data: [DONE] + + - title: Streaming with Functions + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "assistant_id": "asst_abc123", + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + } + ], + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ] + + stream = client.beta.threads.runs.create( + thread_id="thread_abc123", + assistant_id="asst_abc123", + tools=tools, + stream=True + ) + + for event in stream: + print(event) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + const tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ]; + + async function main() { + const stream = await openai.beta.threads.runs.create( + "thread_abc123", + { + assistant_id: "asst_abc123", + tools: tools, + stream: true + } + ); + + for await (const event of stream) { + console.log(event); + } + } + + main(); + response: | + event: thread.run.created + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"file_ids":[],"metadata":{},"usage":null} + + event: thread.run.queued + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"file_ids":[],"metadata":{},"usage":null} + + event: thread.run.in_progress + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"file_ids":[],"metadata":{},"usage":null} + + event: thread.run.step.created + data: 
{"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + event: thread.run.step.in_progress + data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + event: thread.message.created + data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"file_ids":[],"metadata":{}} + + event: thread.message.in_progress + data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"file_ids":[],"metadata":{}} + + event: thread.message.delta + data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} + + ... 
+ + event: thread.message.delta + data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} + + event: thread.message.delta + data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} + + event: thread.message.completed + data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! How can I assist you today?","annotations":[]}}],"file_ids":[],"metadata":{}} + + event: thread.run.step.completed + data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} + + event: thread.run.completed + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"file_ids":[],"metadata":{},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} + + event: done + data: [DONE] + /threads/{thread_id}/runs/{run_id}: get: @@ -4053,6 +4703,7 @@ paths: thread_id="thread_abc123", run_id="run_abc123" ) + print(run) node.js: | import OpenAI from "openai"; @@ -4083,7 +4734,7 @@ paths: "failed_at": null, "completed_at": 1699075073, 
"last_error": null, - "model": "gpt-3.5-turbo", + "model": "gpt-4-turbo", "instructions": null, "tools": [ { @@ -4099,7 +4750,8 @@ paths: "prompt_tokens": 123, "completion_tokens": 456, "total_tokens": 579 - } + }, + "temperature": 1 } post: operationId: modifyRun @@ -4158,6 +4810,7 @@ paths: run_id="run_abc123", metadata={"user_id": "user_abc123"}, ) + print(run) node.js: | import OpenAI from "openai"; @@ -4193,7 +4846,7 @@ paths: "failed_at": null, "completed_at": 1699075073, "last_error": null, - "model": "gpt-3.5-turbo", + "model": "gpt-4-turbo", "instructions": null, "tools": [ { @@ -4211,7 +4864,8 @@ paths: "prompt_tokens": 123, "completion_tokens": 456, "total_tokens": 579 - } + }, + "temperature": 1 } /threads/{thread_id}/runs/{run_id}/submit_tool_outputs: @@ -4253,106 +4907,214 @@ paths: beta: true returns: The modified [run](/docs/api-reference/runs/object) object matching the specified ID. examples: - request: - curl: | - curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/submit_tool_outputs \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -H "Content-Type: application/json" \ - -H "OpenAI-Beta: assistants=v1" \ - -d '{ - "tool_outputs": [ + - title: Default + request: + curl: | + curl https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "tool_outputs": [ + { + "tool_call_id": "call_001", + "output": "70 degrees and sunny." + } + ] + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.submit_tool_outputs( + thread_id="thread_123", + run_id="run_123", + tool_outputs=[ { - "tool_call_id": "call_abc123", - "output": "28C" + "tool_call_id": "call_001", + "output": "70 degrees and sunny." 
} ] - }' - python: | - from openai import OpenAI - client = OpenAI() + ) - run = client.beta.threads.runs.submit_tool_outputs( - thread_id="thread_abc123", - run_id="run_abc123", - tool_outputs=[ - { - "tool_call_id": "call_abc123", - "output": "28C" - } - ] - ) - print(run) - node.js: | - import OpenAI from "openai"; + print(run) + node.js: | + import OpenAI from "openai"; - const openai = new OpenAI(); + const openai = new OpenAI(); - async function main() { - const run = await openai.beta.threads.runs.submitToolOutputs( - "thread_abc123", - "run_abc123", + async function main() { + const run = await openai.beta.threads.runs.submitToolOutputs( + "thread_123", + "run_123", + { + tool_outputs: [ + { + tool_call_id: "call_001", + output: "70 degrees and sunny.", + }, + ], + } + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_123", + "object": "thread.run", + "created_at": 1699075592, + "assistant_id": "asst_123", + "thread_id": "thread_123", + "status": "queued", + "started_at": 1699075592, + "expires_at": 1699076192, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "last_error": null, + "model": "gpt-4-turbo", + "instructions": null, + "tools": [ { - tool_outputs: [ + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + } + ], + "file_ids": [], + "metadata": {}, + "usage": null, + "temperature": 1 + } + + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v1" \ + -d '{ + "tool_outputs": [ { - tool_call_id: "call_abc123", - output: "28C", - }, + "tool_call_id": "call_001", + "output": "70 degrees and sunny." + } ], - } - ); + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + stream = client.beta.threads.runs.submit_tool_outputs( + thread_id="thread_123", + run_id="run_123", + tool_outputs=[ + { + "tool_call_id": "call_001", + "output": "70 degrees and sunny." + } + ], + stream=True + ) + + for event in stream: + print(event) + node.js: | + import OpenAI from "openai"; - console.log(run); - } + const openai = new OpenAI(); - main(); - response: | - { - "id": "run_abc123", - "object": "thread.run", - "created_at": 1699075592, - "assistant_id": "asst_abc123", - "thread_id": "thread_abc123", - "status": "queued", - "started_at": 1699075592, - "expires_at": 1699076192, - "cancelled_at": null, - "failed_at": null, - "completed_at": null, - "last_error": null, - "model": "gpt-4", - "instructions": "You tell the weather.", - "tools": [ - { - "type": "function", - "function": { - "name": "get_weather", - "description": "Determine weather in my location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state e.g. 
San Francisco, CA" + async function main() { + const stream = await openai.beta.threads.runs.submitToolOutputs( + "thread_123", + "run_123", + { + tool_outputs: [ + { + tool_call_id: "call_001", + output: "70 degrees and sunny.", }, - "unit": { - "type": "string", - "enum": [ - "c", - "f" - ] - } - }, - "required": [ - "location" - ] + ], } + ); + + for await (const event of stream) { + console.log(event); } } - ], - "file_ids": [], - "metadata": {}, - "usage": null - } + + main(); + response: | + event: thread.run.step.completed + data: {"id":"step_001","object":"thread.run.step","created_at":1710352449,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1710352475,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_iWr0kQ2EaYMaxNdl0v3KYkx7","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}","output":"70 degrees and sunny."}}]},"usage":{"prompt_tokens":291,"completion_tokens":24,"total_tokens":315}} + + event: thread.run.queued + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"file_ids":[],"metadata":{},"usage":null} + + event: thread.run.in_progress + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"file_ids":[],"metadata":{},"usage":null} + + event: thread.run.step.created + data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} + + event: thread.run.step.in_progress + data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} + + event: thread.message.created + data: 
{"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"file_ids":[],"metadata":{}} + + event: thread.message.in_progress + data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"file_ids":[],"metadata":{}} + + event: thread.message.delta + data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"The","annotations":[]}}]}} + + event: thread.message.delta + data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" current"}}]}} + + event: thread.message.delta + data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" weather"}}]}} + + ... 
+ + event: thread.message.delta + data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" sunny"}}]}} + + event: thread.message.delta + data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"."}}]}} + + event: thread.message.completed + data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710352477,"role":"assistant","content":[{"type":"text","text":{"value":"The current weather in San Francisco, CA is 70 degrees Fahrenheit and sunny.","annotations":[]}}],"file_ids":[],"metadata":{}} + + event: thread.run.step.completed + data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710352477,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":{"prompt_tokens":329,"completion_tokens":18,"total_tokens":347}} + + event: thread.run.completed + data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"file_ids":[],"metadata":{},"usage":{"prompt_tokens":620,"completion_tokens":42,"total_tokens":662}} + + event: done + data: [DONE] /threads/{thread_id}/runs/{run_id}/cancel: post: @@ -4400,6 +5162,7 @@ paths: thread_id="thread_abc123", run_id="run_abc123" ) + print(run) node.js: | import OpenAI from "openai"; @@ -4430,7 +5193,7 @@ paths: "failed_at": null, "completed_at": null, "last_error": null, - "model": "gpt-4", + "model": "gpt-4-turbo", "instructions": "You summarize books.", "tools": [ { @@ -4439,7 +5202,8 @@ paths: ], "file_ids": [], "metadata": {}, - "usage": null + "usage": null, + "temperature": 1 } /threads/{thread_id}/runs/{run_id}/steps: @@ -4512,6 +5276,7 @@ paths: thread_id="thread_abc123", run_id="run_abc123" ) + print(run_steps) node.js: | import OpenAI from "openai"; @@ -4615,6 +5380,7 @@ paths: run_id="run_abc123", step_id="step_abc123" ) + print(run_step) node.js: | import OpenAI from "openai"; @@ -5359,7 +6125,10 @@ components: nullable: true default: false suffix: - description: The suffix that comes after a completion of inserted text. + description: | + The suffix that comes after a completion of inserted text. + + This parameter is only supported for `gpt-3.5-turbo-instruct`. default: null nullable: true type: string @@ -5479,7 +6248,7 @@ components: "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", "object": "text_completion", "created": 1589478378, - "model": "gpt-3.5-turbo", + "model": "gpt-4-turbo", "choices": [ { "text": "\n\nThis is indeed a test", @@ -5909,12 +6678,14 @@ components: $ref: "#/components/schemas/ChatCompletionRequestMessage" model: description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. 
- example: "gpt-3.5-turbo" + example: "gpt-4-turbo" anyOf: - type: string - type: string enum: [ + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", "gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", @@ -5953,7 +6724,7 @@ components: Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. logprobs: - description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. This option is currently not available on the `gpt-4-vision-preview` model. + description: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. type: boolean default: false nullable: true @@ -6050,7 +6821,7 @@ components: type: array description: > A list of tools the model may call. Currently, only functions are supported as a tool. - Use this to provide a list of functions the model may generate JSON inputs for. + Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. items: $ref: "#/components/schemas/ChatCompletionTool" tool_choice: @@ -6802,6 +7573,65 @@ components: type: string nullable: true example: "file-abc123" + integrations: + type: array + description: A list of integrations to enable for your fine-tuning job. + nullable: true + items: + type: object + required: + - type + - wandb + properties: + type: + description: | + The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. 
+ oneOf: + - type: string + enum: [wandb] + wandb: + type: object + description: | + The settings for your integration with Weights and Biases. This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. + required: + - project + properties: + project: + description: | + The name of the project that the new run will be created under. + type: string + example: "my-wandb-project" + name: + description: | + A display name to set for the run. If not set, we will use the Job ID as the name. + nullable: true + type: string + entity: + description: | + The entity to use for the run. This allows you to set the team or username of the WandB user that you would + like associated with the run. If not set, the default entity for the registered WandB API key is used. + nullable: true + type: string + tags: + description: | + A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some + default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + type: array + items: + type: string + example: "custom-tag" + + seed: + description: | + The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. + If a seed is not specified, one will be generated for you. 
+ type: integer + nullable: true + minimum: 0 + maximum: 2147483647 + example: 42 required: - model - training_file @@ -6820,6 +7650,29 @@ components: - object - data + ListFineTuningJobCheckpointsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJobCheckpoint" + object: + type: string + enum: [list] + first_id: + type: string + nullable: true + last_id: + type: string + nullable: true + has_more: + type: boolean + required: + - object + - data + - has_more + CreateEmbeddingRequest: type: object additionalProperties: false @@ -7408,6 +8261,18 @@ components: type: string nullable: true description: The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). + integrations: + type: array + nullable: true + description: A list of integrations to enable for this fine-tuning job. + maxItems: 5 + items: + oneOf: + - $ref: "#/components/schemas/FineTuningIntegration" + x-oaiExpandable: true + seed: + type: integer + description: The seed used for the fine-tuning job. required: - created_at - error @@ -7423,10 +8288,56 @@ components: - trained_tokens - training_file - validation_file + - seed x-oaiMeta: name: The fine-tuning job object example: *fine_tuning_example + FineTuningIntegration: + type: object + title: Fine-Tuning Job Integration + required: + - type + - wandb + properties: + type: + type: string + description: "The type of the integration being enabled for the fine-tuning job" + enum: ["wandb"] + wandb: + type: object + description: | + The settings for your integration with Weights and Biases. This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. 
+ required: + - project + properties: + project: + description: | + The name of the project that the new run will be created under. + type: string + example: "my-wandb-project" + name: + description: | + A display name to set for the run. If not set, we will use the Job ID as the name. + nullable: true + type: string + entity: + description: | + The entity to use for the run. This allows you to set the team or username of the WandB user that you would + like associated with the run. If not set, the default entity for the registered WandB API key is used. + nullable: true + type: string + tags: + description: | + A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some + default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + type: array + items: + type: string + example: "custom-tag" + FineTuningJobEvent: type: object description: Fine-tuning job event object @@ -7460,6 +8371,78 @@ components: "message": "Created fine-tuning job" } + FineTuningJobCheckpoint: + type: object + title: FineTuningJobCheckpoint + description: | + The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. + properties: + id: + type: string + description: The checkpoint identifier, which can be referenced in the API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the checkpoint was created. + fine_tuned_model_checkpoint: + type: string + description: The name of the fine-tuned checkpoint model that is created. + step_number: + type: integer + description: The step number that the checkpoint was created at. + metrics: + type: object + description: Metrics at the step number during the fine-tuning job. 
+ properties: + step: + type: number + train_loss: + type: number + train_mean_token_accuracy: + type: number + valid_loss: + type: number + valid_mean_token_accuracy: + type: number + full_valid_loss: + type: number + full_valid_mean_token_accuracy: + type: number + fine_tuning_job_id: + type: string + description: The name of the fine-tuning job that this checkpoint was created from. + object: + type: string + description: The object type, which is always "fine_tuning.job.checkpoint". + enum: [fine_tuning.job.checkpoint] + required: + - created_at + - fine_tuning_job_id + - fine_tuned_model_checkpoint + - id + - metrics + - object + - step_number + x-oaiMeta: + name: The fine-tuning job checkpoint object + example: | + { + "object": "fine_tuning.job.checkpoint", + "id": "ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P", + "created_at": 1712211699, + "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom_suffix:9ABel2dg:ckpt-step-88", + "fine_tuning_job_id": "ftjob-fpbNQ3H1GrMehXRf8cO97xTN", + "metrics": { + "step": 88, + "train_loss": 0.478, + "train_mean_token_accuracy": 0.924, + "valid_loss": 10.112, + "valid_mean_token_accuracy": 0.145, + "full_valid_loss": 0.567, + "full_valid_mean_token_accuracy": 0.944 + }, + "step_number": 88 + } + CompletionUsage: type: object description: Usage statistics for the completion request. @@ -7548,9 +8531,9 @@ components: type: string instructions: description: &assistant_instructions_param_description | - The system instructions that the assistant uses. The maximum length is 32768 characters. + The system instructions that the assistant uses. The maximum length is 256,000 characters. 
type: string - maxLength: 32768 + maxLength: 256000 nullable: true tools: description: &assistant_tools_param_description | @@ -7600,8 +8583,32 @@ components: properties: model: description: *model_description + example: "gpt-4-turbo" anyOf: - type: string + - type: string + enum: + [ + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string name: description: *assistant_name_param_description type: string @@ -7616,7 +8623,7 @@ components: description: *assistant_instructions_param_description type: string nullable: true - maxLength: 32768 + maxLength: 256000 tools: description: *assistant_tools_param_description default: [] @@ -7665,7 +8672,7 @@ components: description: *assistant_instructions_param_description type: string nullable: true - maxLength: 32768 + maxLength: 256000 tools: description: *assistant_tools_param_description default: [] @@ -7848,6 +8855,7 @@ components: expires_at: description: The Unix timestamp (in seconds) for when the run will expire. type: integer + nullable: true started_at: description: The Unix timestamp (in seconds) for when the run was started. type: integer @@ -7894,6 +8902,10 @@ components: nullable: true usage: $ref: "#/components/schemas/RunCompletionUsage" + temperature: + description: The sampling temperature used for this run. If not set, defaults to 1. 
+ type: number + nullable: true required: - id - object @@ -7931,7 +8943,7 @@ components: "failed_at": null, "completed_at": 1699073498, "last_error": null, - "model": "gpt-4", + "model": "gpt-4-turbo", "instructions": null, "tools": [{"type": "retrieval"}, {"type": "code_interpreter"}], "file_ids": [], @@ -7940,7 +8952,8 @@ components: "prompt_tokens": 123, "completion_tokens": 456, "total_tokens": 579 - } + }, + "temperature": 1 } CreateRunRequest: type: object @@ -7951,7 +8964,32 @@ components: type: string model: description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. - type: string + example: "gpt-4-turbo" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string nullable: true instructions: description: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. @@ -7961,6 +8999,12 @@ components: description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. type: string nullable: true + additional_messages: + description: Adds additional messages to the thread before creating the run. + type: array + items: + $ref: "#/components/schemas/CreateMessageRequest" + nullable: true tools: description: Override the tools the assistant can use for this run. 
This is useful for modifying the behavior on a per-run basis. nullable: true @@ -7977,6 +9021,20 @@ components: type: object x-oaiTypeLabel: map nullable: true + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: &run_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. required: - thread_id - assistant_id @@ -8030,6 +9088,11 @@ components: output: type: string description: The output of the tool call to be submitted to continue the run. + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. required: - tool_outputs @@ -8074,7 +9137,32 @@ components: description: If no thread is provided, an empty thread will be created. model: description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. 
- type: string + example: "gpt-4-turbo" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string nullable: true instructions: description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. @@ -8095,6 +9183,19 @@ components: type: object x-oaiTypeLabel: map nullable: true + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *run_temperature_description + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. required: - thread_id - assistant_id @@ -8218,6 +9319,29 @@ components: thread_id: description: The [thread](/docs/api-reference/threads) ID that this message belongs to. type: string + status: + description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. + type: string + enum: ["in_progress", "incomplete", "completed"] + incomplete_details: + description: On an incomplete message, details about why the message is incomplete. + type: object + properties: + reason: + type: string + description: The reason the message is incomplete. + enum: ["content_filter", "max_tokens", "run_cancelled", "run_expired", "run_failed"] + nullable: true + required: + - reason + completed_at: + description: The Unix timestamp (in seconds) for when the message was completed. 
+ type: integer + nullable: true + incomplete_at: + description: The Unix timestamp (in seconds) for when the message was marked as incomplete. + type: integer + nullable: true role: description: The entity that produced the message. One of `user` or `assistant`. type: string @@ -8235,7 +9359,7 @@ components: type: string nullable: true run_id: - description: If applicable, the ID of the [run](/docs/api-reference/runs) associated with the authoring of this message. + description: The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints. type: string nullable: true file_ids: @@ -8255,6 +9379,10 @@ components: - object - created_at - thread_id + - status + - incomplete_details + - completed_at + - incomplete_at - role - content - assistant_id @@ -8286,6 +9414,65 @@ components: "metadata": {} } + MessageDeltaObject: + type: object + title: Message delta object + description: | + Represents a message delta i.e. any changed fields on a message during streaming. + properties: + id: + description: The identifier of the message, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.message.delta`. + type: string + enum: ["thread.message.delta"] + delta: + description: The delta containing the fields that have changed on the Message. + type: object + properties: + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: ["user", "assistant"] + content: + description: The content of the message in array of text and/or images. + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" + - $ref: "#/components/schemas/MessageDeltaContentTextObject" + x-oaiExpandable: true + file_ids: + description: A list of [file](/docs/api-reference/files) IDs that the assistant should use. 
Useful for tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be attached to a message. + default: [] + maxItems: 10 + type: array + items: + type: string + required: + - id + - object + - delta + x-oaiMeta: + name: The message delta object + beta: true + example: | + { + "id": "msg_123", + "object": "thread.message.delta", + "delta": { + "content": [ + { + "index": 0, + "type": "text", + "text": { "value": "Hello", "annotations": [] } + } + ] + } + } + + CreateMessageRequest: type: object additionalProperties: false @@ -8295,12 +9482,15 @@ components: properties: role: type: string - enum: ["user"] - description: The role of the entity that is creating the message. Currently only `user` is supported. + enum: ["user", "assistant"] + description: | + The role of the entity that is creating the message. Allowed values include: + - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. + - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation. content: type: string minLength: 1 - maxLength: 32768 + maxLength: 256000 description: The content of the message. file_ids: description: A list of [File](/docs/api-reference/files) IDs that the message should use. There can be a maximum of 10 files attached to a message. Useful for tools like `retrieval` and `code_interpreter` that can access and use files. @@ -8387,6 +9577,28 @@ components: - type - image_file + MessageDeltaContentImageFileObject: + title: Image file + type: object + description: References an image [File](/docs/api-reference/files) in the content of a message. + properties: + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `image_file`. 
+ type: string + enum: ["image_file"] + image_file: + type: object + properties: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the message content. + type: string + required: + - index + - type + MessageContentTextObject: title: Text type: object @@ -8486,6 +9698,101 @@ components: - start_index - end_index + MessageDeltaContentTextObject: + title: Text + type: object + description: The text content that is part of a message. + properties: + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `text`. + type: string + enum: ["text"] + text: + type: object + properties: + value: + description: The data that makes up the text. + type: string + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject" + x-oaiExpandable: true + required: + - index + - type + + MessageDeltaContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "retrieval" tool to search files. + properties: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_citation`. + type: string + enum: ["file_citation"] + text: + description: The text in the message content that needs to be replaced. + type: string + file_citation: + type: object + properties: + file_id: + description: The ID of the specific File the citation is from. + type: string + quote: + description: The specific quote in the file. 
+ type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + + MessageDeltaContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + properties: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_path`. + type: string + enum: ["file_path"] + text: + description: The text in the message content that needs to be replaced. + type: string + file_path: + type: object + properties: + file_id: + description: The ID of the file that was generated. + type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + + RunStepObject: type: object title: Run steps @@ -8586,6 +9893,57 @@ components: beta: true example: *run_step_object_example + RunStepDeltaObject: + type: object + title: Run step delta object + description: | + Represents a run step delta i.e. any changed fields on a run step during streaming. + properties: + id: + description: The identifier of the run step, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.run.step.delta`. + type: string + enum: ["thread.run.step.delta"] + delta: + description: The delta containing the fields that have changed on the run step. + type: object + properties: + step_details: + type: object + description: The details of the run step. 
+ oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsObject" + x-oaiExpandable: true + required: + - id + - object + - delta + x-oaiMeta: + name: The run step delta object + beta: true + example: | + { + "id": "step_123", + "object": "thread.run.step.delta", + "delta": { + "step_details": { + "type": "tool_calls", + "tool_calls": [ + { + "index": 0, + "id": "call_123", + "type": "code_interpreter", + "code_interpreter": { "input": "", "outputs": [] } + } + ] + } + } + } + + ListRunStepsResponse: properties: object: @@ -8632,6 +9990,24 @@ components: - type - message_creation + RunStepDeltaStepDetailsMessageCreationObject: + title: Message creation + type: object + description: Details of the message creation by the run step. + properties: + type: + description: Always `message_creation`. + type: string + enum: ["message_creation"] + message_creation: + type: object + properties: + message_id: + type: string + description: The ID of the message that was created by this run step. + required: + - type + RunStepDetailsToolCallsObject: title: Tool calls type: object @@ -8655,6 +10031,28 @@ components: - type - tool_calls + RunStepDeltaStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. + type: string + enum: ["tool_calls"] + tool_calls: + type: array + description: | + An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `retrieval`, or `function`. 
+ items: + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsRetrievalObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + RunStepDetailsToolCallsCodeObject: title: Code interpreter tool call type: object @@ -8670,9 +10068,45 @@ components: code_interpreter: type: object description: The Code Interpreter tool call definition. - required: - - input - - outputs + required: + - input + - outputs + properties: + input: + type: string + description: The input to the Code Interpreter tool call. + outputs: + type: array + description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. + items: + type: object + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject" + x-oaiExpandable: true + required: + - id + - type + - code_interpreter + + RunStepDeltaStepDetailsToolCallsCodeObject: + title: Code interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call. + type: + type: string + description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call. + enum: ["code_interpreter"] + code_interpreter: + type: object + description: The Code Interpreter tool call definition. 
properties: input: type: string @@ -8683,13 +10117,13 @@ components: items: type: object oneOf: - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject" - - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject" x-oaiExpandable: true required: - - id + - index - type - - code_interpreter + RunStepDetailsToolCallsCodeOutputLogsObject: title: Code interpreter log output @@ -8707,6 +10141,26 @@ components: - type - logs + RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject: + title: Code interpreter log output + type: object + description: Text output from the Code Interpreter tool call as part of a run step. + properties: + index: + type: integer + description: The index of the output in the outputs array. + type: + description: Always `logs`. + type: string + enum: ["logs"] + logs: + type: string + description: The text output from the Code Interpreter tool call. + required: + - index + - type + + RunStepDetailsToolCallsCodeOutputImageObject: title: Code interpreter image output type: object @@ -8727,6 +10181,27 @@ components: - type - image + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: + title: Code interpreter image output + type: object + properties: + index: + type: integer + description: The index of the output in the outputs array. + type: + description: Always `image`. + type: string + enum: ["image"] + image: + type: object + properties: + file_id: + description: The [file](/docs/api-reference/files) ID of the image. 
+ type: string + required: + - index + - type + RunStepDetailsToolCallsRetrievalObject: title: Retrieval tool call type: object @@ -8747,6 +10222,28 @@ components: - type - retrieval + RunStepDeltaStepDetailsToolCallsRetrievalObject: + title: Retrieval tool call + type: object + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `retrieval` for this type of tool call. + enum: ["retrieval"] + retrieval: + type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + required: + - index + - type + RunStepDetailsToolCallsFunctionObject: type: object title: Function tool call @@ -8781,6 +10278,39 @@ components: - type - function + RunStepDeltaStepDetailsToolCallsFunctionObject: + type: object + title: Function tool call + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `function` for this type of tool call. + enum: ["function"] + function: + type: object + description: The definition of the function that was called. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. + nullable: true + required: + - index + - type + + AssistantFileObject: type: object title: Assistant files @@ -8927,9 +10457,377 @@ components: - last_id - has_more + AssistantStreamEvent: + description: | + Represents an event emitted when streaming a Run. 
+ + Each event in a server-sent events stream has an `event` and `data` property: + + ``` + event: thread.created + data: {"id": "thread_123", "object": "thread", ...} + ``` + + We emit events whenever a new object is created, transitions to a new state, or is being + streamed in parts (deltas). For example, we emit `thread.run.created` when a new run + is created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses + to create a message during a run, we emit a `thread.message.created event`, a + `thread.message.in_progress` event, many `thread.message.delta` events, and finally a + `thread.message.completed` event. + + We may add additional events over time, so we recommend handling unknown events gracefully + in your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to + integrate the Assistants API with streaming. + oneOf: + - $ref: "#/components/schemas/ThreadStreamEvent" + - $ref: "#/components/schemas/RunStreamEvent" + - $ref: "#/components/schemas/RunStepStreamEvent" + - $ref: "#/components/schemas/MessageStreamEvent" + - $ref: "#/components/schemas/ErrorEvent" + - $ref: "#/components/schemas/DoneEvent" + x-oaiMeta: + name: Assistant stream events + beta: true + + ThreadStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: ["thread.created"] + data: + $ref: "#/components/schemas/ThreadObject" + required: + - event + - data + description: Occurs when a new [thread](/docs/api-reference/threads/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [thread](/docs/api-reference/threads/object)" + + RunStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: ["thread.run.created"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a new [run](/docs/api-reference/runs/object) is created. 
+ x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.queued"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.in_progress"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.requires_action"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.completed"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is completed. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.failed"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) fails. 
+ x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.cancelling"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.cancelled"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is cancelled. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.expired"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) expires. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + + RunStepStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: ["thread.run.step.created"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) is created. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.step.in_progress"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) moves to an `in_progress` state. 
+ x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.step.delta"] + data: + $ref: "#/components/schemas/RunStepDeltaObject" + required: + - event + - data + description: Occurs when parts of a [run step](/docs/api-reference/runs/step-object) are being streamed. + x-oaiMeta: + dataDescription: "`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.step.completed"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) is completed. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.step.failed"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) fails. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.step.cancelled"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) is cancelled. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.step.expired"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) expires. 
+ x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + + MessageStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: ["thread.message.created"] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: ["thread.message.in_progress"] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: ["thread.message.delta"] + data: + $ref: "#/components/schemas/MessageDeltaObject" + required: + - event + - data + description: Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed. + x-oaiMeta: + dataDescription: "`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)" + - type: object + properties: + event: + type: string + enum: ["thread.message.completed"] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) is completed. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: ["thread.message.incomplete"] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed. 
+ x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + + ErrorEvent: + type: object + properties: + event: + type: string + enum: ["error"] + data: + $ref: "#/components/schemas/Error" + required: + - event + - data + description: Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. + x-oaiMeta: + dataDescription: "`data` is an [error](/docs/guides/error-codes/api-errors)" + + DoneEvent: + type: object + properties: + event: + type: string + enum: ["done"] + data: + type: string + enum: ["[DONE]"] + required: + - event + - data + description: Occurs when a stream ends. + x-oaiMeta: + dataDescription: "`data` is `[DONE]`" + security: - ApiKeyAuth: [] x-oaiMeta: + navigationGroups: + - id: endpoints + title: Endpoints + - id: assistants + title: Assistants + - id: legacy + title: Legacy groups: # > General Notes # The `groups` section is used to generate the API reference pages and navigation, in the same @@ -8958,6 +10856,7 @@ x-oaiMeta: Learn how to turn audio into text or text into audio. Related guide: [Speech to text](/docs/guides/speech-to-text) + navigationGroup: endpoints sections: - type: endpoint key: createSpeech @@ -8980,6 +10879,7 @@ x-oaiMeta: Given a list of messages comprising a conversation, the model will return a response. Related guide: [Chat Completions](/docs/guides/text-generation) + navigationGroup: endpoints sections: - type: endpoint key: createChatCompletion @@ -8996,6 +10896,7 @@ x-oaiMeta: Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. Related guide: [Embeddings](/docs/guides/embeddings) + navigationGroup: endpoints sections: - type: endpoint key: createEmbedding @@ -9009,6 +10910,7 @@ x-oaiMeta: Manage fine-tuning jobs to tailor a model to your specific training data. 
Related guide: [Fine-tune models](/docs/guides/fine-tuning) + navigationGroup: endpoints sections: - type: endpoint key: createFineTuningJob @@ -9019,6 +10921,9 @@ x-oaiMeta: - type: endpoint key: listFineTuningEvents path: list-events + - type: endpoint + key: listFineTuningJobCheckpoints + path: list-checkpoints - type: endpoint key: retrieveFineTuningJob path: retrieve @@ -9031,10 +10936,14 @@ x-oaiMeta: - type: object key: FineTuningJobEvent path: event-object + - type: object + key: FineTuningJobCheckpoint + path: checkpoint-object - id: files title: Files description: | Files are used to upload documents that can be used with features like [Assistants](/docs/api-reference/assistants) and [Fine-tuning](/docs/api-reference/fine-tuning). + navigationGroup: endpoints sections: - type: endpoint key: createFile @@ -9060,6 +10969,7 @@ x-oaiMeta: Given a prompt and/or an input image, the model will generate a new image. Related guide: [Image generation](/docs/guides/images) + navigationGroup: endpoints sections: - type: endpoint key: createImage @@ -9077,6 +10987,7 @@ x-oaiMeta: title: Models description: | List and describe the various models available in the API. You can refer to the [Models](/docs/models) documentation to understand what models are available and the differences between them. + navigationGroup: endpoints sections: - type: endpoint key: listModels @@ -9096,6 +11007,7 @@ x-oaiMeta: Given some input text, outputs if the model classifies it as potentially harmful across several categories. Related guide: [Moderations](/docs/guides/moderation) + navigationGroup: endpoints sections: - type: endpoint key: createModeration @@ -9110,6 +11022,7 @@ x-oaiMeta: Build assistants that can call models and use tools to perform tasks. [Get started with the Assistants API](/docs/assistants) + navigationGroup: assistants sections: - type: endpoint key: createAssistant @@ -9151,6 +11064,7 @@ x-oaiMeta: Create threads that assistants can interact with. 
Related guide: [Assistants](/docs/assistants/overview) + navigationGroup: assistants sections: - type: endpoint key: createThread @@ -9174,6 +11088,7 @@ x-oaiMeta: Create messages within threads Related guide: [Assistants](/docs/assistants/overview) + navigationGroup: assistants sections: - type: endpoint key: createMessage @@ -9206,6 +11121,7 @@ x-oaiMeta: Represents an execution run on a thread. Related guide: [Assistants](/docs/assistants/overview) + navigationGroup: assistants sections: - type: endpoint key: createRun @@ -9240,9 +11156,34 @@ x-oaiMeta: - type: object key: RunStepObject path: step-object + - id: assistants-streaming + title: Streaming + beta: true + description: | + Stream the result of executing a Run or resuming a Run after submitting tool outputs. + + You can stream events from the [Create Thread and Run](/docs/api-reference/runs/createThreadAndRun), + [Create Run](/docs/api-reference/runs/createRun), and [Submit Tool Outputs](/docs/api-reference/runs/submitToolOutputs) + endpoints by passing `"stream": true`. The response will be a [Server-Sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events) stream. + + Our Node and Python SDKs provide helpful utilities to make streaming easy. Reference the + [Assistants API quickstart](/docs/assistants/overview) to learn more. + navigationGroup: assistants + sections: + - type: object + key: MessageDeltaObject + path: message-delta-object + - type: object + key: RunStepDeltaObject + path: run-step-delta-object + - type: object + key: AssistantStreamEvent + path: events + - id: completions title: Completions legacy: true + navigationGroup: legacy description: | Given a prompt, the model will return one or more predicted completions along with the probabilities of alternative tokens at each position. Most developer should use our [Chat Completions API](/docs/guides/text-generation/text-generation-models) to leverage our best and newest models. 
 sections:
diff --git a/packages/openai_dart/test/openai_client_fine_tuning_test.dart b/packages/openai_dart/test/openai_client_fine_tuning_test.dart
index 13ff9cb1..3ba5b5e2 100644
--- a/packages/openai_dart/test/openai_client_fine_tuning_test.dart
+++ b/packages/openai_dart/test/openai_client_fine_tuning_test.dart
@@ -62,6 +62,15 @@ void main() {
       expect(res.status, FineTuningJobStatus.cancelled);
     });
 
+    test('Test list fine-tuning jobs checkpoints', skip: true, () async {
+      final res = await client.listFineTuningJobCheckpoints(
+        fineTuningJobId: 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',
+      );
+      final firstCheckpoint = res.data.first;
+      expect(firstCheckpoint.id, isNotEmpty);
+    });
+
+
     test('Test list fine-tuning events', skip: true, () async {
       final res = await client.listFineTuningEvents(
         fineTuningJobId: 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',