From 4f114d5121f5c66619c7bdd18d0aa2b7a627e3ff Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 8 Oct 2024 16:41:22 +0100 Subject: [PATCH] feat(api): add message batches api For more information see https://anthropic.com/news/message-batches-api --- .stats.yml | 4 +- README.md | 76 ++ api.md | 71 ++ examples/batch-results.ts | 20 + src/index.ts | 15 + src/internal/decoders/jsonl.ts | 41 + src/internal/decoders/line.ts | 2 +- src/pagination.ts | 84 ++ src/resources/beta/beta.ts | 104 +++ src/resources/beta/index.ts | 48 +- src/resources/beta/messages/batches.ts | 393 +++++++++ src/resources/beta/messages/index.ts | 53 ++ src/resources/beta/messages/messages.ts | 795 ++++++++++++++++++ src/resources/beta/prompt-caching/messages.ts | 57 +- src/resources/index.ts | 14 +- src/resources/messages.ts | 2 - .../beta/messages/batches.test.ts | 356 ++++++++ .../beta/messages/messages.test.ts | 81 ++ .../beta/prompt-caching/messages.test.ts | 1 + 19 files changed, 2187 insertions(+), 30 deletions(-) create mode 100644 examples/batch-results.ts create mode 100644 src/internal/decoders/jsonl.ts create mode 100644 src/pagination.ts create mode 100644 src/resources/beta/messages/batches.ts create mode 100644 src/resources/beta/messages/index.ts create mode 100644 src/resources/beta/messages/messages.ts create mode 100644 tests/api-resources/beta/messages/batches.test.ts create mode 100644 tests/api-resources/beta/messages/messages.test.ts diff --git a/.stats.yml b/.stats.yml index 87c4c8d2..2523c283 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 3 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/anthropic-286f00929e2a4d28d991e6a7e660fa801dca7ec91d8ecb2fc17654bb8173eb0d.yml +configured_endpoints: 9 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/anthropic-aedee7570aa925baba404fc5bd3c8c1fffe8845517e492751db9b175c5cae9da.yml diff --git a/README.md b/README.md index 6ccba755..b11349af 100644 --- a/README.md +++ b/README.md @@ -132,6 +132,51 @@ Streaming with `client.messages.stream(...)` exposes [various helpers for your c Alternatively, you can use `client.messages.create({ ..., stream: true })` which only returns an async iterable of the events in the stream and thus uses less memory (it does not build up a final message object for you). +## Message Batches + +This SDK provides beta support for the [Message Batches API](https://docs.anthropic.com/en/docs/build-with-claude/message-batches) under the `client.beta.messages.batches` namespace. 
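+
+The examples below assume a constructed client; a minimal sketch (by default the API key is read from the `ANTHROPIC_API_KEY` environment variable):
+
+```ts
+import Anthropic from '@anthropic-ai/sdk';
+
+// Reads process.env['ANTHROPIC_API_KEY'] unless an `apiKey` option is passed.
+const anthropic = new Anthropic();
+```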
+
+### Creating a batch
+
+Message Batches take the exact same request params as the standard Messages API:
+
+```ts
+await anthropic.beta.messages.batches.create({
+  requests: [
+    {
+      custom_id: 'my-first-request',
+      params: {
+        model: 'claude-3-5-sonnet-20240620',
+        max_tokens: 1024,
+        messages: [{ role: 'user', content: 'Hello, world' }],
+      },
+    },
+    {
+      custom_id: 'my-second-request',
+      params: {
+        model: 'claude-3-5-sonnet-20240620',
+        max_tokens: 1024,
+        messages: [{ role: 'user', content: 'Hi again, friend' }],
+      },
+    },
+  ],
+});
+```
+
+### Getting results from a batch
+
+Once a Message Batch has been processed, indicated by `.processing_status === 'ended'`, you can access the results with `.batches.results()`:
+
+```ts
+const results = await anthropic.beta.messages.batches.results(batch_id);
+for await (const entry of results) {
+  if (entry.result.type === 'succeeded') {
+    console.log(entry.result.message.content);
+  }
+}
+```
+
 ## Tool use beta
 
 This SDK provides beta support for tool use, aka function calling. More details can be found in [the documentation](https://docs.anthropic.com/claude/docs/tool-use).
 
@@ -224,6 +269,37 @@ On timeout, an `APIConnectionTimeoutError` is thrown.
 
 Note that requests which time out will be [retried twice by default](#retries).
 
+## Auto-pagination
+
+List methods in the Anthropic API are paginated.
+You can use `for await … of` syntax to iterate through items across all pages:
+
+```ts
+async function fetchAllBetaMessagesBatches(params) {
+  const allBetaMessagesBatches = [];
+  // Automatically fetches more pages as needed.
+  for await (const betaMessageBatch of client.beta.messages.batches.list({ limit: 20 })) {
+    allBetaMessagesBatches.push(betaMessageBatch);
+  }
+  return allBetaMessagesBatches;
+}
+```
+
+Alternatively, you can request a single page at a time:
+
+```ts
+let page = await client.beta.messages.batches.list({ limit: 20 });
+for (const betaMessageBatch of page.data) {
+  console.log(betaMessageBatch);
+}
+
+// Convenience methods are provided for manually paginating:
+while (page.hasNextPage()) {
+  page = await page.getNextPage();
+  // ...
+}
+```
+
 ## Default Headers
 
 We automatically send the `anthropic-version` header set to `2023-06-01`. 
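+
+If you need to send a different value, or additional headers with every request, you can set them when constructing the client; a minimal sketch (the `anthropic-beta` value shown here is purely illustrative):
+
+```ts
+import Anthropic from '@anthropic-ai/sdk';
+
+// Headers given here are merged into every request made by this client instance.
+const anthropic = new Anthropic({
+  defaultHeaders: { 'anthropic-beta': 'message-batches-2024-09-24' },
+});
+```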
diff --git a/api.md b/api.md index a8cd8c9a..ce30c20a 100644 --- a/api.md +++ b/api.md @@ -46,6 +46,77 @@ Methods: # Beta +Types: + +- AnthropicBeta +- BetaAPIError +- BetaAuthenticationError +- BetaError +- BetaErrorResponse +- BetaInvalidRequestError +- BetaNotFoundError +- BetaOverloadedError +- BetaPermissionError +- BetaRateLimitError + +## Messages + +Types: + +- BetaCacheControlEphemeral +- BetaContentBlock +- BetaContentBlockParam +- BetaImageBlockParam +- BetaInputJSONDelta +- BetaMessage +- BetaMessageDeltaUsage +- BetaMessageParam +- BetaMetadata +- BetaRawContentBlockDeltaEvent +- BetaRawContentBlockStartEvent +- BetaRawContentBlockStopEvent +- BetaRawMessageDeltaEvent +- BetaRawMessageStartEvent +- BetaRawMessageStopEvent +- BetaRawMessageStreamEvent +- BetaTextBlock +- BetaTextBlockParam +- BetaTextDelta +- BetaTool +- BetaToolChoice +- BetaToolChoiceAny +- BetaToolChoiceAuto +- BetaToolChoiceTool +- BetaToolResultBlockParam +- BetaToolUseBlock +- BetaToolUseBlockParam +- BetaUsage + +Methods: + +- client.beta.messages.create({ ...params }) -> BetaMessage + +### Batches + +Types: + +- BetaMessageBatch +- BetaMessageBatchCanceledResult +- BetaMessageBatchErroredResult +- BetaMessageBatchExpiredResult +- BetaMessageBatchIndividualResponse +- BetaMessageBatchRequestCounts +- BetaMessageBatchResult +- BetaMessageBatchSucceededResult + +Methods: + +- client.beta.messages.batches.create({ ...params }) -> BetaMessageBatch +- client.beta.messages.batches.retrieve(messageBatchId, { ...params }) -> BetaMessageBatch +- client.beta.messages.batches.list({ ...params }) -> BetaMessageBatchesPage +- client.beta.messages.batches.cancel(messageBatchId, { ...params }) -> BetaMessageBatch +- client.beta.messages.batches.results(messageBatchId, { ...params }) -> Response + ## PromptCaching ### Messages diff --git a/examples/batch-results.ts b/examples/batch-results.ts new file mode 100644 index 00000000..a3399f4c --- /dev/null +++ b/examples/batch-results.ts @@ -0,0 +1,20 @@ +import Anthropic from '@anthropic-ai/sdk/index'; + +const anthropic = new Anthropic(); + +async function main() { + const batch_id = process.argv[2]; + if (!batch_id) { + throw new Error('must specify a message batch ID, `yarn tsn examples/batch-results.ts msgbatch_123`'); + } + + console.log(`fetching results for ${batch_id}`); + + const results = await anthropic.beta.messages.batches.results(batch_id); + + for await (const result of results) { + console.log(result); + } +} + +main(); diff --git a/src/index.ts b/src/index.ts index 3217bb30..ed92e8d1 100644 --- a/src/index.ts +++ b/src/index.ts @@ -4,6 +4,7 @@ import * as Errors from './error'; import * as Uploads from './uploads'; import { type Agent } from './_shims/index'; import * as Core from './core'; +import * as Pagination from './pagination'; import * as API from './resources/index'; export interface ClientOptions { @@ -250,6 +251,10 @@ export import fileFromPath = Uploads.fileFromPath; export namespace Anthropic { export import RequestOptions = Core.RequestOptions; + export import Page = Pagination.Page; + export import PageParams = Pagination.PageParams; + export import PageResponse = Pagination.PageResponse; + export import Completions = API.Completions; export import Completion = API.Completion; export import CompletionCreateParams = API.CompletionCreateParams; @@ -297,6 +302,16 @@ export namespace Anthropic { export import MessageStreamParams = API.MessageStreamParams; export import Beta = API.Beta; + export import AnthropicBeta = API.AnthropicBeta; + export 
import BetaAPIError = API.BetaAPIError;
+  export import BetaAuthenticationError = API.BetaAuthenticationError;
+  export import BetaError = API.BetaError;
+  export import BetaErrorResponse = API.BetaErrorResponse;
+  export import BetaInvalidRequestError = API.BetaInvalidRequestError;
+  export import BetaNotFoundError = API.BetaNotFoundError;
+  export import BetaOverloadedError = API.BetaOverloadedError;
+  export import BetaPermissionError = API.BetaPermissionError;
+  export import BetaRateLimitError = API.BetaRateLimitError;
 }
 
 export default Anthropic;
diff --git a/src/internal/decoders/jsonl.ts b/src/internal/decoders/jsonl.ts
new file mode 100644
index 00000000..8d3693d8
--- /dev/null
+++ b/src/internal/decoders/jsonl.ts
@@ -0,0 +1,41 @@
+import { AnthropicError } from '../../error';
+import { readableStreamAsyncIterable } from '../../streaming';
+import { type Response } from '../../_shims/index';
+import { LineDecoder, type Bytes } from './line';
+
+export class JSONLDecoder<T> {
+  controller: AbortController;
+
+  constructor(
+    private iterator: AsyncIterableIterator<Bytes>,
+    controller: AbortController,
+  ) {
+    this.controller = controller;
+  }
+
+  private async *decoder(): AsyncIterator<T, any, undefined> {
+    const lineDecoder = new LineDecoder();
+    for await (const chunk of this.iterator) {
+      for (const line of lineDecoder.decode(chunk)) {
+        yield JSON.parse(line);
+      }
+    }
+
+    for (const line of lineDecoder.flush()) {
+      yield JSON.parse(line);
+    }
+  }
+
+  [Symbol.asyncIterator](): AsyncIterator<T, any, undefined> {
+    return this.decoder();
+  }
+
+  static fromResponse<T>(response: Response, controller: AbortController): JSONLDecoder<T> {
+    if (!response.body) {
+      controller.abort();
+      throw new AnthropicError(`Attempted to iterate over a response with no body`);
+    }
+
+    return new JSONLDecoder<T>(readableStreamAsyncIterable<Bytes>(response.body), controller);
+  }
+}
diff --git a/src/internal/decoders/line.ts b/src/internal/decoders/line.ts
index fe9df101..a71f9ea0 100644
--- a/src/internal/decoders/line.ts
+++ b/src/internal/decoders/line.ts
@@ -1,6 +1,6 @@
 import { AnthropicError } from '../../error';
 
-type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined;
+export type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined;
 
 /**
  * A re-implementation of httpx's `LineDecoder` in Python that handles incrementally
diff --git a/src/pagination.ts b/src/pagination.ts
new file mode 100644
index 00000000..ac0d581b
--- /dev/null
+++ b/src/pagination.ts
@@ -0,0 +1,84 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { AbstractPage, Response, APIClient, FinalRequestOptions, PageInfo } from './core';
+
+export interface PageResponse<Item> {
+  data: Array<Item>;
+
+  has_more: boolean;
+
+  first_id: string | null;
+
+  last_id: string | null;
+}
+
+export interface PageParams {
+  /**
+   * Number of items per page.
+   */
+  limit?: number;
+
+  before_id?: string;
+
+  after_id?: string;
+}
+
+export class Page<Item> extends AbstractPage<Item> implements PageResponse<Item> {
+  data: Array<Item>;
+
+  has_more: boolean;
+
+  first_id: string | null;
+
+  last_id: string | null;
+
+  constructor(client: APIClient, response: Response, body: PageResponse<Item>, options: FinalRequestOptions) {
+    super(client, response, body, options);
+
+    this.data = body.data || [];
+    this.has_more = body.has_more || false;
+    this.first_id = body.first_id || null;
+    this.last_id = body.last_id || null;
+  }
+
+  getPaginatedItems(): Item[] {
+    return this.data ?? [];
+  }
+
+  // @deprecated Please use `nextPageInfo()` instead
+  nextPageParams(): Partial<PageParams> | null {
+    const info = this.nextPageInfo();
+    if (!info) return null;
+    if ('params' in info) return info.params;
+    const params = Object.fromEntries(info.url.searchParams);
+    if (!Object.keys(params).length) return null;
+    return params;
+  }
+
+  nextPageInfo(): PageInfo | null {
+    if ((this.options.query as Record<string, unknown>)?.['before_id']) {
+      // in reverse
+      const firstId = this.first_id;
+      if (!firstId) {
+        return null;
+      }
+
+      return {
+        params: {
+          before_id: firstId,
+        },
+      };
+    }
+
+    const cursor = this.last_id;
+    if (!cursor) {
+      return null;
+    }
+
+    return {
+      params: {
+        after_id: cursor,
+      },
+    };
+  }
+}
diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts
index be3873ee..59a92501 100644
--- a/src/resources/beta/beta.ts
+++ b/src/resources/beta/beta.ts
@@ -1,12 +1,116 @@
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 import { APIResource } from '../../resource';
+import * as BetaAPI from './beta';
+import * as MessagesAPI from './messages/messages';
 import * as PromptCachingAPI from './prompt-caching/prompt-caching';
 
 export class Beta extends APIResource {
+  messages: MessagesAPI.Messages = new MessagesAPI.Messages(this._client);
   promptCaching: PromptCachingAPI.PromptCaching = new PromptCachingAPI.PromptCaching(this._client);
 }
 
+export type AnthropicBeta = (string & {}) | 'message-batches-2024-09-24' | 'prompt-caching-2024-07-31';
+
+export interface BetaAPIError {
+  message: string;
+
+  type: 'api_error';
+}
+
+export interface BetaAuthenticationError {
+  message: string;
+
+  type: 'authentication_error';
+}
+
+export type BetaError =
+  | BetaInvalidRequestError
+  | BetaAuthenticationError
+  | BetaPermissionError
+  | BetaNotFoundError
+  | BetaRateLimitError
+  | BetaAPIError
+  | BetaOverloadedError;
+
+export interface BetaErrorResponse {
+  error: BetaError;
+
+  type: 'error';
+}
+
+export interface BetaInvalidRequestError {
+  message: string;
+
+  type: 'invalid_request_error';
+}
+
+export interface BetaNotFoundError {
+  message: string;
+
+  type: 'not_found_error';
+}
+
+export interface BetaOverloadedError {
+  message: string;
+
+  type: 'overloaded_error';
+}
+
+export interface BetaPermissionError {
+  message: string;
+
+  type: 'permission_error';
+}
+
+export interface BetaRateLimitError {
+  message: string;
+
+  type: 'rate_limit_error';
+}
+
 export namespace Beta {
+  export import AnthropicBeta = BetaAPI.AnthropicBeta;
+  export import BetaAPIError = BetaAPI.BetaAPIError;
+  export import BetaAuthenticationError = BetaAPI.BetaAuthenticationError;
+  export import BetaError = BetaAPI.BetaError;
+  export import BetaErrorResponse = BetaAPI.BetaErrorResponse;
+  export import BetaInvalidRequestError = BetaAPI.BetaInvalidRequestError;
+  export import BetaNotFoundError = BetaAPI.BetaNotFoundError;
+  export import BetaOverloadedError = BetaAPI.BetaOverloadedError;
+  export import BetaPermissionError = BetaAPI.BetaPermissionError;
+  export import BetaRateLimitError = BetaAPI.BetaRateLimitError;
+  export import Messages = MessagesAPI.Messages;
+  export import BetaCacheControlEphemeral = MessagesAPI.BetaCacheControlEphemeral;
+  export import BetaContentBlock = MessagesAPI.BetaContentBlock;
+  export import BetaContentBlockParam = MessagesAPI.BetaContentBlockParam;
+  export import BetaImageBlockParam = MessagesAPI.BetaImageBlockParam;
+  export import BetaInputJSONDelta = MessagesAPI.BetaInputJSONDelta;
+  export import BetaMessage = 
MessagesAPI.BetaMessage; + export import BetaMessageDeltaUsage = MessagesAPI.BetaMessageDeltaUsage; + export import BetaMessageParam = MessagesAPI.BetaMessageParam; + export import BetaMetadata = MessagesAPI.BetaMetadata; + export import BetaRawContentBlockDeltaEvent = MessagesAPI.BetaRawContentBlockDeltaEvent; + export import BetaRawContentBlockStartEvent = MessagesAPI.BetaRawContentBlockStartEvent; + export import BetaRawContentBlockStopEvent = MessagesAPI.BetaRawContentBlockStopEvent; + export import BetaRawMessageDeltaEvent = MessagesAPI.BetaRawMessageDeltaEvent; + export import BetaRawMessageStartEvent = MessagesAPI.BetaRawMessageStartEvent; + export import BetaRawMessageStopEvent = MessagesAPI.BetaRawMessageStopEvent; + export import BetaRawMessageStreamEvent = MessagesAPI.BetaRawMessageStreamEvent; + export import BetaTextBlock = MessagesAPI.BetaTextBlock; + export import BetaTextBlockParam = MessagesAPI.BetaTextBlockParam; + export import BetaTextDelta = MessagesAPI.BetaTextDelta; + export import BetaTool = MessagesAPI.BetaTool; + export import BetaToolChoice = MessagesAPI.BetaToolChoice; + export import BetaToolChoiceAny = MessagesAPI.BetaToolChoiceAny; + export import BetaToolChoiceAuto = MessagesAPI.BetaToolChoiceAuto; + export import BetaToolChoiceTool = MessagesAPI.BetaToolChoiceTool; + export import BetaToolResultBlockParam = MessagesAPI.BetaToolResultBlockParam; + export import BetaToolUseBlock = MessagesAPI.BetaToolUseBlock; + export import BetaToolUseBlockParam = MessagesAPI.BetaToolUseBlockParam; + export import BetaUsage = MessagesAPI.BetaUsage; + export import MessageCreateParams = MessagesAPI.MessageCreateParams; + export import MessageCreateParamsNonStreaming = MessagesAPI.MessageCreateParamsNonStreaming; + export import MessageCreateParamsStreaming = MessagesAPI.MessageCreateParamsStreaming; export import PromptCaching = PromptCachingAPI.PromptCaching; } diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index 770fa945..112d9ad1 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -1,4 +1,50 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-export { Beta } from './beta';
+export {
+  AnthropicBeta,
+  BetaAPIError,
+  BetaAuthenticationError,
+  BetaError,
+  BetaErrorResponse,
+  BetaInvalidRequestError,
+  BetaNotFoundError,
+  BetaOverloadedError,
+  BetaPermissionError,
+  BetaRateLimitError,
+  Beta,
+} from './beta';
+export {
+  BetaCacheControlEphemeral,
+  BetaContentBlock,
+  BetaContentBlockParam,
+  BetaImageBlockParam,
+  BetaInputJSONDelta,
+  BetaMessage,
+  BetaMessageDeltaUsage,
+  BetaMessageParam,
+  BetaMetadata,
+  BetaRawContentBlockDeltaEvent,
+  BetaRawContentBlockStartEvent,
+  BetaRawContentBlockStopEvent,
+  BetaRawMessageDeltaEvent,
+  BetaRawMessageStartEvent,
+  BetaRawMessageStopEvent,
+  BetaRawMessageStreamEvent,
+  BetaTextBlock,
+  BetaTextBlockParam,
+  BetaTextDelta,
+  BetaTool,
+  BetaToolChoice,
+  BetaToolChoiceAny,
+  BetaToolChoiceAuto,
+  BetaToolChoiceTool,
+  BetaToolResultBlockParam,
+  BetaToolUseBlock,
+  BetaToolUseBlockParam,
+  BetaUsage,
+  MessageCreateParams,
+  MessageCreateParamsNonStreaming,
+  MessageCreateParamsStreaming,
+  Messages,
+} from './messages/index';
 export { PromptCaching } from './prompt-caching/index';
diff --git a/src/resources/beta/messages/batches.ts b/src/resources/beta/messages/batches.ts
new file mode 100644
index 00000000..57e4c91c
--- /dev/null
+++ b/src/resources/beta/messages/batches.ts
@@ -0,0 +1,393 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../../resource';
+import { isRequestOptions } from '../../../core';
+import * as Core from '../../../core';
+import * as BatchesAPI from './batches';
+import * as BetaAPI from '../beta';
+import * as BetaMessagesAPI from './messages';
+import { Page, type PageParams } from '../../../pagination';
+import { JSONLDecoder } from '../../../internal/decoders/jsonl';
+import { AnthropicError } from '../../../error';
+
+export class Batches extends APIResource {
+  /**
+   * Send a batch of requests to create Messages.
+   *
+   * The Messages Batch API can be used to process multiple Messages API requests at
+   * once. Once a Message Batch is created, it begins processing immediately.
+   */
+  create(params: BatchCreateParams, options?: Core.RequestOptions): Core.APIPromise<BetaMessageBatch> {
+    const { betas, ...body } = params;
+    return this._client.post('/v1/messages/batches?beta=true', {
+      body,
+      ...options,
+      headers: {
+        'anthropic-beta': betas != null ? betas.toString() : 'message-batches-2024-09-24',
+        ...options?.headers,
+      },
+    });
+  }
+
+  /**
+   * This endpoint is idempotent and can be used to poll for Message Batch
+   * completion. To access the results of a Message Batch, use the `results_url`
+   * field in the response.
+   */
+  retrieve(
+    messageBatchId: string,
+    params?: BatchRetrieveParams,
+    options?: Core.RequestOptions,
+  ): Core.APIPromise<BetaMessageBatch>;
+  retrieve(messageBatchId: string, options?: Core.RequestOptions): Core.APIPromise<BetaMessageBatch>;
+  retrieve(
+    messageBatchId: string,
+    params: BatchRetrieveParams | Core.RequestOptions = {},
+    options?: Core.RequestOptions,
+  ): Core.APIPromise<BetaMessageBatch> {
+    if (isRequestOptions(params)) {
+      return this.retrieve(messageBatchId, {}, params);
+    }
+    const { betas } = params;
+    return this._client.get(`/v1/messages/batches/${messageBatchId}?beta=true`, {
+      ...options,
+      headers: {
+        'anthropic-beta': betas != null ? betas.toString() : 'message-batches-2024-09-24',
+        ...options?.headers,
+      },
+    });
+  }
+
+  /**
+   * List all Message Batches within a Workspace. 
+   */
+  list(
+    params?: BatchListParams,
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<BetaMessageBatchesPage, BetaMessageBatch>;
+  list(options?: Core.RequestOptions): Core.PagePromise<BetaMessageBatchesPage, BetaMessageBatch>;
+  list(
+    params: BatchListParams | Core.RequestOptions = {},
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<BetaMessageBatchesPage, BetaMessageBatch> {
+    if (isRequestOptions(params)) {
+      return this.list({}, params);
+    }
+    const { betas, ...query } = params;
+    return this._client.getAPIList('/v1/messages/batches?beta=true', BetaMessageBatchesPage, {
+      query,
+      ...options,
+      headers: {
+        'anthropic-beta': betas != null ? betas.toString() : 'message-batches-2024-09-24',
+        ...options?.headers,
+      },
+    });
+  }
+
+  /**
+   * Batches may be canceled any time before processing ends. The system may complete
+   * any in-progress, non-interruptible operations before finalizing cancellation.
+   */
+  cancel(
+    messageBatchId: string,
+    params?: BatchCancelParams,
+    options?: Core.RequestOptions,
+  ): Core.APIPromise<BetaMessageBatch>;
+  cancel(messageBatchId: string, options?: Core.RequestOptions): Core.APIPromise<BetaMessageBatch>;
+  cancel(
+    messageBatchId: string,
+    params: BatchCancelParams | Core.RequestOptions = {},
+    options?: Core.RequestOptions,
+  ): Core.APIPromise<BetaMessageBatch> {
+    if (isRequestOptions(params)) {
+      return this.cancel(messageBatchId, {}, params);
+    }
+    const { betas } = params;
+    return this._client.post(`/v1/messages/batches/${messageBatchId}/cancel?beta=true`, {
+      ...options,
+      headers: {
+        'anthropic-beta': betas != null ? betas.toString() : 'message-batches-2024-09-24',
+        ...options?.headers,
+      },
+    });
+  }
+
+  /**
+   * Streams the results of a Message Batch as a `.jsonl` file.
+   *
+   * Each line in the file is a JSON object containing the result of a single request
+   * in the Message Batch. Results are not guaranteed to be in the same order as
+   * requests. Use the `custom_id` field to match results to requests.
+   */
+  async results(
+    messageBatchId: string,
+    params?: BatchResultsParams,
+    options?: Core.RequestOptions,
+  ): Promise<JSONLDecoder<BetaMessageBatchIndividualResponse>>;
+  async results(
+    messageBatchId: string,
+    options?: Core.RequestOptions,
+  ): Promise<JSONLDecoder<BetaMessageBatchIndividualResponse>>;
+  async results(
+    messageBatchId: string,
+    params: BatchResultsParams | Core.RequestOptions = {},
+    options?: Core.RequestOptions,
+  ): Promise<JSONLDecoder<BetaMessageBatchIndividualResponse>> {
+    if (isRequestOptions(params)) {
+      return this.results(messageBatchId, {}, params);
+    }
+
+    const batch = await this.retrieve(messageBatchId);
+    if (!batch.results_url) {
+      throw new AnthropicError(
+        `No batch \`results_url\`; Has it finished processing? ${batch.processing_status} - ${batch.id}`,
+      );
+    }
+
+    const { betas } = params;
+    return this._client
+      .get(batch.results_url, {
+        ...options,
+        headers: {
+          'anthropic-beta': betas != null ? betas.toString() : 'message-batches-2024-09-24',
+          ...options?.headers,
+        },
+        __binaryResponse: true,
+      })
+      ._thenUnwrap((_, props) =>
+        JSONLDecoder.fromResponse<BetaMessageBatchIndividualResponse>(props.response, props.controller),
+      );
+  }
+}
+
+export class BetaMessageBatchesPage extends Page<BetaMessageBatch> {}
+
+export interface BetaMessageBatch {
+  /**
+   * Unique object identifier.
+   *
+   * The format and length of IDs may change over time.
+   */
+  id: string;
+
+  /**
+   * RFC 3339 datetime string representing the time at which cancellation was
+   * initiated for the Message Batch. Specified only if cancellation was initiated.
+   */
+  cancel_initiated_at: string | null;
+
+  /**
+   * RFC 3339 datetime string representing the time at which the Message Batch was
+   * created.
+   */
+  created_at: string;
+
+  /**
+   * RFC 3339 datetime string representing the time at which processing for the
+   * Message Batch ended. Specified only once processing ends. 
+ * + * Processing ends when every request in a Message Batch has either succeeded, + * errored, canceled, or expired. + */ + ended_at: string | null; + + /** + * RFC 3339 datetime string representing the time at which the Message Batch will + * expire and end processing, which is 24 hours after creation. + */ + expires_at: string; + + /** + * Processing status of the Message Batch. + * + * This is one of: `in_progress`, `canceling`, or `ended`. + */ + processing_status: 'in_progress' | 'canceling' | 'ended'; + + /** + * Overview of the number of requests within the Message Batch and their statuses. + * + * Requests start as `processing` and move to one of the other statuses only once + * processing of entire batch ends. + */ + request_counts: BetaMessageBatchRequestCounts; + + /** + * URL to a `.jsonl` file containing the results of the Message Batch requests. + * Specified only once processing ends. + * + * Results in the file are not guaranteed to be in the same order as requests. Use + * the `custom_id` field to match results to requests. + */ + results_url: string | null; + + /** + * Object type. + * + * For Message Batches, this is always `"message_batch"`. + */ + type: 'message_batch'; +} + +export interface BetaMessageBatchCanceledResult { + type: 'canceled'; +} + +export interface BetaMessageBatchErroredResult { + error: BetaAPI.BetaErrorResponse; + + type: 'errored'; +} + +export interface BetaMessageBatchExpiredResult { + type: 'expired'; +} + +export interface BetaMessageBatchIndividualResponse { + /** + * Developer-provided ID created for each request in a Message Batch. Useful for + * matching results to requests. + * + * Must be unique for each request within the Message Batch. + */ + custom_id: string; + + /** + * Processing result for this request. + * + * Contains a Message output if processing was successful, an error response if + * processing failed, or the reason why processing was not attempted, such as + * cancellation or expiration. + */ + result: BetaMessageBatchResult; +} + +export interface BetaMessageBatchRequestCounts { + /** + * Number of requests in the Message Batch that have been canceled. + * + * This is zero until processing of the entire Message Batch has ended. + */ + canceled: number; + + /** + * Number of requests in the Message Batch that encountered an error. + * + * This is zero until processing of the entire Message Batch has ended. + */ + errored: number; + + /** + * Number of requests in the Message Batch that have expired. + * + * This is zero until processing of the entire Message Batch has ended. + */ + expired: number; + + /** + * Number of requests in the Message Batch that are processing. + */ + processing: number; + + /** + * Number of requests in the Message Batch that have completed successfully. + * + * This is zero until processing of the entire Message Batch has ended. + */ + succeeded: number; +} + +/** + * Processing result for this request. + * + * Contains a Message output if processing was successful, an error response if + * processing failed, or the reason why processing was not attempted, such as + * cancellation or expiration. + */ +export type BetaMessageBatchResult = + | BetaMessageBatchSucceededResult + | BetaMessageBatchErroredResult + | BetaMessageBatchCanceledResult + | BetaMessageBatchExpiredResult; + +export interface BetaMessageBatchSucceededResult { + message: BetaMessagesAPI.BetaMessage; + + type: 'succeeded'; +} + +export interface BatchCreateParams { + /** + * Body param: List of requests for prompt completion. 
Each is an individual + * request to create a Message. + */ + requests: Array; + + /** + * Header param: Optional header to specify the beta version(s) you want to use. + */ + betas?: Array; +} + +export namespace BatchCreateParams { + export interface Request { + /** + * Developer-provided ID created for each request in a Message Batch. Useful for + * matching results to requests. + * + * Must be unique for each request within the Message Batch. + */ + custom_id: string; + + /** + * Messages API creation parameters for the individual request. + * + * See the [Messages API reference](/en/api/messages) for full documentation on + * available parameters. + */ + params: BetaMessagesAPI.MessageCreateParamsNonStreaming; + } +} + +export interface BatchRetrieveParams { + /** + * Optional header to specify the beta version(s) you want to use. + */ + betas?: Array; +} + +export interface BatchListParams extends PageParams { + /** + * Header param: Optional header to specify the beta version(s) you want to use. + */ + betas?: Array; +} + +export interface BatchCancelParams { + /** + * Optional header to specify the beta version(s) you want to use. + */ + betas?: Array; +} + +export interface BatchResultsParams { + /** + * Optional header to specify the beta version(s) you want to use. + */ + betas?: Array; +} + +export namespace Batches { + export import BetaMessageBatch = BatchesAPI.BetaMessageBatch; + export import BetaMessageBatchCanceledResult = BatchesAPI.BetaMessageBatchCanceledResult; + export import BetaMessageBatchErroredResult = BatchesAPI.BetaMessageBatchErroredResult; + export import BetaMessageBatchExpiredResult = BatchesAPI.BetaMessageBatchExpiredResult; + export import BetaMessageBatchIndividualResponse = BatchesAPI.BetaMessageBatchIndividualResponse; + export import BetaMessageBatchRequestCounts = BatchesAPI.BetaMessageBatchRequestCounts; + export import BetaMessageBatchResult = BatchesAPI.BetaMessageBatchResult; + export import BetaMessageBatchSucceededResult = BatchesAPI.BetaMessageBatchSucceededResult; + export import BetaMessageBatchesPage = BatchesAPI.BetaMessageBatchesPage; + export import BatchCreateParams = BatchesAPI.BatchCreateParams; + export import BatchRetrieveParams = BatchesAPI.BatchRetrieveParams; + export import BatchListParams = BatchesAPI.BatchListParams; + export import BatchCancelParams = BatchesAPI.BatchCancelParams; + export import BatchResultsParams = BatchesAPI.BatchResultsParams; +} diff --git a/src/resources/beta/messages/index.ts b/src/resources/beta/messages/index.ts new file mode 100644 index 00000000..54759740 --- /dev/null +++ b/src/resources/beta/messages/index.ts @@ -0,0 +1,53 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+
+export {
+  BetaCacheControlEphemeral,
+  BetaContentBlock,
+  BetaContentBlockParam,
+  BetaImageBlockParam,
+  BetaInputJSONDelta,
+  BetaMessage,
+  BetaMessageDeltaUsage,
+  BetaMessageParam,
+  BetaMetadata,
+  BetaRawContentBlockDeltaEvent,
+  BetaRawContentBlockStartEvent,
+  BetaRawContentBlockStopEvent,
+  BetaRawMessageDeltaEvent,
+  BetaRawMessageStartEvent,
+  BetaRawMessageStopEvent,
+  BetaRawMessageStreamEvent,
+  BetaTextBlock,
+  BetaTextBlockParam,
+  BetaTextDelta,
+  BetaTool,
+  BetaToolChoice,
+  BetaToolChoiceAny,
+  BetaToolChoiceAuto,
+  BetaToolChoiceTool,
+  BetaToolResultBlockParam,
+  BetaToolUseBlock,
+  BetaToolUseBlockParam,
+  BetaUsage,
+  MessageCreateParams,
+  MessageCreateParamsNonStreaming,
+  MessageCreateParamsStreaming,
+  Messages,
+} from './messages';
+export {
+  BetaMessageBatch,
+  BetaMessageBatchCanceledResult,
+  BetaMessageBatchErroredResult,
+  BetaMessageBatchExpiredResult,
+  BetaMessageBatchIndividualResponse,
+  BetaMessageBatchRequestCounts,
+  BetaMessageBatchResult,
+  BetaMessageBatchSucceededResult,
+  BatchCreateParams,
+  BatchRetrieveParams,
+  BatchListParams,
+  BatchCancelParams,
+  BatchResultsParams,
+  BetaMessageBatchesPage,
+  Batches,
+} from './batches';
diff --git a/src/resources/beta/messages/messages.ts b/src/resources/beta/messages/messages.ts
new file mode 100644
index 00000000..d8d0d003
--- /dev/null
+++ b/src/resources/beta/messages/messages.ts
@@ -0,0 +1,795 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../../resource';
+import { APIPromise } from '../../../core';
+import * as Core from '../../../core';
+import * as MessagesMessagesAPI from './messages';
+import * as MessagesAPI from '../../messages';
+import * as BetaAPI from '../beta';
+import * as BatchesAPI from './batches';
+import { Stream } from '../../../streaming';
+
+export class Messages extends APIResource {
+  batches: BatchesAPI.Batches = new BatchesAPI.Batches(this._client);
+
+  /**
+   * Send a structured list of input messages with text and/or image content, and the
+   * model will generate the next message in the conversation.
+   *
+   * The Messages API can be used for either single queries or stateless multi-turn
+   * conversations.
+   */
+  create(params: MessageCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<BetaMessage>;
+  create(
+    params: MessageCreateParamsStreaming,
+    options?: Core.RequestOptions,
+  ): APIPromise<Stream<BetaRawMessageStreamEvent>>;
+  create(
+    params: MessageCreateParamsBase,
+    options?: Core.RequestOptions,
+  ): APIPromise<Stream<BetaRawMessageStreamEvent> | BetaMessage>;
+  create(
+    params: MessageCreateParams,
+    options?: Core.RequestOptions,
+  ): APIPromise<BetaMessage> | APIPromise<Stream<BetaRawMessageStreamEvent>> {
+    const { betas, ...body } = params;
+    return this._client.post('/v1/messages?beta=true', {
+      body,
+      timeout: (this._client as any)._options.timeout ?? 600000,
+      ...options,
+      headers: {
+        ...(betas?.toString() != null ? { 'anthropic-beta': betas?.toString() } : undefined),
+        ...options?.headers,
+      },
+      stream: params.stream ?? 
false, + }) as APIPromise | APIPromise>; + } +} + +export interface BetaCacheControlEphemeral { + type: 'ephemeral'; +} + +export type BetaContentBlock = BetaTextBlock | BetaToolUseBlock; + +export type BetaContentBlockParam = + | BetaTextBlockParam + | BetaImageBlockParam + | BetaToolUseBlockParam + | BetaToolResultBlockParam; + +export interface BetaImageBlockParam { + source: BetaImageBlockParam.Source; + + type: 'image'; + + cache_control?: BetaCacheControlEphemeral | null; +} + +export namespace BetaImageBlockParam { + export interface Source { + data: string; + + media_type: 'image/jpeg' | 'image/png' | 'image/gif' | 'image/webp'; + + type: 'base64'; + } +} + +export interface BetaInputJSONDelta { + partial_json: string; + + type: 'input_json_delta'; +} + +export interface BetaMessage { + /** + * Unique object identifier. + * + * The format and length of IDs may change over time. + */ + id: string; + + /** + * Content generated by the model. + * + * This is an array of content blocks, each of which has a `type` that determines + * its shape. + * + * Example: + * + * ```json + * [{ "type": "text", "text": "Hi, I'm Claude." }] + * ``` + * + * If the request input `messages` ended with an `assistant` turn, then the + * response `content` will continue directly from that last turn. You can use this + * to constrain the model's output. + * + * For example, if the input `messages` were: + * + * ```json + * [ + * { + * "role": "user", + * "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + * }, + * { "role": "assistant", "content": "The best answer is (" } + * ] + * ``` + * + * Then the response `content` might be: + * + * ```json + * [{ "type": "text", "text": "B)" }] + * ``` + */ + content: Array; + + /** + * The model that will complete your prompt.\n\nSee + * [models](https://docs.anthropic.com/en/docs/models-overview) for additional + * details and options. + */ + model: MessagesAPI.Model; + + /** + * Conversational role of the generated message. + * + * This will always be `"assistant"`. + */ + role: 'assistant'; + + /** + * The reason that we stopped. + * + * This may be one the following values: + * + * - `"end_turn"`: the model reached a natural stopping point + * - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum + * - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated + * - `"tool_use"`: the model invoked one or more tools + * + * In non-streaming mode this value is always non-null. In streaming mode, it is + * null in the `message_start` event and non-null otherwise. + */ + stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence' | 'tool_use' | null; + + /** + * Which custom stop sequence was generated, if any. + * + * This value will be a non-null string if one of your custom stop sequences was + * generated. + */ + stop_sequence: string | null; + + /** + * Object type. + * + * For Messages, this is always `"message"`. + */ + type: 'message'; + + /** + * Billing and rate-limit usage. + * + * Anthropic's API bills and rate-limits by token counts, as tokens represent the + * underlying cost to our systems. + * + * Under the hood, the API transforms requests into a format suitable for the + * model. The model's output then goes through a parsing stage before becoming an + * API response. As a result, the token counts in `usage` will not match one-to-one + * with the exact visible content of an API request or response. 
+ * + * For example, `output_tokens` will be non-zero, even for an empty string response + * from Claude. + */ + usage: BetaUsage; +} + +export interface BetaMessageDeltaUsage { + /** + * The cumulative number of output tokens which were used. + */ + output_tokens: number; +} + +export interface BetaMessageParam { + content: string | Array; + + role: 'user' | 'assistant'; +} + +export interface BetaMetadata { + /** + * An external identifier for the user who is associated with the request. + * + * This should be a uuid, hash value, or other opaque identifier. Anthropic may use + * this id to help detect abuse. Do not include any identifying information such as + * name, email address, or phone number. + */ + user_id?: string | null; +} + +export interface BetaRawContentBlockDeltaEvent { + delta: BetaTextDelta | BetaInputJSONDelta; + + index: number; + + type: 'content_block_delta'; +} + +export interface BetaRawContentBlockStartEvent { + content_block: BetaTextBlock | BetaToolUseBlock; + + index: number; + + type: 'content_block_start'; +} + +export interface BetaRawContentBlockStopEvent { + index: number; + + type: 'content_block_stop'; +} + +export interface BetaRawMessageDeltaEvent { + delta: BetaRawMessageDeltaEvent.Delta; + + type: 'message_delta'; + + /** + * Billing and rate-limit usage. + * + * Anthropic's API bills and rate-limits by token counts, as tokens represent the + * underlying cost to our systems. + * + * Under the hood, the API transforms requests into a format suitable for the + * model. The model's output then goes through a parsing stage before becoming an + * API response. As a result, the token counts in `usage` will not match one-to-one + * with the exact visible content of an API request or response. + * + * For example, `output_tokens` will be non-zero, even for an empty string response + * from Claude. + */ + usage: BetaMessageDeltaUsage; +} + +export namespace BetaRawMessageDeltaEvent { + export interface Delta { + stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence' | 'tool_use' | null; + + stop_sequence: string | null; + } +} + +export interface BetaRawMessageStartEvent { + message: BetaMessage; + + type: 'message_start'; +} + +export interface BetaRawMessageStopEvent { + type: 'message_stop'; +} + +export type BetaRawMessageStreamEvent = + | BetaRawMessageStartEvent + | BetaRawMessageDeltaEvent + | BetaRawMessageStopEvent + | BetaRawContentBlockStartEvent + | BetaRawContentBlockDeltaEvent + | BetaRawContentBlockStopEvent; + +export interface BetaTextBlock { + text: string; + + type: 'text'; +} + +export interface BetaTextBlockParam { + text: string; + + type: 'text'; + + cache_control?: BetaCacheControlEphemeral | null; +} + +export interface BetaTextDelta { + text: string; + + type: 'text_delta'; +} + +export interface BetaTool { + /** + * [JSON schema](https://json-schema.org/) for this tool's input. + * + * This defines the shape of the `input` that your tool accepts and that the model + * will produce. + */ + input_schema: BetaTool.InputSchema; + + name: string; + + cache_control?: BetaCacheControlEphemeral | null; + + /** + * Description of what this tool does. + * + * Tool descriptions should be as detailed as possible. The more information that + * the model has about what the tool is and how to use it, the better it will + * perform. You can use natural language descriptions to reinforce important + * aspects of the tool input JSON schema. 
+ */ + description?: string; +} + +export namespace BetaTool { + /** + * [JSON schema](https://json-schema.org/) for this tool's input. + * + * This defines the shape of the `input` that your tool accepts and that the model + * will produce. + */ + export interface InputSchema { + type: 'object'; + + properties?: unknown | null; + [k: string]: unknown; + } +} + +/** + * How the model should use the provided tools. The model can use a specific tool, + * any available tool, or decide by itself. + */ +export type BetaToolChoice = BetaToolChoiceAuto | BetaToolChoiceAny | BetaToolChoiceTool; + +/** + * The model will use any available tools. + */ +export interface BetaToolChoiceAny { + type: 'any'; + + /** + * Whether to disable parallel tool use. + * + * Defaults to `false`. If set to `true`, the model will output exactly one tool + * use. + */ + disable_parallel_tool_use?: boolean; +} + +/** + * The model will automatically decide whether to use tools. + */ +export interface BetaToolChoiceAuto { + type: 'auto'; + + /** + * Whether to disable parallel tool use. + * + * Defaults to `false`. If set to `true`, the model will output at most one tool + * use. + */ + disable_parallel_tool_use?: boolean; +} + +/** + * The model will use the specified tool with `tool_choice.name`. + */ +export interface BetaToolChoiceTool { + /** + * The name of the tool to use. + */ + name: string; + + type: 'tool'; + + /** + * Whether to disable parallel tool use. + * + * Defaults to `false`. If set to `true`, the model will output exactly one tool + * use. + */ + disable_parallel_tool_use?: boolean; +} + +export interface BetaToolResultBlockParam { + tool_use_id: string; + + type: 'tool_result'; + + cache_control?: BetaCacheControlEphemeral | null; + + content?: string | Array; + + is_error?: boolean; +} + +export interface BetaToolUseBlock { + id: string; + + input: unknown; + + name: string; + + type: 'tool_use'; +} + +export interface BetaToolUseBlockParam { + id: string; + + input: unknown; + + name: string; + + type: 'tool_use'; + + cache_control?: BetaCacheControlEphemeral | null; +} + +export interface BetaUsage { + /** + * The number of input tokens used to create the cache entry. + */ + cache_creation_input_tokens: number | null; + + /** + * The number of input tokens read from the cache. + */ + cache_read_input_tokens: number | null; + + /** + * The number of input tokens which were used. + */ + input_tokens: number; + + /** + * The number of output tokens which were used. + */ + output_tokens: number; +} + +export type MessageCreateParams = MessageCreateParamsNonStreaming | MessageCreateParamsStreaming; + +export interface MessageCreateParamsBase { + /** + * Body param: The maximum number of tokens to generate before stopping. + * + * Note that our models may stop _before_ reaching this maximum. This parameter + * only specifies the absolute maximum number of tokens to generate. + * + * Different models have different maximum values for this parameter. See + * [models](https://docs.anthropic.com/en/docs/models-overview) for details. + */ + max_tokens: number; + + /** + * Body param: Input messages. + * + * Our models are trained to operate on alternating `user` and `assistant` + * conversational turns. When creating a new `Message`, you specify the prior + * conversational turns with the `messages` parameter, and the model then generates + * the next `Message` in the conversation. + * + * Each input message must be an object with a `role` and `content`. 
You can + * specify a single `user`-role message, or you can include multiple `user` and + * `assistant` messages. The first message must always use the `user` role. + * + * If the final message uses the `assistant` role, the response content will + * continue immediately from the content in that message. This can be used to + * constrain part of the model's response. + * + * Example with a single `user` message: + * + * ```json + * [{ "role": "user", "content": "Hello, Claude" }] + * ``` + * + * Example with multiple conversational turns: + * + * ```json + * [ + * { "role": "user", "content": "Hello there." }, + * { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" }, + * { "role": "user", "content": "Can you explain LLMs in plain English?" } + * ] + * ``` + * + * Example with a partially-filled response from Claude: + * + * ```json + * [ + * { + * "role": "user", + * "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun" + * }, + * { "role": "assistant", "content": "The best answer is (" } + * ] + * ``` + * + * Each input message `content` may be either a single `string` or an array of + * content blocks, where each block has a specific `type`. Using a `string` for + * `content` is shorthand for an array of one content block of type `"text"`. The + * following input messages are equivalent: + * + * ```json + * { "role": "user", "content": "Hello, Claude" } + * ``` + * + * ```json + * { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] } + * ``` + * + * Starting with Claude 3 models, you can also send image content blocks: + * + * ```json + * { + * "role": "user", + * "content": [ + * { + * "type": "image", + * "source": { + * "type": "base64", + * "media_type": "image/jpeg", + * "data": "/9j/4AAQSkZJRg..." + * } + * }, + * { "type": "text", "text": "What is in this image?" } + * ] + * } + * ``` + * + * We currently support the `base64` source type for images, and the `image/jpeg`, + * `image/png`, `image/gif`, and `image/webp` media types. + * + * See [examples](https://docs.anthropic.com/en/api/messages-examples#vision) for + * more input examples. + * + * Note that if you want to include a + * [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use + * the top-level `system` parameter — there is no `"system"` role for input + * messages in the Messages API. + */ + messages: Array; + + /** + * Body param: The model that will complete your prompt.\n\nSee + * [models](https://docs.anthropic.com/en/docs/models-overview) for additional + * details and options. + */ + model: MessagesAPI.Model; + + /** + * Body param: An object describing metadata about the request. + */ + metadata?: BetaMetadata; + + /** + * Body param: Custom text sequences that will cause the model to stop generating. + * + * Our models will normally stop when they have naturally completed their turn, + * which will result in a response `stop_reason` of `"end_turn"`. + * + * If you want the model to stop generating when it encounters custom strings of + * text, you can use the `stop_sequences` parameter. If the model encounters one of + * the custom sequences, the response `stop_reason` value will be `"stop_sequence"` + * and the response `stop_sequence` value will contain the matched stop sequence. + */ + stop_sequences?: Array; + + /** + * Body param: Whether to incrementally stream the response using server-sent + * events. + * + * See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for + * details. 
+ */ + stream?: boolean; + + /** + * Body param: System prompt. + * + * A system prompt is a way of providing context and instructions to Claude, such + * as specifying a particular goal or role. See our + * [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts). + */ + system?: string | Array; + + /** + * Body param: Amount of randomness injected into the response. + * + * Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` + * for analytical / multiple choice, and closer to `1.0` for creative and + * generative tasks. + * + * Note that even with `temperature` of `0.0`, the results will not be fully + * deterministic. + */ + temperature?: number; + + /** + * Body param: How the model should use the provided tools. The model can use a + * specific tool, any available tool, or decide by itself. + */ + tool_choice?: BetaToolChoice; + + /** + * Body param: Definitions of tools that the model may use. + * + * If you include `tools` in your API request, the model may return `tool_use` + * content blocks that represent the model's use of those tools. You can then run + * those tools using the tool input generated by the model and then optionally + * return results back to the model using `tool_result` content blocks. + * + * Each tool definition includes: + * + * - `name`: Name of the tool. + * - `description`: Optional, but strongly-recommended description of the tool. + * - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input` + * shape that the model will produce in `tool_use` output content blocks. + * + * For example, if you defined `tools` as: + * + * ```json + * [ + * { + * "name": "get_stock_price", + * "description": "Get the current stock price for a given ticker symbol.", + * "input_schema": { + * "type": "object", + * "properties": { + * "ticker": { + * "type": "string", + * "description": "The stock ticker symbol, e.g. AAPL for Apple Inc." + * } + * }, + * "required": ["ticker"] + * } + * } + * ] + * ``` + * + * And then asked the model "What's the S&P 500 at today?", the model might produce + * `tool_use` content blocks in the response like this: + * + * ```json + * [ + * { + * "type": "tool_use", + * "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + * "name": "get_stock_price", + * "input": { "ticker": "^GSPC" } + * } + * ] + * ``` + * + * You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an + * input, and return the following back to the model in a subsequent `user` + * message: + * + * ```json + * [ + * { + * "type": "tool_result", + * "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV", + * "content": "259.75 USD" + * } + * ] + * ``` + * + * Tools can be used for workflows that include running client-side tools and + * functions, or more generally whenever you want the model to produce a particular + * JSON structure of output. + * + * See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details. + */ + tools?: Array; + + /** + * Body param: Only sample from the top K options for each subsequent token. + * + * Used to remove "long tail" low probability responses. + * [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277). + * + * Recommended for advanced use cases only. You usually only need to use + * `temperature`. + */ + top_k?: number; + + /** + * Body param: Use nucleus sampling. 
+   *
+   * In nucleus sampling, we compute the cumulative distribution over all the options
+   * for each subsequent token in decreasing probability order and cut it off once it
+   * reaches a particular probability specified by `top_p`. You should either alter
+   * `temperature` or `top_p`, but not both.
+   *
+   * Recommended for advanced use cases only. You usually only need to use
+   * `temperature`.
+   */
+  top_p?: number;
+
+  /**
+   * Header param: Optional header to specify the beta version(s) you want to use.
+   */
+  betas?: Array<BetaAPI.AnthropicBeta>;
+}
+
+export namespace MessageCreateParams {
+  export type MessageCreateParamsNonStreaming = MessagesMessagesAPI.MessageCreateParamsNonStreaming;
+  export type MessageCreateParamsStreaming = MessagesMessagesAPI.MessageCreateParamsStreaming;
+}
+
+export interface MessageCreateParamsNonStreaming extends MessageCreateParamsBase {
+  /**
+   * Body param: Whether to incrementally stream the response using server-sent
+   * events.
+   *
+   * See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+   * details.
+   */
+  stream?: false;
+}
+
+export interface MessageCreateParamsStreaming extends MessageCreateParamsBase {
+  /**
+   * Body param: Whether to incrementally stream the response using server-sent
+   * events.
+   *
+   * See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
+   * details.
+   */
+  stream: true;
+}
+
+export namespace Messages {
+  export import BetaCacheControlEphemeral = MessagesMessagesAPI.BetaCacheControlEphemeral;
+  export import BetaContentBlock = MessagesMessagesAPI.BetaContentBlock;
+  export import BetaContentBlockParam = MessagesMessagesAPI.BetaContentBlockParam;
+  export import BetaImageBlockParam = MessagesMessagesAPI.BetaImageBlockParam;
+  export import BetaInputJSONDelta = MessagesMessagesAPI.BetaInputJSONDelta;
+  export import BetaMessage = MessagesMessagesAPI.BetaMessage;
+  export import BetaMessageDeltaUsage = MessagesMessagesAPI.BetaMessageDeltaUsage;
+  export import BetaMessageParam = MessagesMessagesAPI.BetaMessageParam;
+  export import BetaMetadata = MessagesMessagesAPI.BetaMetadata;
+  export import BetaRawContentBlockDeltaEvent = MessagesMessagesAPI.BetaRawContentBlockDeltaEvent;
+  export import BetaRawContentBlockStartEvent = MessagesMessagesAPI.BetaRawContentBlockStartEvent;
+  export import BetaRawContentBlockStopEvent = MessagesMessagesAPI.BetaRawContentBlockStopEvent;
+  export import BetaRawMessageDeltaEvent = MessagesMessagesAPI.BetaRawMessageDeltaEvent;
+  export import BetaRawMessageStartEvent = MessagesMessagesAPI.BetaRawMessageStartEvent;
+  export import BetaRawMessageStopEvent = MessagesMessagesAPI.BetaRawMessageStopEvent;
+  export import BetaRawMessageStreamEvent = MessagesMessagesAPI.BetaRawMessageStreamEvent;
+  export import BetaTextBlock = MessagesMessagesAPI.BetaTextBlock;
+  export import BetaTextBlockParam = MessagesMessagesAPI.BetaTextBlockParam;
+  export import BetaTextDelta = MessagesMessagesAPI.BetaTextDelta;
+  export import BetaTool = MessagesMessagesAPI.BetaTool;
+  export import BetaToolChoice = MessagesMessagesAPI.BetaToolChoice;
+  export import BetaToolChoiceAny = MessagesMessagesAPI.BetaToolChoiceAny;
+  export import BetaToolChoiceAuto = MessagesMessagesAPI.BetaToolChoiceAuto;
+  export import BetaToolChoiceTool = MessagesMessagesAPI.BetaToolChoiceTool;
+  export import BetaToolResultBlockParam = MessagesMessagesAPI.BetaToolResultBlockParam;
+  export import BetaToolUseBlock = MessagesMessagesAPI.BetaToolUseBlock;
+  export import BetaToolUseBlockParam = MessagesMessagesAPI.BetaToolUseBlockParam;
+  export import BetaUsage = MessagesMessagesAPI.BetaUsage;
+  export import MessageCreateParams = MessagesMessagesAPI.MessageCreateParams;
+  export import MessageCreateParamsNonStreaming = MessagesMessagesAPI.MessageCreateParamsNonStreaming;
+  export import MessageCreateParamsStreaming = MessagesMessagesAPI.MessageCreateParamsStreaming;
+  export import Batches = BatchesAPI.Batches;
+  export import BetaMessageBatch = BatchesAPI.BetaMessageBatch;
+  export import BetaMessageBatchCanceledResult = BatchesAPI.BetaMessageBatchCanceledResult;
+  export import BetaMessageBatchErroredResult = BatchesAPI.BetaMessageBatchErroredResult;
+  export import BetaMessageBatchExpiredResult = BatchesAPI.BetaMessageBatchExpiredResult;
+  export import BetaMessageBatchIndividualResponse = BatchesAPI.BetaMessageBatchIndividualResponse;
+  export import BetaMessageBatchRequestCounts = BatchesAPI.BetaMessageBatchRequestCounts;
+  export import BetaMessageBatchResult = BatchesAPI.BetaMessageBatchResult;
+  export import BetaMessageBatchSucceededResult = BatchesAPI.BetaMessageBatchSucceededResult;
+  export import BetaMessageBatchesPage = BatchesAPI.BetaMessageBatchesPage;
+  export import BatchCreateParams = BatchesAPI.BatchCreateParams;
+  export import BatchRetrieveParams = BatchesAPI.BatchRetrieveParams;
+  export import BatchListParams = BatchesAPI.BatchListParams;
+  export import BatchCancelParams = BatchesAPI.BatchCancelParams;
+  export import BatchResultsParams = BatchesAPI.BatchResultsParams;
+}
diff --git a/src/resources/beta/prompt-caching/messages.ts b/src/resources/beta/prompt-caching/messages.ts
index a17d45e1..e2295e0b 100644
--- a/src/resources/beta/prompt-caching/messages.ts
+++ b/src/resources/beta/prompt-caching/messages.ts
@@ -5,13 +5,12 @@ import { APIPromise } from '../../../core';
 import * as Core from '../../../core';
 import * as PromptCachingMessagesAPI from './messages';
 import * as MessagesAPI from '../../messages';
+import * as BetaAPI from '../beta';
 import { Stream } from '../../../streaming';
 import { PromptCachingBetaMessageStream } from '../../../lib/PromptCachingBetaMessageStream';
 
 export class Messages extends APIResource {
   /**
-   * Create a Message.
-   *
    * Send a structured list of input messages with text and/or image content, and the
    * model will generate the next message in the conversation.
    *
@@ -19,27 +18,31 @@ export class Messages extends APIResource {
    * conversations.
    */
   create(
-    body: MessageCreateParamsNonStreaming,
+    params: MessageCreateParamsNonStreaming,
     options?: Core.RequestOptions,
   ): APIPromise<PromptCachingBetaMessage>;
   create(
-    body: MessageCreateParamsStreaming,
+    params: MessageCreateParamsStreaming,
     options?: Core.RequestOptions,
   ): APIPromise<Stream<RawPromptCachingBetaMessageStreamEvent>>;
   create(
-    body: MessageCreateParamsBase,
+    params: MessageCreateParamsBase,
     options?: Core.RequestOptions,
   ): APIPromise<Stream<RawPromptCachingBetaMessageStreamEvent> | PromptCachingBetaMessage>;
   create(
-    body: MessageCreateParams,
+    params: MessageCreateParams,
     options?: Core.RequestOptions,
   ): APIPromise<PromptCachingBetaMessage> | APIPromise<Stream<RawPromptCachingBetaMessageStreamEvent>> {
+    const { betas, ...body } = params;
     return this._client.post('/v1/messages?beta=prompt_caching', {
       body,
       timeout: (this._client as any)._options.timeout ?? 600000,
       ...options,
-      headers: { 'anthropic-beta': 'prompt-caching-2024-07-31', ...options?.headers },
-      stream: body.stream ?? false,
+      headers: {
+        'anthropic-beta': betas != null ? betas.toString() : 'prompt-caching-2024-07-31',
+        ...options?.headers,
+      },
+      stream: params.stream ?? false,
     }) as APIPromise<PromptCachingBetaMessage> | APIPromise<Stream<RawPromptCachingBetaMessageStreamEvent>>;
   }
@@ -304,7 +307,7 @@ export type MessageCreateParams = MessageCreateParamsNonStreaming | MessageCreat
 
 export interface MessageCreateParamsBase {
   /**
-   * The maximum number of tokens to generate before stopping.
+   * Body param: The maximum number of tokens to generate before stopping.
    *
    * Note that our models may stop _before_ reaching this maximum. This parameter
    * only specifies the absolute maximum number of tokens to generate.
@@ -315,7 +318,7 @@ export interface MessageCreateParamsBase {
   max_tokens: number;
 
   /**
-   * Input messages.
+   * Body param: Input messages.
    *
    * Our models are trained to operate on alternating `user` and `assistant`
    * conversational turns. When creating a new `Message`, you specify the prior
@@ -404,19 +407,19 @@ export interface MessageCreateParamsBase {
   messages: Array<PromptCachingBetaMessageParam>;
 
   /**
-   * The model that will complete your prompt.\n\nSee
+   * Body param: The model that will complete your prompt.\n\nSee
    * [models](https://docs.anthropic.com/en/docs/models-overview) for additional
    * details and options.
    */
   model: MessagesAPI.Model;
 
   /**
-   * An object describing metadata about the request.
+   * Body param: An object describing metadata about the request.
    */
   metadata?: MessagesAPI.Metadata;
 
   /**
-   * Custom text sequences that will cause the model to stop generating.
+   * Body param: Custom text sequences that will cause the model to stop generating.
    *
    * Our models will normally stop when they have naturally completed their turn,
    * which will result in a response `stop_reason` of `"end_turn"`.
@@ -429,7 +432,8 @@ export interface MessageCreateParamsBase {
   stop_sequences?: Array<string>;
 
   /**
-   * Whether to incrementally stream the response using server-sent events.
+   * Body param: Whether to incrementally stream the response using server-sent
+   * events.
    *
    * See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
    * details.
@@ -437,7 +441,7 @@ export interface MessageCreateParamsBase {
   stream?: boolean;
 
   /**
-   * System prompt.
+   * Body param: System prompt.
    *
    * A system prompt is a way of providing context and instructions to Claude, such
    * as specifying a particular goal or role. See our
@@ -446,7 +450,7 @@ export interface MessageCreateParamsBase {
   system?: string | Array<PromptCachingBetaTextBlockParam>;
 
   /**
-   * Amount of randomness injected into the response.
+   * Body param: Amount of randomness injected into the response.
    *
    * Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
    * for analytical / multiple choice, and closer to `1.0` for creative and
@@ -458,13 +462,13 @@ export interface MessageCreateParamsBase {
   temperature?: number;
 
   /**
-   * How the model should use the provided tools. The model can use a specific tool,
-   * any available tool, or decide by itself.
+   * Body param: How the model should use the provided tools. The model can use a
+   * specific tool, any available tool, or decide by itself.
    */
   tool_choice?: MessagesAPI.ToolChoice;
 
   /**
-   * Definitions of tools that the model may use.
+   * Body param: Definitions of tools that the model may use.
    *
    * If you include `tools` in your API request, the model may return `tool_use`
    * content blocks that represent the model's use of those tools. You can then run
@@ -536,7 +540,7 @@ export interface MessageCreateParamsBase {
   tools?: Array<PromptCachingBetaTool>;
 
   /**
-   * Only sample from the top K options for each subsequent token.
+   * Body param: Only sample from the top K options for each subsequent token.
    *
    * Used to remove "long tail" low probability responses.
    * [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
@@ -547,7 +551,7 @@ export interface MessageCreateParamsBase {
   top_k?: number;
 
   /**
-   * Use nucleus sampling.
+   * Body param: Use nucleus sampling.
    *
    * In nucleus sampling, we compute the cumulative distribution over all the options
    * for each subsequent token in decreasing probability order and cut it off once it
@@ -558,6 +562,11 @@ export interface MessageCreateParamsBase {
    * `temperature`.
    */
   top_p?: number;
+
+  /**
+   * Header param: Optional header to specify the beta version(s) you want to use.
+   */
+  betas?: Array<BetaAPI.AnthropicBeta>;
 }
 
 export namespace MessageCreateParams {
@@ -587,7 +596,8 @@ export namespace MessageCreateParams {
 
 export interface MessageCreateParamsNonStreaming extends MessageCreateParamsBase {
   /**
-   * Whether to incrementally stream the response using server-sent events.
+   * Body param: Whether to incrementally stream the response using server-sent
+   * events.
    *
    * See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
    * details.
@@ -597,7 +607,8 @@ export interface MessageCreateParamsNonStreaming extends MessageCreateParamsBase
 
 export interface MessageCreateParamsStreaming extends MessageCreateParamsBase {
   /**
-   * Whether to incrementally stream the response using server-sent events.
+   * Body param: Whether to incrementally stream the response using server-sent
+   * events.
    *
    * See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
    * details.
diff --git a/src/resources/index.ts b/src/resources/index.ts
index 731d789d..492921df 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -1,6 +1,18 @@
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-export { Beta } from './beta/beta';
+export {
+  AnthropicBeta,
+  BetaAPIError,
+  BetaAuthenticationError,
+  BetaError,
+  BetaErrorResponse,
+  BetaInvalidRequestError,
+  BetaNotFoundError,
+  BetaOverloadedError,
+  BetaPermissionError,
+  BetaRateLimitError,
+  Beta,
+} from './beta/beta';
 export {
   Completion,
   CompletionCreateParams,
diff --git a/src/resources/messages.ts b/src/resources/messages.ts
index 049624b2..542f7c01 100644
--- a/src/resources/messages.ts
+++ b/src/resources/messages.ts
@@ -11,8 +11,6 @@ export { MessageStream } from '../lib/MessageStream';
 
 export class Messages extends APIResource {
   /**
-   * Create a Message.
-   *
    * Send a structured list of input messages with text and/or image content, and the
    * model will generate the next message in the conversation.
    *
diff --git a/tests/api-resources/beta/messages/batches.test.ts b/tests/api-resources/beta/messages/batches.test.ts
new file mode 100644
index 00000000..4bc02208
--- /dev/null
+++ b/tests/api-resources/beta/messages/batches.test.ts
@@ -0,0 +1,356 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import Anthropic from '@anthropic-ai/sdk';
+import { Response } from 'node-fetch';
+
+const client = new Anthropic({
+  apiKey: 'my-anthropic-api-key',
+  baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource batches', () => {
+  test('create: only required params', async () => {
+    const responsePromise = client.beta.messages.batches.create({
+      requests: [
+        {
+          custom_id: 'my-custom-id-1',
+          params: {
+            max_tokens: 1024,
+            messages: [{ content: 'Hello, world', role: 'user' }],
+            model: 'claude-3-5-sonnet-20240620',
+          },
+        },
+        {
+          custom_id: 'my-custom-id-1',
+          params: {
+            max_tokens: 1024,
+            messages: [{ content: 'Hello, world', role: 'user' }],
+            model: 'claude-3-5-sonnet-20240620',
+          },
+        },
+        {
+          custom_id: 'my-custom-id-1',
+          params: {
+            max_tokens: 1024,
+            messages: [{ content: 'Hello, world', role: 'user' }],
+            model: 'claude-3-5-sonnet-20240620',
+          },
+        },
+      ],
+    });
+    const rawResponse = await responsePromise.asResponse();
+    expect(rawResponse).toBeInstanceOf(Response);
+    const response = await responsePromise;
+    expect(response).not.toBeInstanceOf(Response);
+    const dataAndResponse = await responsePromise.withResponse();
+    expect(dataAndResponse.data).toBe(response);
+    expect(dataAndResponse.response).toBe(rawResponse);
+  });
+
+  test('create: required and optional params', async () => {
+    const response = await client.beta.messages.batches.create({
+      requests: [
+        {
+          custom_id: 'my-custom-id-1',
+          params: {
+            max_tokens: 1024,
+            messages: [{ content: 'Hello, world', role: 'user' }],
+            model: 'claude-3-5-sonnet-20240620',
+            metadata: { user_id: '13803d75-b4b5-4c3e-b2a2-6f21399b021b' },
+            stop_sequences: ['string', 'string', 'string'],
+            stream: false,
+            system: [
+              { text: "Today's date is 2024-06-01.", type: 'text', cache_control: { type: 'ephemeral' } },
+            ],
+            temperature: 1,
+            tool_choice: { type: 'auto', disable_parallel_tool_use: true },
+            tools: [
+              {
+                input_schema: {
+                  type: 'object',
+                  properties: {
+                    location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+                    unit: {
+                      description: 'Unit for the output - one of (celsius, fahrenheit)',
+                      type: 'string',
+                    },
+                  },
+                },
+                name: 'x',
+                cache_control: { type: 'ephemeral' },
+                description: 'Get the current weather in a given location',
+              },
+              {
+                input_schema: {
+                  type: 'object',
+                  properties: {
+                    location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+                    unit: {
+                      description: 'Unit for the output - one of (celsius, fahrenheit)',
+                      type: 'string',
+                    },
+                  },
+                },
+                name: 'x',
+                cache_control: { type: 'ephemeral' },
+                description: 'Get the current weather in a given location',
+              },
+              {
+                input_schema: {
+                  type: 'object',
+                  properties: {
+                    location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+                    unit: {
+                      description: 'Unit for the output - one of (celsius, fahrenheit)',
+                      type: 'string',
+                    },
+                  },
+                },
+                name: 'x',
+                cache_control: { type: 'ephemeral' },
+                description: 'Get the current weather in a given location',
+              },
+            ],
+            top_k: 5,
+            top_p: 0.7,
+          },
+        },
+        {
+          custom_id: 'my-custom-id-1',
+          params: {
+            max_tokens: 1024,
+            messages: [{ content: 'Hello, world', role: 'user' }],
+            model: 'claude-3-5-sonnet-20240620',
+            metadata: { user_id: '13803d75-b4b5-4c3e-b2a2-6f21399b021b' },
+            stop_sequences: ['string', 'string', 'string'],
+            stream: false,
+            system: [
+              { text: "Today's date is 2024-06-01.", type: 'text', cache_control: { type: 'ephemeral' } },
+            ],
+            temperature: 1,
+            tool_choice: { type: 'auto', disable_parallel_tool_use: true },
+            tools: [
+              {
+                input_schema: {
+                  type: 'object',
+                  properties: {
+                    location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+                    unit: {
+                      description: 'Unit for the output - one of (celsius, fahrenheit)',
+                      type: 'string',
+                    },
+                  },
+                },
+                name: 'x',
+                cache_control: { type: 'ephemeral' },
+                description: 'Get the current weather in a given location',
+              },
+              {
+                input_schema: {
+                  type: 'object',
+                  properties: {
+                    location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+                    unit: {
+                      description: 'Unit for the output - one of (celsius, fahrenheit)',
+                      type: 'string',
+                    },
+                  },
+                },
+                name: 'x',
+                cache_control: { type: 'ephemeral' },
+                description: 'Get the current weather in a given location',
+              },
+              {
+                input_schema: {
+                  type: 'object',
+                  properties: {
+                    location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+                    unit: {
+                      description: 'Unit for the output - one of (celsius, fahrenheit)',
+                      type: 'string',
+                    },
+                  },
+                },
+                name: 'x',
+                cache_control: { type: 'ephemeral' },
+                description: 'Get the current weather in a given location',
+              },
+            ],
+            top_k: 5,
+            top_p: 0.7,
+          },
+        },
+        {
+          custom_id: 'my-custom-id-1',
+          params: {
+            max_tokens: 1024,
+            messages: [{ content: 'Hello, world', role: 'user' }],
+            model: 'claude-3-5-sonnet-20240620',
+            metadata: { user_id: '13803d75-b4b5-4c3e-b2a2-6f21399b021b' },
+            stop_sequences: ['string', 'string', 'string'],
+            stream: false,
+            system: [
+              { text: "Today's date is 2024-06-01.", type: 'text', cache_control: { type: 'ephemeral' } },
+            ],
+            temperature: 1,
+            tool_choice: { type: 'auto', disable_parallel_tool_use: true },
+            tools: [
+              {
+                input_schema: {
+                  type: 'object',
+                  properties: {
+                    location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+                    unit: {
+                      description: 'Unit for the output - one of (celsius, fahrenheit)',
+                      type: 'string',
+                    },
+                  },
+                },
+                name: 'x',
+                cache_control: { type: 'ephemeral' },
+                description: 'Get the current weather in a given location',
+              },
+              {
+                input_schema: {
+                  type: 'object',
+                  properties: {
+                    location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+                    unit: {
+                      description: 'Unit for the output - one of (celsius, fahrenheit)',
+                      type: 'string',
+                    },
+                  },
+                },
+                name: 'x',
+                cache_control: { type: 'ephemeral' },
+                description: 'Get the current weather in a given location',
+              },
+              {
+                input_schema: {
+                  type: 'object',
+                  properties: {
+                    location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+                    unit: {
+                      description: 'Unit for the output - one of (celsius, fahrenheit)',
+                      type: 'string',
+                    },
+                  },
+                },
+                name: 'x',
+                cache_control: { type: 'ephemeral' },
+                description: 'Get the current weather in a given location',
+              },
+            ],
+            top_k: 5,
+            top_p: 0.7,
+          },
+        },
+      ],
+      betas: ['string', 'string', 'string'],
+    });
+  });
+
+  test('retrieve', async () => {
+    const responsePromise = client.beta.messages.batches.retrieve('message_batch_id');
+    const rawResponse = await responsePromise.asResponse();
+    expect(rawResponse).toBeInstanceOf(Response);
+    const response = await responsePromise;
+    expect(response).not.toBeInstanceOf(Response);
+    const dataAndResponse = await responsePromise.withResponse();
+    expect(dataAndResponse.data).toBe(response);
+    expect(dataAndResponse.response).toBe(rawResponse);
+  });
+
+  test('retrieve: request options instead of params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid request path in order to cause an error
+    await expect(
+      client.beta.messages.batches.retrieve('message_batch_id', { path: '/_stainless_unknown_path' }),
+    ).rejects.toThrow(Anthropic.NotFoundError);
+  });
+
+  test('retrieve: request options and params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid request path in order to cause an error
+    await expect(
+      client.beta.messages.batches.retrieve(
+        'message_batch_id',
+        { betas: ['string', 'string', 'string'] },
+        { path: '/_stainless_unknown_path' },
+      ),
+    ).rejects.toThrow(Anthropic.NotFoundError);
+  });
+
+  test('list', async () => {
+    const responsePromise = client.beta.messages.batches.list();
+    const rawResponse = await responsePromise.asResponse();
+    expect(rawResponse).toBeInstanceOf(Response);
+    const response = await responsePromise;
+    expect(response).not.toBeInstanceOf(Response);
+    const dataAndResponse = await responsePromise.withResponse();
+    expect(dataAndResponse.data).toBe(response);
+    expect(dataAndResponse.response).toBe(rawResponse);
+  });
+
+  test('list: request options instead of params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid request path in order to cause an error
+    await expect(client.beta.messages.batches.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
+      Anthropic.NotFoundError,
+    );
+  });
+
+  test('list: request options and params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid request path in order to cause an error
+    await expect(
+      client.beta.messages.batches.list(
+        { after_id: 'after_id', before_id: 'before_id', limit: 1, betas: ['string', 'string', 'string'] },
+        { path: '/_stainless_unknown_path' },
+      ),
+    ).rejects.toThrow(Anthropic.NotFoundError);
+  });
+
+  test('cancel', async () => {
+    const responsePromise = client.beta.messages.batches.cancel('message_batch_id');
+    const rawResponse = await responsePromise.asResponse();
+    expect(rawResponse).toBeInstanceOf(Response);
+    const response = await responsePromise;
+    expect(response).not.toBeInstanceOf(Response);
+    const dataAndResponse = await responsePromise.withResponse();
+    expect(dataAndResponse.data).toBe(response);
+    expect(dataAndResponse.response).toBe(rawResponse);
+  });
+
+  test('cancel: request options instead of params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid request path in order to cause an error
+    await expect(
+      client.beta.messages.batches.cancel('message_batch_id', { path: '/_stainless_unknown_path' }),
+    ).rejects.toThrow(Anthropic.NotFoundError);
+  });
+
+  test('cancel: request options and params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid request path in order to cause an error
+    await expect(
+      client.beta.messages.batches.cancel(
+        'message_batch_id',
+        { betas: ['string', 'string', 'string'] },
+        { path: '/_stainless_unknown_path' },
+      ),
+    ).rejects.toThrow(Anthropic.NotFoundError);
+  });
+
+  test('results: request options instead of params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid request path in order to cause an error
+    await expect(
+      client.beta.messages.batches.results('message_batch_id', { path: '/_stainless_unknown_path' }),
+    ).rejects.toThrow(Anthropic.NotFoundError);
+  });
+
+  test('results: request options and params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid request path in order to cause an error
+    await expect(
+      client.beta.messages.batches.results(
+        'message_batch_id',
+        { betas: ['string', 'string', 'string'] },
+        { path: '/_stainless_unknown_path' },
+      ),
+    ).rejects.toThrow(Anthropic.NotFoundError);
+  });
+});
diff --git a/tests/api-resources/beta/messages/messages.test.ts b/tests/api-resources/beta/messages/messages.test.ts
new file mode 100644
index 00000000..2a5f6433
--- /dev/null
+++ b/tests/api-resources/beta/messages/messages.test.ts
@@ -0,0 +1,81 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import Anthropic from '@anthropic-ai/sdk';
+import { Response } from 'node-fetch';
+
+const client = new Anthropic({
+  apiKey: 'my-anthropic-api-key',
+  baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource messages', () => {
+  test('create: only required params', async () => {
+    const responsePromise = client.beta.messages.create({
+      max_tokens: 1024,
+      messages: [{ content: 'Hello, world', role: 'user' }],
+      model: 'claude-3-5-sonnet-20240620',
+    });
+    const rawResponse = await responsePromise.asResponse();
+    expect(rawResponse).toBeInstanceOf(Response);
+    const response = await responsePromise;
+    expect(response).not.toBeInstanceOf(Response);
+    const dataAndResponse = await responsePromise.withResponse();
+    expect(dataAndResponse.data).toBe(response);
+    expect(dataAndResponse.response).toBe(rawResponse);
+  });
+
+  test('create: required and optional params', async () => {
+    const response = await client.beta.messages.create({
+      max_tokens: 1024,
+      messages: [{ content: 'Hello, world', role: 'user' }],
+      model: 'claude-3-5-sonnet-20240620',
+      metadata: { user_id: '13803d75-b4b5-4c3e-b2a2-6f21399b021b' },
+      stop_sequences: ['string', 'string', 'string'],
+      stream: false,
+      system: [{ text: "Today's date is 2024-06-01.", type: 'text', cache_control: { type: 'ephemeral' } }],
+      temperature: 1,
+      tool_choice: { type: 'auto', disable_parallel_tool_use: true },
+      tools: [
+        {
+          input_schema: {
+            type: 'object',
+            properties: {
+              location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+              unit: { description: 'Unit for the output - one of (celsius, fahrenheit)', type: 'string' },
+            },
+          },
+          name: 'x',
+          cache_control: { type: 'ephemeral' },
+          description: 'Get the current weather in a given location',
+        },
+        {
+          input_schema: {
+            type: 'object',
+            properties: {
+              location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+              unit: { description: 'Unit for the output - one of (celsius, fahrenheit)', type: 'string' },
+            },
+          },
+          name: 'x',
+          cache_control: { type: 'ephemeral' },
+          description: 'Get the current weather in a given location',
+        },
+        {
+          input_schema: {
+            type: 'object',
+            properties: {
+              location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+              unit: { description: 'Unit for the output - one of (celsius, fahrenheit)', type: 'string' },
+            },
+          },
+          name: 'x',
+          cache_control: { type: 'ephemeral' },
+          description: 'Get the current weather in a given location',
+        },
+      ],
+      top_k: 5,
+      top_p: 0.7,
+      betas: ['string', 'string', 'string'],
+    });
+  });
+});
diff --git a/tests/api-resources/beta/prompt-caching/messages.test.ts b/tests/api-resources/beta/prompt-caching/messages.test.ts
index e7e2fada..37034485 100644
--- a/tests/api-resources/beta/prompt-caching/messages.test.ts
+++ b/tests/api-resources/beta/prompt-caching/messages.test.ts
@@ -75,6 +75,7 @@ describe('resource messages', () => {
       ],
       top_k: 5,
       top_p: 0.7,
+      betas: ['string', 'string', 'string'],
     });
   });
 });
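---

One behavioral note on the change to `src/resources/beta/prompt-caching/messages.ts` above: when `betas` is supplied, the array is comma-joined via `toString()` and replaces the default `prompt-caching-2024-07-31` value of the `anthropic-beta` header, rather than being appended to it. A minimal sketch of the resulting requests (the second beta name is a hypothetical placeholder, not an identifier this patch defines):

```ts
import Anthropic from '@anthropic-ai/sdk';

const client = new Anthropic();

async function main() {
  // No `betas`: the request is sent with `anthropic-beta: prompt-caching-2024-07-31`.
  await client.beta.promptCaching.messages.create({
    model: 'claude-3-5-sonnet-20240620',
    max_tokens: 1024,
    messages: [{ role: 'user', content: 'Hello, world' }],
  });

  // With `betas`, the header becomes the comma-joined array, so callers who still
  // want prompt caching must include it themselves. Resulting header:
  // `anthropic-beta: prompt-caching-2024-07-31,my-other-beta`
  // ('my-other-beta' is illustrative only.)
  await client.beta.promptCaching.messages.create({
    model: 'claude-3-5-sonnet-20240620',
    max_tokens: 1024,
    messages: [{ role: 'user', content: 'Hello, world' }],
    betas: ['prompt-caching-2024-07-31', 'my-other-beta'],
  });
}

main();
```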