diff --git a/.stats.yml b/.stats.yml
index e3a48530..660e6d2d 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
-configured_endpoints: 3
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/anthropic-11e9f674e1fe0eb85a713c2852de663254d4e9254dea701008dcd605a04987d5.yml
+configured_endpoints: 2
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/anthropic-4f6df1026ffeed840bbfada906ac51144508d1e1b099084c593aa9bac97a3362.yml
diff --git a/api.md b/api.md
index 27cb6720..5ac54252 100644
--- a/api.md
+++ b/api.md
@@ -9,6 +9,7 @@ from anthropic.types import (
ContentBlockStartEvent,
ContentBlockStopEvent,
ImageBlockParam,
+ InputJsonDelta,
Message,
MessageDeltaEvent,
MessageDeltaUsage,
@@ -26,43 +27,15 @@ from anthropic.types import (
TextBlock,
TextBlockParam,
TextDelta,
- Usage,
-)
-```
-
-Methods:
-
-- client.messages.create(\*\*params) -> Message
-- client.messages.stream(\*args) -> MessageStreamManager[MessageStream] | MessageStreamManager[MessageStreamT]
-
-# Beta
-
-## Tools
-
-### Messages
-
-Types:
-
-```python
-from anthropic.types.beta.tools import (
- InputJsonDelta,
- RawToolsBetaContentBlockDeltaEvent,
- RawToolsBetaContentBlockStartEvent,
- RawToolsBetaMessageStreamEvent,
Tool,
ToolResultBlockParam,
ToolUseBlock,
ToolUseBlockParam,
- ToolsBetaContentBlock,
- ToolsBetaContentBlockDeltaEvent,
- ToolsBetaContentBlockStartEvent,
- ToolsBetaMessage,
- ToolsBetaMessageParam,
- ToolsBetaMessageStreamEvent,
+ Usage,
)
```

Methods:

-- client.beta.tools.messages.create(\*\*params) -> ToolsBetaMessage
-- client.beta.tools.messages.stream(\*args) -> ToolsBetaMessageStreamManager[ToolsBetaMessageStream] | ToolsBetaMessageStreamManager[ToolsBetaMessageStreamT]
+- client.messages.create(\*\*params) -> Message
+- client.messages.stream(\*args) -> MessageStreamManager[MessageStream] | MessageStreamManager[MessageStreamT]
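With tool use promoted to general availability, tool-enabled requests go through the top-level Messages API instead of the removed `client.beta.tools` namespace. A minimal sketch of the migrated call (the model id is illustrative, and the tool schema reuses the `get_stock_price` example from the removed docstrings):

```python
from anthropic import Anthropic

client = Anthropic()  # reads ANTHROPIC_API_KEY from the environment

message = client.messages.create(
    model="claude-3-opus-20240229",  # illustrative model id
    max_tokens=1024,
    tools=[
        {
            "name": "get_stock_price",
            "description": "Get the current stock price for a given ticker symbol.",
            "input_schema": {
                "type": "object",
                "properties": {"ticker": {"type": "string"}},
                "required": ["ticker"],
            },
        }
    ],
    messages=[{"role": "user", "content": "What's the S&P 500 at today?"}],
)
print(message.content)  # may include tool_use blocks alongside text
```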
diff --git a/src/anthropic/_client.py b/src/anthropic/_client.py
index 693e3a96..ac148940 100644
--- a/src/anthropic/_client.py
+++ b/src/anthropic/_client.py
@@ -58,7 +58,6 @@
class Anthropic(SyncAPIClient):
completions: resources.Completions
messages: resources.Messages
- beta: resources.Beta
with_raw_response: AnthropicWithRawResponse
with_streaming_response: AnthropicWithStreamedResponse

@@ -137,7 +136,6 @@ def __init__(

self.completions = resources.Completions(self)
self.messages = resources.Messages(self)
- self.beta = resources.Beta(self)
self.with_raw_response = AnthropicWithRawResponse(self)
self.with_streaming_response = AnthropicWithStreamedResponse(self)

@@ -322,7 +320,6 @@ def _make_status_error(
class AsyncAnthropic(AsyncAPIClient):
completions: resources.AsyncCompletions
messages: resources.AsyncMessages
- beta: resources.AsyncBeta
with_raw_response: AsyncAnthropicWithRawResponse
with_streaming_response: AsyncAnthropicWithStreamedResponse

@@ -401,7 +398,6 @@ def __init__(

self.completions = resources.AsyncCompletions(self)
self.messages = resources.AsyncMessages(self)
- self.beta = resources.AsyncBeta(self)
self.with_raw_response = AsyncAnthropicWithRawResponse(self)
self.with_streaming_response = AsyncAnthropicWithStreamedResponse(self)

@@ -587,28 +583,24 @@ class AnthropicWithRawResponse:
def __init__(self, client: Anthropic) -> None:
self.completions = resources.CompletionsWithRawResponse(client.completions)
self.messages = resources.MessagesWithRawResponse(client.messages)
- self.beta = resources.BetaWithRawResponse(client.beta)


class AsyncAnthropicWithRawResponse:
def __init__(self, client: AsyncAnthropic) -> None:
self.completions = resources.AsyncCompletionsWithRawResponse(client.completions)
self.messages = resources.AsyncMessagesWithRawResponse(client.messages)
- self.beta = resources.AsyncBetaWithRawResponse(client.beta)


class AnthropicWithStreamedResponse:
def __init__(self, client: Anthropic) -> None:
self.completions = resources.CompletionsWithStreamingResponse(client.completions)
self.messages = resources.MessagesWithStreamingResponse(client.messages)
- self.beta = resources.BetaWithStreamingResponse(client.beta)


class AsyncAnthropicWithStreamedResponse:
def __init__(self, client: AsyncAnthropic) -> None:
self.completions = resources.AsyncCompletionsWithStreamingResponse(client.completions)
self.messages = resources.AsyncMessagesWithStreamingResponse(client.messages)
- self.beta = resources.AsyncBetaWithStreamingResponse(client.beta)


Client = Anthropic
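The `beta` attribute also disappears from `AnthropicWithRawResponse` and the other three wrapper classes, so raw-response access moves to the top-level `messages` resource as well. A hedged sketch (illustrative model id):

```python
from anthropic import Anthropic

client = Anthropic()

# Previously: client.beta.tools.messages.create(...) and
# client.with_raw_response.beta.tools.messages.create(...).
# Now the same functionality lives on the top-level messages resource.
response = client.with_raw_response.messages.create(
    model="claude-3-opus-20240229",  # illustrative model id
    max_tokens=256,
    messages=[{"role": "user", "content": "Hello, Claude"}],
)
print(response.headers.get("request-id"))  # raw access keeps HTTP details
message = response.parse()  # the parsed Message object
```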
diff --git a/src/anthropic/resources/__init__.py b/src/anthropic/resources/__init__.py
index 318d5cdd..cc6cc5be 100644
--- a/src/anthropic/resources/__init__.py
+++ b/src/anthropic/resources/__init__.py
@@ -1,13 +1,5 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

-from .beta import (
- Beta,
- AsyncBeta,
- BetaWithRawResponse,
- AsyncBetaWithRawResponse,
- BetaWithStreamingResponse,
- AsyncBetaWithStreamingResponse,
-)
from .messages import (
Messages,
AsyncMessages,
@@ -38,10 +30,4 @@
"AsyncMessagesWithRawResponse",
"MessagesWithStreamingResponse",
"AsyncMessagesWithStreamingResponse",
- "Beta",
- "AsyncBeta",
- "BetaWithRawResponse",
- "AsyncBetaWithRawResponse",
- "BetaWithStreamingResponse",
- "AsyncBetaWithStreamingResponse",
]
diff --git a/src/anthropic/resources/beta/__init__.py b/src/anthropic/resources/beta/__init__.py
deleted file mode 100644
index 77971b13..00000000
--- a/src/anthropic/resources/beta/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .beta import (
- Beta,
- AsyncBeta,
- BetaWithRawResponse,
- AsyncBetaWithRawResponse,
- BetaWithStreamingResponse,
- AsyncBetaWithStreamingResponse,
-)
-from .tools import (
- Tools,
- AsyncTools,
- ToolsWithRawResponse,
- AsyncToolsWithRawResponse,
- ToolsWithStreamingResponse,
- AsyncToolsWithStreamingResponse,
-)
-
-__all__ = [
- "Tools",
- "AsyncTools",
- "ToolsWithRawResponse",
- "AsyncToolsWithRawResponse",
- "ToolsWithStreamingResponse",
- "AsyncToolsWithStreamingResponse",
- "Beta",
- "AsyncBeta",
- "BetaWithRawResponse",
- "AsyncBetaWithRawResponse",
- "BetaWithStreamingResponse",
- "AsyncBetaWithStreamingResponse",
-]
diff --git a/src/anthropic/resources/beta/beta.py b/src/anthropic/resources/beta/beta.py
deleted file mode 100644
index 3ffe29e2..00000000
--- a/src/anthropic/resources/beta/beta.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .tools import (
- Tools,
- AsyncTools,
- ToolsWithRawResponse,
- AsyncToolsWithRawResponse,
- ToolsWithStreamingResponse,
- AsyncToolsWithStreamingResponse,
-)
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from .tools.tools import Tools, AsyncTools
-
-__all__ = ["Beta", "AsyncBeta"]
-
-
-class Beta(SyncAPIResource):
- @cached_property
- def tools(self) -> Tools:
- return Tools(self._client)
-
- @cached_property
- def with_raw_response(self) -> BetaWithRawResponse:
- return BetaWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> BetaWithStreamingResponse:
- return BetaWithStreamingResponse(self)
-
-
-class AsyncBeta(AsyncAPIResource):
- @cached_property
- def tools(self) -> AsyncTools:
- return AsyncTools(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncBetaWithRawResponse:
- return AsyncBetaWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncBetaWithStreamingResponse:
- return AsyncBetaWithStreamingResponse(self)
-
-
-class BetaWithRawResponse:
- def __init__(self, beta: Beta) -> None:
- self._beta = beta
-
- @cached_property
- def tools(self) -> ToolsWithRawResponse:
- return ToolsWithRawResponse(self._beta.tools)
-
-
-class AsyncBetaWithRawResponse:
- def __init__(self, beta: AsyncBeta) -> None:
- self._beta = beta
-
- @cached_property
- def tools(self) -> AsyncToolsWithRawResponse:
- return AsyncToolsWithRawResponse(self._beta.tools)
-
-
-class BetaWithStreamingResponse:
- def __init__(self, beta: Beta) -> None:
- self._beta = beta
-
- @cached_property
- def tools(self) -> ToolsWithStreamingResponse:
- return ToolsWithStreamingResponse(self._beta.tools)
-
-
-class AsyncBetaWithStreamingResponse:
- def __init__(self, beta: AsyncBeta) -> None:
- self._beta = beta
-
- @cached_property
- def tools(self) -> AsyncToolsWithStreamingResponse:
- return AsyncToolsWithStreamingResponse(self._beta.tools)
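The removed `beta.py` is pure plumbing: a resource class whose only job is to expose `tools` lazily through `cached_property`, mirrored for the raw and streaming wrappers. A minimal standalone sketch of that lazy sub-resource pattern (all names below are illustrative, not SDK API):

```python
from functools import cached_property


class Client:
    """Toy client illustrating the lazy sub-resource pattern above."""

    @cached_property
    def messages(self) -> "Messages":
        # Built on first access and cached on the instance, so unused
        # resources cost nothing at client construction time.
        return Messages(self)


class Messages:
    def __init__(self, client: Client) -> None:
        self._client = client


client = Client()
assert client.messages is client.messages  # same cached instance
```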
diff --git a/src/anthropic/resources/beta/tools/__init__.py b/src/anthropic/resources/beta/tools/__init__.py
deleted file mode 100644
index bd98aeea..00000000
--- a/src/anthropic/resources/beta/tools/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .tools import (
- Tools,
- AsyncTools,
- ToolsWithRawResponse,
- AsyncToolsWithRawResponse,
- ToolsWithStreamingResponse,
- AsyncToolsWithStreamingResponse,
-)
-from .messages import (
- Messages,
- AsyncMessages,
- MessagesWithRawResponse,
- AsyncMessagesWithRawResponse,
- MessagesWithStreamingResponse,
- AsyncMessagesWithStreamingResponse,
-)
-
-__all__ = [
- "Messages",
- "AsyncMessages",
- "MessagesWithRawResponse",
- "AsyncMessagesWithRawResponse",
- "MessagesWithStreamingResponse",
- "AsyncMessagesWithStreamingResponse",
- "Tools",
- "AsyncTools",
- "ToolsWithRawResponse",
- "AsyncToolsWithRawResponse",
- "ToolsWithStreamingResponse",
- "AsyncToolsWithStreamingResponse",
-]
diff --git a/src/anthropic/resources/beta/tools/messages.py b/src/anthropic/resources/beta/tools/messages.py
deleted file mode 100644
index eb6aa197..00000000
--- a/src/anthropic/resources/beta/tools/messages.py
+++ /dev/null
@@ -1,2025 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Iterable, overload
-from functools import partial
-from typing_extensions import Literal
-
-import httpx
-
-from .... import _legacy_response
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import (
- required_args,
- maybe_transform,
- async_maybe_transform,
-)
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
-from ...._streaming import Stream, AsyncStream
-from ...._base_client import (
- make_request_options,
-)
-from ....types.beta.tools import message_create_params
-from ....lib.streaming.beta import (
- ToolsBetaMessageStream,
- ToolsBetaMessageStreamT,
- AsyncToolsBetaMessageStream,
- AsyncToolsBetaMessageStreamT,
- ToolsBetaMessageStreamManager,
- AsyncToolsBetaMessageStreamManager,
-)
-from ....types.beta.tools.tool_param import ToolParam
-from ....types.beta.tools.tools_beta_message import ToolsBetaMessage
-from ....types.beta.tools.tools_beta_message_param import ToolsBetaMessageParam
-from ....types.beta.tools.raw_tools_beta_message_stream_event import RawToolsBetaMessageStreamEvent
-
-__all__ = ["Messages", "AsyncMessages"]
-
-
-class Messages(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> MessagesWithRawResponse:
- return MessagesWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> MessagesWithStreamingResponse:
- return MessagesWithStreamingResponse(self)
-
- @overload
- def create(
- self,
- *,
- max_tokens: int,
- messages: Iterable[ToolsBetaMessageParam],
- model: str,
- metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
- stop_sequences: List[str] | NotGiven = NOT_GIVEN,
- stream: Literal[False] | NotGiven = NOT_GIVEN,
- system: str | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
- top_k: int | NotGiven = NOT_GIVEN,
- top_p: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = 600,
- ) -> ToolsBetaMessage:
- """
- Create a Message.
-
- Send a structured list of input messages with text and/or image content, and the
- model will generate the next message in the conversation.
-
- The Messages API can be used for either single queries or stateless multi-turn
- conversations.
-
- Args:
- max_tokens: The maximum number of tokens to generate before stopping.
-
- Note that our models may stop _before_ reaching this maximum. This parameter
- only specifies the absolute maximum number of tokens to generate.
-
- Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/en/docs/models-overview) for details.
-
- messages: Input messages.
-
- Our models are trained to operate on alternating `user` and `assistant`
- conversational turns. When creating a new `Message`, you specify the prior
- conversational turns with the `messages` parameter, and the model then generates
- the next `Message` in the conversation.
-
- Each input message must be an object with a `role` and `content`. You can
- specify a single `user`-role message, or you can include multiple `user` and
- `assistant` messages. The first message must always use the `user` role.
-
- If the final message uses the `assistant` role, the response content will
- continue immediately from the content in that message. This can be used to
- constrain part of the model's response.
-
- Example with a single `user` message:
-
- ```json
- [{ "role": "user", "content": "Hello, Claude" }]
- ```
-
- Example with multiple conversational turns:
-
- ```json
- [
- { "role": "user", "content": "Hello there." },
- { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
- { "role": "user", "content": "Can you explain LLMs in plain English?" }
- ]
- ```
-
- Example with a partially filled response from Claude:
-
- ```json
- [
- {
- "role": "user",
- "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
- },
- { "role": "assistant", "content": "The best answer is (" }
- ]
- ```
-
- Each input message `content` may be either a single `string` or an array of
- content blocks, where each block has a specific `type`. Using a `string` for
- `content` is shorthand for an array of one content block of type `"text"`. The
- following input messages are equivalent:
-
- ```json
- { "role": "user", "content": "Hello, Claude" }
- ```
-
- ```json
- { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
- ```
-
- Starting with Claude 3 models, you can also send image content blocks:
-
- ```json
- {
- "role": "user",
- "content": [
- {
- "type": "image",
- "source": {
- "type": "base64",
- "media_type": "image/jpeg",
- "data": "/9j/4AAQSkZJRg..."
- }
- },
- { "type": "text", "text": "What is in this image?" }
- ]
- }
- ```
-
- We currently support the `base64` source type for images, and the `image/jpeg`,
- `image/png`, `image/gif`, and `image/webp` media types.
-
- See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
- input examples.
-
- Note that if you want to include a
- [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
- the top-level `system` parameter — there is no `"system"` role for input
- messages in the Messages API.
-
- model: The model that will complete your prompt.
-
- See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
- details and options.
-
- metadata: An object describing metadata about the request.
-
- stop_sequences: Custom text sequences that will cause the model to stop generating.
-
- Our models will normally stop when they have naturally completed their turn,
- which will result in a response `stop_reason` of `"end_turn"`.
-
- If you want the model to stop generating when it encounters custom strings of
- text, you can use the `stop_sequences` parameter. If the model encounters one of
- the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
- and the response `stop_sequence` value will contain the matched stop sequence.
-
- stream: Whether to incrementally stream the response using server-sent events.
-
- See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- details.
-
- system: System prompt.
-
- A system prompt is a way of providing context and instructions to Claude, such
- as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
-
- temperature: Amount of randomness injected into the response.
-
- Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
- for analytical / multiple choice, and closer to `1.0` for creative and
- generative tasks.
-
- Note that even with `temperature` of `0.0`, the results will not be fully
- deterministic.
-
- tool_choice: How the model should use the provided tools. The model can use a specific tool,
- any available tool, or decide by itself.
-
- tools: [beta] Definitions of tools that the model may use.
-
- If you include `tools` in your API request, the model may return `tool_use`
- content blocks that represent the model's use of those tools. You can then run
- those tools using the tool input generated by the model and then optionally
- return results back to the model using `tool_result` content blocks.
-
- Each tool definition includes:
-
- - `name`: Name of the tool.
- - `description`: Optional, but strongly recommended description of the tool.
- - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
- shape that the model will produce in `tool_use` output content blocks.
-
- For example, if you defined `tools` as:
-
- ```json
- [
- {
- "name": "get_stock_price",
- "description": "Get the current stock price for a given ticker symbol.",
- "input_schema": {
- "type": "object",
- "properties": {
- "ticker": {
- "type": "string",
- "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
- }
- },
- "required": ["ticker"]
- }
- }
- ]
- ```
-
- And then asked the model "What's the S&P 500 at today?", the model might produce
- `tool_use` content blocks in the response like this:
-
- ```json
- [
- {
- "type": "tool_use",
- "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
- "name": "get_stock_price",
- "input": { "ticker": "^GSPC" }
- }
- ]
- ```
-
- You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
- input, and return the following back to the model in a subsequent `user`
- message:
-
- ```json
- [
- {
- "type": "tool_result",
- "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
- "content": "259.75 USD"
- }
- ]
- ```
-
- Tools can be used for workflows that include running client-side tools and
- functions, or more generally whenever you want the model to produce a particular
- JSON structure of output.
-
- See our [beta guide](https://docs.anthropic.com/en/docs/tool-use) for more
- details.
-
- top_k: Only sample from the top K options for each subsequent token.
-
- Used to remove "long tail" low probability responses.
- [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
-
- Recommended for advanced use cases only. You usually only need to use
- `temperature`.
-
- top_p: Use nucleus sampling.
-
- In nucleus sampling, we compute the cumulative distribution over all the options
- for each subsequent token in decreasing probability order and cut it off once it
- reaches a particular probability specified by `top_p`. You should either alter
- `temperature` or `top_p`, but not both.
-
- Recommended for advanced use cases only. You usually only need to use
- `temperature`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- ...
-
- @overload
- def create(
- self,
- *,
- max_tokens: int,
- messages: Iterable[ToolsBetaMessageParam],
- model: str,
- stream: Literal[True],
- metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
- stop_sequences: List[str] | NotGiven = NOT_GIVEN,
- system: str | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
- top_k: int | NotGiven = NOT_GIVEN,
- top_p: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = 600,
- ) -> Stream[RawToolsBetaMessageStreamEvent]:
- """
- Create a Message.
-
- Send a structured list of input messages with text and/or image content, and the
- model will generate the next message in the conversation.
-
- The Messages API can be used for either single queries or stateless multi-turn
- conversations.
-
- Args:
- max_tokens: The maximum number of tokens to generate before stopping.
-
- Note that our models may stop _before_ reaching this maximum. This parameter
- only specifies the absolute maximum number of tokens to generate.
-
- Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/en/docs/models-overview) for details.
-
- messages: Input messages.
-
- Our models are trained to operate on alternating `user` and `assistant`
- conversational turns. When creating a new `Message`, you specify the prior
- conversational turns with the `messages` parameter, and the model then generates
- the next `Message` in the conversation.
-
- Each input message must be an object with a `role` and `content`. You can
- specify a single `user`-role message, or you can include multiple `user` and
- `assistant` messages. The first message must always use the `user` role.
-
- If the final message uses the `assistant` role, the response content will
- continue immediately from the content in that message. This can be used to
- constrain part of the model's response.
-
- Example with a single `user` message:
-
- ```json
- [{ "role": "user", "content": "Hello, Claude" }]
- ```
-
- Example with multiple conversational turns:
-
- ```json
- [
- { "role": "user", "content": "Hello there." },
- { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
- { "role": "user", "content": "Can you explain LLMs in plain English?" }
- ]
- ```
-
- Example with a partially filled response from Claude:
-
- ```json
- [
- {
- "role": "user",
- "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
- },
- { "role": "assistant", "content": "The best answer is (" }
- ]
- ```
-
- Each input message `content` may be either a single `string` or an array of
- content blocks, where each block has a specific `type`. Using a `string` for
- `content` is shorthand for an array of one content block of type `"text"`. The
- following input messages are equivalent:
-
- ```json
- { "role": "user", "content": "Hello, Claude" }
- ```
-
- ```json
- { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
- ```
-
- Starting with Claude 3 models, you can also send image content blocks:
-
- ```json
- {
- "role": "user",
- "content": [
- {
- "type": "image",
- "source": {
- "type": "base64",
- "media_type": "image/jpeg",
- "data": "/9j/4AAQSkZJRg..."
- }
- },
- { "type": "text", "text": "What is in this image?" }
- ]
- }
- ```
-
- We currently support the `base64` source type for images, and the `image/jpeg`,
- `image/png`, `image/gif`, and `image/webp` media types.
-
- See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
- input examples.
-
- Note that if you want to include a
- [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
- the top-level `system` parameter — there is no `"system"` role for input
- messages in the Messages API.
-
- model: The model that will complete your prompt.
-
- See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
- details and options.
-
- stream: Whether to incrementally stream the response using server-sent events.
-
- See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- details.
-
- metadata: An object describing metadata about the request.
-
- stop_sequences: Custom text sequences that will cause the model to stop generating.
-
- Our models will normally stop when they have naturally completed their turn,
- which will result in a response `stop_reason` of `"end_turn"`.
-
- If you want the model to stop generating when it encounters custom strings of
- text, you can use the `stop_sequences` parameter. If the model encounters one of
- the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
- and the response `stop_sequence` value will contain the matched stop sequence.
-
- system: System prompt.
-
- A system prompt is a way of providing context and instructions to Claude, such
- as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
-
- temperature: Amount of randomness injected into the response.
-
- Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
- for analytical / multiple choice, and closer to `1.0` for creative and
- generative tasks.
-
- Note that even with `temperature` of `0.0`, the results will not be fully
- deterministic.
-
- tool_choice: How the model should use the provided tools. The model can use a specific tool,
- any available tool, or decide by itself.
-
- tools: [beta] Definitions of tools that the model may use.
-
- If you include `tools` in your API request, the model may return `tool_use`
- content blocks that represent the model's use of those tools. You can then run
- those tools using the tool input generated by the model and then optionally
- return results back to the model using `tool_result` content blocks.
-
- Each tool definition includes:
-
- - `name`: Name of the tool.
- - `description`: Optional, but strongly recommended description of the tool.
- - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
- shape that the model will produce in `tool_use` output content blocks.
-
- For example, if you defined `tools` as:
-
- ```json
- [
- {
- "name": "get_stock_price",
- "description": "Get the current stock price for a given ticker symbol.",
- "input_schema": {
- "type": "object",
- "properties": {
- "ticker": {
- "type": "string",
- "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
- }
- },
- "required": ["ticker"]
- }
- }
- ]
- ```
-
- And then asked the model "What's the S&P 500 at today?", the model might produce
- `tool_use` content blocks in the response like this:
-
- ```json
- [
- {
- "type": "tool_use",
- "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
- "name": "get_stock_price",
- "input": { "ticker": "^GSPC" }
- }
- ]
- ```
-
- You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
- input, and return the following back to the model in a subsequent `user`
- message:
-
- ```json
- [
- {
- "type": "tool_result",
- "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
- "content": "259.75 USD"
- }
- ]
- ```
-
- Tools can be used for workflows that include running client-side tools and
- functions, or more generally whenever you want the model to produce a particular
- JSON structure of output.
-
- See our [beta guide](https://docs.anthropic.com/en/docs/tool-use) for more
- details.
-
- top_k: Only sample from the top K options for each subsequent token.
-
- Used to remove "long tail" low probability responses.
- [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
-
- Recommended for advanced use cases only. You usually only need to use
- `temperature`.
-
- top_p: Use nucleus sampling.
-
- In nucleus sampling, we compute the cumulative distribution over all the options
- for each subsequent token in decreasing probability order and cut it off once it
- reaches a particular probability specified by `top_p`. You should either alter
- `temperature` or `top_p`, but not both.
-
- Recommended for advanced use cases only. You usually only need to use
- `temperature`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- ...
-
- @overload
- def create(
- self,
- *,
- max_tokens: int,
- messages: Iterable[ToolsBetaMessageParam],
- model: str,
- stream: bool,
- metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
- stop_sequences: List[str] | NotGiven = NOT_GIVEN,
- system: str | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
- top_k: int | NotGiven = NOT_GIVEN,
- top_p: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = 600,
- ) -> ToolsBetaMessage | Stream[RawToolsBetaMessageStreamEvent]:
- """
- Create a Message.
-
- Send a structured list of input messages with text and/or image content, and the
- model will generate the next message in the conversation.
-
- The Messages API can be used for either single queries or stateless multi-turn
- conversations.
-
- Args:
- max_tokens: The maximum number of tokens to generate before stopping.
-
- Note that our models may stop _before_ reaching this maximum. This parameter
- only specifies the absolute maximum number of tokens to generate.
-
- Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/en/docs/models-overview) for details.
-
- messages: Input messages.
-
- Our models are trained to operate on alternating `user` and `assistant`
- conversational turns. When creating a new `Message`, you specify the prior
- conversational turns with the `messages` parameter, and the model then generates
- the next `Message` in the conversation.
-
- Each input message must be an object with a `role` and `content`. You can
- specify a single `user`-role message, or you can include multiple `user` and
- `assistant` messages. The first message must always use the `user` role.
-
- If the final message uses the `assistant` role, the response content will
- continue immediately from the content in that message. This can be used to
- constrain part of the model's response.
-
- Example with a single `user` message:
-
- ```json
- [{ "role": "user", "content": "Hello, Claude" }]
- ```
-
- Example with multiple conversational turns:
-
- ```json
- [
- { "role": "user", "content": "Hello there." },
- { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
- { "role": "user", "content": "Can you explain LLMs in plain English?" }
- ]
- ```
-
- Example with a partially filled response from Claude:
-
- ```json
- [
- {
- "role": "user",
- "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
- },
- { "role": "assistant", "content": "The best answer is (" }
- ]
- ```
-
- Each input message `content` may be either a single `string` or an array of
- content blocks, where each block has a specific `type`. Using a `string` for
- `content` is shorthand for an array of one content block of type `"text"`. The
- following input messages are equivalent:
-
- ```json
- { "role": "user", "content": "Hello, Claude" }
- ```
-
- ```json
- { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
- ```
-
- Starting with Claude 3 models, you can also send image content blocks:
-
- ```json
- {
- "role": "user",
- "content": [
- {
- "type": "image",
- "source": {
- "type": "base64",
- "media_type": "image/jpeg",
- "data": "/9j/4AAQSkZJRg..."
- }
- },
- { "type": "text", "text": "What is in this image?" }
- ]
- }
- ```
-
- We currently support the `base64` source type for images, and the `image/jpeg`,
- `image/png`, `image/gif`, and `image/webp` media types.
-
- See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
- input examples.
-
- Note that if you want to include a
- [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
- the top-level `system` parameter — there is no `"system"` role for input
- messages in the Messages API.
-
- model: The model that will complete your prompt.
-
- See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
- details and options.
-
- stream: Whether to incrementally stream the response using server-sent events.
-
- See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- details.
-
- metadata: An object describing metadata about the request.
-
- stop_sequences: Custom text sequences that will cause the model to stop generating.
-
- Our models will normally stop when they have naturally completed their turn,
- which will result in a response `stop_reason` of `"end_turn"`.
-
- If you want the model to stop generating when it encounters custom strings of
- text, you can use the `stop_sequences` parameter. If the model encounters one of
- the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
- and the response `stop_sequence` value will contain the matched stop sequence.
-
- system: System prompt.
-
- A system prompt is a way of providing context and instructions to Claude, such
- as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
-
- temperature: Amount of randomness injected into the response.
-
- Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
- for analytical / multiple choice, and closer to `1.0` for creative and
- generative tasks.
-
- Note that even with `temperature` of `0.0`, the results will not be fully
- deterministic.
-
- tool_choice: How the model should use the provided tools. The model can use a specific tool,
- any available tool, or decide by itself.
-
- tools: [beta] Definitions of tools that the model may use.
-
- If you include `tools` in your API request, the model may return `tool_use`
- content blocks that represent the model's use of those tools. You can then run
- those tools using the tool input generated by the model and then optionally
- return results back to the model using `tool_result` content blocks.
-
- Each tool definition includes:
-
- - `name`: Name of the tool.
- - `description`: Optional, but strongly recommended description of the tool.
- - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
- shape that the model will produce in `tool_use` output content blocks.
-
- For example, if you defined `tools` as:
-
- ```json
- [
- {
- "name": "get_stock_price",
- "description": "Get the current stock price for a given ticker symbol.",
- "input_schema": {
- "type": "object",
- "properties": {
- "ticker": {
- "type": "string",
- "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
- }
- },
- "required": ["ticker"]
- }
- }
- ]
- ```
-
- And then asked the model "What's the S&P 500 at today?", the model might produce
- `tool_use` content blocks in the response like this:
-
- ```json
- [
- {
- "type": "tool_use",
- "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
- "name": "get_stock_price",
- "input": { "ticker": "^GSPC" }
- }
- ]
- ```
-
- You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
- input, and return the following back to the model in a subsequent `user`
- message:
-
- ```json
- [
- {
- "type": "tool_result",
- "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
- "content": "259.75 USD"
- }
- ]
- ```
-
- Tools can be used for workflows that include running client-side tools and
- functions, or more generally whenever you want the model to produce a particular
- JSON structure of output.
-
- See our [beta guide](https://docs.anthropic.com/en/docs/tool-use) for more
- details.
-
- top_k: Only sample from the top K options for each subsequent token.
-
- Used to remove "long tail" low probability responses.
- [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
-
- Recommended for advanced use cases only. You usually only need to use
- `temperature`.
-
- top_p: Use nucleus sampling.
-
- In nucleus sampling, we compute the cumulative distribution over all the options
- for each subsequent token in decreasing probability order and cut it off once it
- reaches a particular probability specified by `top_p`. You should either alter
- `temperature` or `top_p`, but not both.
-
- Recommended for advanced use cases only. You usually only need to use
- `temperature`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- ...
-
- @required_args(["max_tokens", "messages", "model"], ["max_tokens", "messages", "model", "stream"])
- def create(
- self,
- *,
- max_tokens: int,
- messages: Iterable[ToolsBetaMessageParam],
- model: str,
- metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
- stop_sequences: List[str] | NotGiven = NOT_GIVEN,
- stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
- system: str | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
- top_k: int | NotGiven = NOT_GIVEN,
- top_p: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = 600,
- ) -> ToolsBetaMessage | Stream[RawToolsBetaMessageStreamEvent]:
- extra_headers = {"anthropic-beta": "tools-2024-05-16", **(extra_headers or {})}
- return self._post(
- "/v1/messages?beta=tools",
- body=maybe_transform(
- {
- "max_tokens": max_tokens,
- "messages": messages,
- "model": model,
- "metadata": metadata,
- "stop_sequences": stop_sequences,
- "stream": stream,
- "system": system,
- "temperature": temperature,
- "tool_choice": tool_choice,
- "tools": tools,
- "top_k": top_k,
- "top_p": top_p,
- },
- message_create_params.MessageCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ToolsBetaMessage,
- stream=stream or False,
- stream_cls=Stream[RawToolsBetaMessageStreamEvent],
- )
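The removed docstrings above walk through the tool-use round trip in JSON; the same flow against the GA Messages API looks roughly like the sketch below (the model id and the `run_tool` dispatcher are illustrative assumptions, not part of this diff):

```python
from anthropic import Anthropic

client = Anthropic()

stock_tool = {
    "name": "get_stock_price",
    "description": "Get the current stock price for a given ticker symbol.",
    "input_schema": {
        "type": "object",
        "properties": {"ticker": {"type": "string"}},
        "required": ["ticker"],
    },
}


def run_tool(name: str, tool_input: dict) -> str:
    # Hypothetical dispatcher; a real application would call its own code here.
    return "259.75 USD"


first = client.messages.create(
    model="claude-3-opus-20240229",  # illustrative model id
    max_tokens=1024,
    tools=[stock_tool],
    messages=[{"role": "user", "content": "What's the S&P 500 at today?"}],
)

# Execute any tool_use blocks and echo the results back as tool_result
# content in a follow-up user turn, exactly as the docstring describes.
tool_results = [
    {
        "type": "tool_result",
        "tool_use_id": block.id,
        "content": run_tool(block.name, block.input),
    }
    for block in first.content
    if block.type == "tool_use"
]

second = client.messages.create(
    model="claude-3-opus-20240229",
    max_tokens=1024,
    tools=[stock_tool],
    messages=[
        {"role": "user", "content": "What's the S&P 500 at today?"},
        {"role": "assistant", "content": first.content},
        {"role": "user", "content": tool_results},
    ],
)
print(second.content)
```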
-
- @overload
- def stream(
- self,
- *,
- max_tokens: int,
- messages: Iterable[ToolsBetaMessageParam],
- model: str,
- metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
- stop_sequences: List[str] | NotGiven = NOT_GIVEN,
- system: str | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
- top_k: int | NotGiven = NOT_GIVEN,
- top_p: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ToolsBetaMessageStreamManager[ToolsBetaMessageStream]:
- """Create a message stream with the beta tools API.
-
- https://docs.anthropic.com/en/docs/tool-use-examples
- """
- ...
-
- @overload
- def stream(
- self,
- *,
- max_tokens: int,
- messages: Iterable[ToolsBetaMessageParam],
- model: str,
- metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
- stop_sequences: List[str] | NotGiven = NOT_GIVEN,
- system: str | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
- top_k: int | NotGiven = NOT_GIVEN,
- top_p: float | NotGiven = NOT_GIVEN,
- event_handler: type[ToolsBetaMessageStreamT],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ToolsBetaMessageStreamManager[ToolsBetaMessageStreamT]:
- """Create a message stream with the beta tools API.
-
- https://docs.anthropic.com/en/docs/tool-use-examples
- """
- ...
-
- def stream( # pyright: ignore[reportInconsistentOverload]
- self,
- *,
- max_tokens: int,
- messages: Iterable[ToolsBetaMessageParam],
- model: str,
- metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
- stop_sequences: List[str] | NotGiven = NOT_GIVEN,
- system: str | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
- top_k: int | NotGiven = NOT_GIVEN,
- top_p: float | NotGiven = NOT_GIVEN,
- event_handler: type[ToolsBetaMessageStreamT] = ToolsBetaMessageStream, # type: ignore[assignment]
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ToolsBetaMessageStreamManager[ToolsBetaMessageStream] | ToolsBetaMessageStreamManager[ToolsBetaMessageStreamT]:
- """Create a message stream with the beta tools API.
-
- https://docs.anthropic.com/en/docs/tool-use-examples
- """
- extra_headers = {
- "X-Stainless-Stream-Helper": "messages",
- "X-Stainless-Custom-Event-Handler": "true" if event_handler != ToolsBetaMessageStream else "false",
- "anthropic-beta": "tools-2024-05-16",
- **(extra_headers or {}),
- }
- make_request = partial(
- self._post,
- "/v1/messages?beta=tools",
- body=maybe_transform(
- {
- "max_tokens": max_tokens,
- "messages": messages,
- "model": model,
- "metadata": metadata,
- "stop_sequences": stop_sequences,
- "stream": True,
- "system": system,
- "temperature": temperature,
- "tool_choice": tool_choice,
- "tools": tools,
- "top_k": top_k,
- "top_p": top_p,
- },
- message_create_params.MessageCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ToolsBetaMessage,
- stream=True,
- stream_cls=Stream[RawToolsBetaMessageStreamEvent],
- )
- return ToolsBetaMessageStreamManager(make_request, event_handler)
-
-
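The removed `stream()` helper returns a `ToolsBetaMessageStreamManager` context manager; its GA counterpart on `client.messages` is used the same way. A hedged usage sketch (illustrative model id):

```python
from anthropic import Anthropic

client = Anthropic()

# The manager opens the SSE connection on __enter__ and closes it on exit;
# text_stream yields text deltas as they arrive.
with client.messages.stream(
    model="claude-3-opus-20240229",  # illustrative model id
    max_tokens=1024,
    messages=[{"role": "user", "content": "Hello, Claude"}],
) as stream:
    for text in stream.text_stream:
        print(text, end="", flush=True)
    final = stream.get_final_message()  # fully assembled Message
```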
-class AsyncMessages(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncMessagesWithRawResponse:
- return AsyncMessagesWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse:
- return AsyncMessagesWithStreamingResponse(self)
-
- @overload
- async def create(
- self,
- *,
- max_tokens: int,
- messages: Iterable[ToolsBetaMessageParam],
- model: str,
- metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
- stop_sequences: List[str] | NotGiven = NOT_GIVEN,
- stream: Literal[False] | NotGiven = NOT_GIVEN,
- system: str | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
- top_k: int | NotGiven = NOT_GIVEN,
- top_p: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = 600,
- ) -> ToolsBetaMessage:
- """
- Create a Message.
-
- Send a structured list of input messages with text and/or image content, and the
- model will generate the next message in the conversation.
-
- The Messages API can be used for either single queries or stateless multi-turn
- conversations.
-
- Args:
- max_tokens: The maximum number of tokens to generate before stopping.
-
- Note that our models may stop _before_ reaching this maximum. This parameter
- only specifies the absolute maximum number of tokens to generate.
-
- Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/en/docs/models-overview) for details.
-
- messages: Input messages.
-
- Our models are trained to operate on alternating `user` and `assistant`
- conversational turns. When creating a new `Message`, you specify the prior
- conversational turns with the `messages` parameter, and the model then generates
- the next `Message` in the conversation.
-
- Each input message must be an object with a `role` and `content`. You can
- specify a single `user`-role message, or you can include multiple `user` and
- `assistant` messages. The first message must always use the `user` role.
-
- If the final message uses the `assistant` role, the response content will
- continue immediately from the content in that message. This can be used to
- constrain part of the model's response.
-
- Example with a single `user` message:
-
- ```json
- [{ "role": "user", "content": "Hello, Claude" }]
- ```
-
- Example with multiple conversational turns:
-
- ```json
- [
- { "role": "user", "content": "Hello there." },
- { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
- { "role": "user", "content": "Can you explain LLMs in plain English?" }
- ]
- ```
-
- Example with a partially filled response from Claude:
-
- ```json
- [
- {
- "role": "user",
- "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
- },
- { "role": "assistant", "content": "The best answer is (" }
- ]
- ```
-
- Each input message `content` may be either a single `string` or an array of
- content blocks, where each block has a specific `type`. Using a `string` for
- `content` is shorthand for an array of one content block of type `"text"`. The
- following input messages are equivalent:
-
- ```json
- { "role": "user", "content": "Hello, Claude" }
- ```
-
- ```json
- { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
- ```
-
- Starting with Claude 3 models, you can also send image content blocks:
-
- ```json
- {
- "role": "user",
- "content": [
- {
- "type": "image",
- "source": {
- "type": "base64",
- "media_type": "image/jpeg",
- "data": "/9j/4AAQSkZJRg..."
- }
- },
- { "type": "text", "text": "What is in this image?" }
- ]
- }
- ```
-
- We currently support the `base64` source type for images, and the `image/jpeg`,
- `image/png`, `image/gif`, and `image/webp` media types.
-
- See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
- input examples.
-
- Note that if you want to include a
- [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
- the top-level `system` parameter — there is no `"system"` role for input
- messages in the Messages API.
-
- model: The model that will complete your prompt.
-
- See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
- details and options.
-
- metadata: An object describing metadata about the request.
-
- stop_sequences: Custom text sequences that will cause the model to stop generating.
-
- Our models will normally stop when they have naturally completed their turn,
- which will result in a response `stop_reason` of `"end_turn"`.
-
- If you want the model to stop generating when it encounters custom strings of
- text, you can use the `stop_sequences` parameter. If the model encounters one of
- the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
- and the response `stop_sequence` value will contain the matched stop sequence.
-
- stream: Whether to incrementally stream the response using server-sent events.
-
- See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- details.
-
- system: System prompt.
-
- A system prompt is a way of providing context and instructions to Claude, such
- as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
-
- temperature: Amount of randomness injected into the response.
-
- Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
- for analytical / multiple choice, and closer to `1.0` for creative and
- generative tasks.
-
- Note that even with `temperature` of `0.0`, the results will not be fully
- deterministic.
-
- tool_choice: How the model should use the provided tools. The model can use a specific tool,
- any available tool, or decide by itself.
-
- tools: [beta] Definitions of tools that the model may use.
-
- If you include `tools` in your API request, the model may return `tool_use`
- content blocks that represent the model's use of those tools. You can then run
- those tools using the tool input generated by the model and then optionally
- return results back to the model using `tool_result` content blocks.
-
- Each tool definition includes:
-
- - `name`: Name of the tool.
- - `description`: Optional, but strongly recommended description of the tool.
- - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
- shape that the model will produce in `tool_use` output content blocks.
-
- For example, if you defined `tools` as:
-
- ```json
- [
- {
- "name": "get_stock_price",
- "description": "Get the current stock price for a given ticker symbol.",
- "input_schema": {
- "type": "object",
- "properties": {
- "ticker": {
- "type": "string",
- "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
- }
- },
- "required": ["ticker"]
- }
- }
- ]
- ```
-
- And then asked the model "What's the S&P 500 at today?", the model might produce
- `tool_use` content blocks in the response like this:
-
- ```json
- [
- {
- "type": "tool_use",
- "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
- "name": "get_stock_price",
- "input": { "ticker": "^GSPC" }
- }
- ]
- ```
-
- You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
- input, and return the following back to the model in a subsequent `user`
- message:
-
- ```json
- [
- {
- "type": "tool_result",
- "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
- "content": "259.75 USD"
- }
- ]
- ```
-
- Tools can be used for workflows that include running client-side tools and
- functions, or more generally whenever you want the model to produce a particular
- JSON structure of output.
-
- See our [beta guide](https://docs.anthropic.com/en/docs/tool-use) for more
- details.
-
- top_k: Only sample from the top K options for each subsequent token.
-
- Used to remove "long tail" low probability responses.
- [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
-
- Recommended for advanced use cases only. You usually only need to use
- `temperature`.
-
- top_p: Use nucleus sampling.
-
- In nucleus sampling, we compute the cumulative distribution over all the options
- for each subsequent token in decreasing probability order and cut it off once it
- reaches a particular probability specified by `top_p`. You should either alter
- `temperature` or `top_p`, but not both.
-
- Recommended for advanced use cases only. You usually only need to use
- `temperature`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- ...
-
- @overload
- async def create(
- self,
- *,
- max_tokens: int,
- messages: Iterable[ToolsBetaMessageParam],
- model: str,
- stream: Literal[True],
- metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
- stop_sequences: List[str] | NotGiven = NOT_GIVEN,
- system: str | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
- top_k: int | NotGiven = NOT_GIVEN,
- top_p: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = 600,
- ) -> AsyncStream[RawToolsBetaMessageStreamEvent]:
- """
- Create a Message.
-
- Send a structured list of input messages with text and/or image content, and the
- model will generate the next message in the conversation.
-
- The Messages API can be used for either single queries or stateless multi-turn
- conversations.
-
- Args:
- max_tokens: The maximum number of tokens to generate before stopping.
-
- Note that our models may stop _before_ reaching this maximum. This parameter
- only specifies the absolute maximum number of tokens to generate.
-
- Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/en/docs/models-overview) for details.
-
- messages: Input messages.
-
- Our models are trained to operate on alternating `user` and `assistant`
- conversational turns. When creating a new `Message`, you specify the prior
- conversational turns with the `messages` parameter, and the model then generates
- the next `Message` in the conversation.
-
- Each input message must be an object with a `role` and `content`. You can
- specify a single `user`-role message, or you can include multiple `user` and
- `assistant` messages. The first message must always use the `user` role.
-
- If the final message uses the `assistant` role, the response content will
- continue immediately from the content in that message. This can be used to
- constrain part of the model's response.
-
- Example with a single `user` message:
-
- ```json
- [{ "role": "user", "content": "Hello, Claude" }]
- ```
-
- Example with multiple conversational turns:
-
- ```json
- [
- { "role": "user", "content": "Hello there." },
- { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
- { "role": "user", "content": "Can you explain LLMs in plain English?" }
- ]
- ```
-
- Example with a partially filled response from Claude:
-
- ```json
- [
- {
- "role": "user",
- "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
- },
- { "role": "assistant", "content": "The best answer is (" }
- ]
- ```
-
- Each input message `content` may be either a single `string` or an array of
- content blocks, where each block has a specific `type`. Using a `string` for
- `content` is shorthand for an array of one content block of type `"text"`. The
- following input messages are equivalent:
-
- ```json
- { "role": "user", "content": "Hello, Claude" }
- ```
-
- ```json
- { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
- ```
-
- Starting with Claude 3 models, you can also send image content blocks:
-
- ```json
- {
- "role": "user",
- "content": [
- {
- "type": "image",
- "source": {
- "type": "base64",
- "media_type": "image/jpeg",
- "data": "/9j/4AAQSkZJRg..."
- }
- },
- { "type": "text", "text": "What is in this image?" }
- ]
- }
- ```
-
- We currently support the `base64` source type for images, and the `image/jpeg`,
- `image/png`, `image/gif`, and `image/webp` media types.
-
- See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
- input examples.
-
- Note that if you want to include a
- [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
- the top-level `system` parameter — there is no `"system"` role for input
- messages in the Messages API.
-
- model: The model that will complete your prompt.
-
- See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
- details and options.
-
- stream: Whether to incrementally stream the response using server-sent events.
-
- See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- details.
-
- metadata: An object describing metadata about the request.
-
- stop_sequences: Custom text sequences that will cause the model to stop generating.
-
- Our models will normally stop when they have naturally completed their turn,
- which will result in a response `stop_reason` of `"end_turn"`.
-
- If you want the model to stop generating when it encounters custom strings of
- text, you can use the `stop_sequences` parameter. If the model encounters one of
- the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
- and the response `stop_sequence` value will contain the matched stop sequence.
-
- system: System prompt.
-
- A system prompt is a way of providing context and instructions to Claude, such
- as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
-
- temperature: Amount of randomness injected into the response.
-
- Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
- for analytical / multiple choice, and closer to `1.0` for creative and
- generative tasks.
-
- Note that even with `temperature` of `0.0`, the results will not be fully
- deterministic.
-
- tool_choice: How the model should use the provided tools. The model can use a specific tool,
- any available tool, or decide by itself.
-
- tools: [beta] Definitions of tools that the model may use.
-
- If you include `tools` in your API request, the model may return `tool_use`
- content blocks that represent the model's use of those tools. You can then run
- those tools using the tool input generated by the model and then optionally
- return results back to the model using `tool_result` content blocks.
-
- Each tool definition includes:
-
- - `name`: Name of the tool.
- - `description`: Optional, but strongly-recommended description of the tool.
- - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
- shape that the model will produce in `tool_use` output content blocks.
-
- For example, if you defined `tools` as:
-
- ```json
- [
- {
- "name": "get_stock_price",
- "description": "Get the current stock price for a given ticker symbol.",
- "input_schema": {
- "type": "object",
- "properties": {
- "ticker": {
- "type": "string",
- "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
- }
- },
- "required": ["ticker"]
- }
- }
- ]
- ```
-
- And then asked the model "What's the S&P 500 at today?", the model might produce
- `tool_use` content blocks in the response like this:
-
- ```json
- [
- {
- "type": "tool_use",
- "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
- "name": "get_stock_price",
- "input": { "ticker": "^GSPC" }
- }
- ]
- ```
-
- You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
- input, and return the following back to the model in a subsequent `user`
- message:
-
- ```json
- [
- {
- "type": "tool_result",
- "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
- "content": "259.75 USD"
- }
- ]
- ```
-
- Tools can be used for workflows that include running client-side tools and
- functions, or more generally whenever you want the model to produce a particular
- JSON structure of output.
-
- See our [beta guide](https://docs.anthropic.com/en/docs/tool-use) for more
- details.
-
- top_k: Only sample from the top K options for each subsequent token.
-
- Used to remove "long tail" low probability responses.
- [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
-
- Recommended for advanced use cases only. You usually only need to use
- `temperature`.
-
- top_p: Use nucleus sampling.
-
- In nucleus sampling, we compute the cumulative distribution over all the options
- for each subsequent token in decreasing probability order and cut it off once it
- reaches a particular probability specified by `top_p`. You should either alter
- `temperature` or `top_p`, but not both.
-
- Recommended for advanced use cases only. You usually only need to use
- `temperature`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- ...
-
- @overload
- async def create(
- self,
- *,
- max_tokens: int,
- messages: Iterable[ToolsBetaMessageParam],
- model: str,
- stream: bool,
- metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
- stop_sequences: List[str] | NotGiven = NOT_GIVEN,
- system: str | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
- top_k: int | NotGiven = NOT_GIVEN,
- top_p: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = 600,
- ) -> ToolsBetaMessage | AsyncStream[RawToolsBetaMessageStreamEvent]:
- """
- Create a Message.
-
- Send a structured list of input messages with text and/or image content, and the
- model will generate the next message in the conversation.
-
- The Messages API can be used for either single queries or stateless multi-turn
- conversations.
-
- Args:
- max_tokens: The maximum number of tokens to generate before stopping.
-
- Note that our models may stop _before_ reaching this maximum. This parameter
- only specifies the absolute maximum number of tokens to generate.
-
- Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/en/docs/models-overview) for details.
-
- messages: Input messages.
-
- Our models are trained to operate on alternating `user` and `assistant`
- conversational turns. When creating a new `Message`, you specify the prior
- conversational turns with the `messages` parameter, and the model then generates
- the next `Message` in the conversation.
-
- Each input message must be an object with a `role` and `content`. You can
- specify a single `user`-role message, or you can include multiple `user` and
- `assistant` messages. The first message must always use the `user` role.
-
- If the final message uses the `assistant` role, the response content will
- continue immediately from the content in that message. This can be used to
- constrain part of the model's response.
-
- Example with a single `user` message:
-
- ```json
- [{ "role": "user", "content": "Hello, Claude" }]
- ```
-
- Example with multiple conversational turns:
-
- ```json
- [
- { "role": "user", "content": "Hello there." },
- { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
- { "role": "user", "content": "Can you explain LLMs in plain English?" }
- ]
- ```
-
- Example with a partially-filled response from Claude:
-
- ```json
- [
- {
- "role": "user",
- "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
- },
- { "role": "assistant", "content": "The best answer is (" }
- ]
- ```
-
- Each input message `content` may be either a single `string` or an array of
- content blocks, where each block has a specific `type`. Using a `string` for
- `content` is shorthand for an array of one content block of type `"text"`. The
- following input messages are equivalent:
-
- ```json
- { "role": "user", "content": "Hello, Claude" }
- ```
-
- ```json
- { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
- ```
-
- Starting with Claude 3 models, you can also send image content blocks:
-
- ```json
- {
- "role": "user",
- "content": [
- {
- "type": "image",
- "source": {
- "type": "base64",
- "media_type": "image/jpeg",
- "data": "/9j/4AAQSkZJRg..."
- }
- },
- { "type": "text", "text": "What is in this image?" }
- ]
- }
- ```
-
- We currently support the `base64` source type for images, and the `image/jpeg`,
- `image/png`, `image/gif`, and `image/webp` media types.
-
- See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
- input examples.
-
- Note that if you want to include a
- [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
- the top-level `system` parameter — there is no `"system"` role for input
- messages in the Messages API.
-
- model: The model that will complete your prompt.
-
- See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
- details and options.
-
- stream: Whether to incrementally stream the response using server-sent events.
-
- See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- details.
-
- metadata: An object describing metadata about the request.
-
- stop_sequences: Custom text sequences that will cause the model to stop generating.
-
- Our models will normally stop when they have naturally completed their turn,
- which will result in a response `stop_reason` of `"end_turn"`.
-
- If you want the model to stop generating when it encounters custom strings of
- text, you can use the `stop_sequences` parameter. If the model encounters one of
- the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
- and the response `stop_sequence` value will contain the matched stop sequence.
-
- system: System prompt.
-
- A system prompt is a way of providing context and instructions to Claude, such
- as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
-
- temperature: Amount of randomness injected into the response.
-
- Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
- for analytical / multiple choice, and closer to `1.0` for creative and
- generative tasks.
-
- Note that even with `temperature` of `0.0`, the results will not be fully
- deterministic.
-
- tool_choice: How the model should use the provided tools. The model can use a specific tool,
- any available tool, or decide by itself.
-
- tools: [beta] Definitions of tools that the model may use.
-
- If you include `tools` in your API request, the model may return `tool_use`
- content blocks that represent the model's use of those tools. You can then run
- those tools using the tool input generated by the model and then optionally
- return results back to the model using `tool_result` content blocks.
-
- Each tool definition includes:
-
- - `name`: Name of the tool.
- - `description`: Optional, but strongly-recommended description of the tool.
- - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
- shape that the model will produce in `tool_use` output content blocks.
-
- For example, if you defined `tools` as:
-
- ```json
- [
- {
- "name": "get_stock_price",
- "description": "Get the current stock price for a given ticker symbol.",
- "input_schema": {
- "type": "object",
- "properties": {
- "ticker": {
- "type": "string",
- "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
- }
- },
- "required": ["ticker"]
- }
- }
- ]
- ```
-
- And then asked the model "What's the S&P 500 at today?", the model might produce
- `tool_use` content blocks in the response like this:
-
- ```json
- [
- {
- "type": "tool_use",
- "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
- "name": "get_stock_price",
- "input": { "ticker": "^GSPC" }
- }
- ]
- ```
-
- You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
- input, and return the following back to the model in a subsequent `user`
- message:
-
- ```json
- [
- {
- "type": "tool_result",
- "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
- "content": "259.75 USD"
- }
- ]
- ```
-
- Tools can be used for workflows that include running client-side tools and
- functions, or more generally whenever you want the model to produce a particular
- JSON structure of output.
-
- See our [beta guide](https://docs.anthropic.com/en/docs/tool-use) for more
- details.
-
- top_k: Only sample from the top K options for each subsequent token.
-
- Used to remove "long tail" low probability responses.
- [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
-
- Recommended for advanced use cases only. You usually only need to use
- `temperature`.
-
- top_p: Use nucleus sampling.
-
- In nucleus sampling, we compute the cumulative distribution over all the options
- for each subsequent token in decreasing probability order and cut it off once it
- reaches a particular probability specified by `top_p`. You should either alter
- `temperature` or `top_p`, but not both.
-
- Recommended for advanced use cases only. You usually only need to use
- `temperature`.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- ...
-
- @required_args(["max_tokens", "messages", "model"], ["max_tokens", "messages", "model", "stream"])
- async def create(
- self,
- *,
- max_tokens: int,
- messages: Iterable[ToolsBetaMessageParam],
- model: str,
- metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
- stop_sequences: List[str] | NotGiven = NOT_GIVEN,
- stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
- system: str | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
- top_k: int | NotGiven = NOT_GIVEN,
- top_p: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = 600,
- ) -> ToolsBetaMessage | AsyncStream[RawToolsBetaMessageStreamEvent]:
- extra_headers = {"anthropic-beta": "tools-2024-05-16", **(extra_headers or {})}
- return await self._post(
- "/v1/messages?beta=tools",
- body=await async_maybe_transform(
- {
- "max_tokens": max_tokens,
- "messages": messages,
- "model": model,
- "metadata": metadata,
- "stop_sequences": stop_sequences,
- "stream": stream,
- "system": system,
- "temperature": temperature,
- "tool_choice": tool_choice,
- "tools": tools,
- "top_k": top_k,
- "top_p": top_p,
- },
- message_create_params.MessageCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ToolsBetaMessage,
- stream=stream or False,
- stream_cls=AsyncStream[RawToolsBetaMessageStreamEvent],
- )
-
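For readers tracing what the deleted `create` implementation actually did on the wire: a minimal sketch of the equivalent raw request, using `httpx` directly. The URL, body keys, and `anthropic-beta` header value come straight from the implementation above; the `anthropic-version` header, the API-key handling, and the model name are illustrative assumptions.

```python
import os

import httpx

# Sketch of the HTTP request the removed beta method issued (assumptions noted above).
response = httpx.post(
    "https://api.anthropic.com/v1/messages?beta=tools",
    headers={
        "x-api-key": os.environ["ANTHROPIC_API_KEY"],  # assumed auth scheme
        "anthropic-version": "2023-06-01",  # assumed version header
        "anthropic-beta": "tools-2024-05-16",  # injected by the implementation above
    },
    json={
        "model": "claude-3-opus-20240229",  # placeholder model
        "max_tokens": 1024,
        "messages": [{"role": "user", "content": "What's the S&P 500 at today?"}],
        "tools": [
            {
                "name": "get_stock_price",
                "description": "Get the current stock price for a given ticker symbol.",
                "input_schema": {
                    "type": "object",
                    "properties": {"ticker": {"type": "string"}},
                    "required": ["ticker"],
                },
            }
        ],
    },
    timeout=600,
)
response.raise_for_status()
print(response.json()["content"])
```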
- @overload
- def stream(
- self,
- *,
- max_tokens: int,
- messages: Iterable[ToolsBetaMessageParam],
- model: str,
- metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
- stop_sequences: List[str] | NotGiven = NOT_GIVEN,
- system: str | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
- top_k: int | NotGiven = NOT_GIVEN,
- top_p: float | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncToolsBetaMessageStreamManager[AsyncToolsBetaMessageStream]:
- """Create a message stream with the beta tools API.
-
- https://docs.anthropic.com/en/docs/tool-use-examples
- """
- ...
-
- @overload
- def stream(
- self,
- *,
- max_tokens: int,
- messages: Iterable[ToolsBetaMessageParam],
- model: str,
- metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
- stop_sequences: List[str] | NotGiven = NOT_GIVEN,
- system: str | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
- top_k: int | NotGiven = NOT_GIVEN,
- top_p: float | NotGiven = NOT_GIVEN,
- event_handler: type[AsyncToolsBetaMessageStreamT],
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncToolsBetaMessageStreamManager[AsyncToolsBetaMessageStreamT]:
- """Create a message stream with the beta tools API.
-
- https://docs.anthropic.com/en/docs/tool-use-examples
- """
- ...
-
- def stream( # pyright: ignore[reportInconsistentOverload]
- self,
- *,
- max_tokens: int,
- messages: Iterable[ToolsBetaMessageParam],
- model: str,
- metadata: message_create_params.Metadata | NotGiven = NOT_GIVEN,
- stop_sequences: List[str] | NotGiven = NOT_GIVEN,
- system: str | NotGiven = NOT_GIVEN,
- temperature: float | NotGiven = NOT_GIVEN,
- tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
- tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
- top_k: int | NotGiven = NOT_GIVEN,
- top_p: float | NotGiven = NOT_GIVEN,
- event_handler: type[AsyncToolsBetaMessageStreamT] = AsyncToolsBetaMessageStream, # type: ignore[assignment]
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> (
- AsyncToolsBetaMessageStreamManager[AsyncToolsBetaMessageStream]
- | AsyncToolsBetaMessageStreamManager[AsyncToolsBetaMessageStreamT]
- ):
- """Create a message stream with the beta tools API.
-
- https://docs.anthropic.com/en/docs/tool-use-examples
- """
- extra_headers = {
- "X-Stainless-Stream-Helper": "messages",
- "X-Stainless-Custom-Event-Handler": "true" if event_handler != AsyncToolsBetaMessageStream else "false",
- "anthropic-beta": "tools-2024-05-16",
- **(extra_headers or {}),
- }
- request = self._post(
- "/v1/messages?beta=tools",
- body=maybe_transform(
- {
- "max_tokens": max_tokens,
- "messages": messages,
- "model": model,
- "metadata": metadata,
- "stop_sequences": stop_sequences,
- "stream": True,
- "system": system,
- "temperature": temperature,
- "tool_choice": tool_choice,
- "tools": tools,
- "top_k": top_k,
- "top_p": top_p,
- },
- message_create_params.MessageCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ToolsBetaMessage,
- stream=True,
- stream_cls=AsyncStream[RawToolsBetaMessageStreamEvent],
- )
- return AsyncToolsBetaMessageStreamManager(request, event_handler)
-
-
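Before its removal, the stream helper above was reached as `client.beta.tools.messages.stream(...)` and returned a manager usable as an async context. A hedged consumption sketch, assuming `ANTHROPIC_API_KEY` is set and using a placeholder model name:

```python
import asyncio

import anthropic

client = anthropic.AsyncAnthropic()  # assumes ANTHROPIC_API_KEY is set

STOCK_TOOL = {
    "name": "get_stock_price",
    "description": "Get the current stock price for a given ticker symbol.",
    "input_schema": {
        "type": "object",
        "properties": {"ticker": {"type": "string"}},
        "required": ["ticker"],
    },
}


async def main() -> None:
    async with client.beta.tools.messages.stream(
        max_tokens=1024,
        model="claude-3-opus-20240229",  # placeholder
        messages=[{"role": "user", "content": "What's the S&P 500 at today?"}],
        tools=[STOCK_TOOL],
    ) as stream:
        async for event in stream:
            print(event.type)


asyncio.run(main())
```

The `event_handler` parameter is why the overloads are generic over `AsyncToolsBetaMessageStreamT`: passing a subclass of `AsyncToolsBetaMessageStream` swapped in custom event hooks, and the `X-Stainless-Custom-Event-Handler` header above records whether a caller did so.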
-class MessagesWithRawResponse:
- def __init__(self, messages: Messages) -> None:
- self._messages = messages
-
- self.create = _legacy_response.to_raw_response_wrapper(
- messages.create,
- )
-
-
-class AsyncMessagesWithRawResponse:
- def __init__(self, messages: AsyncMessages) -> None:
- self._messages = messages
-
- self.create = _legacy_response.async_to_raw_response_wrapper(
- messages.create,
- )
-
-
-class MessagesWithStreamingResponse:
- def __init__(self, messages: Messages) -> None:
- self._messages = messages
-
- self.create = to_streamed_response_wrapper(
- messages.create,
- )
-
-
-class AsyncMessagesWithStreamingResponse:
- def __init__(self, messages: AsyncMessages) -> None:
- self._messages = messages
-
- self.create = async_to_streamed_response_wrapper(
- messages.create,
- )
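The four wrapper classes above back the `.with_raw_response` and `.with_streaming_response` accessors that survive this change. A minimal sketch of what the raw-response wrapper buys you, assuming a configured client and a placeholder model name:

```python
import anthropic

client = anthropic.Anthropic()  # assumes ANTHROPIC_API_KEY is set

raw = client.messages.with_raw_response.create(
    max_tokens=1024,
    model="claude-3-opus-20240229",  # placeholder
    messages=[{"role": "user", "content": "Hello, Claude"}],
)
print(raw.headers)     # raw HTTP metadata is exposed
message = raw.parse()  # and the usual Message object is still recoverable
print(message.content)
```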
diff --git a/src/anthropic/resources/beta/tools/tools.py b/src/anthropic/resources/beta/tools/tools.py
deleted file mode 100644
index 7fddb044..00000000
--- a/src/anthropic/resources/beta/tools/tools.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .messages import (
- Messages,
- AsyncMessages,
- MessagesWithRawResponse,
- AsyncMessagesWithRawResponse,
- MessagesWithStreamingResponse,
- AsyncMessagesWithStreamingResponse,
-)
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-
-__all__ = ["Tools", "AsyncTools"]
-
-
-class Tools(SyncAPIResource):
- @cached_property
- def messages(self) -> Messages:
- return Messages(self._client)
-
- @cached_property
- def with_raw_response(self) -> ToolsWithRawResponse:
- return ToolsWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ToolsWithStreamingResponse:
- return ToolsWithStreamingResponse(self)
-
-
-class AsyncTools(AsyncAPIResource):
- @cached_property
- def messages(self) -> AsyncMessages:
- return AsyncMessages(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncToolsWithRawResponse:
- return AsyncToolsWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncToolsWithStreamingResponse:
- return AsyncToolsWithStreamingResponse(self)
-
-
-class ToolsWithRawResponse:
- def __init__(self, tools: Tools) -> None:
- self._tools = tools
-
- @cached_property
- def messages(self) -> MessagesWithRawResponse:
- return MessagesWithRawResponse(self._tools.messages)
-
-
-class AsyncToolsWithRawResponse:
- def __init__(self, tools: AsyncTools) -> None:
- self._tools = tools
-
- @cached_property
- def messages(self) -> AsyncMessagesWithRawResponse:
- return AsyncMessagesWithRawResponse(self._tools.messages)
-
-
-class ToolsWithStreamingResponse:
- def __init__(self, tools: Tools) -> None:
- self._tools = tools
-
- @cached_property
- def messages(self) -> MessagesWithStreamingResponse:
- return MessagesWithStreamingResponse(self._tools.messages)
-
-
-class AsyncToolsWithStreamingResponse:
- def __init__(self, tools: AsyncTools) -> None:
- self._tools = tools
-
- @cached_property
- def messages(self) -> AsyncMessagesWithStreamingResponse:
- return AsyncMessagesWithStreamingResponse(self._tools.messages)
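The deleted module above is pure namespace plumbing: each layer lazily builds its child resource with `cached_property`, so `client.beta.tools.messages` resolved to one cached instance per client. A generic sketch of the pattern (class names here are illustrative, not from the SDK):

```python
from functools import cached_property


class Messages:
    def __init__(self, client: "Client") -> None:
        self._client = client


class Tools:
    def __init__(self, client: "Client") -> None:
        self._client = client

    @cached_property
    def messages(self) -> Messages:
        # Constructed on first access, then cached on this instance.
        return Messages(self._client)


class Client:
    @cached_property
    def tools(self) -> Tools:
        return Tools(self)


client = Client()
assert client.tools.messages is client.tools.messages  # one cached instance
```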
diff --git a/src/anthropic/resources/messages.py b/src/anthropic/resources/messages.py
index 3b740c0c..5c0f5fbb 100644
--- a/src/anthropic/resources/messages.py
+++ b/src/anthropic/resources/messages.py
@@ -32,6 +32,7 @@
AsyncMessageStreamManager,
)
from ..types.message import Message
+from ..types.tool_param import ToolParam
from ..types.message_param import MessageParam
from ..types.raw_message_stream_event import RawMessageStreamEvent
@@ -69,6 +70,8 @@ def create(
stream: Literal[False] | NotGiven = NOT_GIVEN,
system: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -219,6 +222,78 @@ def create(
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
+ tool_choice: How the model should use the provided tools. The model can use a specific tool,
+ any available tool, or decide by itself.
+
+ tools: Definitions of tools that the model may use.
+
+ If you include `tools` in your API request, the model may return `tool_use`
+ content blocks that represent the model's use of those tools. You can then run
+ those tools using the tool input generated by the model and then optionally
+ return results back to the model using `tool_result` content blocks.
+
+ Each tool definition includes:
+
+ - `name`: Name of the tool.
+ - `description`: Optional, but strongly-recommended description of the tool.
+ - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
+ shape that the model will produce in `tool_use` output content blocks.
+
+ For example, if you defined `tools` as:
+
+ ```json
+ [
+ {
+ "name": "get_stock_price",
+ "description": "Get the current stock price for a given ticker symbol.",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "ticker": {
+ "type": "string",
+ "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
+ }
+ },
+ "required": ["ticker"]
+ }
+ }
+ ]
+ ```
+
+ And then asked the model "What's the S&P 500 at today?", the model might produce
+ `tool_use` content blocks in the response like this:
+
+ ```json
+ [
+ {
+ "type": "tool_use",
+ "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ "name": "get_stock_price",
+ "input": { "ticker": "^GSPC" }
+ }
+ ]
+ ```
+
+ You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
+ input, and return the following back to the model in a subsequent `user`
+ message:
+
+ ```json
+ [
+ {
+ "type": "tool_result",
+ "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ "content": "259.75 USD"
+ }
+ ]
+ ```
+
+ Tools can be used for workflows that include running client-side tools and
+ functions, or more generally whenever you want the model to produce a particular
+ JSON structure of output.
+
+ See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
+
top_k: Only sample from the top K options for each subsequent token.
Used to remove "long tail" low probability responses.
@@ -269,6 +344,8 @@ def create(
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
system: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -419,6 +496,78 @@ def create(
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
+ tool_choice: How the model should use the provided tools. The model can use a specific tool,
+ any available tool, or decide by itself.
+
+ tools: Definitions of tools that the model may use.
+
+ If you include `tools` in your API request, the model may return `tool_use`
+ content blocks that represent the model's use of those tools. You can then run
+ those tools using the tool input generated by the model and then optionally
+ return results back to the model using `tool_result` content blocks.
+
+ Each tool definition includes:
+
+ - `name`: Name of the tool.
+ - `description`: Optional, but strongly-recommended description of the tool.
+ - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
+ shape that the model will produce in `tool_use` output content blocks.
+
+ For example, if you defined `tools` as:
+
+ ```json
+ [
+ {
+ "name": "get_stock_price",
+ "description": "Get the current stock price for a given ticker symbol.",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "ticker": {
+ "type": "string",
+ "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
+ }
+ },
+ "required": ["ticker"]
+ }
+ }
+ ]
+ ```
+
+ And then asked the model "What's the S&P 500 at today?", the model might produce
+ `tool_use` content blocks in the response like this:
+
+ ```json
+ [
+ {
+ "type": "tool_use",
+ "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ "name": "get_stock_price",
+ "input": { "ticker": "^GSPC" }
+ }
+ ]
+ ```
+
+ You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
+ input, and return the following back to the model in a subsequent `user`
+ message:
+
+ ```json
+ [
+ {
+ "type": "tool_result",
+ "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ "content": "259.75 USD"
+ }
+ ]
+ ```
+
+ Tools can be used for workflows that include running client-side tools and
+ functions, or more generally whenever you want the model to produce a particular
+ JSON structure of output.
+
+ See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
+
top_k: Only sample from the top K options for each subsequent token.
Used to remove "long tail" low probability responses.
@@ -469,6 +618,8 @@ def create(
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
system: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -619,6 +770,78 @@ def create(
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
+ tool_choice: How the model should use the provided tools. The model can use a specific tool,
+ any available tool, or decide by itself.
+
+ tools: Definitions of tools that the model may use.
+
+ If you include `tools` in your API request, the model may return `tool_use`
+ content blocks that represent the model's use of those tools. You can then run
+ those tools using the tool input generated by the model and then optionally
+ return results back to the model using `tool_result` content blocks.
+
+ Each tool definition includes:
+
+ - `name`: Name of the tool.
+ - `description`: Optional, but strongly-recommended description of the tool.
+ - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
+ shape that the model will produce in `tool_use` output content blocks.
+
+ For example, if you defined `tools` as:
+
+ ```json
+ [
+ {
+ "name": "get_stock_price",
+ "description": "Get the current stock price for a given ticker symbol.",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "ticker": {
+ "type": "string",
+ "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
+ }
+ },
+ "required": ["ticker"]
+ }
+ }
+ ]
+ ```
+
+ And then asked the model "What's the S&P 500 at today?", the model might produce
+ `tool_use` content blocks in the response like this:
+
+ ```json
+ [
+ {
+ "type": "tool_use",
+ "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ "name": "get_stock_price",
+ "input": { "ticker": "^GSPC" }
+ }
+ ]
+ ```
+
+ You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
+ input, and return the following back to the model in a subsequent `user`
+ message:
+
+ ```json
+ [
+ {
+ "type": "tool_result",
+ "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ "content": "259.75 USD"
+ }
+ ]
+ ```
+
+ Tools can be used for workflows that include running client-side tools and
+ functions, or more generally whenever you want the model to produce a particular
+ JSON structure of output.
+
+ See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
+
top_k: Only sample from the top K options for each subsequent token.
Used to remove "long tail" low probability responses.
@@ -669,6 +892,8 @@ def create(
stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
system: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -690,6 +915,8 @@ def create(
"stream": stream,
"system": system,
"temperature": temperature,
+ "tool_choice": tool_choice,
+ "tools": tools,
"top_k": top_k,
"top_p": top_p,
},
@@ -865,6 +1092,8 @@ async def create(
stream: Literal[False] | NotGiven = NOT_GIVEN,
system: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1015,6 +1244,78 @@ async def create(
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
+ tool_choice: How the model should use the provided tools. The model can use a specific tool,
+ any available tool, or decide by itself.
+
+ tools: Definitions of tools that the model may use.
+
+ If you include `tools` in your API request, the model may return `tool_use`
+ content blocks that represent the model's use of those tools. You can then run
+ those tools using the tool input generated by the model and then optionally
+ return results back to the model using `tool_result` content blocks.
+
+ Each tool definition includes:
+
+ - `name`: Name of the tool.
+ - `description`: Optional, but strongly-recommended description of the tool.
+ - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
+ shape that the model will produce in `tool_use` output content blocks.
+
+ For example, if you defined `tools` as:
+
+ ```json
+ [
+ {
+ "name": "get_stock_price",
+ "description": "Get the current stock price for a given ticker symbol.",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "ticker": {
+ "type": "string",
+ "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
+ }
+ },
+ "required": ["ticker"]
+ }
+ }
+ ]
+ ```
+
+ And then asked the model "What's the S&P 500 at today?", the model might produce
+ `tool_use` content blocks in the response like this:
+
+ ```json
+ [
+ {
+ "type": "tool_use",
+ "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ "name": "get_stock_price",
+ "input": { "ticker": "^GSPC" }
+ }
+ ]
+ ```
+
+ You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
+ input, and return the following back to the model in a subsequent `user`
+ message:
+
+ ```json
+ [
+ {
+ "type": "tool_result",
+ "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ "content": "259.75 USD"
+ }
+ ]
+ ```
+
+ Tools can be used for workflows that include running client-side tools and
+ functions, or more generally whenever you want the model to produce a particular
+ JSON structure of output.
+
+ See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
+
top_k: Only sample from the top K options for each subsequent token.
Used to remove "long tail" low probability responses.
@@ -1065,6 +1366,8 @@ async def create(
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
system: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1215,6 +1518,78 @@ async def create(
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
+ tool_choice: How the model should use the provided tools. The model can use a specific tool,
+ any available tool, or decide by itself.
+
+ tools: Definitions of tools that the model may use.
+
+ If you include `tools` in your API request, the model may return `tool_use`
+ content blocks that represent the model's use of those tools. You can then run
+ those tools using the tool input generated by the model and then optionally
+ return results back to the model using `tool_result` content blocks.
+
+ Each tool definition includes:
+
+ - `name`: Name of the tool.
+ - `description`: Optional, but strongly-recommended description of the tool.
+ - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
+ shape that the model will produce in `tool_use` output content blocks.
+
+ For example, if you defined `tools` as:
+
+ ```json
+ [
+ {
+ "name": "get_stock_price",
+ "description": "Get the current stock price for a given ticker symbol.",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "ticker": {
+ "type": "string",
+ "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
+ }
+ },
+ "required": ["ticker"]
+ }
+ }
+ ]
+ ```
+
+ And then asked the model "What's the S&P 500 at today?", the model might produce
+ `tool_use` content blocks in the response like this:
+
+ ```json
+ [
+ {
+ "type": "tool_use",
+ "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ "name": "get_stock_price",
+ "input": { "ticker": "^GSPC" }
+ }
+ ]
+ ```
+
+ You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
+ input, and return the following back to the model in a subsequent `user`
+ message:
+
+ ```json
+ [
+ {
+ "type": "tool_result",
+ "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ "content": "259.75 USD"
+ }
+ ]
+ ```
+
+ Tools can be used for workflows that include running client-side tools and
+ functions, or more generally whenever you want the model to produce a particular
+ JSON structure of output.
+
+ See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
+
top_k: Only sample from the top K options for each subsequent token.
Used to remove "long tail" low probability responses.
@@ -1265,6 +1640,8 @@ async def create(
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
system: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1415,6 +1792,78 @@ async def create(
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
+ tool_choice: How the model should use the provided tools. The model can use a specific tool,
+ any available tool, or decide by itself.
+
+ tools: Definitions of tools that the model may use.
+
+ If you include `tools` in your API request, the model may return `tool_use`
+ content blocks that represent the model's use of those tools. You can then run
+ those tools using the tool input generated by the model and then optionally
+ return results back to the model using `tool_result` content blocks.
+
+ Each tool definition includes:
+
+ - `name`: Name of the tool.
+ - `description`: Optional, but strongly-recommended description of the tool.
+ - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
+ shape that the model will produce in `tool_use` output content blocks.
+
+ For example, if you defined `tools` as:
+
+ ```json
+ [
+ {
+ "name": "get_stock_price",
+ "description": "Get the current stock price for a given ticker symbol.",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "ticker": {
+ "type": "string",
+ "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
+ }
+ },
+ "required": ["ticker"]
+ }
+ }
+ ]
+ ```
+
+ And then asked the model "What's the S&P 500 at today?", the model might produce
+ `tool_use` content blocks in the response like this:
+
+ ```json
+ [
+ {
+ "type": "tool_use",
+ "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ "name": "get_stock_price",
+ "input": { "ticker": "^GSPC" }
+ }
+ ]
+ ```
+
+ You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
+ input, and return the following back to the model in a subsequent `user`
+ message:
+
+ ```json
+ [
+ {
+ "type": "tool_result",
+ "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ "content": "259.75 USD"
+ }
+ ]
+ ```
+
+ Tools can be used for workflows that include running client-side tools and
+ functions, or more generally whenever you want the model to produce a particular
+ JSON structure of output.
+
+ See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
+
top_k: Only sample from the top K options for each subsequent token.
Used to remove "long tail" low probability responses.
@@ -1465,6 +1914,8 @@ async def create(
stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
system: str | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
+ tool_choice: message_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+ tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1486,6 +1937,8 @@ async def create(
"stream": stream,
"system": system,
"temperature": temperature,
+ "tool_choice": tool_choice,
+ "tools": tools,
"top_k": top_k,
"top_p": top_p,
},
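With the hunks above applied, tool use is part of the stable Messages surface: no beta namespace, no beta header, no beta URL. A sketch of the promoted call, assuming `ANTHROPIC_API_KEY` is set and using a placeholder model name:

```python
import anthropic

client = anthropic.Anthropic()

message = client.messages.create(
    max_tokens=1024,
    model="claude-3-opus-20240229",  # placeholder
    messages=[{"role": "user", "content": "What's the S&P 500 at today?"}],
    tools=[
        {
            "name": "get_stock_price",
            "description": "Get the current stock price for a given ticker symbol.",
            "input_schema": {
                "type": "object",
                "properties": {"ticker": {"type": "string"}},
                "required": ["ticker"],
            },
        }
    ],
    tool_choice={"type": "auto"},
)
print(message.content)
```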
diff --git a/src/anthropic/types/__init__.py b/src/anthropic/types/__init__.py
index 1be7acbd..f6165eca 100644
--- a/src/anthropic/types/__init__.py
+++ b/src/anthropic/types/__init__.py
@@ -7,8 +7,11 @@
from .completion import Completion as Completion
from .text_block import TextBlock as TextBlock
from .text_delta import TextDelta as TextDelta
+from .tool_param import ToolParam as ToolParam
from .content_block import ContentBlock as ContentBlock
from .message_param import MessageParam as MessageParam
+from .tool_use_block import ToolUseBlock as ToolUseBlock
+from .input_json_delta import InputJsonDelta as InputJsonDelta
from .text_block_param import TextBlockParam as TextBlockParam
from .image_block_param import ImageBlockParam as ImageBlockParam
from .message_stop_event import MessageStopEvent as MessageStopEvent
@@ -16,10 +19,12 @@
from .message_delta_usage import MessageDeltaUsage as MessageDeltaUsage
from .message_start_event import MessageStartEvent as MessageStartEvent
from .message_stream_event import MessageStreamEvent as MessageStreamEvent
+from .tool_use_block_param import ToolUseBlockParam as ToolUseBlockParam
from .message_create_params import MessageCreateParams as MessageCreateParams
from .raw_message_stop_event import RawMessageStopEvent as RawMessageStopEvent
from .raw_message_delta_event import RawMessageDeltaEvent as RawMessageDeltaEvent
from .raw_message_start_event import RawMessageStartEvent as RawMessageStartEvent
+from .tool_result_block_param import ToolResultBlockParam as ToolResultBlockParam
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
from .content_block_stop_event import ContentBlockStopEvent as ContentBlockStopEvent
from .raw_message_stream_event import RawMessageStreamEvent as RawMessageStreamEvent
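The re-exports above move the tool types to the package root, replacing imports from `anthropic.types.beta.tools`. A short sketch; the `get_weather` tool is hypothetical:

```python
from anthropic.types import (
    InputJsonDelta,
    ToolParam,
    ToolResultBlockParam,
    ToolUseBlock,
    ToolUseBlockParam,
)

# ToolParam is a TypedDict, so a plain dict literal checks against it:
weather_tool: ToolParam = {
    "name": "get_weather",  # hypothetical tool
    "description": "Get the current weather for a city.",
    "input_schema": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
}
```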
diff --git a/src/anthropic/types/beta/__init__.py b/src/anthropic/types/beta/__init__.py
deleted file mode 100644
index f8ee8b14..00000000
--- a/src/anthropic/types/beta/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
diff --git a/src/anthropic/types/beta/tools/__init__.py b/src/anthropic/types/beta/tools/__init__.py
deleted file mode 100644
index 515919e8..00000000
--- a/src/anthropic/types/beta/tools/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .tool_param import ToolParam as ToolParam
-from .tool_use_block import ToolUseBlock as ToolUseBlock
-from .input_json_delta import InputJsonDelta as InputJsonDelta
-from .tools_beta_message import ToolsBetaMessage as ToolsBetaMessage
-from .tool_use_block_param import ToolUseBlockParam as ToolUseBlockParam
-from .message_create_params import MessageCreateParams as MessageCreateParams
-from .tool_result_block_param import ToolResultBlockParam as ToolResultBlockParam
-from .tools_beta_content_block import ToolsBetaContentBlock as ToolsBetaContentBlock
-from .tools_beta_message_param import ToolsBetaMessageParam as ToolsBetaMessageParam
-from .tools_beta_message_stream_event import ToolsBetaMessageStreamEvent as ToolsBetaMessageStreamEvent
-from .raw_tools_beta_message_stream_event import RawToolsBetaMessageStreamEvent as RawToolsBetaMessageStreamEvent
-from .tools_beta_content_block_delta_event import ToolsBetaContentBlockDeltaEvent as ToolsBetaContentBlockDeltaEvent
-from .tools_beta_content_block_start_event import ToolsBetaContentBlockStartEvent as ToolsBetaContentBlockStartEvent
-from .raw_tools_beta_content_block_delta_event import (
- RawToolsBetaContentBlockDeltaEvent as RawToolsBetaContentBlockDeltaEvent,
-)
-from .raw_tools_beta_content_block_start_event import (
- RawToolsBetaContentBlockStartEvent as RawToolsBetaContentBlockStartEvent,
-)
diff --git a/src/anthropic/types/beta/tools/message_create_params.py b/src/anthropic/types/beta/tools/message_create_params.py
deleted file mode 100644
index 878f9bca..00000000
--- a/src/anthropic/types/beta/tools/message_create_params.py
+++ /dev/null
@@ -1,310 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-from .tool_param import ToolParam
-from .tools_beta_message_param import ToolsBetaMessageParam
-
-__all__ = [
- "MessageCreateParamsBase",
- "Metadata",
- "ToolChoice",
- "ToolChoiceToolChoiceAuto",
- "ToolChoiceToolChoiceAny",
- "ToolChoiceToolChoiceTool",
- "MessageCreateParamsNonStreaming",
- "MessageCreateParamsStreaming",
-]
-
-
-class MessageCreateParamsBase(TypedDict, total=False):
- max_tokens: Required[int]
- """The maximum number of tokens to generate before stopping.
-
- Note that our models may stop _before_ reaching this maximum. This parameter
- only specifies the absolute maximum number of tokens to generate.
-
- Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/en/docs/models-overview) for details.
- """
-
- messages: Required[Iterable[ToolsBetaMessageParam]]
- """Input messages.
-
- Our models are trained to operate on alternating `user` and `assistant`
- conversational turns. When creating a new `Message`, you specify the prior
- conversational turns with the `messages` parameter, and the model then generates
- the next `Message` in the conversation.
-
- Each input message must be an object with a `role` and `content`. You can
- specify a single `user`-role message, or you can include multiple `user` and
- `assistant` messages. The first message must always use the `user` role.
-
- If the final message uses the `assistant` role, the response content will
- continue immediately from the content in that message. This can be used to
- constrain part of the model's response.
-
- Example with a single `user` message:
-
- ```json
- [{ "role": "user", "content": "Hello, Claude" }]
- ```
-
- Example with multiple conversational turns:
-
- ```json
- [
- { "role": "user", "content": "Hello there." },
- { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
- { "role": "user", "content": "Can you explain LLMs in plain English?" }
- ]
- ```
-
- Example with a partially-filled response from Claude:
-
- ```json
- [
- {
- "role": "user",
- "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
- },
- { "role": "assistant", "content": "The best answer is (" }
- ]
- ```
-
- Each input message `content` may be either a single `string` or an array of
- content blocks, where each block has a specific `type`. Using a `string` for
- `content` is shorthand for an array of one content block of type `"text"`. The
- following input messages are equivalent:
-
- ```json
- { "role": "user", "content": "Hello, Claude" }
- ```
-
- ```json
- { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
- ```
-
- Starting with Claude 3 models, you can also send image content blocks:
-
- ```json
- {
- "role": "user",
- "content": [
- {
- "type": "image",
- "source": {
- "type": "base64",
- "media_type": "image/jpeg",
- "data": "/9j/4AAQSkZJRg..."
- }
- },
- { "type": "text", "text": "What is in this image?" }
- ]
- }
- ```
-
- We currently support the `base64` source type for images, and the `image/jpeg`,
- `image/png`, `image/gif`, and `image/webp` media types.
-
- See [examples](https://docs.anthropic.com/en/api/messages-examples) for more
- input examples.
-
- Note that if you want to include a
- [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
- the top-level `system` parameter — there is no `"system"` role for input
- messages in the Messages API.
- """
-
- model: Required[str]
- """The model that will complete your prompt.
-
- See [models](https://docs.anthropic.com/en/docs/models-overview) for additional
- details and options.
- """
-
- metadata: Metadata
- """An object describing metadata about the request."""
-
- stop_sequences: List[str]
- """Custom text sequences that will cause the model to stop generating.
-
- Our models will normally stop when they have naturally completed their turn,
- which will result in a response `stop_reason` of `"end_turn"`.
-
- If you want the model to stop generating when it encounters custom strings of
- text, you can use the `stop_sequences` parameter. If the model encounters one of
- the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
- and the response `stop_sequence` value will contain the matched stop sequence.
- """
-
- system: str
- """System prompt.
-
- A system prompt is a way of providing context and instructions to Claude, such
- as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
- """
-
- temperature: float
- """Amount of randomness injected into the response.
-
- Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
- for analytical / multiple choice, and closer to `1.0` for creative and
- generative tasks.
-
- Note that even with `temperature` of `0.0`, the results will not be fully
- deterministic.
- """
-
- tool_choice: ToolChoice
- """How the model should use the provided tools.
-
- The model can use a specific tool, any available tool, or decide by itself.
- """
-
- tools: Iterable[ToolParam]
- """[beta] Definitions of tools that the model may use.
-
- If you include `tools` in your API request, the model may return `tool_use`
- content blocks that represent the model's use of those tools. You can then run
- those tools using the tool input generated by the model and then optionally
- return results back to the model using `tool_result` content blocks.
-
- Each tool definition includes:
-
- - `name`: Name of the tool.
- - `description`: Optional, but strongly-recommended description of the tool.
- - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
- shape that the model will produce in `tool_use` output content blocks.
-
- For example, if you defined `tools` as:
-
- ```json
- [
- {
- "name": "get_stock_price",
- "description": "Get the current stock price for a given ticker symbol.",
- "input_schema": {
- "type": "object",
- "properties": {
- "ticker": {
- "type": "string",
- "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
- }
- },
- "required": ["ticker"]
- }
- }
- ]
- ```
-
- And then asked the model "What's the S&P 500 at today?", the model might produce
- `tool_use` content blocks in the response like this:
-
- ```json
- [
- {
- "type": "tool_use",
- "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
- "name": "get_stock_price",
- "input": { "ticker": "^GSPC" }
- }
- ]
- ```
-
- You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
- input, and return the following back to the model in a subsequent `user`
- message:
-
- ```json
- [
- {
- "type": "tool_result",
- "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
- "content": "259.75 USD"
- }
- ]
- ```
-
- Tools can be used for workflows that include running client-side tools and
- functions, or more generally whenever you want the model to produce a particular
- JSON structure of output.
-
- See our [beta guide](https://docs.anthropic.com/en/docs/tool-use) for more
- details.
- """
-
- top_k: int
- """Only sample from the top K options for each subsequent token.
-
- Used to remove "long tail" low probability responses.
- [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
-
- Recommended for advanced use cases only. You usually only need to use
- `temperature`.
- """
-
- top_p: float
- """Use nucleus sampling.
-
- In nucleus sampling, we compute the cumulative distribution over all the options
- for each subsequent token in decreasing probability order and cut it off once it
- reaches a particular probability specified by `top_p`. You should either alter
- `temperature` or `top_p`, but not both.
-
- Recommended for advanced use cases only. You usually only need to use
- `temperature`.
- """
-
-
-class Metadata(TypedDict, total=False):
- user_id: Optional[str]
- """An external identifier for the user who is associated with the request.
-
- This should be a uuid, hash value, or other opaque identifier. Anthropic may use
- this id to help detect abuse. Do not include any identifying information such as
- name, email address, or phone number.
- """
-
-
-class ToolChoiceToolChoiceAuto(TypedDict, total=False):
- type: Required[Literal["auto"]]
-
-
-class ToolChoiceToolChoiceAny(TypedDict, total=False):
- type: Required[Literal["any"]]
-
-
-class ToolChoiceToolChoiceTool(TypedDict, total=False):
- name: Required[str]
- """The name of the tool to use."""
-
- type: Required[Literal["tool"]]
-
-
-ToolChoice = Union[ToolChoiceToolChoiceAuto, ToolChoiceToolChoiceAny, ToolChoiceToolChoiceTool]
-
-
-class MessageCreateParamsNonStreaming(MessageCreateParamsBase):
- stream: Literal[False]
- """Whether to incrementally stream the response using server-sent events.
-
- See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- details.
- """
-
-
-class MessageCreateParamsStreaming(MessageCreateParamsBase):
- stream: Required[Literal[True]]
- """Whether to incrementally stream the response using server-sent events.
-
- See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- details.
- """
-
-
-MessageCreateParams = Union[MessageCreateParamsNonStreaming, MessageCreateParamsStreaming]
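The `ToolChoice` union deleted above admits exactly three shapes, mirrored by the top-level `message_create_params.ToolChoice` the promoted signatures reference. As the dict literals the TypedDicts describe:

```python
# The three tool_choice shapes defined by the TypedDicts above.
auto = {"type": "auto"}                                 # the model decides whether to use tools
any_tool = {"type": "any"}                              # the model must use one of the provided tools
specific = {"type": "tool", "name": "get_stock_price"}  # the model must use this named tool
```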
diff --git a/src/anthropic/types/beta/tools/raw_tools_beta_content_block_delta_event.py b/src/anthropic/types/beta/tools/raw_tools_beta_content_block_delta_event.py
deleted file mode 100644
index 4c8a8dc7..00000000
--- a/src/anthropic/types/beta/tools/raw_tools_beta_content_block_delta_event.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union
-from typing_extensions import Literal, Annotated
-
-from ...._utils import PropertyInfo
-from ...._models import BaseModel
-from ...text_delta import TextDelta
-from .input_json_delta import InputJsonDelta
-
-__all__ = ["RawToolsBetaContentBlockDeltaEvent", "Delta"]
-
-Delta = Annotated[Union[TextDelta, InputJsonDelta], PropertyInfo(discriminator="type")]
-
-
-class RawToolsBetaContentBlockDeltaEvent(BaseModel):
- delta: Delta
-
- index: int
-
- type: Literal["content_block_delta"]
diff --git a/src/anthropic/types/beta/tools/raw_tools_beta_content_block_start_event.py b/src/anthropic/types/beta/tools/raw_tools_beta_content_block_start_event.py
deleted file mode 100644
index e7b674de..00000000
--- a/src/anthropic/types/beta/tools/raw_tools_beta_content_block_start_event.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union
-from typing_extensions import Literal, Annotated
-
-from ...._utils import PropertyInfo
-from ...._models import BaseModel
-from ...text_block import TextBlock
-from .tool_use_block import ToolUseBlock
-
-__all__ = ["RawToolsBetaContentBlockStartEvent", "ContentBlock"]
-
-ContentBlock = Annotated[Union[TextBlock, ToolUseBlock], PropertyInfo(discriminator="type")]
-
-
-class RawToolsBetaContentBlockStartEvent(BaseModel):
- content_block: ContentBlock
-
- index: int
-
- type: Literal["content_block_start"]
diff --git a/src/anthropic/types/beta/tools/raw_tools_beta_message_stream_event.py b/src/anthropic/types/beta/tools/raw_tools_beta_message_stream_event.py
deleted file mode 100644
index a9f235fb..00000000
--- a/src/anthropic/types/beta/tools/raw_tools_beta_message_stream_event.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union
-from typing_extensions import Annotated
-
-from ...._utils import PropertyInfo
-from ...raw_message_stop_event import RawMessageStopEvent
-from ...raw_message_delta_event import RawMessageDeltaEvent
-from ...raw_message_start_event import RawMessageStartEvent
-from ...raw_content_block_stop_event import RawContentBlockStopEvent
-from .raw_tools_beta_content_block_delta_event import RawToolsBetaContentBlockDeltaEvent
-from .raw_tools_beta_content_block_start_event import RawToolsBetaContentBlockStartEvent
-
-__all__ = ["RawToolsBetaMessageStreamEvent"]
-
-RawToolsBetaMessageStreamEvent = Annotated[
- Union[
- RawMessageStartEvent,
- RawMessageDeltaEvent,
- RawMessageStopEvent,
- RawToolsBetaContentBlockStartEvent,
- RawToolsBetaContentBlockDeltaEvent,
- RawContentBlockStopEvent,
- ],
- PropertyInfo(discriminator="type"),
-]
diff --git a/src/anthropic/types/beta/tools/tools_beta_content_block.py b/src/anthropic/types/beta/tools/tools_beta_content_block.py
deleted file mode 100644
index 503ed266..00000000
--- a/src/anthropic/types/beta/tools/tools_beta_content_block.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Union
-from typing_extensions import Annotated
-
-from ...._utils import PropertyInfo
-from ...text_block import TextBlock
-from .tool_use_block import ToolUseBlock
-
-__all__ = ["ToolsBetaContentBlock"]
-
-ToolsBetaContentBlock = Annotated[Union[TextBlock, ToolUseBlock], PropertyInfo(discriminator="type")]
diff --git a/src/anthropic/types/beta/tools/tools_beta_content_block_delta_event.py b/src/anthropic/types/beta/tools/tools_beta_content_block_delta_event.py
deleted file mode 100644
index 94247480..00000000
--- a/src/anthropic/types/beta/tools/tools_beta_content_block_delta_event.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-
-
-from .raw_tools_beta_content_block_delta_event import RawToolsBetaContentBlockDeltaEvent
-
-__all__ = ["ToolsBetaContentBlockDeltaEvent"]
-
-ToolsBetaContentBlockDeltaEvent = RawToolsBetaContentBlockDeltaEvent
-"""The RawToolsBetaContentBlockDeltaEvent type should be used instead"""
diff --git a/src/anthropic/types/beta/tools/tools_beta_content_block_start_event.py b/src/anthropic/types/beta/tools/tools_beta_content_block_start_event.py
deleted file mode 100644
index dd38ae35..00000000
--- a/src/anthropic/types/beta/tools/tools_beta_content_block_start_event.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-
-
-from .raw_tools_beta_content_block_start_event import RawToolsBetaContentBlockStartEvent
-
-__all__ = ["ToolsBetaContentBlockStartEvent"]
-
-ToolsBetaContentBlockStartEvent = RawToolsBetaContentBlockStartEvent
-"""The RawToolsBetaContentBlockStartEvent type should be used instead"""
diff --git a/src/anthropic/types/beta/tools/tools_beta_message.py b/src/anthropic/types/beta/tools/tools_beta_message.py
deleted file mode 100644
index ce9ec187..00000000
--- a/src/anthropic/types/beta/tools/tools_beta_message.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ...usage import Usage
-from ...._models import BaseModel
-from .tools_beta_content_block import ToolsBetaContentBlock
-
-__all__ = ["ToolsBetaMessage"]
-
-
-class ToolsBetaMessage(BaseModel):
- id: str
- """Unique object identifier.
-
- The format and length of IDs may change over time.
- """
-
- content: List[ToolsBetaContentBlock]
- """Content generated by the model.
-
- This is an array of content blocks, each of which has a `type` that determines
- its shape. Currently, the only `type` in responses is `"text"`.
-
- Example:
-
- ```json
- [{ "type": "text", "text": "Hi, I'm Claude." }]
- ```
-
- If the request input `messages` ended with an `assistant` turn, then the
- response `content` will continue directly from that last turn. You can use this
- to constrain the model's output.
-
- For example, if the input `messages` were:
-
- ```json
- [
- {
- "role": "user",
- "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
- },
- { "role": "assistant", "content": "The best answer is (" }
- ]
- ```
-
- Then the response `content` might be:
-
- ```json
- [{ "type": "text", "text": "B)" }]
- ```
- """
-
- model: str
- """The model that handled the request."""
-
- role: Literal["assistant"]
- """Conversational role of the generated message.
-
- This will always be `"assistant"`.
- """
-
- stop_reason: Optional[Literal["end_turn", "max_tokens", "stop_sequence", "tool_use"]] = None
- """The reason that we stopped.
-
-    This may be one of the following values:
-
- - `"end_turn"`: the model reached a natural stopping point
- - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum
- - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated
- - `"tool_use"`: (tools beta only) the model invoked one or more tools
-
- In non-streaming mode this value is always non-null. In streaming mode, it is
- null in the `message_start` event and non-null otherwise.
- """
-
- stop_sequence: Optional[str] = None
- """Which custom stop sequence was generated, if any.
-
- This value will be a non-null string if one of your custom stop sequences was
- generated.
- """
-
- type: Literal["message"]
- """Object type.
-
- For Messages, this is always `"message"`.
- """
-
- usage: Usage
- """Billing and rate-limit usage.
-
- Anthropic's API bills and rate-limits by token counts, as tokens represent the
- underlying cost to our systems.
-
- Under the hood, the API transforms requests into a format suitable for the
- model. The model's output then goes through a parsing stage before becoming an
- API response. As a result, the token counts in `usage` will not match one-to-one
- with the exact visible content of an API request or response.
-
- For example, `output_tokens` will be non-zero, even for an empty string response
- from Claude.
- """
diff --git a/src/anthropic/types/beta/tools/tools_beta_message_param.py b/src/anthropic/types/beta/tools/tools_beta_message_param.py
deleted file mode 100644
index 616dd78e..00000000
--- a/src/anthropic/types/beta/tools/tools_beta_message_param.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Union, Iterable
-from typing_extensions import Literal, Required, TypedDict
-
-from ...text_block_param import TextBlockParam
-from ...image_block_param import ImageBlockParam
-from .tool_use_block_param import ToolUseBlockParam
-from .tool_result_block_param import ToolResultBlockParam
-from .tools_beta_content_block import ToolsBetaContentBlock
-
-__all__ = ["ToolsBetaMessageParam"]
-
-
-class ToolsBetaMessageParam(TypedDict, total=False):
- content: Required[
- Union[
- str,
- Iterable[
- Union[TextBlockParam, ImageBlockParam, ToolUseBlockParam, ToolResultBlockParam, ToolsBetaContentBlock]
- ],
- ]
- ]
-
- role: Required[Literal["user", "assistant"]]
diff --git a/src/anthropic/types/beta/tools/tools_beta_message_stream_event.py b/src/anthropic/types/beta/tools/tools_beta_message_stream_event.py
deleted file mode 100644
index 80ddfe0b..00000000
--- a/src/anthropic/types/beta/tools/tools_beta_message_stream_event.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-
-
-from .raw_tools_beta_message_stream_event import RawToolsBetaMessageStreamEvent
-
-__all__ = ["ToolsBetaMessageStreamEvent"]
-
-ToolsBetaMessageStreamEvent = RawToolsBetaMessageStreamEvent
-"""The RawToolsBetaMessageStreamEvent type should be used instead"""
diff --git a/src/anthropic/types/content_block.py b/src/anthropic/types/content_block.py
index d74aa426..eca827bc 100644
--- a/src/anthropic/types/content_block.py
+++ b/src/anthropic/types/content_block.py
@@ -1,8 +1,12 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+from typing import Union
+from typing_extensions import Annotated
+from .._utils import PropertyInfo
from .text_block import TextBlock
+from .tool_use_block import ToolUseBlock
__all__ = ["ContentBlock"]
-ContentBlock = TextBlock
+ContentBlock = Annotated[Union[TextBlock, ToolUseBlock], PropertyInfo(discriminator="type")]
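
Since `ContentBlock` is now a discriminated union, code that consumed `message.content` as a list of text blocks should branch on `block.type`. A small sketch, assuming `message` is a `Message` returned by `client.messages.create(...)`:

```python
from anthropic.types import Message

def render(message: Message) -> None:
    for block in message.content:
        if block.type == "text":
            print(block.text)
        elif block.type == "tool_use":
            # Pydantic narrows the union on the `type` discriminator, so
            # `id`, `name`, and `input` are available here.
            print(f"tool call {block.name}({block.input!r}) id={block.id}")
```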
diff --git a/src/anthropic/types/content_block_start_event.py b/src/anthropic/types/content_block_start_event.py
index bd03cc34..cedfe802 100644
--- a/src/anthropic/types/content_block_start_event.py
+++ b/src/anthropic/types/content_block_start_event.py
@@ -2,7 +2,6 @@
-from .content_block import ContentBlock as ContentBlock
from .raw_content_block_start_event import RawContentBlockStartEvent
__all__ = ["ContentBlockStartEvent"]
diff --git a/src/anthropic/types/beta/tools/input_json_delta.py b/src/anthropic/types/input_json_delta.py
similarity index 88%
rename from src/anthropic/types/beta/tools/input_json_delta.py
rename to src/anthropic/types/input_json_delta.py
index 004a8f67..6391d4bf 100644
--- a/src/anthropic/types/beta/tools/input_json_delta.py
+++ b/src/anthropic/types/input_json_delta.py
@@ -2,7 +2,7 @@
from typing_extensions import Literal
-from ...._models import BaseModel
+from .._models import BaseModel
__all__ = ["InputJsonDelta"]
diff --git a/src/anthropic/types/message.py b/src/anthropic/types/message.py
index 39b357b5..9ef967ea 100644
--- a/src/anthropic/types/message.py
+++ b/src/anthropic/types/message.py
@@ -5,8 +5,7 @@
from .usage import Usage
from .._models import BaseModel
-from .text_block import TextBlock
-from .content_block import ContentBlock as ContentBlock
+from .content_block import ContentBlock, ContentBlock as ContentBlock
__all__ = ["Message"]
@@ -18,11 +17,11 @@ class Message(BaseModel):
The format and length of IDs may change over time.
"""
- content: List[TextBlock]
+ content: List[ContentBlock]
"""Content generated by the model.
This is an array of content blocks, each of which has a `type` that determines
- its shape. Currently, the only `type` in responses is `"text"`.
+ its shape.
Example:
@@ -62,7 +61,7 @@ class Message(BaseModel):
This will always be `"assistant"`.
"""
- stop_reason: Optional[Literal["end_turn", "max_tokens", "stop_sequence"]] = None
+ stop_reason: Optional[Literal["end_turn", "max_tokens", "stop_sequence", "tool_use"]] = None
"""The reason that we stopped.
This may be one of the following values:
@@ -70,6 +69,7 @@ class Message(BaseModel):
- `"end_turn"`: the model reached a natural stopping point
- `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum
- `"stop_sequence"`: one of your provided custom `stop_sequences` was generated
+ - `"tool_use"`: the model invoked one or more tools
In non-streaming mode this value is always non-null. In streaming mode, it is
null in the `message_start` event and non-null otherwise.
diff --git a/src/anthropic/types/message_create_params.py b/src/anthropic/types/message_create_params.py
index b20699bc..cb0a0cfb 100644
--- a/src/anthropic/types/message_create_params.py
+++ b/src/anthropic/types/message_create_params.py
@@ -5,9 +5,19 @@
from typing import List, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypedDict
+from .tool_param import ToolParam
from .message_param import MessageParam
-__all__ = ["MessageCreateParamsBase", "Metadata", "MessageCreateParamsNonStreaming", "MessageCreateParamsStreaming"]
+__all__ = [
+ "MessageCreateParamsBase",
+ "Metadata",
+ "ToolChoice",
+ "ToolChoiceToolChoiceAuto",
+ "ToolChoiceToolChoiceAny",
+ "ToolChoiceToolChoiceTool",
+ "MessageCreateParamsNonStreaming",
+ "MessageCreateParamsStreaming",
+]
class MessageCreateParamsBase(TypedDict, total=False):
@@ -162,6 +172,83 @@ class MessageCreateParamsBase(TypedDict, total=False):
deterministic.
"""
+ tool_choice: ToolChoice
+ """How the model should use the provided tools.
+
+ The model can use a specific tool, any available tool, or decide by itself.
+ """
+
+ tools: Iterable[ToolParam]
+ """Definitions of tools that the model may use.
+
+ If you include `tools` in your API request, the model may return `tool_use`
+ content blocks that represent the model's use of those tools. You can then run
+ those tools using the tool input generated by the model and then optionally
+ return results back to the model using `tool_result` content blocks.
+
+ Each tool definition includes:
+
+ - `name`: Name of the tool.
+ - `description`: Optional, but strongly recommended description of the tool.
+ - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
+ shape that the model will produce in `tool_use` output content blocks.
+
+ For example, if you defined `tools` as:
+
+ ```json
+ [
+ {
+ "name": "get_stock_price",
+ "description": "Get the current stock price for a given ticker symbol.",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "ticker": {
+ "type": "string",
+ "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
+ }
+ },
+ "required": ["ticker"]
+ }
+ }
+ ]
+ ```
+
+ And then asked the model "What's the S&P 500 at today?", the model might produce
+ `tool_use` content blocks in the response like this:
+
+ ```json
+ [
+ {
+ "type": "tool_use",
+ "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ "name": "get_stock_price",
+ "input": { "ticker": "^GSPC" }
+ }
+ ]
+ ```
+
+ You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
+ input, and return the following back to the model in a subsequent `user`
+ message:
+
+ ```json
+ [
+ {
+ "type": "tool_result",
+ "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ "content": "259.75 USD"
+ }
+ ]
+ ```
+
+ Tools can be used for workflows that include running client-side tools and
+ functions, or more generally whenever you want the model to produce a particular
+ JSON structure of output.
+
+ See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
+ """
+
top_k: int
"""Only sample from the top K options for each subsequent token.
@@ -195,6 +282,24 @@ class Metadata(TypedDict, total=False):
"""
+class ToolChoiceToolChoiceAuto(TypedDict, total=False):
+ type: Required[Literal["auto"]]
+
+
+class ToolChoiceToolChoiceAny(TypedDict, total=False):
+ type: Required[Literal["any"]]
+
+
+class ToolChoiceToolChoiceTool(TypedDict, total=False):
+ name: Required[str]
+ """The name of the tool to use."""
+
+ type: Required[Literal["tool"]]
+
+
+ToolChoice = Union[ToolChoiceToolChoiceAuto, ToolChoiceToolChoiceAny, ToolChoiceToolChoiceTool]
+
+
class MessageCreateParamsNonStreaming(MessageCreateParamsBase):
stream: Literal[False]
"""Whether to incrementally stream the response using server-sent events.
diff --git a/src/anthropic/types/message_param.py b/src/anthropic/types/message_param.py
index 8242e8f1..89921c61 100644
--- a/src/anthropic/types/message_param.py
+++ b/src/anthropic/types/message_param.py
@@ -8,11 +8,17 @@
from .content_block import ContentBlock
from .text_block_param import TextBlockParam
from .image_block_param import ImageBlockParam
+from .tool_use_block_param import ToolUseBlockParam
+from .tool_result_block_param import ToolResultBlockParam
__all__ = ["MessageParam"]
class MessageParam(TypedDict, total=False):
- content: Required[Union[str, Iterable[Union[TextBlockParam, ImageBlockParam, ContentBlock]]]]
+ content: Required[
+ Union[
+ str, Iterable[Union[TextBlockParam, ImageBlockParam, ToolUseBlockParam, ToolResultBlockParam, ContentBlock]]
+ ]
+ ]
role: Required[Literal["user", "assistant"]]
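
With the widened `content` union, a `user` turn can mix `tool_result` blocks with plain text. A minimal example of the new shape (the `tool_use_id` is illustrative):

```python
from anthropic.types import MessageParam

turn: MessageParam = {
    "role": "user",
    "content": [
        {
            "type": "tool_result",
            "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
            "content": "259.75 USD",
        },
        {"type": "text", "text": "Given that, is the index up on the week?"},
    ],
}
```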
diff --git a/src/anthropic/types/raw_content_block_delta_event.py b/src/anthropic/types/raw_content_block_delta_event.py
index d51ddacc..e1370fdb 100644
--- a/src/anthropic/types/raw_content_block_delta_event.py
+++ b/src/anthropic/types/raw_content_block_delta_event.py
@@ -1,15 +1,20 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing_extensions import Literal
+from typing import Union
+from typing_extensions import Literal, Annotated
+from .._utils import PropertyInfo
from .._models import BaseModel
from .text_delta import TextDelta
+from .input_json_delta import InputJsonDelta
-__all__ = ["RawContentBlockDeltaEvent"]
+__all__ = ["RawContentBlockDeltaEvent", "Delta"]
+
+Delta = Annotated[Union[TextDelta, InputJsonDelta], PropertyInfo(discriminator="type")]
class RawContentBlockDeltaEvent(BaseModel):
- delta: TextDelta
+ delta: Delta
index: int
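
Because `delta` is now a union, streaming consumers may receive `input_json_delta` fragments that concatenate into a tool's `input` JSON. A sketch of buffering them per block index, assuming the `partial_json` field on `InputJsonDelta` and the tool setup from the earlier sketch:

```python
import json

from anthropic import Anthropic

client = Anthropic()
stream = client.messages.create(
    max_tokens=1024,
    model="claude-3-opus-20240229",
    tools=[stock_tool],  # assumed: tool list as in the earlier sketch
    messages=[{"role": "user", "content": "What's the S&P 500 at today?"}],
    stream=True,
)

partial_json: dict[int, str] = {}
for event in stream:
    if event.type == "content_block_delta":
        if event.delta.type == "text_delta":
            print(event.delta.text, end="")
        elif event.delta.type == "input_json_delta":
            # Fragments are not valid JSON on their own; buffer them per
            # block index and parse the concatenation once complete.
            partial_json[event.index] = partial_json.get(event.index, "") + event.delta.partial_json

tool_inputs = {i: json.loads(buf) for i, buf in partial_json.items() if buf}
```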
diff --git a/src/anthropic/types/raw_content_block_start_event.py b/src/anthropic/types/raw_content_block_start_event.py
index afff0f1e..b5d19322 100644
--- a/src/anthropic/types/raw_content_block_start_event.py
+++ b/src/anthropic/types/raw_content_block_start_event.py
@@ -1,16 +1,20 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing_extensions import Literal
+from typing import Union
+from typing_extensions import Literal, Annotated
+from .._utils import PropertyInfo
from .._models import BaseModel
from .text_block import TextBlock
-from .content_block import ContentBlock as ContentBlock
+from .tool_use_block import ToolUseBlock
-__all__ = ["RawContentBlockStartEvent"]
+__all__ = ["RawContentBlockStartEvent", "ContentBlock"]
+
+ContentBlock = Annotated[Union[TextBlock, ToolUseBlock], PropertyInfo(discriminator="type")]
class RawContentBlockStartEvent(BaseModel):
- content_block: TextBlock
+ content_block: ContentBlock
index: int
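
The widened `content_block_start` event is what tells a streaming consumer which tool the subsequent `input_json_delta` fragments belong to. A small companion to the previous sketch, assuming the same raw event stream:

```python
# Map block index -> tool name as tool_use blocks open, so that buffered
# input fragments can be attributed to the right tool.
open_tools: dict[int, str] = {}

for event in stream:  # assumed: same raw event stream as in the previous sketch
    if event.type == "content_block_start" and event.content_block.type == "tool_use":
        open_tools[event.index] = event.content_block.name
```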
diff --git a/src/anthropic/types/raw_message_delta_event.py b/src/anthropic/types/raw_message_delta_event.py
index 29198ac3..cc59268c 100644
--- a/src/anthropic/types/raw_message_delta_event.py
+++ b/src/anthropic/types/raw_message_delta_event.py
@@ -10,7 +10,7 @@
class Delta(BaseModel):
- stop_reason: Optional[Literal["end_turn", "max_tokens", "stop_sequence"]] = None
+ stop_reason: Optional[Literal["end_turn", "max_tokens", "stop_sequence", "tool_use"]] = None
stop_sequence: Optional[str] = None
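
The new `"tool_use"` stop reason is what an agent loop keys off to decide whether another round of tool execution is needed. A hedged sketch; `run_tool` is a hypothetical dispatcher and `stock_tool` is assumed from the earlier sketch:

```python
from anthropic import Anthropic

client = Anthropic()
MODEL = "claude-3-opus-20240229"  # assumed model
TOOLS = [stock_tool]              # assumed: tool list as in the earlier sketch

def run_tool(name: str, tool_input: object) -> str:
    """Hypothetical dispatcher; replace with real tool implementations."""
    return "259.75 USD"

history = [{"role": "user", "content": "What's the S&P 500 at today?"}]
message = client.messages.create(max_tokens=1024, model=MODEL, tools=TOOLS, messages=history)
while message.stop_reason == "tool_use":
    results = [
        {"type": "tool_result", "tool_use_id": b.id, "content": run_tool(b.name, b.input)}
        for b in message.content
        if b.type == "tool_use"
    ]
    history += [
        {"role": "assistant", "content": message.content},
        {"role": "user", "content": results},
    ]
    message = client.messages.create(max_tokens=1024, model=MODEL, tools=TOOLS, messages=history)
```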
diff --git a/src/anthropic/types/beta/tools/tool_param.py b/src/anthropic/types/tool_param.py
similarity index 100%
rename from src/anthropic/types/beta/tools/tool_param.py
rename to src/anthropic/types/tool_param.py
diff --git a/src/anthropic/types/beta/tools/tool_result_block_param.py b/src/anthropic/types/tool_result_block_param.py
similarity index 83%
rename from src/anthropic/types/beta/tools/tool_result_block_param.py
rename to src/anthropic/types/tool_result_block_param.py
index eec2270a..cbacd83b 100644
--- a/src/anthropic/types/beta/tools/tool_result_block_param.py
+++ b/src/anthropic/types/tool_result_block_param.py
@@ -5,8 +5,8 @@
from typing import Union, Iterable
from typing_extensions import Literal, Required, TypedDict
-from ...text_block_param import TextBlockParam
-from ...image_block_param import ImageBlockParam
+from .text_block_param import TextBlockParam
+from .image_block_param import ImageBlockParam
__all__ = ["ToolResultBlockParam", "Content"]
diff --git a/src/anthropic/types/beta/tools/tool_use_block.py b/src/anthropic/types/tool_use_block.py
similarity index 88%
rename from src/anthropic/types/beta/tools/tool_use_block.py
rename to src/anthropic/types/tool_use_block.py
index 7da68f56..05514471 100644
--- a/src/anthropic/types/beta/tools/tool_use_block.py
+++ b/src/anthropic/types/tool_use_block.py
@@ -2,7 +2,7 @@
from typing_extensions import Literal
-from ...._models import BaseModel
+from .._models import BaseModel
__all__ = ["ToolUseBlock"]
diff --git a/src/anthropic/types/beta/tools/tool_use_block_param.py b/src/anthropic/types/tool_use_block_param.py
similarity index 100%
rename from src/anthropic/types/beta/tools/tool_use_block_param.py
rename to src/anthropic/types/tool_use_block_param.py
diff --git a/tests/api_resources/beta/__init__.py b/tests/api_resources/beta/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/beta/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/beta/tools/__init__.py b/tests/api_resources/beta/tools/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/beta/tools/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/beta/tools/test_messages.py b/tests/api_resources/beta/tools/test_messages.py
deleted file mode 100644
index 9d63e054..00000000
--- a/tests/api_resources/beta/tools/test_messages.py
+++ /dev/null
@@ -1,534 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from anthropic import Anthropic, AsyncAnthropic
-from tests.utils import assert_matches_type
-from anthropic.types.beta.tools import ToolsBetaMessage
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestMessages:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- def test_method_create_overload_1(self, client: Anthropic) -> None:
- message = client.beta.tools.messages.create(
- max_tokens=1024,
- messages=[
- {
- "role": "user",
- "content": "Hello, world",
- }
- ],
- model="claude-3-opus-20240229",
- )
- assert_matches_type(ToolsBetaMessage, message, path=["response"])
-
- @parametrize
- def test_method_create_with_all_params_overload_1(self, client: Anthropic) -> None:
- message = client.beta.tools.messages.create(
- max_tokens=1024,
- messages=[
- {
- "role": "user",
- "content": "Hello, world",
- }
- ],
- model="claude-3-opus-20240229",
- metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"},
- stop_sequences=["string", "string", "string"],
- stream=False,
- system="Today's date is 2024-01-01.",
- temperature=1,
- tool_choice={"type": "auto"},
- tools=[
- {
- "description": "Get the current weather in a given location",
- "name": "x",
- "input_schema": {
- "type": "object",
- "properties": {
- "location": {
- "description": "The city and state, e.g. San Francisco, CA",
- "type": "string",
- },
- "unit": {
- "description": "Unit for the output - one of (celsius, fahrenheit)",
- "type": "string",
- },
- },
- },
- },
- {
- "description": "Get the current weather in a given location",
- "name": "x",
- "input_schema": {
- "type": "object",
- "properties": {
- "location": {
- "description": "The city and state, e.g. San Francisco, CA",
- "type": "string",
- },
- "unit": {
- "description": "Unit for the output - one of (celsius, fahrenheit)",
- "type": "string",
- },
- },
- },
- },
- {
- "description": "Get the current weather in a given location",
- "name": "x",
- "input_schema": {
- "type": "object",
- "properties": {
- "location": {
- "description": "The city and state, e.g. San Francisco, CA",
- "type": "string",
- },
- "unit": {
- "description": "Unit for the output - one of (celsius, fahrenheit)",
- "type": "string",
- },
- },
- },
- },
- ],
- top_k=5,
- top_p=0.7,
- )
- assert_matches_type(ToolsBetaMessage, message, path=["response"])
-
- @parametrize
- def test_raw_response_create_overload_1(self, client: Anthropic) -> None:
- response = client.beta.tools.messages.with_raw_response.create(
- max_tokens=1024,
- messages=[
- {
- "role": "user",
- "content": "Hello, world",
- }
- ],
- model="claude-3-opus-20240229",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(ToolsBetaMessage, message, path=["response"])
-
- @parametrize
- def test_streaming_response_create_overload_1(self, client: Anthropic) -> None:
- with client.beta.tools.messages.with_streaming_response.create(
- max_tokens=1024,
- messages=[
- {
- "role": "user",
- "content": "Hello, world",
- }
- ],
- model="claude-3-opus-20240229",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = response.parse()
- assert_matches_type(ToolsBetaMessage, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- def test_method_create_overload_2(self, client: Anthropic) -> None:
- message_stream = client.beta.tools.messages.create(
- max_tokens=1024,
- messages=[
- {
- "role": "user",
- "content": "Hello, world",
- }
- ],
- model="claude-3-opus-20240229",
- stream=True,
- )
- message_stream.response.close()
-
- @parametrize
- def test_method_create_with_all_params_overload_2(self, client: Anthropic) -> None:
- message_stream = client.beta.tools.messages.create(
- max_tokens=1024,
- messages=[
- {
- "role": "user",
- "content": "Hello, world",
- }
- ],
- model="claude-3-opus-20240229",
- stream=True,
- metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"},
- stop_sequences=["string", "string", "string"],
- system="Today's date is 2024-01-01.",
- temperature=1,
- tool_choice={"type": "auto"},
- tools=[
- {
- "description": "Get the current weather in a given location",
- "name": "x",
- "input_schema": {
- "type": "object",
- "properties": {
- "location": {
- "description": "The city and state, e.g. San Francisco, CA",
- "type": "string",
- },
- "unit": {
- "description": "Unit for the output - one of (celsius, fahrenheit)",
- "type": "string",
- },
- },
- },
- },
- {
- "description": "Get the current weather in a given location",
- "name": "x",
- "input_schema": {
- "type": "object",
- "properties": {
- "location": {
- "description": "The city and state, e.g. San Francisco, CA",
- "type": "string",
- },
- "unit": {
- "description": "Unit for the output - one of (celsius, fahrenheit)",
- "type": "string",
- },
- },
- },
- },
- {
- "description": "Get the current weather in a given location",
- "name": "x",
- "input_schema": {
- "type": "object",
- "properties": {
- "location": {
- "description": "The city and state, e.g. San Francisco, CA",
- "type": "string",
- },
- "unit": {
- "description": "Unit for the output - one of (celsius, fahrenheit)",
- "type": "string",
- },
- },
- },
- },
- ],
- top_k=5,
- top_p=0.7,
- )
- message_stream.response.close()
-
- @parametrize
- def test_raw_response_create_overload_2(self, client: Anthropic) -> None:
- response = client.beta.tools.messages.with_raw_response.create(
- max_tokens=1024,
- messages=[
- {
- "role": "user",
- "content": "Hello, world",
- }
- ],
- model="claude-3-opus-20240229",
- stream=True,
- )
-
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- stream = response.parse()
- stream.close()
-
- @parametrize
- def test_streaming_response_create_overload_2(self, client: Anthropic) -> None:
- with client.beta.tools.messages.with_streaming_response.create(
- max_tokens=1024,
- messages=[
- {
- "role": "user",
- "content": "Hello, world",
- }
- ],
- model="claude-3-opus-20240229",
- stream=True,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- stream = response.parse()
- stream.close()
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncMessages:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @parametrize
- async def test_method_create_overload_1(self, async_client: AsyncAnthropic) -> None:
- message = await async_client.beta.tools.messages.create(
- max_tokens=1024,
- messages=[
- {
- "role": "user",
- "content": "Hello, world",
- }
- ],
- model="claude-3-opus-20240229",
- )
- assert_matches_type(ToolsBetaMessage, message, path=["response"])
-
- @parametrize
- async def test_method_create_with_all_params_overload_1(self, async_client: AsyncAnthropic) -> None:
- message = await async_client.beta.tools.messages.create(
- max_tokens=1024,
- messages=[
- {
- "role": "user",
- "content": "Hello, world",
- }
- ],
- model="claude-3-opus-20240229",
- metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"},
- stop_sequences=["string", "string", "string"],
- stream=False,
- system="Today's date is 2024-01-01.",
- temperature=1,
- tool_choice={"type": "auto"},
- tools=[
- {
- "description": "Get the current weather in a given location",
- "name": "x",
- "input_schema": {
- "type": "object",
- "properties": {
- "location": {
- "description": "The city and state, e.g. San Francisco, CA",
- "type": "string",
- },
- "unit": {
- "description": "Unit for the output - one of (celsius, fahrenheit)",
- "type": "string",
- },
- },
- },
- },
- {
- "description": "Get the current weather in a given location",
- "name": "x",
- "input_schema": {
- "type": "object",
- "properties": {
- "location": {
- "description": "The city and state, e.g. San Francisco, CA",
- "type": "string",
- },
- "unit": {
- "description": "Unit for the output - one of (celsius, fahrenheit)",
- "type": "string",
- },
- },
- },
- },
- {
- "description": "Get the current weather in a given location",
- "name": "x",
- "input_schema": {
- "type": "object",
- "properties": {
- "location": {
- "description": "The city and state, e.g. San Francisco, CA",
- "type": "string",
- },
- "unit": {
- "description": "Unit for the output - one of (celsius, fahrenheit)",
- "type": "string",
- },
- },
- },
- },
- ],
- top_k=5,
- top_p=0.7,
- )
- assert_matches_type(ToolsBetaMessage, message, path=["response"])
-
- @parametrize
- async def test_raw_response_create_overload_1(self, async_client: AsyncAnthropic) -> None:
- response = await async_client.beta.tools.messages.with_raw_response.create(
- max_tokens=1024,
- messages=[
- {
- "role": "user",
- "content": "Hello, world",
- }
- ],
- model="claude-3-opus-20240229",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- message = response.parse()
- assert_matches_type(ToolsBetaMessage, message, path=["response"])
-
- @parametrize
- async def test_streaming_response_create_overload_1(self, async_client: AsyncAnthropic) -> None:
- async with async_client.beta.tools.messages.with_streaming_response.create(
- max_tokens=1024,
- messages=[
- {
- "role": "user",
- "content": "Hello, world",
- }
- ],
- model="claude-3-opus-20240229",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- message = await response.parse()
- assert_matches_type(ToolsBetaMessage, message, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @parametrize
- async def test_method_create_overload_2(self, async_client: AsyncAnthropic) -> None:
- message_stream = await async_client.beta.tools.messages.create(
- max_tokens=1024,
- messages=[
- {
- "role": "user",
- "content": "Hello, world",
- }
- ],
- model="claude-3-opus-20240229",
- stream=True,
- )
- await message_stream.response.aclose()
-
- @parametrize
- async def test_method_create_with_all_params_overload_2(self, async_client: AsyncAnthropic) -> None:
- message_stream = await async_client.beta.tools.messages.create(
- max_tokens=1024,
- messages=[
- {
- "role": "user",
- "content": "Hello, world",
- }
- ],
- model="claude-3-opus-20240229",
- stream=True,
- metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"},
- stop_sequences=["string", "string", "string"],
- system="Today's date is 2024-01-01.",
- temperature=1,
- tool_choice={"type": "auto"},
- tools=[
- {
- "description": "Get the current weather in a given location",
- "name": "x",
- "input_schema": {
- "type": "object",
- "properties": {
- "location": {
- "description": "The city and state, e.g. San Francisco, CA",
- "type": "string",
- },
- "unit": {
- "description": "Unit for the output - one of (celsius, fahrenheit)",
- "type": "string",
- },
- },
- },
- },
- {
- "description": "Get the current weather in a given location",
- "name": "x",
- "input_schema": {
- "type": "object",
- "properties": {
- "location": {
- "description": "The city and state, e.g. San Francisco, CA",
- "type": "string",
- },
- "unit": {
- "description": "Unit for the output - one of (celsius, fahrenheit)",
- "type": "string",
- },
- },
- },
- },
- {
- "description": "Get the current weather in a given location",
- "name": "x",
- "input_schema": {
- "type": "object",
- "properties": {
- "location": {
- "description": "The city and state, e.g. San Francisco, CA",
- "type": "string",
- },
- "unit": {
- "description": "Unit for the output - one of (celsius, fahrenheit)",
- "type": "string",
- },
- },
- },
- },
- ],
- top_k=5,
- top_p=0.7,
- )
- await message_stream.response.aclose()
-
- @parametrize
- async def test_raw_response_create_overload_2(self, async_client: AsyncAnthropic) -> None:
- response = await async_client.beta.tools.messages.with_raw_response.create(
- max_tokens=1024,
- messages=[
- {
- "role": "user",
- "content": "Hello, world",
- }
- ],
- model="claude-3-opus-20240229",
- stream=True,
- )
-
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- stream = response.parse()
- await stream.close()
-
- @parametrize
- async def test_streaming_response_create_overload_2(self, async_client: AsyncAnthropic) -> None:
- async with async_client.beta.tools.messages.with_streaming_response.create(
- max_tokens=1024,
- messages=[
- {
- "role": "user",
- "content": "Hello, world",
- }
- ],
- model="claude-3-opus-20240229",
- stream=True,
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- stream = await response.parse()
- await stream.close()
-
- assert cast(Any, response.is_closed) is True
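
The deleted beta tests above map one-to-one onto the updated `tests/api_resources/test_messages.py` below; callers migrate the same way, with request parameters unchanged. A before/after sketch (tool definition illustrative):

```python
from anthropic import Anthropic

client = Anthropic()

# Before (removed in this diff):
#   message = client.beta.tools.messages.create(max_tokens=1024, ..., tools=tools)

# After:
message = client.messages.create(
    max_tokens=1024,
    messages=[{"role": "user", "content": "Hello, world"}],
    model="claude-3-opus-20240229",
    tools=[
        {
            "name": "get_weather",
            "description": "Get the current weather in a given location",
            "input_schema": {"type": "object", "properties": {}},
        }
    ],
)
```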
diff --git a/tests/api_resources/test_messages.py b/tests/api_resources/test_messages.py
index ac0700c3..af76de94 100644
--- a/tests/api_resources/test_messages.py
+++ b/tests/api_resources/test_messages.py
@@ -47,6 +47,60 @@ def test_method_create_with_all_params_overload_1(self, client: Anthropic) -> No
stream=False,
system="Today's date is 2024-01-01.",
temperature=1,
+ tool_choice={"type": "auto"},
+ tools=[
+ {
+ "description": "Get the current weather in a given location",
+ "name": "x",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "description": "The city and state, e.g. San Francisco, CA",
+ "type": "string",
+ },
+ "unit": {
+ "description": "Unit for the output - one of (celsius, fahrenheit)",
+ "type": "string",
+ },
+ },
+ },
+ },
+ {
+ "description": "Get the current weather in a given location",
+ "name": "x",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "description": "The city and state, e.g. San Francisco, CA",
+ "type": "string",
+ },
+ "unit": {
+ "description": "Unit for the output - one of (celsius, fahrenheit)",
+ "type": "string",
+ },
+ },
+ },
+ },
+ {
+ "description": "Get the current weather in a given location",
+ "name": "x",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "description": "The city and state, e.g. San Francisco, CA",
+ "type": "string",
+ },
+ "unit": {
+ "description": "Unit for the output - one of (celsius, fahrenheit)",
+ "type": "string",
+ },
+ },
+ },
+ },
+ ],
top_k=5,
top_p=0.7,
)
@@ -121,6 +175,60 @@ def test_method_create_with_all_params_overload_2(self, client: Anthropic) -> No
stop_sequences=["string", "string", "string"],
system="Today's date is 2024-01-01.",
temperature=1,
+ tool_choice={"type": "auto"},
+ tools=[
+ {
+ "description": "Get the current weather in a given location",
+ "name": "x",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "description": "The city and state, e.g. San Francisco, CA",
+ "type": "string",
+ },
+ "unit": {
+ "description": "Unit for the output - one of (celsius, fahrenheit)",
+ "type": "string",
+ },
+ },
+ },
+ },
+ {
+ "description": "Get the current weather in a given location",
+ "name": "x",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "description": "The city and state, e.g. San Francisco, CA",
+ "type": "string",
+ },
+ "unit": {
+ "description": "Unit for the output - one of (celsius, fahrenheit)",
+ "type": "string",
+ },
+ },
+ },
+ },
+ {
+ "description": "Get the current weather in a given location",
+ "name": "x",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "description": "The city and state, e.g. San Francisco, CA",
+ "type": "string",
+ },
+ "unit": {
+ "description": "Unit for the output - one of (celsius, fahrenheit)",
+ "type": "string",
+ },
+ },
+ },
+ },
+ ],
top_k=5,
top_p=0.7,
)
@@ -199,6 +307,60 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
stream=False,
system="Today's date is 2024-01-01.",
temperature=1,
+ tool_choice={"type": "auto"},
+ tools=[
+ {
+ "description": "Get the current weather in a given location",
+ "name": "x",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "description": "The city and state, e.g. San Francisco, CA",
+ "type": "string",
+ },
+ "unit": {
+ "description": "Unit for the output - one of (celsius, fahrenheit)",
+ "type": "string",
+ },
+ },
+ },
+ },
+ {
+ "description": "Get the current weather in a given location",
+ "name": "x",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "description": "The city and state, e.g. San Francisco, CA",
+ "type": "string",
+ },
+ "unit": {
+ "description": "Unit for the output - one of (celsius, fahrenheit)",
+ "type": "string",
+ },
+ },
+ },
+ },
+ {
+ "description": "Get the current weather in a given location",
+ "name": "x",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "description": "The city and state, e.g. San Francisco, CA",
+ "type": "string",
+ },
+ "unit": {
+ "description": "Unit for the output - one of (celsius, fahrenheit)",
+ "type": "string",
+ },
+ },
+ },
+ },
+ ],
top_k=5,
top_p=0.7,
)
@@ -273,6 +435,60 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
stop_sequences=["string", "string", "string"],
system="Today's date is 2024-01-01.",
temperature=1,
+ tool_choice={"type": "auto"},
+ tools=[
+ {
+ "description": "Get the current weather in a given location",
+ "name": "x",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "description": "The city and state, e.g. San Francisco, CA",
+ "type": "string",
+ },
+ "unit": {
+ "description": "Unit for the output - one of (celsius, fahrenheit)",
+ "type": "string",
+ },
+ },
+ },
+ },
+ {
+ "description": "Get the current weather in a given location",
+ "name": "x",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "description": "The city and state, e.g. San Francisco, CA",
+ "type": "string",
+ },
+ "unit": {
+ "description": "Unit for the output - one of (celsius, fahrenheit)",
+ "type": "string",
+ },
+ },
+ },
+ },
+ {
+ "description": "Get the current weather in a given location",
+ "name": "x",
+ "input_schema": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "description": "The city and state, e.g. San Francisco, CA",
+ "type": "string",
+ },
+ "unit": {
+ "description": "Unit for the output - one of (celsius, fahrenheit)",
+ "type": "string",
+ },
+ },
+ },
+ },
+ ],
top_k=5,
top_p=0.7,
)