From f6825941acc09b33af386b40718bd2f3c01b29ef Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 12 Feb 2024 18:09:53 -0500
Subject: [PATCH] feat(api): messages is generally available (#343)
---
README.md | 168 +++++++++++-------
api.md | 10 +-
examples/messages.py | 4 +-
examples/messages_stream.py | 2 +-
examples/messages_stream_handler.py | 4 +-
examples/vertex.py | 4 +-
helpers.md | 12 +-
src/anthropic/_client.py | 16 +-
src/anthropic/lib/streaming/_messages.py | 8 +-
src/anthropic/lib/vertex/_client.py | 16 +-
src/anthropic/resources/__init__.py | 26 +--
src/anthropic/resources/beta/__init__.py | 33 ----
src/anthropic/resources/beta/beta.py | 80 ---------
.../resources/{beta => }/messages.py | 66 ++-----
src/anthropic/types/__init__.py | 15 ++
src/anthropic/types/beta/__init__.py | 19 --
.../types/{beta => }/content_block.py | 2 +-
.../{beta => }/content_block_delta_event.py | 2 +-
.../{beta => }/content_block_start_event.py | 2 +-
.../{beta => }/content_block_stop_event.py | 2 +-
src/anthropic/types/{beta => }/message.py | 2 +-
.../types/{beta => }/message_create_params.py | 3 -
.../types/{beta => }/message_delta_event.py | 2 +-
.../types/{beta => }/message_delta_usage.py | 2 +-
.../types/{beta => }/message_param.py | 0
.../types/{beta => }/message_start_event.py | 2 +-
.../types/{beta => }/message_stop_event.py | 2 +-
.../types/{beta => }/message_stream_event.py | 0
.../types/{beta => }/text_block_param.py | 0
src/anthropic/types/{beta => }/text_delta.py | 2 +-
src/anthropic/types/{beta => }/usage.py | 2 +-
tests/api_resources/beta/__init__.py | 1 -
.../api_resources/{beta => }/test_messages.py | 34 ++--
tests/lib/streaming/test_messages.py | 12 +-
tests/test_client.py | 52 ++++--
35 files changed, 249 insertions(+), 358 deletions(-)
delete mode 100644 src/anthropic/resources/beta/__init__.py
delete mode 100644 src/anthropic/resources/beta/beta.py
rename src/anthropic/resources/{beta => }/messages.py (95%)
delete mode 100644 src/anthropic/types/beta/__init__.py
rename src/anthropic/types/{beta => }/content_block.py (85%)
rename src/anthropic/types/{beta => }/content_block_delta_event.py (89%)
rename src/anthropic/types/{beta => }/content_block_start_event.py (90%)
rename src/anthropic/types/{beta => }/content_block_stop_event.py (87%)
rename src/anthropic/types/{beta => }/message.py (98%)
rename src/anthropic/types/{beta => }/message_create_params.py (98%)
rename src/anthropic/types/{beta => }/message_delta_event.py (94%)
rename src/anthropic/types/{beta => }/message_delta_usage.py (86%)
rename src/anthropic/types/{beta => }/message_param.py (100%)
rename src/anthropic/types/{beta => }/message_start_event.py (88%)
rename src/anthropic/types/{beta => }/message_stop_event.py (85%)
rename src/anthropic/types/{beta => }/message_stream_event.py (100%)
rename src/anthropic/types/{beta => }/text_block_param.py (100%)
rename src/anthropic/types/{beta => }/text_delta.py (85%)
rename src/anthropic/types/{beta => }/usage.py (88%)
delete mode 100644 tests/api_resources/beta/__init__.py
rename tests/api_resources/{beta => }/test_messages.py (90%)
diff --git a/README.md b/README.md
index 870fd5f8..9a7a5662 100644
--- a/README.md
+++ b/README.md
@@ -23,19 +23,25 @@ pip install anthropic
The full API of this library can be found in [api.md](api.md).
```python
-from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
+import os
+from anthropic import Anthropic
-anthropic = Anthropic(
- # defaults to os.environ.get("ANTHROPIC_API_KEY")
- api_key="my api key",
+client = Anthropic(
+ # This is the default and can be omitted
+ api_key=os.environ.get("ANTHROPIC_API_KEY"),
)
-completion = anthropic.completions.create(
+message = client.messages.create(
+ max_tokens=1024,
+ messages=[
+ {
+ "role": "user",
+ "content": "How does a court case get to the supreme court?",
+ }
+ ],
model="claude-2.1",
- max_tokens_to_sample=300,
- prompt=f"{HUMAN_PROMPT} how does a court case get to the Supreme Court?{AI_PROMPT}",
)
-print(completion.completion)
+print(message.content)
```
While you can provide an `api_key` keyword argument,
@@ -48,21 +54,28 @@ so that your API Key is not stored in source control.
Simply import `AsyncAnthropic` instead of `Anthropic` and use `await` with each API call:
```python
-from anthropic import AsyncAnthropic, HUMAN_PROMPT, AI_PROMPT
+import os
+import asyncio
+from anthropic import AsyncAnthropic
-anthropic = AsyncAnthropic(
- # defaults to os.environ.get("ANTHROPIC_API_KEY")
- api_key="my api key",
+client = AsyncAnthropic(
+ # This is the default and can be omitted
+ api_key=os.environ.get("ANTHROPIC_API_KEY"),
)
-async def main():
- completion = await anthropic.completions.create(
+async def main() -> None:
+ message = await client.messages.create(
+ max_tokens=1024,
+ messages=[
+ {
+ "role": "user",
+ "content": "How does a court case get to the supreme court?",
+ }
+ ],
model="claude-2.1",
- max_tokens_to_sample=300,
- prompt=f"{HUMAN_PROMPT} how does a court case get to the Supreme Court?{AI_PROMPT}",
)
- print(completion.completion)
+ print(message.content)
asyncio.run(main())
@@ -75,35 +88,45 @@ Functionality between the synchronous and asynchronous clients is otherwise iden
We provide support for streaming responses using Server-Sent Events (SSE).
```python
-from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
+from anthropic import Anthropic
-anthropic = Anthropic()
+client = Anthropic()
-stream = anthropic.completions.create(
- prompt=f"{HUMAN_PROMPT} Your prompt here{AI_PROMPT}",
- max_tokens_to_sample=300,
+stream = client.messages.create(
+ max_tokens=1024,
+ messages=[
+ {
+ "role": "user",
+ "content": "your prompt here",
+ }
+ ],
model="claude-2.1",
stream=True,
)
-for completion in stream:
- print(completion.completion, end="", flush=True)
+for event in stream:
+ print(event.type)
```
The async client uses the exact same interface.
```python
-from anthropic import AsyncAnthropic, HUMAN_PROMPT, AI_PROMPT
+from anthropic import AsyncAnthropic
-anthropic = AsyncAnthropic()
+client = AsyncAnthropic()
-stream = await anthropic.completions.create(
- prompt=f"{HUMAN_PROMPT} Your prompt here{AI_PROMPT}",
- max_tokens_to_sample=300,
+stream = await client.messages.create(
+ max_tokens=1024,
+ messages=[
+ {
+ "role": "user",
+ "content": "your prompt here",
+ }
+ ],
model="claude-2.1",
stream=True,
)
-async for completion in stream:
- print(completion.completion, end="", flush=True)
+async for event in stream:
+ print(event.type)
```
### Streaming Helpers
@@ -117,7 +140,7 @@ from anthropic import AsyncAnthropic
client = AsyncAnthropic()
async def main() -> None:
- async with client.beta.messages.stream(
+ async with client.messages.stream(
max_tokens=1024,
messages=[
{
@@ -137,9 +160,9 @@ async def main() -> None:
asyncio.run(main())
```
-Streaming with `client.beta.messages.stream(...)` exposes [various helpers for your convenience](helpers.md) including event handlers and accumulation.
+Streaming with `client.messages.stream(...)` exposes [various helpers for your convenience](helpers.md) including event handlers and accumulation.
-Alternatively, you can use `client.beta.messages.create(..., stream=True)` which only returns an async iterable of the events in the stream and thus uses less memory (it does not build up a final message object for you).
+Alternatively, you can use `client.messages.create(..., stream=True)` which only returns an async iterable of the events in the stream and thus uses less memory (it does not build up a final message object for you).
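
When consuming the raw event stream directly, text arrives as `content_block_delta` events. A minimal sketch of accumulating it by hand, under the event shapes introduced by this patch (`ContentBlockDeltaEvent` carrying a `TextDelta`):

```python
from anthropic import Anthropic

client = Anthropic()

stream = client.messages.create(
    max_tokens=1024,
    messages=[{"role": "user", "content": "your prompt here"}],
    model="claude-2.1",
    stream=True,
)

# Only content_block_delta events carry text; the other event types
# (message_start, message_stop, ...) are lifecycle metadata.
text = ""
for event in stream:
    if event.type == "content_block_delta":
        text += event.delta.text
print(text)
```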
## AWS Bedrock
@@ -195,13 +218,19 @@ All errors inherit from `anthropic.APIError`.
```python
import anthropic
+from anthropic import Anthropic
-client = anthropic.Anthropic()
+client = Anthropic()
try:
- client.completions.create(
- prompt=f"{anthropic.HUMAN_PROMPT} Your prompt here{anthropic.AI_PROMPT}",
- max_tokens_to_sample=300,
+ client.messages.create(
+ max_tokens=1024,
+ messages=[
+ {
+ "role": "user",
+ "content": "your prompt here",
+ }
+ ],
model="claude-2.1",
)
except anthropic.APIConnectionError as e:
@@ -237,18 +266,23 @@ Connection errors (for example, due to a network connectivity problem), 408 Requ
You can use the `max_retries` option to configure or disable retry settings:
```python
-from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
+from anthropic import Anthropic
# Configure the default for all requests:
-anthropic = Anthropic(
+client = Anthropic(
# default is 2
max_retries=0,
)
# Or, configure per-request:
-anthropic.with_options(max_retries=5).completions.create(
- prompt=f"{HUMAN_PROMPT} Can you help me effectively ask for a raise at work?{AI_PROMPT}",
- max_tokens_to_sample=300,
+client.with_options(max_retries=5).messages.create(
+ max_tokens=1024,
+ messages=[
+ {
+ "role": "user",
+ "content": "Can you help me effectively ask for a raise at work?",
+ }
+ ],
model="claude-2.1",
)
```
@@ -259,23 +293,28 @@ By default requests time out after 10 minutes. You can configure this with a `ti
which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object:
```python
-from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
+from anthropic import Anthropic
# Configure the default for all requests:
-anthropic = Anthropic(
- # default is 10 minutes
+client = Anthropic(
+ # 20 seconds (default is 10 minutes)
timeout=20.0,
)
# More granular control:
-anthropic = Anthropic(
+client = Anthropic(
timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0),
)
# Override per-request:
-anthropic.with_options(timeout=5 * 1000).completions.create(
- prompt=f"{HUMAN_PROMPT} Where can I get a good coffee in my neighbourhood?{AI_PROMPT}",
- max_tokens_to_sample=300,
+client.with_options(timeout=5.0).messages.create(
+ max_tokens=1024,
+ messages=[
+ {
+ "role": "user",
+ "content": "Where can I get a good coffee in my neighbourhood?",
+ }
+ ],
model="claude-2.1",
)
```
@@ -329,19 +368,21 @@ if response.my_field is None:
The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g.,
```py
-from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
-
-anthropic = Anthropic()
+from anthropic import Anthropic
-response = anthropic.completions.with_raw_response.create(
+client = Anthropic()
+response = client.messages.with_raw_response.create(
+ max_tokens=1024,
+ messages=[{
+ "role": "user",
+ "content": "Where can I get a good coffee in my neighbourhood?",
+ }],
model="claude-2.1",
- max_tokens_to_sample=300,
- prompt=f"{HUMAN_PROMPT} how does a court case get to the Supreme Court?{AI_PROMPT}",
)
print(response.headers.get('X-My-Header'))
-completion = response.parse() # get the object that `completions.create()` would have returned
-print(completion.completion)
+message = response.parse() # get the object that `messages.create()` would have returned
+print(message.content)
```
These methods return a [`LegacyAPIResponse`](https://github.com/anthropics/anthropic-sdk-python/tree/main/src/anthropic/_legacy_response.py) object. This is a legacy class, as we're changing it slightly in the next major version.
@@ -362,10 +403,15 @@ To stream the response body, use `.with_streaming_response` instead, which requi
As such, `.with_streaming_response` methods return a different [`APIResponse`](https://github.com/anthropics/anthropic-sdk-python/tree/main/src/anthropic/_response.py) object, and the async client returns an [`AsyncAPIResponse`](https://github.com/anthropics/anthropic-sdk-python/tree/main/src/anthropic/_response.py) object.
```python
-with client.completions.with_streaming_response.create(
- max_tokens_to_sample=300,
+with client.messages.with_streaming_response.create(
+ max_tokens=1024,
+ messages=[
+ {
+ "role": "user",
+ "content": "Where can I get a good coffee in my neighbourhood?",
+ }
+ ],
model="claude-2.1",
- prompt=f"{HUMAN_PROMPT} Where can I get a good coffee in my neighbourhood?{AI_PROMPT}",
) as response:
print(response.headers.get("X-My-Header"))
diff --git a/api.md b/api.md
index da916389..3c83d177 100644
--- a/api.md
+++ b/api.md
@@ -16,14 +16,12 @@ Methods:
- client.completions.create(\*\*params) -> Completion
-# Beta
-
-## Messages
+# Messages
Types:
```python
-from anthropic.types.beta import (
+from anthropic.types import (
ContentBlock,
ContentBlockDeltaEvent,
ContentBlockStartEvent,
@@ -43,5 +41,5 @@ from anthropic.types.beta import (
Methods:
-- client.beta.messages.create(\*\*params) -> Message
-- client.beta.messages.stream(\*args) -> MessageStreamManager[MessageStream] | MessageStreamManager[MessageStreamT]
+- client.messages.create(\*\*params) -> Message
+- client.messages.stream(\*args) -> MessageStreamManager[MessageStream] | MessageStreamManager[MessageStreamT]
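
For callers migrating off the beta namespace, the change is mechanical; a sketch of the before/after, assuming an existing `client`:

```python
from anthropic import Anthropic

client = Anthropic()

# Before this release (beta namespace, beta header sent implicitly):
#   message = client.beta.messages.create(...)
# After (generally available, no beta header required):
message = client.messages.create(
    max_tokens=1024,
    messages=[{"role": "user", "content": "Hello, Claude"}],
    model="claude-2.1",
)
print(message.content)
```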
diff --git a/examples/messages.py b/examples/messages.py
index 5fb8b5cd..72fff289 100644
--- a/examples/messages.py
+++ b/examples/messages.py
@@ -2,7 +2,7 @@
client = Anthropic()
-response = client.beta.messages.create(
+response = client.messages.create(
max_tokens=1024,
messages=[
{
@@ -14,7 +14,7 @@
)
print(response)
-response2 = client.beta.messages.create(
+response2 = client.messages.create(
max_tokens=1024,
messages=[
{
diff --git a/examples/messages_stream.py b/examples/messages_stream.py
index 3ea6a201..3c664cfd 100644
--- a/examples/messages_stream.py
+++ b/examples/messages_stream.py
@@ -6,7 +6,7 @@
async def main() -> None:
- async with client.beta.messages.stream(
+ async with client.messages.stream(
max_tokens=1024,
messages=[
{
diff --git a/examples/messages_stream_handler.py b/examples/messages_stream_handler.py
index a0175d6f..65e24445 100644
--- a/examples/messages_stream_handler.py
+++ b/examples/messages_stream_handler.py
@@ -2,7 +2,7 @@
from typing_extensions import override
from anthropic import AsyncAnthropic, AsyncMessageStream
-from anthropic.types.beta import MessageStreamEvent
+from anthropic.types import MessageStreamEvent
client = AsyncAnthropic()
@@ -14,7 +14,7 @@ async def on_stream_event(self, event: MessageStreamEvent) -> None:
async def main() -> None:
- async with client.beta.messages.stream(
+ async with client.messages.stream(
max_tokens=1024,
messages=[
{
diff --git a/examples/vertex.py b/examples/vertex.py
index 825cf95c..ef504011 100644
--- a/examples/vertex.py
+++ b/examples/vertex.py
@@ -8,7 +8,7 @@ def sync_client() -> None:
client = AnthropicVertex()
- message = client.beta.messages.create(
+ message = client.messages.create(
model="claude-instant-1p2",
max_tokens=100,
messages=[
@@ -26,7 +26,7 @@ async def async_client() -> None:
client = AsyncAnthropicVertex()
- message = await client.beta.messages.create(
+ message = await client.messages.create(
model="claude-instant-1p2",
max_tokens=1024,
messages=[
diff --git a/helpers.md b/helpers.md
index a5edcaf8..c0fecb25 100644
--- a/helpers.md
+++ b/helpers.md
@@ -3,7 +3,7 @@
## Streaming Responses
```python
-async with client.beta.messages.stream(
+async with client.messages.stream(
max_tokens=1024,
messages=[
{
@@ -18,9 +18,9 @@ async with client.beta.messages.stream(
print()
```
-`client.beta.messages.stream()` returns a `MessageStreamManager`, which is a context manager that yields a `MessageStream` which is iterable, emits events and accumulates messages.
+`client.messages.stream()` returns a `MessageStreamManager`, a context manager that yields a `MessageStream` that is iterable, emits events, and accumulates messages.
-Alternatively, you can use `client.beta.messages.create(..., stream=True)` which returns an
+Alternatively, you can use `client.messages.create(..., stream=True)` which returns an
iterable of the events in the stream and uses less memory (most notably, it does not accumulate a final message
object for you).
@@ -45,14 +45,14 @@ print()
### Events
-You can pass an `event_handler` argument to `client.beta.messages.stream` to register callback methods that are fired when certain events happen:
+You can pass an `event_handler` argument to `client.messages.stream` to register callback methods that are fired when certain events happen:
```py
import asyncio
from typing_extensions import override
from anthropic import AsyncAnthropic, AsyncMessageStream
-from anthropic.types.beta import MessageStreamEvent
+from anthropic.types import MessageStreamEvent
client = AsyncAnthropic()
@@ -66,7 +66,7 @@ class MyStream(AsyncMessageStream):
print("on_event fired with:", event)
async def main() -> None:
- async with client.beta.messages.stream(
+ async with client.messages.stream(
max_tokens=1024,
messages=[
{
diff --git a/src/anthropic/_client.py b/src/anthropic/_client.py
index 2c4df083..4b9e2b90 100644
--- a/src/anthropic/_client.py
+++ b/src/anthropic/_client.py
@@ -57,7 +57,7 @@
class Anthropic(SyncAPIClient):
completions: resources.Completions
- beta: resources.Beta
+ messages: resources.Messages
with_raw_response: AnthropicWithRawResponse
with_streaming_response: AnthropicWithStreamedResponse
@@ -133,7 +133,7 @@ def __init__(
self._default_stream_cls = Stream
self.completions = resources.Completions(self)
- self.beta = resources.Beta(self)
+ self.messages = resources.Messages(self)
self.with_raw_response = AnthropicWithRawResponse(self)
self.with_streaming_response = AnthropicWithStreamedResponse(self)
@@ -312,7 +312,7 @@ def _make_status_error(
class AsyncAnthropic(AsyncAPIClient):
completions: resources.AsyncCompletions
- beta: resources.AsyncBeta
+ messages: resources.AsyncMessages
with_raw_response: AsyncAnthropicWithRawResponse
with_streaming_response: AsyncAnthropicWithStreamedResponse
@@ -388,7 +388,7 @@ def __init__(
self._default_stream_cls = AsyncStream
self.completions = resources.AsyncCompletions(self)
- self.beta = resources.AsyncBeta(self)
+ self.messages = resources.AsyncMessages(self)
self.with_raw_response = AsyncAnthropicWithRawResponse(self)
self.with_streaming_response = AsyncAnthropicWithStreamedResponse(self)
@@ -568,25 +568,25 @@ def _make_status_error(
class AnthropicWithRawResponse:
def __init__(self, client: Anthropic) -> None:
self.completions = resources.CompletionsWithRawResponse(client.completions)
- self.beta = resources.BetaWithRawResponse(client.beta)
+ self.messages = resources.MessagesWithRawResponse(client.messages)
class AsyncAnthropicWithRawResponse:
def __init__(self, client: AsyncAnthropic) -> None:
self.completions = resources.AsyncCompletionsWithRawResponse(client.completions)
- self.beta = resources.AsyncBetaWithRawResponse(client.beta)
+ self.messages = resources.AsyncMessagesWithRawResponse(client.messages)
class AnthropicWithStreamedResponse:
def __init__(self, client: Anthropic) -> None:
self.completions = resources.CompletionsWithStreamingResponse(client.completions)
- self.beta = resources.BetaWithStreamingResponse(client.beta)
+ self.messages = resources.MessagesWithStreamingResponse(client.messages)
class AsyncAnthropicWithStreamedResponse:
def __init__(self, client: AsyncAnthropic) -> None:
self.completions = resources.AsyncCompletionsWithStreamingResponse(client.completions)
- self.beta = resources.AsyncBetaWithStreamingResponse(client.beta)
+ self.messages = resources.AsyncMessagesWithStreamingResponse(client.messages)
Client = Anthropic
diff --git a/src/anthropic/lib/streaming/_messages.py b/src/anthropic/lib/streaming/_messages.py
index 76accf0f..2f4bc7e5 100644
--- a/src/anthropic/lib/streaming/_messages.py
+++ b/src/anthropic/lib/streaming/_messages.py
@@ -7,10 +7,10 @@
import httpx
+from ...types import Message, MessageStreamEvent
from ..._utils import consume_sync_iterator, consume_async_iterator
from ..._streaming import Stream, AsyncStream
-from ...types.beta import Message, MessageStreamEvent
-from ...types.beta.message import ContentBlock
+from ...types.message import ContentBlock
if TYPE_CHECKING:
from ..._client import Anthropic, AsyncAnthropic
@@ -182,7 +182,7 @@ class MessageStreamManager(Generic[MessageStreamT]):
"""Wrapper over MessageStream that is returned by `.stream()`.
```py
- with client.beta.messages.stream(...) as stream:
+ with client.messages.stream(...) as stream:
for chunk in stream:
...
```
@@ -383,7 +383,7 @@ class AsyncMessageStreamManager(Generic[AsyncMessageStreamT]):
original client call.
```py
- async with client.beta.messages.stream(...) as stream:
+ async with client.messages.stream(...) as stream:
async for chunk in stream:
...
```
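
A sketch of the synchronous manager in use, mirroring the docstrings above (the request is only issued when the `with` block is entered):

```python
from anthropic import Anthropic

client = Anthropic()

# __enter__ issues the request and yields a MessageStream; __exit__
# closes the underlying HTTP response even if iteration stops early.
with client.messages.stream(
    max_tokens=1024,
    messages=[{"role": "user", "content": "Say hello there!"}],
    model="claude-2.1",
) as stream:
    for text in stream.text_stream:
        print(text, end="", flush=True)
    print()
```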
diff --git a/src/anthropic/lib/vertex/_client.py b/src/anthropic/lib/vertex/_client.py
index 5d7029fe..3bd3798a 100644
--- a/src/anthropic/lib/vertex/_client.py
+++ b/src/anthropic/lib/vertex/_client.py
@@ -16,7 +16,7 @@
from ..._streaming import Stream, AsyncStream
from ..._exceptions import APIStatusError
from ..._base_client import DEFAULT_MAX_RETRIES, BaseClient, SyncAPIClient, AsyncAPIClient
-from ...resources.beta.beta import Beta, AsyncBeta
+from ...resources.messages import Messages, AsyncMessages
if TYPE_CHECKING:
from google.auth.credentials import Credentials as GoogleCredentials # type: ignore
@@ -109,7 +109,7 @@ def _make_status_error(
class AnthropicVertex(BaseVertexClient[httpx.Client, Stream[Any]], SyncAPIClient):
- beta: Beta
+ messages: Messages
def __init__(
self,
@@ -165,10 +165,7 @@ def __init__(
self.access_token = access_token
self._credentials: GoogleCredentials | None = None
- self.beta = Beta(
- # TODO: fix types here
- self # type: ignore
- )
+ self.messages = Messages(self)
@override
def _prepare_request(self, request: httpx.Request) -> None:
@@ -199,7 +196,7 @@ def _ensure_access_token(self) -> str:
class AsyncAnthropicVertex(BaseVertexClient[httpx.AsyncClient, AsyncStream[Any]], AsyncAPIClient):
- beta: AsyncBeta
+ messages: AsyncMessages
def __init__(
self,
@@ -255,10 +252,7 @@ def __init__(
self.access_token = access_token
self._credentials: GoogleCredentials | None = None
- self.beta = AsyncBeta(
- # TODO: fix types here
- self # type: ignore
- )
+ self.messages = AsyncMessages(self)
@override
async def _prepare_request(self, request: httpx.Request) -> None:
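
With the beta wrapper removed, the Vertex clients expose `messages` directly. A minimal sketch; `region` and `project_id` here are illustrative assumptions (the client can also resolve them from the environment):

```python
from anthropic import AnthropicVertex

# Both arguments are assumptions for this sketch; omit them to let
# the client pick the values up from the environment / gcloud config.
client = AnthropicVertex(region="us-central1", project_id="my-project")

message = client.messages.create(
    model="claude-instant-1p2",
    max_tokens=100,
    messages=[{"role": "user", "content": "Hello!"}],
)
print(message.content)
```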
diff --git a/src/anthropic/resources/__init__.py b/src/anthropic/resources/__init__.py
index 1b085e9f..cf0f6254 100644
--- a/src/anthropic/resources/__init__.py
+++ b/src/anthropic/resources/__init__.py
@@ -1,12 +1,12 @@
# File generated from our OpenAPI spec by Stainless.
-from .beta import (
- Beta,
- AsyncBeta,
- BetaWithRawResponse,
- AsyncBetaWithRawResponse,
- BetaWithStreamingResponse,
- AsyncBetaWithStreamingResponse,
+from .messages import (
+ Messages,
+ AsyncMessages,
+ MessagesWithRawResponse,
+ AsyncMessagesWithRawResponse,
+ MessagesWithStreamingResponse,
+ AsyncMessagesWithStreamingResponse,
)
from .completions import (
Completions,
@@ -24,10 +24,10 @@
"AsyncCompletionsWithRawResponse",
"CompletionsWithStreamingResponse",
"AsyncCompletionsWithStreamingResponse",
- "Beta",
- "AsyncBeta",
- "BetaWithRawResponse",
- "AsyncBetaWithRawResponse",
- "BetaWithStreamingResponse",
- "AsyncBetaWithStreamingResponse",
+ "Messages",
+ "AsyncMessages",
+ "MessagesWithRawResponse",
+ "AsyncMessagesWithRawResponse",
+ "MessagesWithStreamingResponse",
+ "AsyncMessagesWithStreamingResponse",
]
diff --git a/src/anthropic/resources/beta/__init__.py b/src/anthropic/resources/beta/__init__.py
deleted file mode 100644
index 663d4b95..00000000
--- a/src/anthropic/resources/beta/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from .beta import (
- Beta,
- AsyncBeta,
- BetaWithRawResponse,
- AsyncBetaWithRawResponse,
- BetaWithStreamingResponse,
- AsyncBetaWithStreamingResponse,
-)
-from .messages import (
- Messages,
- AsyncMessages,
- MessagesWithRawResponse,
- AsyncMessagesWithRawResponse,
- MessagesWithStreamingResponse,
- AsyncMessagesWithStreamingResponse,
-)
-
-__all__ = [
- "Messages",
- "AsyncMessages",
- "MessagesWithRawResponse",
- "AsyncMessagesWithRawResponse",
- "MessagesWithStreamingResponse",
- "AsyncMessagesWithStreamingResponse",
- "Beta",
- "AsyncBeta",
- "BetaWithRawResponse",
- "AsyncBetaWithRawResponse",
- "BetaWithStreamingResponse",
- "AsyncBetaWithStreamingResponse",
-]
diff --git a/src/anthropic/resources/beta/beta.py b/src/anthropic/resources/beta/beta.py
deleted file mode 100644
index de0085ac..00000000
--- a/src/anthropic/resources/beta/beta.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from __future__ import annotations
-
-from .messages import (
- Messages,
- AsyncMessages,
- MessagesWithRawResponse,
- AsyncMessagesWithRawResponse,
- MessagesWithStreamingResponse,
- AsyncMessagesWithStreamingResponse,
-)
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-
-__all__ = ["Beta", "AsyncBeta"]
-
-
-class Beta(SyncAPIResource):
- @cached_property
- def messages(self) -> Messages:
- return Messages(self._client)
-
- @cached_property
- def with_raw_response(self) -> BetaWithRawResponse:
- return BetaWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> BetaWithStreamingResponse:
- return BetaWithStreamingResponse(self)
-
-
-class AsyncBeta(AsyncAPIResource):
- @cached_property
- def messages(self) -> AsyncMessages:
- return AsyncMessages(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncBetaWithRawResponse:
- return AsyncBetaWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncBetaWithStreamingResponse:
- return AsyncBetaWithStreamingResponse(self)
-
-
-class BetaWithRawResponse:
- def __init__(self, beta: Beta) -> None:
- self._beta = beta
-
- @cached_property
- def messages(self) -> MessagesWithRawResponse:
- return MessagesWithRawResponse(self._beta.messages)
-
-
-class AsyncBetaWithRawResponse:
- def __init__(self, beta: AsyncBeta) -> None:
- self._beta = beta
-
- @cached_property
- def messages(self) -> AsyncMessagesWithRawResponse:
- return AsyncMessagesWithRawResponse(self._beta.messages)
-
-
-class BetaWithStreamingResponse:
- def __init__(self, beta: Beta) -> None:
- self._beta = beta
-
- @cached_property
- def messages(self) -> MessagesWithStreamingResponse:
- return MessagesWithStreamingResponse(self._beta.messages)
-
-
-class AsyncBetaWithStreamingResponse:
- def __init__(self, beta: AsyncBeta) -> None:
- self._beta = beta
-
- @cached_property
- def messages(self) -> AsyncMessagesWithStreamingResponse:
- return AsyncMessagesWithStreamingResponse(self._beta.messages)
diff --git a/src/anthropic/resources/beta/messages.py b/src/anthropic/resources/messages.py
similarity index 95%
rename from src/anthropic/resources/beta/messages.py
rename to src/anthropic/resources/messages.py
index 2e04d085..ad908c02 100644
--- a/src/anthropic/resources/beta/messages.py
+++ b/src/anthropic/resources/messages.py
@@ -8,18 +8,18 @@
import httpx
-from ... import _legacy_response
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import required_args, maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
-from ..._streaming import Stream, AsyncStream
-from ...types.beta import Message, MessageParam, MessageStreamEvent, message_create_params
-from ..._base_client import (
+from .. import _legacy_response
+from ..types import Message, MessageParam, MessageStreamEvent, message_create_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._utils import required_args, maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .._streaming import Stream, AsyncStream
+from .._base_client import (
make_request_options,
)
-from ...lib.streaming import (
+from ..lib.streaming import (
MessageStream,
MessageStreamT,
AsyncMessageStream,
@@ -70,10 +70,6 @@ def create(
Messages can be used for either single queries to the model or for multi-turn
conversations.
- The Messages API is currently in beta. During beta, you must send the
- `anthropic-beta: messages-2023-12-15` header in your requests. If you are using
- our client SDKs, this is handled for you automatically.
-
Args:
max_tokens: The maximum number of tokens to generate before stopping.
@@ -137,9 +133,6 @@ def create(
{ "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
```
- During beta, the Messages API only accepts content blocks of type `"text"`, and
- at most one block per message.
-
See our
[guide to prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
for more details on how to best construct prompts.
@@ -240,10 +233,6 @@ def create(
Messages can be used for either single queries to the model or for multi-turn
conversations.
- The Messages API is currently in beta. During beta, you must send the
- `anthropic-beta: messages-2023-12-15` header in your requests. If you are using
- our client SDKs, this is handled for you automatically.
-
Args:
max_tokens: The maximum number of tokens to generate before stopping.
@@ -307,9 +296,6 @@ def create(
{ "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
```
- During beta, the Messages API only accepts content blocks of type `"text"`, and
- at most one block per message.
-
See our
[guide to prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
for more details on how to best construct prompts.
@@ -410,10 +396,6 @@ def create(
Messages can be used for either single queries to the model or for multi-turn
conversations.
- The Messages API is currently in beta. During beta, you must send the
- `anthropic-beta: messages-2023-12-15` header in your requests. If you are using
- our client SDKs, this is handled for you automatically.
-
Args:
max_tokens: The maximum number of tokens to generate before stopping.
@@ -477,9 +459,6 @@ def create(
{ "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
```
- During beta, the Messages API only accepts content blocks of type `"text"`, and
- at most one block per message.
-
See our
[guide to prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
for more details on how to best construct prompts.
@@ -571,7 +550,6 @@ def create(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = 600,
) -> Message | Stream[MessageStreamEvent]:
- extra_headers = {"Anthropic-Beta": "messages-2023-12-15", **(extra_headers or {})}
return self._post(
"/v1/messages",
body=maybe_transform(
@@ -666,7 +644,6 @@ def stream(
) -> MessageStreamManager[MessageStream] | MessageStreamManager[MessageStreamT]:
"""Create a Message stream"""
extra_headers = {
- "Anthropic-Beta": "messages-2023-12-15",
"X-Stainless-Stream-Helper": "messages",
"X-Stainless-Custom-Event-Handler": "true" if event_handler != MessageStream else "false",
**(extra_headers or {}),
@@ -738,10 +715,6 @@ async def create(
Messages can be used for either single queries to the model or for multi-turn
conversations.
- The Messages API is currently in beta. During beta, you must send the
- `anthropic-beta: messages-2023-12-15` header in your requests. If you are using
- our client SDKs, this is handled for you automatically.
-
Args:
max_tokens: The maximum number of tokens to generate before stopping.
@@ -805,9 +778,6 @@ async def create(
{ "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
```
- During beta, the Messages API only accepts content blocks of type `"text"`, and
- at most one block per message.
-
See our
[guide to prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
for more details on how to best construct prompts.
@@ -908,10 +878,6 @@ async def create(
Messages can be used for either single queries to the model or for multi-turn
conversations.
- The Messages API is currently in beta. During beta, you must send the
- `anthropic-beta: messages-2023-12-15` header in your requests. If you are using
- our client SDKs, this is handled for you automatically.
-
Args:
max_tokens: The maximum number of tokens to generate before stopping.
@@ -975,9 +941,6 @@ async def create(
{ "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
```
- During beta, the Messages API only accepts content blocks of type `"text"`, and
- at most one block per message.
-
See our
[guide to prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
for more details on how to best construct prompts.
@@ -1078,10 +1041,6 @@ async def create(
Messages can be used for either single queries to the model or for multi-turn
conversations.
- The Messages API is currently in beta. During beta, you must send the
- `anthropic-beta: messages-2023-12-15` header in your requests. If you are using
- our client SDKs, this is handled for you automatically.
-
Args:
max_tokens: The maximum number of tokens to generate before stopping.
@@ -1145,9 +1104,6 @@ async def create(
{ "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
```
- During beta, the Messages API only accepts content blocks of type `"text"`, and
- at most one block per message.
-
See our
[guide to prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
for more details on how to best construct prompts.
@@ -1239,7 +1195,6 @@ async def create(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = 600,
) -> Message | AsyncStream[MessageStreamEvent]:
- extra_headers = {"Anthropic-Beta": "messages-2023-12-15", **(extra_headers or {})}
return await self._post(
"/v1/messages",
body=maybe_transform(
@@ -1334,7 +1289,6 @@ def stream(
) -> AsyncMessageStreamManager[AsyncMessageStream] | AsyncMessageStreamManager[AsyncMessageStreamT]:
"""Create a Message stream"""
extra_headers = {
- "Anthropic-Beta": "messages-2023-12-15",
"X-Stainless-Stream-Helper": "messages",
"X-Stainless-Custom-Event-Handler": "true" if event_handler != AsyncMessageStream else "false",
**(extra_headers or {}),
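
Since the SDK no longer injects `anthropic-beta: messages-2023-12-15`, callers who need to opt into some other beta feature can still attach headers per request via `extra_headers`; the header value below is purely hypothetical:

```python
from anthropic import Anthropic

client = Anthropic()

message = client.messages.create(
    max_tokens=1024,
    messages=[{"role": "user", "content": "Hello, Claude"}],
    model="claude-2.1",
    # Merged into the outgoing request; "my-hypothetical-beta" is an
    # illustrative value, not a real feature flag.
    extra_headers={"anthropic-beta": "my-hypothetical-beta"},
)
```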
diff --git a/src/anthropic/types/__init__.py b/src/anthropic/types/__init__.py
index 9644d1ab..42df2f09 100644
--- a/src/anthropic/types/__init__.py
+++ b/src/anthropic/types/__init__.py
@@ -2,5 +2,20 @@
from __future__ import annotations
+from .usage import Usage as Usage
+from .message import Message as Message
from .completion import Completion as Completion
+from .text_delta import TextDelta as TextDelta
+from .content_block import ContentBlock as ContentBlock
+from .message_param import MessageParam as MessageParam
+from .text_block_param import TextBlockParam as TextBlockParam
+from .message_stop_event import MessageStopEvent as MessageStopEvent
+from .message_delta_event import MessageDeltaEvent as MessageDeltaEvent
+from .message_delta_usage import MessageDeltaUsage as MessageDeltaUsage
+from .message_start_event import MessageStartEvent as MessageStartEvent
+from .message_stream_event import MessageStreamEvent as MessageStreamEvent
+from .message_create_params import MessageCreateParams as MessageCreateParams
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
+from .content_block_stop_event import ContentBlockStopEvent as ContentBlockStopEvent
+from .content_block_delta_event import ContentBlockDeltaEvent as ContentBlockDeltaEvent
+from .content_block_start_event import ContentBlockStartEvent as ContentBlockStartEvent
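
The corresponding import-path migration for user code, with a small (illustrative) helper showing the relocated models in use:

```python
# Before: from anthropic.types.beta import Message, MessageStreamEvent
# After this change the models live at the package root:
from anthropic.types import Message, MessageStreamEvent


def flatten_text(message: Message) -> str:
    # Message.content is a list of ContentBlock models, each with .text.
    return "".join(block.text for block in message.content)
```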
diff --git a/src/anthropic/types/beta/__init__.py b/src/anthropic/types/beta/__init__.py
deleted file mode 100644
index aef9fea1..00000000
--- a/src/anthropic/types/beta/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
-
-from __future__ import annotations
-
-from .usage import Usage as Usage
-from .message import Message as Message
-from .text_delta import TextDelta as TextDelta
-from .content_block import ContentBlock as ContentBlock
-from .message_param import MessageParam as MessageParam
-from .text_block_param import TextBlockParam as TextBlockParam
-from .message_stop_event import MessageStopEvent as MessageStopEvent
-from .message_delta_event import MessageDeltaEvent as MessageDeltaEvent
-from .message_delta_usage import MessageDeltaUsage as MessageDeltaUsage
-from .message_start_event import MessageStartEvent as MessageStartEvent
-from .message_stream_event import MessageStreamEvent as MessageStreamEvent
-from .message_create_params import MessageCreateParams as MessageCreateParams
-from .content_block_stop_event import ContentBlockStopEvent as ContentBlockStopEvent
-from .content_block_delta_event import ContentBlockDeltaEvent as ContentBlockDeltaEvent
-from .content_block_start_event import ContentBlockStartEvent as ContentBlockStartEvent
diff --git a/src/anthropic/types/beta/content_block.py b/src/anthropic/types/content_block.py
similarity index 85%
rename from src/anthropic/types/beta/content_block.py
rename to src/anthropic/types/content_block.py
index 0e8f50a1..b10610c1 100644
--- a/src/anthropic/types/beta/content_block.py
+++ b/src/anthropic/types/content_block.py
@@ -2,7 +2,7 @@
from typing_extensions import Literal
-from ..._models import BaseModel
+from .._models import BaseModel
__all__ = ["ContentBlock"]
diff --git a/src/anthropic/types/beta/content_block_delta_event.py b/src/anthropic/types/content_block_delta_event.py
similarity index 89%
rename from src/anthropic/types/beta/content_block_delta_event.py
rename to src/anthropic/types/content_block_delta_event.py
index ad4f9636..1a7ded0b 100644
--- a/src/anthropic/types/beta/content_block_delta_event.py
+++ b/src/anthropic/types/content_block_delta_event.py
@@ -2,7 +2,7 @@
from typing_extensions import Literal
-from ..._models import BaseModel
+from .._models import BaseModel
from .text_delta import TextDelta
__all__ = ["ContentBlockDeltaEvent"]
diff --git a/src/anthropic/types/beta/content_block_start_event.py b/src/anthropic/types/content_block_start_event.py
similarity index 90%
rename from src/anthropic/types/beta/content_block_start_event.py
rename to src/anthropic/types/content_block_start_event.py
index c851cdd3..97fed900 100644
--- a/src/anthropic/types/beta/content_block_start_event.py
+++ b/src/anthropic/types/content_block_start_event.py
@@ -2,7 +2,7 @@
from typing_extensions import Literal
-from ..._models import BaseModel
+from .._models import BaseModel
from .content_block import ContentBlock
__all__ = ["ContentBlockStartEvent"]
diff --git a/src/anthropic/types/beta/content_block_stop_event.py b/src/anthropic/types/content_block_stop_event.py
similarity index 87%
rename from src/anthropic/types/beta/content_block_stop_event.py
rename to src/anthropic/types/content_block_stop_event.py
index bc5b5f36..1b026438 100644
--- a/src/anthropic/types/beta/content_block_stop_event.py
+++ b/src/anthropic/types/content_block_stop_event.py
@@ -2,7 +2,7 @@
from typing_extensions import Literal
-from ..._models import BaseModel
+from .._models import BaseModel
__all__ = ["ContentBlockStopEvent"]
diff --git a/src/anthropic/types/beta/message.py b/src/anthropic/types/message.py
similarity index 98%
rename from src/anthropic/types/beta/message.py
rename to src/anthropic/types/message.py
index 23dbe1d2..005eb29b 100644
--- a/src/anthropic/types/beta/message.py
+++ b/src/anthropic/types/message.py
@@ -4,7 +4,7 @@
from typing_extensions import Literal
from .usage import Usage
-from ..._models import BaseModel
+from .._models import BaseModel
from .content_block import ContentBlock
__all__ = ["Message"]
diff --git a/src/anthropic/types/beta/message_create_params.py b/src/anthropic/types/message_create_params.py
similarity index 98%
rename from src/anthropic/types/beta/message_create_params.py
rename to src/anthropic/types/message_create_params.py
index 99199e57..21d56395 100644
--- a/src/anthropic/types/beta/message_create_params.py
+++ b/src/anthropic/types/message_create_params.py
@@ -76,9 +76,6 @@ class MessageCreateParamsBase(TypedDict, total=False):
{ "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
```
- During beta, the Messages API only accepts content blocks of type `"text"`, and
- at most one block per message.
-
See our
[guide to prompt design](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)
for more details on how to best construct prompts.
diff --git a/src/anthropic/types/beta/message_delta_event.py b/src/anthropic/types/message_delta_event.py
similarity index 94%
rename from src/anthropic/types/beta/message_delta_event.py
rename to src/anthropic/types/message_delta_event.py
index 5639e3bf..1498b7f2 100644
--- a/src/anthropic/types/beta/message_delta_event.py
+++ b/src/anthropic/types/message_delta_event.py
@@ -3,7 +3,7 @@
from typing import Optional
from typing_extensions import Literal
-from ..._models import BaseModel
+from .._models import BaseModel
from .message_delta_usage import MessageDeltaUsage
__all__ = ["MessageDeltaEvent", "Delta"]
diff --git a/src/anthropic/types/beta/message_delta_usage.py b/src/anthropic/types/message_delta_usage.py
similarity index 86%
rename from src/anthropic/types/beta/message_delta_usage.py
rename to src/anthropic/types/message_delta_usage.py
index d5415573..2eec07e7 100644
--- a/src/anthropic/types/beta/message_delta_usage.py
+++ b/src/anthropic/types/message_delta_usage.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless.
-from ..._models import BaseModel
+from .._models import BaseModel
__all__ = ["MessageDeltaUsage"]
diff --git a/src/anthropic/types/beta/message_param.py b/src/anthropic/types/message_param.py
similarity index 100%
rename from src/anthropic/types/beta/message_param.py
rename to src/anthropic/types/message_param.py
diff --git a/src/anthropic/types/beta/message_start_event.py b/src/anthropic/types/message_start_event.py
similarity index 88%
rename from src/anthropic/types/beta/message_start_event.py
rename to src/anthropic/types/message_start_event.py
index 203e5a5d..7493e1e3 100644
--- a/src/anthropic/types/beta/message_start_event.py
+++ b/src/anthropic/types/message_start_event.py
@@ -3,7 +3,7 @@
from typing_extensions import Literal
from .message import Message
-from ..._models import BaseModel
+from .._models import BaseModel
__all__ = ["MessageStartEvent"]
diff --git a/src/anthropic/types/beta/message_stop_event.py b/src/anthropic/types/message_stop_event.py
similarity index 85%
rename from src/anthropic/types/beta/message_stop_event.py
rename to src/anthropic/types/message_stop_event.py
index 11969ceb..45e56c99 100644
--- a/src/anthropic/types/beta/message_stop_event.py
+++ b/src/anthropic/types/message_stop_event.py
@@ -2,7 +2,7 @@
from typing_extensions import Literal
-from ..._models import BaseModel
+from .._models import BaseModel
__all__ = ["MessageStopEvent"]
diff --git a/src/anthropic/types/beta/message_stream_event.py b/src/anthropic/types/message_stream_event.py
similarity index 100%
rename from src/anthropic/types/beta/message_stream_event.py
rename to src/anthropic/types/message_stream_event.py
diff --git a/src/anthropic/types/beta/text_block_param.py b/src/anthropic/types/text_block_param.py
similarity index 100%
rename from src/anthropic/types/beta/text_block_param.py
rename to src/anthropic/types/text_block_param.py
diff --git a/src/anthropic/types/beta/text_delta.py b/src/anthropic/types/text_delta.py
similarity index 85%
rename from src/anthropic/types/beta/text_delta.py
rename to src/anthropic/types/text_delta.py
index 1ef6eafc..8b3171c3 100644
--- a/src/anthropic/types/beta/text_delta.py
+++ b/src/anthropic/types/text_delta.py
@@ -2,7 +2,7 @@
from typing_extensions import Literal
-from ..._models import BaseModel
+from .._models import BaseModel
__all__ = ["TextDelta"]
diff --git a/src/anthropic/types/beta/usage.py b/src/anthropic/types/usage.py
similarity index 88%
rename from src/anthropic/types/beta/usage.py
rename to src/anthropic/types/usage.py
index 5ba569ee..d38f4b41 100644
--- a/src/anthropic/types/beta/usage.py
+++ b/src/anthropic/types/usage.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless.
-from ..._models import BaseModel
+from .._models import BaseModel
__all__ = ["Usage"]
diff --git a/tests/api_resources/beta/__init__.py b/tests/api_resources/beta/__init__.py
deleted file mode 100644
index 1016754e..00000000
--- a/tests/api_resources/beta/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless.
diff --git a/tests/api_resources/beta/test_messages.py b/tests/api_resources/test_messages.py
similarity index 90%
rename from tests/api_resources/beta/test_messages.py
rename to tests/api_resources/test_messages.py
index b1460038..cc76a125 100644
--- a/tests/api_resources/beta/test_messages.py
+++ b/tests/api_resources/test_messages.py
@@ -9,7 +9,7 @@
from anthropic import Anthropic, AsyncAnthropic
from tests.utils import assert_matches_type
-from anthropic.types.beta import Message
+from anthropic.types import Message
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -19,7 +19,7 @@ class TestMessages:
@parametrize
def test_method_create_overload_1(self, client: Anthropic) -> None:
- message = client.beta.messages.create(
+ message = client.messages.create(
max_tokens=1024,
messages=[
{
@@ -33,7 +33,7 @@ def test_method_create_overload_1(self, client: Anthropic) -> None:
@parametrize
def test_method_create_with_all_params_overload_1(self, client: Anthropic) -> None:
- message = client.beta.messages.create(
+ message = client.messages.create(
max_tokens=1024,
messages=[
{
@@ -54,7 +54,7 @@ def test_method_create_with_all_params_overload_1(self, client: Anthropic) -> No
@parametrize
def test_raw_response_create_overload_1(self, client: Anthropic) -> None:
- response = client.beta.messages.with_raw_response.create(
+ response = client.messages.with_raw_response.create(
max_tokens=1024,
messages=[
{
@@ -72,7 +72,7 @@ def test_raw_response_create_overload_1(self, client: Anthropic) -> None:
@parametrize
def test_streaming_response_create_overload_1(self, client: Anthropic) -> None:
- with client.beta.messages.with_streaming_response.create(
+ with client.messages.with_streaming_response.create(
max_tokens=1024,
messages=[
{
@@ -92,7 +92,7 @@ def test_streaming_response_create_overload_1(self, client: Anthropic) -> None:
@parametrize
def test_method_create_overload_2(self, client: Anthropic) -> None:
- message_stream = client.beta.messages.create(
+ message_stream = client.messages.create(
max_tokens=1024,
messages=[
{
@@ -107,7 +107,7 @@ def test_method_create_overload_2(self, client: Anthropic) -> None:
@parametrize
def test_method_create_with_all_params_overload_2(self, client: Anthropic) -> None:
- message_stream = client.beta.messages.create(
+ message_stream = client.messages.create(
max_tokens=1024,
messages=[
{
@@ -128,7 +128,7 @@ def test_method_create_with_all_params_overload_2(self, client: Anthropic) -> No
@parametrize
def test_raw_response_create_overload_2(self, client: Anthropic) -> None:
- response = client.beta.messages.with_raw_response.create(
+ response = client.messages.with_raw_response.create(
max_tokens=1024,
messages=[
{
@@ -146,7 +146,7 @@ def test_raw_response_create_overload_2(self, client: Anthropic) -> None:
@parametrize
def test_streaming_response_create_overload_2(self, client: Anthropic) -> None:
- with client.beta.messages.with_streaming_response.create(
+ with client.messages.with_streaming_response.create(
max_tokens=1024,
messages=[
{
@@ -171,7 +171,7 @@ class TestAsyncMessages:
@parametrize
async def test_method_create_overload_1(self, async_client: AsyncAnthropic) -> None:
- message = await async_client.beta.messages.create(
+ message = await async_client.messages.create(
max_tokens=1024,
messages=[
{
@@ -185,7 +185,7 @@ async def test_method_create_overload_1(self, async_client: AsyncAnthropic) -> N
@parametrize
async def test_method_create_with_all_params_overload_1(self, async_client: AsyncAnthropic) -> None:
- message = await async_client.beta.messages.create(
+ message = await async_client.messages.create(
max_tokens=1024,
messages=[
{
@@ -206,7 +206,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
@parametrize
async def test_raw_response_create_overload_1(self, async_client: AsyncAnthropic) -> None:
- response = await async_client.beta.messages.with_raw_response.create(
+ response = await async_client.messages.with_raw_response.create(
max_tokens=1024,
messages=[
{
@@ -224,7 +224,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncAnthropic
@parametrize
async def test_streaming_response_create_overload_1(self, async_client: AsyncAnthropic) -> None:
- async with async_client.beta.messages.with_streaming_response.create(
+ async with async_client.messages.with_streaming_response.create(
max_tokens=1024,
messages=[
{
@@ -244,7 +244,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncAnt
@parametrize
async def test_method_create_overload_2(self, async_client: AsyncAnthropic) -> None:
- message_stream = await async_client.beta.messages.create(
+ message_stream = await async_client.messages.create(
max_tokens=1024,
messages=[
{
@@ -259,7 +259,7 @@ async def test_method_create_overload_2(self, async_client: AsyncAnthropic) -> N
@parametrize
async def test_method_create_with_all_params_overload_2(self, async_client: AsyncAnthropic) -> None:
- message_stream = await async_client.beta.messages.create(
+ message_stream = await async_client.messages.create(
max_tokens=1024,
messages=[
{
@@ -280,7 +280,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
@parametrize
async def test_raw_response_create_overload_2(self, async_client: AsyncAnthropic) -> None:
- response = await async_client.beta.messages.with_raw_response.create(
+ response = await async_client.messages.with_raw_response.create(
max_tokens=1024,
messages=[
{
@@ -298,7 +298,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncAnthropic
@parametrize
async def test_streaming_response_create_overload_2(self, async_client: AsyncAnthropic) -> None:
- async with async_client.beta.messages.with_streaming_response.create(
+ async with async_client.messages.with_streaming_response.create(
max_tokens=1024,
messages=[
{
diff --git a/tests/lib/streaming/test_messages.py b/tests/lib/streaming/test_messages.py
index 561a6617..1acbb26f 100644
--- a/tests/lib/streaming/test_messages.py
+++ b/tests/lib/streaming/test_messages.py
@@ -10,8 +10,8 @@
from anthropic import Anthropic, AsyncAnthropic
from anthropic.lib.streaming import MessageStream, AsyncMessageStream
-from anthropic.types.beta.message import Message
-from anthropic.types.beta.message_stream_event import MessageStreamEvent
+from anthropic.types.message import Message
+from anthropic.types.message_stream_event import MessageStreamEvent
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
api_key = "my-anthropic-api-key"
@@ -113,7 +113,7 @@ class TestSyncMessages:
def test_basic_response(self, respx_mock: MockRouter) -> None:
respx_mock.post("/v1/messages").mock(return_value=httpx.Response(200, content=basic_response()))
- with client.beta.messages.stream(
+ with client.messages.stream(
max_tokens=1024,
messages=[
{
@@ -130,7 +130,7 @@ def test_basic_response(self, respx_mock: MockRouter) -> None:
def test_context_manager(self, respx_mock: MockRouter) -> None:
respx_mock.post("/v1/messages").mock(return_value=httpx.Response(200, content=basic_response()))
- with client.beta.messages.stream(
+ with client.messages.stream(
max_tokens=1024,
messages=[
{
@@ -152,7 +152,7 @@ class TestAsyncMessages:
async def test_basic_response(self, respx_mock: MockRouter) -> None:
respx_mock.post("/v1/messages").mock(return_value=httpx.Response(200, content=to_async_iter(basic_response())))
- async with async_client.beta.messages.stream(
+ async with async_client.messages.stream(
max_tokens=1024,
messages=[
{
@@ -170,7 +170,7 @@ async def test_basic_response(self, respx_mock: MockRouter) -> None:
async def test_context_manager(self, respx_mock: MockRouter) -> None:
respx_mock.post("/v1/messages").mock(return_value=httpx.Response(200, content=to_async_iter(basic_response())))
- async with async_client.beta.messages.stream(
+ async with async_client.messages.stream(
max_tokens=1024,
messages=[
{
diff --git a/tests/test_client.py b/tests/test_client.py
index 864b2dd1..6d214499 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -803,17 +803,22 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str
@mock.patch("anthropic._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
- respx_mock.post("/v1/complete").mock(side_effect=httpx.TimeoutException("Test timeout error"))
+ respx_mock.post("/v1/messages").mock(side_effect=httpx.TimeoutException("Test timeout error"))
with pytest.raises(APITimeoutError):
self.client.post(
- "/v1/complete",
+ "/v1/messages",
body=cast(
object,
dict(
- max_tokens_to_sample=300,
+ max_tokens=1024,
+ messages=[
+ {
+ "role": "user",
+ "content": "Where can I get a good coffee in my neighbourhood?",
+ }
+ ],
model="claude-2.1",
- prompt="\n\nHuman:Where can I get a good coffee in my neighbourhood?\n\nAssistant:",
),
),
cast_to=httpx.Response,
@@ -825,17 +830,22 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> No
@mock.patch("anthropic._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
- respx_mock.post("/v1/complete").mock(return_value=httpx.Response(500))
+ respx_mock.post("/v1/messages").mock(return_value=httpx.Response(500))
with pytest.raises(APIStatusError):
self.client.post(
- "/v1/complete",
+ "/v1/messages",
body=cast(
object,
dict(
- max_tokens_to_sample=300,
+ max_tokens=1024,
+ messages=[
+ {
+ "role": "user",
+ "content": "Where can I get a good coffee in my neighbourhood?",
+ }
+ ],
model="claude-2.1",
- prompt="\n\nHuman:Where can I get a good coffee in my neighbourhood?\n\nAssistant:",
),
),
cast_to=httpx.Response,
@@ -1611,17 +1621,22 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte
@mock.patch("anthropic._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
- respx_mock.post("/v1/complete").mock(side_effect=httpx.TimeoutException("Test timeout error"))
+ respx_mock.post("/v1/messages").mock(side_effect=httpx.TimeoutException("Test timeout error"))
with pytest.raises(APITimeoutError):
await self.client.post(
- "/v1/complete",
+ "/v1/messages",
body=cast(
object,
dict(
- max_tokens_to_sample=300,
+ max_tokens=1024,
+ messages=[
+ {
+ "role": "user",
+ "content": "Where can I get a good coffee in my neighbourhood?",
+ }
+ ],
model="claude-2.1",
- prompt="\n\nHuman:Where can I get a good coffee in my neighbourhood?\n\nAssistant:",
),
),
cast_to=httpx.Response,
@@ -1633,17 +1648,22 @@ async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter)
@mock.patch("anthropic._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
- respx_mock.post("/v1/complete").mock(return_value=httpx.Response(500))
+ respx_mock.post("/v1/messages").mock(return_value=httpx.Response(500))
with pytest.raises(APIStatusError):
await self.client.post(
- "/v1/complete",
+ "/v1/messages",
body=cast(
object,
dict(
- max_tokens_to_sample=300,
+ max_tokens=1024,
+ messages=[
+ {
+ "role": "user",
+ "content": "Where can I get a good coffee in my neighbourhood?",
+ }
+ ],
model="claude-2.1",
- prompt="\n\nHuman:Where can I get a good coffee in my neighbourhood?\n\nAssistant:",
),
),
cast_to=httpx.Response,