Skip to content

Commit

Permalink
feat(api): messages is generally available (#343)
Browse files Browse the repository at this point in the history
  • Loading branch information
stainless-bot authored Feb 12, 2024
1 parent 78469ad commit f682594
Show file tree
Hide file tree
Showing 35 changed files with 249 additions and 358 deletions.
168 changes: 107 additions & 61 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,19 +23,25 @@ pip install anthropic
The full API of this library can be found in [api.md](api.md).

```python
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
import os
from anthropic import Anthropic

anthropic = Anthropic(
# defaults to os.environ.get("ANTHROPIC_API_KEY")
api_key="my api key",
client = Anthropic(
# This is the default and can be omitted
api_key=os.environ.get("ANTHROPIC_API_KEY"),
)

completion = anthropic.completions.create(
message = client.messages.create(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "How does a court case get to the Supreme Court?",
}
],
model="claude-2.1",
max_tokens_to_sample=300,
prompt=f"{HUMAN_PROMPT} how does a court case get to the Supreme Court?{AI_PROMPT}",
)
print(completion.completion)
print(message.content)
```

While you can provide an `api_key` keyword argument,
Expand All @@ -48,21 +54,28 @@ so that your API Key is not stored in source control.
Simply import `AsyncAnthropic` instead of `Anthropic` and use `await` with each API call:

```python
from anthropic import AsyncAnthropic, HUMAN_PROMPT, AI_PROMPT
import os
import asyncio
from anthropic import AsyncAnthropic

anthropic = AsyncAnthropic(
# defaults to os.environ.get("ANTHROPIC_API_KEY")
api_key="my api key",
client = AsyncAnthropic(
# This is the default and can be omitted
api_key=os.environ.get("ANTHROPIC_API_KEY"),
)


async def main():
completion = await anthropic.completions.create(
async def main() -> None:
message = await client.messages.create(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "How does a court case get to the Supreme Court?",
}
],
model="claude-2.1",
max_tokens_to_sample=300,
prompt=f"{HUMAN_PROMPT} how does a court case get to the Supreme Court?{AI_PROMPT}",
)
print(completion.completion)
print(message.content)


asyncio.run(main())
Expand All @@ -75,35 +88,45 @@ Functionality between the synchronous and asynchronous clients is otherwise iden
We provide support for streaming responses using Server-Sent Events (SSE).

```python
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
from anthropic import Anthropic

anthropic = Anthropic()
client = Anthropic()

stream = anthropic.completions.create(
prompt=f"{HUMAN_PROMPT} Your prompt here{AI_PROMPT}",
max_tokens_to_sample=300,
stream = client.messages.create(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "your prompt here",
}
],
model="claude-2.1",
stream=True,
)
for completion in stream:
print(completion.completion, end="", flush=True)
for event in stream:
print(event.type)
```

The async client uses the exact same interface.

```python
from anthropic import AsyncAnthropic, HUMAN_PROMPT, AI_PROMPT
from anthropic import AsyncAnthropic

anthropic = AsyncAnthropic()
client = AsyncAnthropic()

stream = await anthropic.completions.create(
prompt=f"{HUMAN_PROMPT} Your prompt here{AI_PROMPT}",
max_tokens_to_sample=300,
stream = await client.messages.create(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "your prompt here",
}
],
model="claude-2.1",
stream=True,
)
async for completion in stream:
print(completion.completion, end="", flush=True)
async for event in stream:
print(event.type)
```

### Streaming Helpers
Expand All @@ -117,7 +140,7 @@ from anthropic import AsyncAnthropic
client = AsyncAnthropic()

async def main() -> None:
async with client.beta.messages.stream(
async with client.messages.stream(
max_tokens=1024,
messages=[
{
Expand All @@ -137,9 +160,9 @@ async def main() -> None:
asyncio.run(main())
```

Streaming with `client.beta.messages.stream(...)` exposes [various helpers for your convenience](helpers.md) including event handlers and accumulation.
Streaming with `client.messages.stream(...)` exposes [various helpers for your convenience](helpers.md) including event handlers and accumulation.

Alternatively, you can use `client.beta.messages.create(..., stream=True)` which only returns an async iterable of the events in the stream and thus uses less memory (it does not build up a final message object for you).
Alternatively, you can use `client.messages.create(..., stream=True)` which only returns an async iterable of the events in the stream and thus uses less memory (it does not build up a final message object for you).

## AWS Bedrock

Expand Down Expand Up @@ -195,13 +218,19 @@ All errors inherit from `anthropic.APIError`.

```python
import anthropic
from anthropic import Anthropic

client = anthropic.Anthropic()
client = Anthropic()

try:
client.completions.create(
prompt=f"{anthropic.HUMAN_PROMPT} Your prompt here{anthropic.AI_PROMPT}",
max_tokens_to_sample=300,
client.messages.create(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "your prompt here",
}
],
model="claude-2.1",
)
except anthropic.APIConnectionError as e:
Expand Down Expand Up @@ -237,18 +266,23 @@ Connection errors (for example, due to a network connectivity problem), 408 Requ
You can use the `max_retries` option to configure or disable retry settings:

```python
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
from anthropic import Anthropic

# Configure the default for all requests:
anthropic = Anthropic(
client = Anthropic(
# default is 2
max_retries=0,
)

# Or, configure per-request:
anthropic.with_options(max_retries=5).completions.create(
prompt=f"{HUMAN_PROMPT} Can you help me effectively ask for a raise at work?{AI_PROMPT}",
max_tokens_to_sample=300,
client.with_options(max_retries=5).messages.create(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "Can you help me effectively ask for a raise at work?",
}
],
model="claude-2.1",
)
```
Expand All @@ -259,23 +293,28 @@ By default requests time out after 10 minutes. You can configure this with a `ti
which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object:

```python
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
from anthropic import Anthropic

# Configure the default for all requests:
anthropic = Anthropic(
# default is 10 minutes
client = Anthropic(
# 20 seconds (default is 10 minutes)
timeout=20.0,
)

# More granular control:
anthropic = Anthropic(
client = Anthropic(
timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0),
)

# Override per-request:
anthropic.with_options(timeout=5 * 1000).completions.create(
prompt=f"{HUMAN_PROMPT} Where can I get a good coffee in my neighbourhood?{AI_PROMPT}",
max_tokens_to_sample=300,
client.with_options(timeout=5 * 1000).messages.create(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "Where can I get a good coffee in my neighbourhood?",
}
],
model="claude-2.1",
)
```
Expand Down Expand Up @@ -329,19 +368,21 @@ if response.my_field is None:
The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g.,

```py
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT

anthropic = Anthropic()
from anthropic import Anthropic

response = anthropic.completions.with_raw_response.create(
client = Anthropic()
response = client.messages.with_raw_response.create(
max_tokens=1024,
messages=[{
"role": "user",
"content": "Where can I get a good coffee in my neighbourhood?",
}],
model="claude-2.1",
max_tokens_to_sample=300,
prompt=f"{HUMAN_PROMPT} how does a court case get to the Supreme Court?{AI_PROMPT}",
)
print(response.headers.get('X-My-Header'))

completion = response.parse() # get the object that `completions.create()` would have returned
print(completion.completion)
message = response.parse() # get the object that `messages.create()` would have returned
print(message.content)
```

These methods return a [`LegacyAPIResponse`](https://github.com/anthropics/anthropic-sdk-python/tree/main/src/anthropic/_legacy_response.py) object. This is a legacy class as we're changing it slightly in the next major version.
Expand All @@ -362,10 +403,15 @@ To stream the response body, use `.with_streaming_response` instead, which requi
As such, `.with_streaming_response` methods return a different [`APIResponse`](https://github.com/anthropics/anthropic-sdk-python/tree/main/src/anthropic/_response.py) object, and the async client returns an [`AsyncAPIResponse`](https://github.com/anthropics/anthropic-sdk-python/tree/main/src/anthropic/_response.py) object.

```python
with client.completions.with_streaming_response.create(
max_tokens_to_sample=300,
with client.messages.with_streaming_response.create(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "Where can I get a good coffee in my neighbourhood?",
}
],
model="claude-2.1",
prompt=f"{HUMAN_PROMPT} Where can I get a good coffee in my neighbourhood?{AI_PROMPT}",
) as response:
print(response.headers.get("X-My-Header"))

Expand Down
10 changes: 4 additions & 6 deletions api.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,12 @@ Methods:

- <code title="post /v1/complete">client.completions.<a href="./src/anthropic/resources/completions.py">create</a>(\*\*<a href="src/anthropic/types/completion_create_params.py">params</a>) -> <a href="./src/anthropic/types/completion.py">Completion</a></code>

# Beta

## Messages
# Messages

Types:

```python
from anthropic.types.beta import (
from anthropic.types import (
ContentBlock,
ContentBlockDeltaEvent,
ContentBlockStartEvent,
Expand All @@ -43,5 +41,5 @@ from anthropic.types.beta import (

Methods:

- <code title="post /v1/messages">client.beta.messages.<a href="./src/anthropic/resources/beta/messages.py">create</a>(\*\*<a href="src/anthropic/types/beta/message_create_params.py">params</a>) -> <a href="./src/anthropic/types/beta/message.py">Message</a></code>
- <code>client.beta.messages.<a href="./src/anthropic/resources/beta/messages.py">stream</a>(\*args) -> MessageStreamManager[MessageStream] | MessageStreamManager[MessageStreamT]</code>
- <code title="post /v1/messages">client.messages.<a href="./src/anthropic/resources/messages.py">create</a>(\*\*<a href="src/anthropic/types/message_create_params.py">params</a>) -> <a href="./src/anthropic/types/message.py">Message</a></code>
- <code>client.messages.<a href="./src/anthropic/resources/messages.py">stream</a>(\*args) -> MessageStreamManager[MessageStream] | MessageStreamManager[MessageStreamT]</code>
4 changes: 2 additions & 2 deletions examples/messages.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

client = Anthropic()

response = client.beta.messages.create(
response = client.messages.create(
max_tokens=1024,
messages=[
{
Expand All @@ -14,7 +14,7 @@
)
print(response)

response2 = client.beta.messages.create(
response2 = client.messages.create(
max_tokens=1024,
messages=[
{
Expand Down
2 changes: 1 addition & 1 deletion examples/messages_stream.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@


async def main() -> None:
async with client.beta.messages.stream(
async with client.messages.stream(
max_tokens=1024,
messages=[
{
Expand Down
4 changes: 2 additions & 2 deletions examples/messages_stream_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from typing_extensions import override

from anthropic import AsyncAnthropic, AsyncMessageStream
from anthropic.types.beta import MessageStreamEvent
from anthropic.types import MessageStreamEvent

client = AsyncAnthropic()

Expand All @@ -14,7 +14,7 @@ async def on_stream_event(self, event: MessageStreamEvent) -> None:


async def main() -> None:
async with client.beta.messages.stream(
async with client.messages.stream(
max_tokens=1024,
messages=[
{
Expand Down
4 changes: 2 additions & 2 deletions examples/vertex.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ def sync_client() -> None:

client = AnthropicVertex()

message = client.beta.messages.create(
message = client.messages.create(
model="claude-instant-1p2",
max_tokens=100,
messages=[
Expand All @@ -26,7 +26,7 @@ async def async_client() -> None:

client = AsyncAnthropicVertex()

message = await client.beta.messages.create(
message = await client.messages.create(
model="claude-instant-1p2",
max_tokens=1024,
messages=[
Expand Down
Loading

0 comments on commit f682594

Please sign in to comment.