Incorporating llmclient.CommonLLMNames #207

Merged · 2 commits · Jan 14, 2025
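The net change in this PR: the locally pinned `DefaultLLMModelNames` enum in `ldp/agent/__init__.py` (and the tests' `CILLMModelNames`) is dropped, and the agents take their default model name from `llmclient.CommonLLMNames` instead. A minimal before/after sketch of the agent default, assuming only what the diff shows (`CommonLLMNames.GPT_4O` and the retained `DEFAULT_LLM_COMPLETION_TIMEOUT`):

```python
# Before: ldp pinned its own default model name.
from enum import StrEnum


class DefaultLLMModelNames(StrEnum):
    """Defaults for LLM models, pin exact versions for performance stability."""

    OPENAI = "gpt-4o-2024-08-06"  # Cheap, fast, and decent


# After: the shared pinned name comes from llmclient; the timeout stays in ldp.agent.
from llmclient import CommonLLMNames

DEFAULT_LLM_COMPLETION_TIMEOUT = 120  # seconds, unchanged by this PR

llm_model = {
    "model": CommonLLMNames.GPT_4O.value,
    "temperature": 0.1,
    "timeout": DEFAULT_LLM_COMPLETION_TIMEOUT,
}
```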
10 changes: 0 additions & 10 deletions ldp/agent/__init__.py
@@ -1,12 +1,3 @@
from enum import StrEnum


class DefaultLLMModelNames(StrEnum):
"""Defaults for LLM models, pin exact versions for performance stability."""

OPENAI = "gpt-4o-2024-08-06" # Cheap, fast, and decent


# Lower than LiteLLM's 10-min default: https://github.com/BerriAI/litellm/blob/v1.48.10/litellm/main.py#L859
DEFAULT_LLM_COMPLETION_TIMEOUT = 120 # seconds

@@ -23,7 +14,6 @@ class DefaultLLMModelNames(StrEnum):
"DEFAULT_LLM_COMPLETION_TIMEOUT",
"Agent",
"AgentConfig",
"DefaultLLMModelNames",
"HTTPAgentClient",
"MemoryAgent",
"ReActAgent",
5 changes: 3 additions & 2 deletions ldp/agent/react_agent.py
@@ -8,6 +8,7 @@
ToolRequestMessage,
ToolResponseMessage,
)
from llmclient import CommonLLMNames
from pydantic import BaseModel, ConfigDict, Field
from tenacity import (
Future,
@@ -29,7 +30,7 @@
ToolDescriptionMethods,
)

from . import DEFAULT_LLM_COMPLETION_TIMEOUT, DefaultLLMModelNames
from . import DEFAULT_LLM_COMPLETION_TIMEOUT
from .agent import Agent
from .simple_agent import SimpleAgentState

@@ -81,7 +82,7 @@ class ReActAgent(BaseModel, Agent[SimpleAgentState]):

llm_model: dict[str, Any] = Field(
default={
"model": DefaultLLMModelNames.OPENAI.value,
"model": CommonLLMNames.GPT_4O.value,
"temperature": 0.1,
"logprobs": True,
"top_logprobs": 1,
5 changes: 3 additions & 2 deletions ldp/agent/simple_agent.py
@@ -4,12 +4,13 @@

from aviary.core import Message, Tool, ToolRequestMessage, ToolResponseMessage
from aviary.message import EnvStateMessage
from llmclient import CommonLLMNames
from pydantic import BaseModel, ConfigDict, Field

from ldp.graph import ConfigOp, LLMCallOp, OpResult, compute_graph
from ldp.llms import prepend_sys

from . import DEFAULT_LLM_COMPLETION_TIMEOUT, DefaultLLMModelNames
from . import DEFAULT_LLM_COMPLETION_TIMEOUT
from .agent import Agent


@@ -83,7 +84,7 @@ class SimpleAgent(BaseModel, Agent[SimpleAgentState]):

llm_model: dict[str, Any] = Field(
default={
"model": DefaultLLMModelNames.OPENAI.value,
"model": CommonLLMNames.GPT_4O.value,
"temperature": 0.1,
"timeout": DEFAULT_LLM_COMPLETION_TIMEOUT,
},
5 changes: 3 additions & 2 deletions ldp/agent/tree_of_thoughts_agent.py
@@ -17,12 +17,13 @@
from typing import Any

from aviary.core import Message, Tool, ToolCall, ToolRequestMessage
from llmclient import CommonLLMNames
from pydantic import BaseModel, ConfigDict, Field

from ldp.graph import FxnOp, LLMCallOp, OpResult, compute_graph, get_call_id, op_call
from ldp.llms import prepend_sys

from . import DEFAULT_LLM_COMPLETION_TIMEOUT, DefaultLLMModelNames
from . import DEFAULT_LLM_COMPLETION_TIMEOUT
from .agent import Agent
from .simple_agent import SimpleAgentState

@@ -43,7 +44,7 @@ class TreeofThoughtsAgent(BaseModel, Agent[SimpleAgentState]):

llm_model: dict[str, Any] = Field(
default={
"model": DefaultLLMModelNames.OPENAI.value,
"model": CommonLLMNames.GPT_4O.value,
"temperature": 0.1,
"timeout": DEFAULT_LLM_COMPLETION_TIMEOUT,
},
9 changes: 0 additions & 9 deletions tests/__init__.py
@@ -1,13 +1,4 @@
import pathlib
from enum import StrEnum


class CILLMModelNames(StrEnum):
"""Models to use for generic CI testing."""

ANTHROPIC = "claude-3-haiku-20240307" # Cheap and not Anthropic's cutting edge
OPENAI = "gpt-4o-mini-2024-07-18" # Cheap and not OpenAI's cutting edge


TESTS_DIR = pathlib.Path(__file__).parent
CASSETTES_DIR = TESTS_DIR / "cassettes"
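With `CILLMModelNames` gone, the CI tests presumably pull their cheap model names from `llmclient` as well. A hedged sketch of that usage follows; the member name shown (`CommonLLMNames.OPENAI_TEST`) is an assumption not confirmed by this diff, although the re-recorded cassettes below do pin `gpt-4o-mini-2024-07-18`:

```python
# Sketch only: assumes llmclient exposes a CI-oriented member such as
# CommonLLMNames.OPENAI_TEST; the replacement call sites are not part of this diff.
from llmclient import CommonLLMNames

ci_model = CommonLLMNames.OPENAI_TEST.value  # expected to match "gpt-4o-mini-2024-07-18"
```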
40 changes: 23 additions & 17 deletions tests/cassettes/TestLLMCallOp.test_compute_logprob[0.0].yaml
@@ -1,8 +1,8 @@
interactions:
- request:
body:
'{"messages": [{"role": "user", "content": "Hello"}], "model": "gpt-4o-mini",
"temperature": 0.0}'
'{"messages": [{"role": "user", "content": "Hello"}], "model": "gpt-4o-mini-2024-07-18",
"n": 1, "temperature": 0.0}'
headers:
accept:
- application/json
@@ -11,13 +11,13 @@ interactions:
connection:
- keep-alive
content-length:
- "96"
- "115"
content-type:
- application/json
host:
- api.openai.com
user-agent:
- AsyncOpenAI/Python 1.54.3
- AsyncOpenAI/Python 1.59.3
x-stainless-arch:
- arm64
x-stainless-async:
@@ -27,7 +27,7 @@
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.54.3
- 1.59.3
x-stainless-raw-response:
- "true"
x-stainless-retry-count:
@@ -41,29 +41,35 @@
response:
body:
string: !!binary |
H4sIAAAAAAAAA4xSQW7bMBC86xVbnq1CclzY8SVoUqApih6SALkUhUCRK5kJxWXJVVsn8N8DSo7l
IAnQCw8zO8OZxT5mAMJosQahNpJV523++fpG33+6+tL5W+Sbi/rH1cU2nNOf718f6t9ilhRU36Hi
Z9VHRZ23yIbcSKuAkjG5lsuTsixWJ+V8IDrSaJOs9ZwvKO+MM/m8mC/yYpmXq716Q0ZhFGv4mQEA
PA5vyuk0/hNrKGbPSIcxyhbF+jAEIALZhAgZo4ksHYvZRCpyjG6IfonW0ge4pL+gpINvMApgSz0w
abk9OxYGbPooU3jXW7vHd4ckllofqI57/oA3xpm4qQLKSC79Gpm8GNhdBvBraNy/KCF8oM5zxXSP
LhmuRjcxrXniTvccE0s7weVy9oZXpZGlsfFoX0JJtUE9Kaflyl4bOiKyo8avs7zlPbY2rv0f+4lQ
Cj2jrnxAbdTLvtNYwHSD740dNjwEFnEbGbuqMa7F4IMZL6DxVVHLQpfzRVOKbJc9AQAA//8DAOqA
rbEPAwAA
H4sIAAAAAAAAA4xSQW7bMBC86xVbnq1Clt3Y9aUIerGvLRAgCAKBJlcyG4rLkqs2RuC/B5RsS0Zb
oBcdZnZGM8t9ywCE0WIDQh0kq9bb/N43r7ufW6Tu8Vv8XET6Tg+tK7T+uii0mCUF7X+g4ovqo6LW
W2RDbqBVQMmYXOerxd16vSjnZU+0pNEmWeM5X1LeGmfysiiXebHK5+uz+kBGYRQbeMoAAN76b8rp
NL6KDRSzC9JijLJBsbkOAYhANiFCxmgiS8diNpKKHKPro2/RWvoAW/oNSjrYwSCAI3XApOXxy1QY
sO6iTOFdZ+0ZP12TWGp8oH0881e8Ns7EQxVQRnLpr5HJi549ZQDPfePupoTwgVrPFdMLumS4HtzE
uOaRm5+XIZhY2gl+Ed2YVRpZGhsnCxNKqgPqUTluV3ba0ITIJpX/DPM376G2cc3/2I+EUugZdeUD
aqNuC49jAdMR/mvsuuI+sIgYfhmFFRsM6Rk01rKzw2mIeIyMbVUb12DwwQz3UftqVaJeyf2npRLZ
KXsHAAD//wMAEDvPji0DAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8df9522b1bb9176b-SJC
- 90200eeee9d7174e-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 08 Nov 2024 23:25:13 GMT
- Tue, 14 Jan 2025 19:33:33 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=UQ5laTP9Pnzbx_cAQ0fhk0VBG915_xMVIUeFrx2yVx0-1736883213-1.0.1.1-JCTt9uio_P3O5iDe8YaMBuZUFyrhlGDG8S.DahTCxxbe1WsS6hHltTsfVZ0znl.y1nkpb9NSlv5GHuo1SfYd9Q;
path=/; expires=Tue, 14-Jan-25 20:03:33 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=zK8680ITY0A0uyqUIYD4RjgzO5O1h13Zw2DSCcLdaTQ-1736883213044-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
@@ -75,7 +81,7 @@
openai-organization:
- future-house-xr4tdh
openai-processing-ms:
- "308"
- "275"
openai-version:
- "2020-10-01"
strict-transport-security:
@@ -93,7 +99,7 @@
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_578a56bf5fc1a231deb3d6bcba377206
- req_b67f9d10a1a62ae04ccdc3c7fa8bf5ad
status:
code: 200
message: OK
34 changes: 17 additions & 17 deletions tests/cassettes/TestLLMCallOp.test_compute_logprob[0.5].yaml
@@ -1,8 +1,8 @@
interactions:
- request:
body:
'{"messages": [{"role": "user", "content": "Hello"}], "model": "gpt-4o-mini",
"temperature": 0.5}'
'{"messages": [{"role": "user", "content": "Hello"}], "model": "gpt-4o-mini-2024-07-18",
"n": 1, "temperature": 0.5}'
headers:
accept:
- application/json
@@ -11,13 +11,13 @@ interactions:
connection:
- keep-alive
content-length:
- "96"
- "115"
content-type:
- application/json
host:
- api.openai.com
user-agent:
- AsyncOpenAI/Python 1.54.3
- AsyncOpenAI/Python 1.59.3
x-stainless-arch:
- arm64
x-stainless-async:
@@ -27,7 +27,7 @@
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.54.3
- 1.59.3
x-stainless-raw-response:
- "true"
x-stainless-retry-count:
@@ -41,27 +41,27 @@
response:
body:
string: !!binary |
H4sIAAAAAAAAA4xSQW7bMBC86xVbnq1CUpza8aXILb3GMAykKASaXMl0KS5BrtI6gf9eUHIsB02B
XniY2RnOLPY1AxBGixUItZesOm/z+8e1ru7W2+cX3j7q9a0+3h82T4et2WyepJglBe0OqPhN9VlR
5y2yITfSKqBkTK7l4qYsi2X1ZTkQHWm0SdZ6zueUd8aZvCqqeV4s8nJ5Vu/JKIxiBd8zAIDX4U05
ncbfYgXF7A3pMEbZolhdhgBEIJsQIWM0kaVjMZtIRY7RDdEf0Fr6BA/0C5R08A1GARypByYtj1+v
hQGbPsoU3vXWnvHTJYml1gfaxTN/wRvjTNzXAWUkl36NTF4M7CkD+DE07t+VED5Q57lm+okuGS5H
NzGteeLuzhwTSzvB5WL2gVetkaWx8WpfQkm1Rz0pp+XKXhu6IrKrxn9n+ch7bG1c+z/2E6EUekZd
+4DaqPd9p7GA6Qb/NXbZ8BBYxGNk7OrGuBaDD2a8gMbXxU4WuqzmTSmyU/YHAAD//wMAJKyyfA8D
AAA=
H4sIAAAAAAAAA4xSQW7bMBC86xXbPVuFZQWJ7EuRS5ukQJB7UAg0uZKZUFyCpNK4gf9eULItGW2B
XnSY2RnNLPcjA0CtcAModyLKzpn81rXvD5X5/uur2bN/UrJ4fFwXas3Ft9cHXCQFb19IxpPqs+TO
GYqa7UhLTyJSci1uyuuqKldFORAdKzJJ1rqYX3Heaavz1XJ1lS9v8qI6qnesJQXcwHMGAPAxfFNO
q+gdN7BcnJCOQhAt4eY8BICeTUJQhKBDFDbiYiIl20h2iH5HxvAnuOOfIIWFexgFsOceIiux/zIX
emr6IFJ42xtzxA/nJIZb53kbjvwZb7TVYVd7EoFt+muI7HBgDxnAj6Fxf1ECnefOxTryK9lkWI1u
OK154orjMjByFGaGn0QXZrWiKLQJs4WhFHJHalJO2xW90jwjslnlP8P8zXusrW37P/YTISW5SKp2
npSWl4WnMU/pCP81dl7xEBgD+TctqY6afHoGRY3ozXgaGPYhUlc32rbkndfjfTSu3qqqLFfr5rrE
7JD9BgAA//8DAHzPNyctAwAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8df95113983fcef5-SJC
- 90200ef4e82ceb20-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 08 Nov 2024 23:24:28 GMT
- Tue, 14 Jan 2025 19:33:34 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -75,7 +75,7 @@
openai-organization:
- future-house-xr4tdh
openai-processing-ms:
- "283"
- "330"
openai-version:
- "2020-10-01"
strict-transport-security:
@@ -93,7 +93,7 @@
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_9782ec5fccde956ecd7d4c363b44bc91
- req_309cd40a2787f7b08cfd1d6a7e894490
status:
code: 200
message: OK
34 changes: 17 additions & 17 deletions tests/cassettes/TestLLMCallOp.test_compute_logprob[1.0].yaml
@@ -1,8 +1,8 @@
interactions:
- request:
body:
'{"messages": [{"role": "user", "content": "Hello"}], "model": "gpt-4o-mini",
"temperature": 1.0}'
'{"messages": [{"role": "user", "content": "Hello"}], "model": "gpt-4o-mini-2024-07-18",
"n": 1, "temperature": 1.0}'
headers:
accept:
- application/json
@@ -11,13 +11,13 @@
connection:
- keep-alive
content-length:
- "96"
- "115"
content-type:
- application/json
host:
- api.openai.com
user-agent:
- AsyncOpenAI/Python 1.54.3
- AsyncOpenAI/Python 1.59.3
x-stainless-arch:
- arm64
x-stainless-async:
@@ -27,7 +27,7 @@
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.54.3
- 1.59.3
x-stainless-raw-response:
- "true"
x-stainless-retry-count:
@@ -41,27 +41,27 @@
response:
body:
string: !!binary |
H4sIAAAAAAAAA4xSXWvcMBB896/Y6vlc7Luj9/ESjkBJSOhDQgslFKOT1rYaWSukNe0R7r8X2Zfz
habQFz3M7Ixmln3JAITRYgtCtZJV522+e3jUi89fNt923xfX9/Lhsfm6snehXe9M3YlZUtD+Jyp+
VX1U1HmLbMiNtAooGZNruVqUZbGef9oMREcabZI1nvMl5Z1xJp8X82VerPJyfVK3ZBRGsYWnDADg
ZXhTTqfxt9hCMXtFOoxRNii25yEAEcgmRMgYTWTpWMwmUpFjdEP0G7SWPsAN/QIlHdzCKIAD9cCk
5eHqUhiw7qNM4V1v7Qk/npNYanygfTzxZ7w2zsS2CigjufRrZPJiYI8ZwI+hcf+mhPCBOs8V0zO6
ZLge3cS05onbnDgmlnaCy9XsHa9KI0tj48W+hJKqRT0pp+XKXhu6ILKLxn9nec97bG1c8z/2E6EU
ekZd+YDaqLd9p7GA6Qb/NXbe8BBYxENk7KrauAaDD2a8gNpXxV4Wupwv61Jkx+wPAAAA//8DAC+9
7CsPAwAA
H4sIAAAAAAAAAwAAAP//jFJBbtswELzrFVuercK2DFvxpeihgIPklBbooSgEmlxJTCkuQa6SGIH/
XlByLAdJgFx0mNkZzSz3OQMQRostCNVKVp23+XffPN0UptmpH79/3t7fXt9R+SDXq8f9rv0lZklB
+3tU/KL6qqjzFtmQG2kVUDIm18WmWJdlsVysBqIjjTbJGs/5ivLOOJMv58tVPt/ki/KkbskojGIL
fzIAgOfhm3I6jU9iC/PZC9JhjLJBsT0PAYhANiFCxmgiS8diNpGKHKMbou/QWvoCO3oEJR1cwyiA
A/XApOXh26UwYN1HmcK73toTfjwnsdT4QPt44s94bZyJbRVQRnLpr5HJi4E9ZgB/h8b9qxLCB+o8
V0z/0CXDcnQT05on7urEMbG0E7zYzN7xqjSyNDZe7EsoqVrUk3Jaruy1oQsiu2j8Nst73mNr45rP
2E+EUugZdeUDaqNe953GAqYb/GjsvOEhsIgYHozCig2G9Aoaa9nb8TJEPETGrqqNazD4YMbzqH21
12VRLK/qdSGyY/YfAAD//wMA7U+cOiwDAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8df951193f79fafc-SJC
- 90200efb0f5feb26-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 08 Nov 2024 23:24:29 GMT
- Tue, 14 Jan 2025 19:33:35 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -75,7 +75,7 @@
openai-organization:
- future-house-xr4tdh
openai-processing-ms:
- "272"
- "399"
openai-version:
- "2020-10-01"
strict-transport-security:
@@ -93,7 +93,7 @@
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_8cba5ef357c8edab9b2a215420f0dbf5
- req_97dba0dc17a8d2572fe30083e06468b0
status:
code: 200
message: OK