Catch langchain agent errors #1539

Merged: 14 commits, Mar 11, 2024

Changes from all commits:
conda/environments/all_cuda-121_arch-x86_64.yaml (4 changes: 3 additions & 1 deletion)

@@ -93,7 +93,9 @@ dependencies:
 - pytorch=*=*cuda*
 - rapidjson=1.1.0
 - rdma-core>=48
+- requests
 - requests-cache=1.1
+- requests-toolbelt
 - s3fs=2023.12.2
 - scikit-build=0.17.6
 - scikit-learn=1.3.2
@@ -117,7 +119,7 @@ dependencies:
 - --find-links https://data.dgl.ai/wheels/cu121/repo.html
 - PyMuPDF==1.23.21
 - databricks-connect
-- dgl
+- dgl==2.0.0
 - dglgo
 - google-search-results==2.4
 - langchain==0.1.9
conda/environments/dev_cuda-121_arch-x86_64.yaml (2 changes: 2 additions & 0 deletions)

@@ -73,7 +73,9 @@ dependencies:
 - pytorch-cuda
 - pytorch=*=*cuda*
 - rapidjson=1.1.0
+- requests
 - requests-cache=1.1
+- requests-toolbelt
 - scikit-build=0.17.6
 - scikit-learn=1.3.2
 - sphinx
conda/environments/examples_cuda-121_arch-x86_64.yaml (4 changes: 3 additions & 1 deletion)

@@ -46,7 +46,9 @@ dependencies:
 - python=3.10
 - pytorch-cuda
 - pytorch=*=*cuda*
+- requests
 - requests-cache=1.1
+- requests-toolbelt
 - s3fs=2023.12.2
 - scikit-learn=1.3.2
 - sentence-transformers
@@ -61,7 +63,7 @@ dependencies:
 - --find-links https://data.dgl.ai/wheels/cu121/repo.html
 - PyMuPDF==1.23.21
 - databricks-connect
-- dgl
+- dgl==2.0.0
 - dglgo
 - google-search-results==2.4
 - langchain==0.1.9
conda/environments/runtime_cuda-121_arch-x86_64.yaml (2 changes: 2 additions & 0 deletions)

@@ -28,7 +28,9 @@ dependencies:
 - python=3.10
 - pytorch-cuda
 - pytorch=*=*cuda*
+- requests
 - requests-cache=1.1
+- requests-toolbelt
 - scikit-learn=1.3.2
 - sqlalchemy
 - tqdm=4
dependencies.yaml (4 changes: 3 additions & 1 deletion)

@@ -261,7 +261,9 @@ dependencies:
 - python-graphviz
 - pytorch-cuda
 - pytorch=*=*cuda*
+- requests
 - requests-cache=1.1
+- requests-toolbelt # Transitive dep needed by nemollm, specified here to ensure we get a compatible version
 - sqlalchemy
 - tqdm=4
 - typing_utils=0.1
@@ -311,7 +313,7 @@ dependencies:
 - pip:
 - --find-links https://data.dgl.ai/wheels/cu121/repo.html
 - --find-links https://data.dgl.ai/wheels-test/repo.html
-- dgl
+- dgl==2.0.0
 - dglgo

 example-llm-agents:
morpheus/llm/nodes/langchain_agent_node.py (11 changes: 8 additions & 3 deletions)

@@ -66,9 +66,14 @@ async def _run_single(self, **kwargs: dict[str, typing.Any]) -> dict[str, typing
             return results

         # We are not dealing with a list, so run single
-        return await self._agent_executor.arun(**kwargs)
-
-    async def execute(self, context: LLMContext) -> LLMContext:
+        try:
+            return await self._agent_executor.arun(**kwargs)
+        except Exception as e:
+            error_msg = f"Error running agent: {e}"
+            logger.exception(error_msg)
+            return error_msg
+
+    async def execute(self, context: LLMContext) -> LLMContext:  # pylint: disable=invalid-overridden-method

         input_dict = context.get_inputs()

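The net effect of this change: an exception raised by arun is logged via logger.exception and its message is returned as the node's output instead of propagating up the pipeline. A minimal sketch of that contract (the mocked executor below is illustrative, not part of the diff):

import asyncio
from unittest import mock

from morpheus.llm.nodes.langchain_agent_node import LangChainAgentNode

# Illustrative executor whose arun() always fails.
failing_executor = mock.MagicMock()
failing_executor.arun = mock.AsyncMock(side_effect=RuntimeError("unittest"))

node = LangChainAgentNode(agent_executor=failing_executor)

# The new except branch converts the exception into the formatted message.
assert asyncio.run(node._run_single(input="prompt")) == "Error running agent: unittest"
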
tests/_utils/llm.py (32 changes: 30 additions & 2 deletions)

@@ -87,7 +87,35 @@ def mk_mock_openai_response(messages: list[str]) -> mock.MagicMock:
     Creates a mocked openai.types.chat.chat_completion.ChatCompletion response with the given messages.
     """
     response = mock.MagicMock()
-    mock_choices = [_mk_mock_choice(message) for message in messages]
-    response.choices = mock_choices
+
+    response.choices = [_mk_mock_choice(message) for message in messages]
+    response.dict.return_value = {
+        "choices": [{
+            'message': {
+                'role': 'assistant', 'content': message
+            }
+        } for message in messages]
+    }

     return response
+
+
+def mk_mock_langchain_tool(responses: list[str]) -> mock.MagicMock:
+    """
+    Creates a mocked LangChainTestTool with the given responses.
+    """
+
+    # Langchain will call inspect.signature on the tool methods, typically mock objects don't have a signature,
+    # explicitly providing one here
+    async def _arun_spec(*_, **__):
+        pass
+
+    def run_spec(*_, **__):
+        pass
+
+    tool = mock.MagicMock()
+    tool.arun = mock.create_autospec(spec=_arun_spec)
+    tool.arun.side_effect = responses
+    tool.run = mock.create_autospec(run_spec)
+    tool.run.side_effect = responses
+    return tool
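A short usage sketch of the new helper (the calls below are illustrative, not part of the diff). create_autospec gives each mock method a real signature, which satisfies LangChain's inspect.signature check, and a list side_effect yields one canned response per call, while an exception instance makes every call raise, which test_execute_error below relies on:

from _utils.llm import mk_mock_langchain_tool

tool = mk_mock_langchain_tool(["lizard", "frog"])
assert tool.run("name a reptile") == "lizard"  # first canned response
assert tool.run("name a reptile") == "frog"    # a list side_effect advances per call

failing_tool = mk_mock_langchain_tool(RuntimeError("unittest"))
try:
    failing_tool.run("anything")  # an exception side_effect is raised, not returned
except RuntimeError as e:
    assert str(e) == "unittest"
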
tests/llm/nodes/test_langchain_agent_node.py (88 changes: 88 additions & 0 deletions)

@@ -16,8 +16,14 @@
 from unittest import mock

 import pytest
+from langchain.agents import AgentType
+from langchain.agents import Tool
+from langchain.agents import initialize_agent
+from langchain.chat_models import ChatOpenAI  # pylint: disable=no-name-in-module

 from _utils.llm import execute_node
+from _utils.llm import mk_mock_langchain_tool
+from _utils.llm import mk_mock_openai_response
 from morpheus.llm import LLMNodeBase
 from morpheus.llm.nodes.langchain_agent_node import LangChainAgentNode

@@ -50,8 +56,90 @@ def test_execute(
         expected_output: list,
         expected_calls: list[mock.call],
 ):
+    # Tests the execute method of the LangChainAgentNode with a mocked agent_executor
     mock_agent_executor.arun.return_value = arun_return

     node = LangChainAgentNode(agent_executor=mock_agent_executor)
     assert execute_node(node, **values) == expected_output
     mock_agent_executor.arun.assert_has_calls(expected_calls)
+
+
+def test_execute_tools(mock_chat_completion: tuple[mock.MagicMock, mock.MagicMock]):
+    # Tests the execute method of the LangChainAgentNode with mocked tools and chat completion
+    (_, mock_async_client) = mock_chat_completion
+    chat_responses = [
+        'I should check Tool1\nAction: Tool1\nAction Input: "name a reptile"',
+        'I should check Tool2\nAction: Tool2\nAction Input: "name of a day of the week"',
+        'I should check Tool1\nAction: Tool1\nAction Input: "name a reptile"',
+        'I should check Tool2\nAction: Tool2\nAction Input: "name of a day of the week"',
+        'Observation: Answer: Yes!\nI now know the final answer.\nFinal Answer: Yes!'
+    ]
+    mock_responses = [mk_mock_openai_response([response]) for response in chat_responses]
+    mock_async_client.chat.completions.create.side_effect = mock_responses
+
+    llm_chat = ChatOpenAI(model="fake-model", openai_api_key="fake-key")
+
+    mock_tool1 = mk_mock_langchain_tool(["lizard", "frog"])
+    mock_tool2 = mk_mock_langchain_tool(["Tuesday", "Thursday"])
+
+    tools = [
+        Tool(name="Tool1",
+             func=mock_tool1.run,
+             coroutine=mock_tool1.arun,
+             description="useful for when you need to know the name of a reptile"),
+        Tool(name="Tool2",
+             func=mock_tool2.run,
+             coroutine=mock_tool2.arun,
+             description="useful for when you need to know the day of the week")
+    ]
+
+    agent = initialize_agent(tools,
+                             llm_chat,
+                             agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
+                             verbose=True,
+                             handle_parsing_errors=True,
+                             early_stopping_method="generate",
+                             return_intermediate_steps=False)
+
+    node = LangChainAgentNode(agent_executor=agent)
+
+    assert execute_node(node, input="input1") == "Yes!"
+
+
+def test_execute_error(mock_chat_completion: tuple[mock.MagicMock, mock.MagicMock]):
+    # Tests the execute method of the LangChainAgentNode with mocked tools and chat completion
+    (_, mock_async_client) = mock_chat_completion
+    chat_responses = [
+        'I should check Tool1\nAction: Tool1\nAction Input: "name a reptile"',
+        'I should check Tool2\nAction: Tool2\nAction Input: "name of a day of the week"',
+        'Observation: Answer: Yes!\nI now know the final answer.\nFinal Answer: Yes!'
+    ]
+    mock_responses = [mk_mock_openai_response([response]) for response in chat_responses]
+    mock_async_client.chat.completions.create.side_effect = mock_responses
+
+    llm_chat = ChatOpenAI(model="fake-model", openai_api_key="fake-key")
+
+    mock_tool1 = mk_mock_langchain_tool(["lizard"])
+    mock_tool2 = mk_mock_langchain_tool(RuntimeError("unittest"))
+
+    tools = [
+        Tool(name="Tool1",
+             func=mock_tool1.run,
+             coroutine=mock_tool1.arun,
+             description="useful for when you need to know the name of a reptile"),
+        Tool(name="Tool2",
+             func=mock_tool2.run,
+             coroutine=mock_tool2.arun,
+             description="useful for when you need to test tool errors")
+    ]
+
+    agent = initialize_agent(tools,
+                             llm_chat,
+                             agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
+                             verbose=True,
+                             handle_parsing_errors=True,
+                             early_stopping_method="generate",
+                             return_intermediate_steps=False)
+
+    node = LangChainAgentNode(agent_executor=agent)
+    assert execute_node(node, input="input1") == "Error running agent: unittest"
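Worth noting for the list-input path as well: assuming the list-handling branch implied by the context lines in the langchain_agent_node.py diff recurses into _run_single per element, each element now independently degrades to an error string rather than failing the whole batch. A hedged illustration with a mocked executor (not part of the PR):

import asyncio
from unittest import mock

from morpheus.llm.nodes.langchain_agent_node import LangChainAgentNode

executor = mock.MagicMock()
# First call succeeds, second raises; both outcomes land in the gathered results.
executor.arun = mock.AsyncMock(side_effect=["fine", RuntimeError("unittest")])

node = LangChainAgentNode(agent_executor=executor)
assert asyncio.run(node._run_single(input=["a", "b"])) == ["fine", "Error running agent: unittest"]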