diff --git a/llm_demo/orchestrator/langchain_tools/langchain_tools_orchestrator.py b/llm_demo/orchestrator/langchain_tools/langchain_tools_orchestrator.py
index 1595b052..f1ee7679 100644
--- a/llm_demo/orchestrator/langchain_tools/langchain_tools_orchestrator.py
+++ b/llm_demo/orchestrator/langchain_tools/langchain_tools_orchestrator.py
@@ -69,8 +69,8 @@ def initialize_agent(
         prompt: ChatPromptTemplate,
         model: str,
     ) -> "UserAgent":
+        # TODO: Use .bind_tools(tools) to bind the tools with the LLM.
         llm = ChatVertexAI(max_output_tokens=512, model_name=model, temperature=0.0)
-        llm.bind_tools(tools)
         memory = ConversationBufferMemory(
             chat_memory=ChatMessageHistory(messages=history),
             memory_key="chat_history",
diff --git a/llm_demo/orchestrator/langgraph/react_graph.py b/llm_demo/orchestrator/langgraph/react_graph.py
index 3472ee5a..b05134f1 100644
--- a/llm_demo/orchestrator/langgraph/react_graph.py
+++ b/llm_demo/orchestrator/langgraph/react_graph.py
@@ -85,6 +85,7 @@ async def create_graph(
     tool_node = ToolNode(tools)

     # model node
+    # TODO: Use .bind_tools(tools) to bind the tools with the LLM.
     model = ChatVertexAI(max_output_tokens=512, model_name=model_name, temperature=0.0)

     # Add the prompt to the model to create a model runnable
@@ -98,6 +99,10 @@ async def acall_model(state: UserState, config: RunnableConfig):
         messages = state["messages"]
         res = await model_runnable.ainvoke({"messages": messages}, config)

+        # TODO: Remove the temporary fix of parsing LLM response and invoking
+        # tools until we use bind_tools API and have automatic response parsing
+        # and tool calling. (see
+        # https://langchain-ai.github.io/langgraph/#example)
         if "```json" in res.content:
             try:
                 response = str(res.content).replace("```json", "").replace("```", "")