From 47986592a674322fe2f69aff7166a3e594756ace Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Sat, 25 May 2024 16:55:04 +0200 Subject: [PATCH] docs: Document tool calling with OpenRouter (#437) --- .../chat_models/integrations/open_router.md | 60 ++++++++++++++++ .../chat_models/integrations/open_router.dart | 54 +++++++++++++++ .../test/chat_models/open_router_test.dart | 69 +++++++++++++++++++ 3 files changed, 183 insertions(+) diff --git a/docs/modules/model_io/models/chat_models/integrations/open_router.md b/docs/modules/model_io/models/chat_models/integrations/open_router.md index e747ca5f..c2d63555 100644 --- a/docs/modules/model_io/models/chat_models/integrations/open_router.md +++ b/docs/modules/model_io/models/chat_models/integrations/open_router.md @@ -95,3 +95,63 @@ await stream.forEach(print); // 123 // 456789 ``` + +## Tool calling + +OpenRouter supports [tool calling](https://openrouter.ai/docs#tool-calls). + +Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools. + +In the following example we use the `joke` tool to generate jokes. We stream the joke generation using the `ToolsOutputParser` which tries to "auto-complete" the partial JSON from each chunk into a valid state. 
+ +```dart +final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY']; +const tool = ToolSpec( + name: 'joke', + description: 'A joke', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'setup': { + 'type': 'string', + 'description': 'The setup for the joke', + }, + 'punchline': { + 'type': 'string', + 'description': 'The punchline to the joke', + }, + }, + 'required': ['setup', 'punchline'], + }, +); +final promptTemplate = ChatPromptTemplate.fromTemplate( + 'tell me a long joke about {foo}', +); +final chat = ChatOpenAI( + apiKey: openRouterApiKey, + baseUrl: 'https://openrouter.ai/api/v1', + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + toolChoice: ChatToolChoice.forced(name: 'joke'), + ), +); +final outputParser = ToolsOutputParser(); + +final chain = promptTemplate.pipe(chat).pipe(outputParser); + +final stream = chain.stream({'foo': 'bears'}); +await for (final chunk in stream) { + final args = chunk.first.arguments; + print(args); +} +// {} +// {setup: } +// {setup: Why don't} +// {setup: Why don't bears} +// {setup: Why don't bears like fast food} +// {setup: Why don't bears like fast food?, punchline: } +// {setup: Why don't bears like fast food?, punchline: Because} +// {setup: Why don't bears like fast food?, punchline: Because they can't} +// {setup: Why don't bears like fast food?, punchline: Because they can't catch it!} +``` diff --git a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart index 439943c5..f552e60b 100644 --- a/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart +++ b/examples/docs_examples/bin/modules/model_io/models/chat_models/integrations/open_router.dart @@ -7,6 +7,7 @@ import 'package:langchain_openai/langchain_openai.dart'; void main(final List arguments) async { await _openRouter(); await 
_openRouterStreaming(); + await _openRouterStreamingTools(); } Future _openRouter() async { @@ -66,3 +67,56 @@ Future _openRouterStreaming() async { // 123 // 456789 } + +Future _openRouterStreamingTools() async { + final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY']; + + const tool = ToolSpec( + name: 'joke', + description: 'A joke', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'setup': { + 'type': 'string', + 'description': 'The setup for the joke', + }, + 'punchline': { + 'type': 'string', + 'description': 'The punchline to the joke', + }, + }, + 'required': ['setup', 'punchline'], + }, + ); + final promptTemplate = ChatPromptTemplate.fromTemplate( + 'tell me a long joke about {foo}', + ); + final chat = ChatOpenAI( + apiKey: openRouterApiKey, + baseUrl: 'https://openrouter.ai/api/v1', + defaultOptions: ChatOpenAIOptions( + model: 'gpt-4o', + tools: const [tool], + toolChoice: ChatToolChoice.forced(name: 'joke'), + ), + ); + final outputParser = ToolsOutputParser(); + + final chain = promptTemplate.pipe(chat).pipe(outputParser); + + final stream = chain.stream({'foo': 'bears'}); + await for (final chunk in stream) { + final args = chunk.first.arguments; + print(args); + } + // {} + // {setup: } + // {setup: Why don't} + // {setup: Why don't bears} + // {setup: Why don't bears like fast food} + // {setup: Why don't bears like fast food?, punchline: } + // {setup: Why don't bears like fast food?, punchline: Because} + // {setup: Why don't bears like fast food?, punchline: Because they can't} + // {setup: Why don't bears like fast food?, punchline: Because they can't catch it!} +} diff --git a/packages/langchain_openai/test/chat_models/open_router_test.dart b/packages/langchain_openai/test/chat_models/open_router_test.dart index 396f8ac4..4587b56b 100644 --- a/packages/langchain_openai/test/chat_models/open_router_test.dart +++ b/packages/langchain_openai/test/chat_models/open_router_test.dart @@ -1,10 +1,12 @@ @TestOn('vm') 
library; // Uses dart:io +import 'dart:convert'; import 'dart:io'; import 'package:langchain_core/chat_models.dart'; import 'package:langchain_core/prompts.dart'; +import 'package:langchain_core/tools.dart'; import 'package:langchain_openai/langchain_openai.dart'; import 'package:test/test.dart'; @@ -104,5 +106,72 @@ void main() { expect(numTokens, 13, reason: model); } }); + + test('Test tool calling', + timeout: const Timeout(Duration(minutes: 1)), () async { + const tool = ToolSpec( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + inputJsonSchema: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + final humanMessage = ChatMessage.humanText( + 'What’s the weather like in Boston right now?', + ); + final res1 = await chatModel.invoke( + PromptValue.chat([humanMessage]), + options: const ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + ), + ); + + final aiMessage1 = res1.output; + + expect(aiMessage1.content, isEmpty); + expect(aiMessage1.toolCalls, isNotEmpty); + final toolCall = aiMessage1.toolCalls.first; + + expect(toolCall.name, tool.name); + expect(toolCall.arguments.containsKey('location'), isTrue); + expect(toolCall.arguments['location'], contains('Boston')); + + final functionResult = { + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }; + final functionMessage = ChatMessage.tool( + toolCallId: toolCall.id, + content: json.encode(functionResult), + ); + + final res2 = await chatModel.invoke( + PromptValue.chat([humanMessage, aiMessage1, functionMessage]), + options: const ChatOpenAIOptions( + model: 'gpt-4o', + tools: [tool], + ), + ); + + final aiMessage2 = res2.output; + + expect(aiMessage2.toolCalls, isEmpty); + 
expect(aiMessage2.content, contains('22')); + }); }); }