Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

docs: Document tool calling with OpenRouter #437

Merged
merged 1 commit into from
May 25, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -95,3 +95,63 @@ await stream.forEach(print);
// 123
// 456789
```

## Tool calling

OpenRouter supports [tool calling](https://openrouter.ai/docs#tool-calls).

Check the [docs](/modules/model_io/models/chat_models/how_to/tools.md) for more information on how to use tools.

In the following example we use the `joke` tool to generate jokes. We stream the joke generation using the `ToolsOutputParser`, which tries to "auto-complete" the partial JSON from each chunk into a valid state.

```dart
final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY'];
const tool = ToolSpec(
  name: 'joke',
  description: 'A joke',
  inputJsonSchema: {
    'type': 'object',
    'properties': {
      'setup': {
        'type': 'string',
        'description': 'The setup for the joke',
      },
      'punchline': {
        'type': 'string',
        'description': 'The punchline to the joke',
      },
    },
    // 'required' must list properties declared in this schema.
    'required': ['setup', 'punchline'],
  },
);
final promptTemplate = ChatPromptTemplate.fromTemplate(
  'tell me a long joke about {foo}',
);
final chat = ChatOpenAI(
  apiKey: openRouterApiKey,
  baseUrl: 'https://openrouter.ai/api/v1',
  defaultOptions: ChatOpenAIOptions(
    model: 'gpt-4o',
    tools: [tool],
    // Force the model to always call the 'joke' tool.
    toolChoice: ChatToolChoice.forced(name: 'joke'),
  ),
);
final outputParser = ToolsOutputParser();
final chain = promptTemplate.pipe(chat).pipe(outputParser);
final stream = chain.stream({'foo': 'bears'});
await for (final chunk in stream) {
  final args = chunk.first.arguments;
  print(args);
}
// {}
// {setup: }
// {setup: Why don't}
// {setup: Why don't bears}
// {setup: Why don't bears like fast food}
// {setup: Why don't bears like fast food?, punchline: }
// {setup: Why don't bears like fast food?, punchline: Because}
// {setup: Why don't bears like fast food?, punchline: Because they can't}
// {setup: Why don't bears like fast food?, punchline: Because they can't catch it!}
```
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import 'package:langchain_openai/langchain_openai.dart';
// Entry point: runs each OpenRouter example sequentially so their
// printed output does not interleave.
void main(final List<String> arguments) async {
await _openRouter();
await _openRouterStreaming();
await _openRouterStreamingTools();
}

Future<void> _openRouter() async {
Expand Down Expand Up @@ -66,3 +67,56 @@ Future<void> _openRouterStreaming() async {
// 123
// 456789
}

/// Demonstrates streaming tool calling through OpenRouter, parsing the
/// partial tool-call JSON from each chunk with [ToolsOutputParser].
Future<void> _openRouterStreamingTools() async {
  final openRouterApiKey = Platform.environment['OPEN_ROUTER_API_KEY'];

  const tool = ToolSpec(
    name: 'joke',
    description: 'A joke',
    inputJsonSchema: {
      'type': 'object',
      'properties': {
        'setup': {
          'type': 'string',
          'description': 'The setup for the joke',
        },
        'punchline': {
          'type': 'string',
          'description': 'The punchline to the joke',
        },
      },
      // Fix: 'required' must reference this schema's own properties
      // ('setup'/'punchline'); 'location' was copy-pasted from the
      // weather-tool example and is not a declared property.
      'required': ['setup', 'punchline'],
    },
  );
  final promptTemplate = ChatPromptTemplate.fromTemplate(
    'tell me a long joke about {foo}',
  );
  final chat = ChatOpenAI(
    apiKey: openRouterApiKey,
    baseUrl: 'https://openrouter.ai/api/v1',
    defaultOptions: ChatOpenAIOptions(
      model: 'gpt-4o',
      tools: const [tool],
      // Force the model to always call the 'joke' tool.
      toolChoice: ChatToolChoice.forced(name: 'joke'),
    ),
  );
  final outputParser = ToolsOutputParser();

  final chain = promptTemplate.pipe(chat).pipe(outputParser);

  final stream = chain.stream({'foo': 'bears'});
  await for (final chunk in stream) {
    final args = chunk.first.arguments;
    print(args);
  }
  // {}
  // {setup: }
  // {setup: Why don't}
  // {setup: Why don't bears}
  // {setup: Why don't bears like fast food}
  // {setup: Why don't bears like fast food?, punchline: }
  // {setup: Why don't bears like fast food?, punchline: Because}
  // {setup: Why don't bears like fast food?, punchline: Because they can't}
  // {setup: Why don't bears like fast food?, punchline: Because they can't catch it!}
}
69 changes: 69 additions & 0 deletions packages/langchain_openai/test/chat_models/open_router_test.dart
Original file line number Diff line number Diff line change
@@ -1,10 +1,12 @@
@TestOn('vm')
library; // Uses dart:io

import 'dart:convert';
import 'dart:io';

import 'package:langchain_core/chat_models.dart';
import 'package:langchain_core/prompts.dart';
import 'package:langchain_core/tools.dart';
import 'package:langchain_openai/langchain_openai.dart';
import 'package:test/test.dart';

Expand Down Expand Up @@ -104,5 +106,72 @@ void main() {
expect(numTokens, 13, reason: model);
}
});

test('Test tool calling',
    timeout: const Timeout(Duration(minutes: 1)), () async {
  // Weather tool used to exercise the OpenRouter tool-calling flow.
  const tool = ToolSpec(
    name: 'get_current_weather',
    description: 'Get the current weather in a given location',
    inputJsonSchema: {
      'type': 'object',
      'properties': {
        'location': {
          'type': 'string',
          'description': 'The city and state, e.g. San Francisco, CA',
        },
        'unit': {
          'type': 'string',
          'description': 'The unit of temperature to return',
          'enum': ['celsius', 'fahrenheit'],
        },
      },
      'required': ['location'],
    },
  );

  final userMessage = ChatMessage.humanText(
    'What’s the weather like in Boston right now?',
  );

  // Round 1: the model should answer with a tool call, not text content.
  final firstResponse = await chatModel.invoke(
    PromptValue.chat([userMessage]),
    options: const ChatOpenAIOptions(
      model: 'gpt-4o',
      tools: [tool],
    ),
  );
  final firstAiMessage = firstResponse.output;
  expect(firstAiMessage.content, isEmpty);
  expect(firstAiMessage.toolCalls, isNotEmpty);

  final call = firstAiMessage.toolCalls.first;
  expect(call.name, tool.name);
  expect(call.arguments.containsKey('location'), isTrue);
  expect(call.arguments['location'], contains('Boston'));

  // Simulated tool execution result, fed back to the model.
  final toolResult = {
    'temperature': '22',
    'unit': 'celsius',
    'description': 'Sunny',
  };
  final toolMessage = ChatMessage.tool(
    toolCallId: call.id,
    content: json.encode(toolResult),
  );

  // Round 2: with the tool output available, the model should produce a
  // plain-text answer and make no further tool calls.
  final secondResponse = await chatModel.invoke(
    PromptValue.chat([userMessage, firstAiMessage, toolMessage]),
    options: const ChatOpenAIOptions(
      model: 'gpt-4o',
      tools: [tool],
    ),
  );
  final secondAiMessage = secondResponse.output;
  expect(secondAiMessage.toolCalls, isEmpty);
  expect(secondAiMessage.content, contains('22'));
});
});
}
Loading