Merge pull request #87 from zamm-dev/lightweight-llm-calls
Return lightweight LLM call info back to frontend
amosjyng authored May 16, 2024
2 parents 8f36971 + 6ced415 commit 06bdcc6
Showing 21 changed files with 700 additions and 2,447 deletions.
19 changes: 10 additions & 9 deletions src-svelte/src/lib/bindings.ts
@@ -35,30 +35,31 @@ export function getSystemInfo() {
 }
 
 export function chat(provider: Service, llm: string, temperature: number | null, prompt: ChatMessage[]) {
-    return invoke()<LlmCall>("chat", { provider,llm,temperature,prompt })
+    return invoke()<LightweightLlmCall>("chat", { provider,llm,temperature,prompt })
 }
 
 export function getApiCall(id: string) {
     return invoke()<LlmCall>("get_api_call", { id })
 }
 
 export function getApiCalls(offset: number) {
-    return invoke()<LlmCall[]>("get_api_calls", { offset })
+    return invoke()<LightweightLlmCall[]>("get_api_calls", { offset })
 }
 
-export type Request = { prompt: Prompt; temperature: number }
 export type ChatMessage = { role: "System"; text: string } | { role: "Human"; text: string } | { role: "AI"; text: string }
-export type Llm = { name: string; requested: string; provider: Service }
-export type Service = "OpenAI"
-export type EntityId = { uuid: string }
-export type Prompt = ({ type: "Chat" } & ChatPrompt)
 export type Response = { completion: ChatMessage }
+export type Service = "OpenAI"
+export type Request = { prompt: Prompt; temperature: number }
 export type Preferences = { animations_on?: boolean | null; background_animation?: boolean | null; animation_speed?: number | null; transparency_on?: boolean | null; sound_on?: boolean | null; volume?: number | null }
+export type Llm = { name: string; requested: string; provider: Service }
+export type LightweightLlmCall = { id: EntityId; timestamp: string; response_message: ChatMessage }
+export type LlmCall = { id: EntityId; timestamp: string; llm: Llm; request: Request; response: Response; tokens: TokenMetadata }
 export type ApiKeys = { openai: string | null }
 export type OS = "Mac" | "Linux" | "Windows"
 export type Shell = "Bash" | "Zsh" | "PowerShell"
 export type SystemInfo = { zamm_version: string; os: OS | null; shell: Shell | null; shell_init_file: string | null }
-export type LlmCall = { id: EntityId; timestamp: string; llm: Llm; request: Request; response: Response; tokens: TokenMetadata }
-export type ChatPrompt = { messages: ChatMessage[] }
 export type TokenMetadata = { prompt: number | null; response: number | null; total: number | null }
 export type Sound = "Switch" | "Whoosh"
+export type Prompt = ({ type: "Chat" } & ChatPrompt)
+export type ChatPrompt = { messages: ChatMessage[] }
+export type EntityId = { uuid: string }
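In practice, the narrowed binding means a `chat` call now resolves to just the ID, timestamp, and reply, with the full record one extra fetch away. A minimal sketch following the generated type declarations above (the statements themselves are illustrative, not code from this PR):

```ts
import { chat, getApiCall, type LightweightLlmCall } from "$lib/bindings";

// The chat command now returns only what the UI needs to show immediately.
const call: LightweightLlmCall = await chat("OpenAI", "gpt-4", null, [
  { role: "Human", text: "Hello, does this work?" },
]);
console.log(call.response_message.text); // the AI reply; no token metadata here

// Request details, LLM info, and token counts take a second round trip.
const fullCall = await getApiCall(call.id.uuid);
console.log(fullCall.tokens.total);
```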
6 changes: 3 additions & 3 deletions src-svelte/src/routes/api-calls/ApiCalls.svelte
@@ -1,5 +1,5 @@
 <script lang="ts">
-  import { getApiCalls, type LlmCall } from "$lib/bindings";
+  import { getApiCalls, type LightweightLlmCall } from "$lib/bindings";
   import { snackbarError } from "$lib/snackbar/Snackbar.svelte";
   import InfoBox from "$lib/InfoBox.svelte";
   import Scrollable from "$lib/Scrollable.svelte";
@@ -12,7 +12,7 @@
   export let dateTimeLocale: string | undefined = undefined;
   export let timeZone: string | undefined = undefined;
-  let llmCalls: LlmCall[] = [];
+  let llmCalls: LightweightLlmCall[] = [];
   let llmCallsPromise: Promise<void> | undefined = undefined;
   let allCallsLoaded = false;
   let messageWidth = MIN_MESSAGE_WIDTH;
@@ -111,7 +111,7 @@
           <a href={`/api-calls/${call.id}`}>
             <div class="message instance">
               <div class="text-container">
-                <div class="text">{call.response.completion.text}</div>
+                <div class="text">{call.response_message.text}</div>
               </div>
               <div class="time">{formatTimestamp(call.timestamp)}</div>
             </div>
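Since each row only renders the reply text and a timestamp, `LightweightLlmCall` carries everything the list needs. A rough sketch of the incremental-fetch pattern the component's state variables suggest; `PAGE_SIZE` and the termination check are assumptions, not taken from this diff:

```ts
import { getApiCalls, type LightweightLlmCall } from "$lib/bindings";

let llmCalls: LightweightLlmCall[] = [];
let allCallsLoaded = false;

// Assumed page size; the component's real constant is not shown in this diff.
const PAGE_SIZE = 50;

async function loadMoreCalls(): Promise<void> {
  // Fetch the next page, offset by however many calls are already loaded.
  const newCalls = await getApiCalls(llmCalls.length);
  llmCalls = [...llmCalls, ...newCalls];
  // A short page means the backend has run out of older calls.
  allCallsLoaded = newCalls.length < PAGE_SIZE;
}
```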
2 changes: 1 addition & 1 deletion src-svelte/src/routes/chat/Chat.svelte
@@ -79,7 +79,7 @@
       try {
         let llmCall = await chat("OpenAI", "gpt-4", null, $conversation);
-        appendMessage(llmCall.response.completion);
+        appendMessage(llmCall.response_message);
       } catch (err) {
         snackbarError(err as string);
       } finally {
7 changes: 4 additions & 3 deletions src-svelte/src/routes/chat/Chat.test.ts
@@ -7,7 +7,7 @@ import PersistentChatView from "./PersistentChatView.svelte";
 import userEvent from "@testing-library/user-event";
 import { TauriInvokePlayback, type ParsedCall } from "$lib/sample-call-testing";
 import { animationSpeed } from "$lib/preferences";
-import type { ChatMessage, LlmCall } from "$lib/bindings";
+import type { ChatMessage, LightweightLlmCall } from "$lib/bindings";
 
 describe("Chat conversation", () => {
   let tauriInvokeMock: Mock;
@@ -81,8 +81,9 @@ describe("Chat conversation", () => {
     expect(screen.getByText(nextExpectedHumanPrompt)).toBeInTheDocument();
 
     expect(tauriInvokeMock).toHaveReturnedTimes(1);
-    const lastResult: LlmCall = tauriInvokeMock.mock.results[0].value;
-    const aiResponse = lastResult.response.completion.text;
+    const lastResult: LightweightLlmCall =
+      tauriInvokeMock.mock.results[0].value;
+    const aiResponse = lastResult.response_message.text;
     const lastSentence = aiResponse.split("\n").slice(-1)[0];
     await waitFor(() => {
       expect(
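With the mock now resolving to the lightweight shape, assertions read the reply straight off `response_message`. A self-contained vitest sketch of that pattern (the mock here is a stand-in, not the repo's actual `TauriInvokePlayback` wiring):

```ts
import { expect, test, vi } from "vitest";
import type { LightweightLlmCall } from "$lib/bindings";

test("mocked chat resolves to a lightweight call", async () => {
  // Stand-in mock that resolves with the same shape the backend now returns.
  const tauriInvokeMock = vi.fn(
    async (): Promise<LightweightLlmCall> => ({
      id: { uuid: "d5ad1e49-f57f-4481-84fb-4d70ba8a7a74" },
      timestamp: "2024-01-16T08:50:19.738093890",
      response_message: {
        role: "AI",
        text: "Yes, it works. How can I assist you today?",
      },
    }),
  );

  const lastResult = await tauriInvokeMock();
  expect(lastResult.response_message.text).toContain("it works");
});
```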
42 changes: 3 additions & 39 deletions src-tauri/api/sample-calls/chat-continue-conversation.yaml
@@ -29,45 +29,9 @@ response:
     {
       "id": "c13c1e67-2de3-48de-a34c-a32079c03316",
       "timestamp": "2024-01-16T09:50:19.738093890",
-      "llm": {
-        "name": "gpt-4-0613",
-        "requested": "gpt-4",
-        "provider": "OpenAI"
-      },
-      "request": {
-        "prompt": {
-          "type": "Chat",
-          "messages": [
-            {
-              "role": "System",
-              "text": "You are ZAMM, a chat program. Respond in first person."
-            },
-            {
-              "role": "Human",
-              "text": "Hello, does this work?"
-            },
-            {
-              "role": "AI",
-              "text": "Yes, it works. How can I assist you today?"
-            },
-            {
-              "role": "Human",
-              "text": "Tell me something funny."
-            }
-          ]
-        },
-        "temperature": 1.0
-      },
-      "response": {
-        "completion": {
-          "role": "AI",
-          "text": "Sure, here's a joke for you: Why don't scientists trust atoms? Because they make up everything!"
-        }
-      },
-      "tokens": {
-        "prompt": 57,
-        "response": 22,
-        "total": 79
+      "response_message": {
+        "role": "AI",
+        "text": "Sure, here's a joke for you: Why don't scientists trust atoms? Because they make up everything!"
       }
     }
 sideEffects:
34 changes: 3 additions & 31 deletions src-tauri/api/sample-calls/chat-start-conversation.yaml
@@ -21,37 +21,9 @@ response:
     {
       "id": "d5ad1e49-f57f-4481-84fb-4d70ba8a7a74",
       "timestamp": "2024-01-16T08:50:19.738093890",
-      "llm": {
-        "name": "gpt-4-0613",
-        "requested": "gpt-4",
-        "provider": "OpenAI"
-      },
-      "request": {
-        "prompt": {
-          "type": "Chat",
-          "messages": [
-            {
-              "role": "System",
-              "text": "You are ZAMM, a chat program. Respond in first person."
-            },
-            {
-              "role": "Human",
-              "text": "Hello, does this work?"
-            }
-          ]
-        },
-        "temperature": 1.0
-      },
-      "response": {
-        "completion": {
-          "role": "AI",
-          "text": "Yes, it works. How can I assist you today?"
-        }
-      },
-      "tokens": {
-        "prompt": 32,
-        "response": 12,
-        "total": 44
+      "response_message": {
+        "role": "AI",
+        "text": "Yes, it works. How can I assist you today?"
+      }
     }
 sideEffects:
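Both recorded samples now pin the trimmed wire format: only `id`, `timestamp`, and `response_message` cross the Tauri boundary. As a quick illustration (not the repo's actual test harness), the recorded body is plain JSON that exposes the reply text directly:

```ts
// JSON body as recorded in chat-start-conversation.yaml above.
const recorded = `{
  "id": "d5ad1e49-f57f-4481-84fb-4d70ba8a7a74",
  "timestamp": "2024-01-16T08:50:19.738093890",
  "response_message": {
    "role": "AI",
    "text": "Yes, it works. How can I assist you today?"
  }
}`;

const call = JSON.parse(recorded);
console.log(call.response_message.text); // "Yes, it works. How can I assist you today?"
```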
(Diffs for the remaining changed files are not shown here.)
