Merge pull request #25 from sgomez/upgrade
Upgrade dependencies
sgomez authored Sep 6, 2024
2 parents 8f626c0 + 5e06c5a commit 4889883
Showing 21 changed files with 731 additions and 200 deletions.
50 changes: 38 additions & 12 deletions examples/ai-core/Makefile
@@ -40,6 +40,22 @@ embed-many_ollama-cosine-similarity:
$(call RUN_EXAMPLE_TARGET,$@)


+# middleware
+.PHONY: middleware middleware-run middleware-all middleware_generate-text-cache-middleware middleware_generate-text-log-middleware middleware_stream-text-log-middleware middleware_stream-text-rag-middleware
+middleware: middleware-run middleware-all
+middleware-run:
+echo - examples/middleware:
+middleware-all: middleware_generate-text-cache-middleware middleware_generate-text-log-middleware middleware_stream-text-log-middleware middleware_stream-text-rag-middleware
+middleware_generate-text-cache-middleware:
+$(call RUN_EXAMPLE_TARGET,$@)
+middleware_generate-text-log-middleware:
+$(call RUN_EXAMPLE_TARGET,$@)
+middleware_stream-text-log-middleware:
+$(call RUN_EXAMPLE_TARGET,$@)
+middleware_stream-text-rag-middleware:
+$(call RUN_EXAMPLE_TARGET,$@)


# registry
.PHONY: registry registry-run registry-all registry_embed registry_stream-text registry_stream-multimodal
registry: registry-run registry-all
@@ -56,16 +72,18 @@ registry_stream-multimodal:


# generate-object
-.PHONY: generate-object generate-object-run generate-object-all generate-object_ollama generate-object_ollama-date-parsing generate-object_ollama-full-json generate-object_ollama-json generate-object_ollama-multimodal generate-object_ollama-no-schema generate-object_ollama-raw-json-schema generate-object_ollama-tool
+.PHONY: generate-object generate-object-run generate-object-all generate-object_ollama generate-object_ollama-array generate-object_ollama-date-parsing generate-object_ollama-full-result generate-object_ollama-json generate-object_ollama-multimodal generate-object_ollama-no-schema generate-object_ollama-raw-json-schema generate-object_ollama-tool
generate-object: generate-object-run generate-object-all
generate-object-run:
echo - examples/generate-object:
-generate-object-all: generate-object_ollama generate-object_ollama-date-parsing generate-object_ollama-full-json generate-object_ollama-json generate-object_ollama-multimodal generate-object_ollama-no-schema generate-object_ollama-raw-json-schema generate-object_ollama-tool
-generate-object_ollama-date-parsing:
-$(call RUN_EXAMPLE_TARGET,$@)
+generate-object-all: generate-object_ollama generate-object_ollama-array generate-object_ollama-date-parsing generate-object_ollama-full-result generate-object_ollama-json generate-object_ollama-multimodal generate-object_ollama-no-schema generate-object_ollama-raw-json-schema generate-object_ollama-tool
generate-object_ollama:
$(call RUN_EXAMPLE_TARGET,$@)
-generate-object_ollama-full-json:
+generate-object_ollama-array:
$(call RUN_EXAMPLE_TARGET,$@)
+generate-object_ollama-date-parsing:
+$(call RUN_EXAMPLE_TARGET,$@)
+generate-object_ollama-full-result:
+$(call RUN_EXAMPLE_TARGET,$@)
generate-object_ollama-json:
$(call RUN_EXAMPLE_TARGET,$@)
@@ -80,11 +98,11 @@ generate-object_ollama-tool:


# generate-text
-.PHONY: generate-text generate-text-run generate-text-all generate-text_ollama generate-text_ollama-completion generate-text_ollama-completion-chat generate-text_ollama-custom-fetch generate-text_ollama-custom-headers generate-text_ollama-multimodal generate-text_ollama-multimodal-base64 generate-text_ollama-multimodal-url generate-text_ollama-system-message-a generate-text_ollama-system-message-b generate-text_ollama-tool-call generate-text_ollama-tool-call-raw-json-schema
+.PHONY: generate-text generate-text-run generate-text-all generate-text_ollama generate-text_ollama-completion generate-text_ollama-completion-chat generate-text_ollama-custom-fetch generate-text_ollama-custom-headers generate-text_ollama-full-result generate-text_ollama-multimodal generate-text_ollama-multimodal-base64 generate-text_ollama-multimodal-url generate-text_ollama-system-message-a generate-text_ollama-system-message-b generate-text_ollama-tool-call generate-text_ollama-tool-call-raw-json-schema
generate-text: generate-text-run generate-text-all
generate-text-run:
echo - examples/generate-text:
-generate-text-all: generate-text_ollama generate-text_ollama-completion generate-text_ollama-completion-chat generate-text_ollama-custom-fetch generate-text_ollama-custom-headers generate-text_ollama-multimodal generate-text_ollama-multimodal-base64 generate-text_ollama-multimodal-url generate-text_ollama-system-message-a generate-text_ollama-system-message-b generate-text_ollama-tool-call generate-text_ollama-tool-call-raw-json-schema
+generate-text-all: generate-text_ollama generate-text_ollama-completion generate-text_ollama-completion-chat generate-text_ollama-custom-fetch generate-text_ollama-custom-headers generate-text_ollama-full-result generate-text_ollama-multimodal generate-text_ollama-multimodal-base64 generate-text_ollama-multimodal-url generate-text_ollama-system-message-a generate-text_ollama-system-message-b generate-text_ollama-tool-call generate-text_ollama-tool-call-raw-json-schema
generate-text_ollama:
$(call RUN_EXAMPLE_TARGET,$@)
generate-text_ollama-completion:
@@ -95,6 +113,8 @@ generate-text_ollama-custom-fetch:
$(call RUN_EXAMPLE_TARGET,$@)
generate-text_ollama-custom-headers:
$(call RUN_EXAMPLE_TARGET,$@)
+generate-text_ollama-full-result:
+$(call RUN_EXAMPLE_TARGET,$@)
generate-text_ollama-multimodal:
$(call RUN_EXAMPLE_TARGET,$@)
generate-text_ollama-multimodal-base64:
@@ -137,11 +157,11 @@ stream-object_ollama-tool:
$(call RUN_EXAMPLE_TARGET,$@)

# stream-text
-.PHONY: stream-text stream-text-run stream-text-all stream-text_ollama stream-text_ollama-abort stream-text_ollama-chatbot stream-text_ollama-chatbot-with-tools stream-text_ollama-completion stream-text_ollama-completion-chat stream-text_ollama-custom-fetch-inject-error stream-text_ollama-fullstream stream-text_ollama-ollama-on-chunk stream-text_ollama-ollama-on-chunk-tool-call-streaming stream-text_ollama-on-finish stream-text_ollama-reader stream-text_ollama-response-headers stream-text_ollama-tool-call-raw-json-schema
+.PHONY: stream-text stream-text-run stream-text-all stream-text_ollama stream-text_ollama-abort stream-text_ollama-chatbot stream-text_ollama-chatbot-with-tools stream-text_ollama-completion stream-text_ollama-completion-chat stream-text_ollama-custom-fetch-inject-error stream-text_ollama-fullstream stream-text_ollama-fullstream-raw stream-text_ollama-ollama-on-chunk stream-text_ollama-ollama-on-chunk-tool-call-streaming stream-text_ollama-on-finish stream-text_ollama-reader stream-text_ollama-response stream-text_ollama-tool-call-raw-json-schema
stream-text: stream-text-run stream-text-all
stream-text-run:
echo - examples/stream-text:
-stream-text-all: stream-text_ollama stream-text_ollama-abort stream-text_ollama-chatbot stream-text_ollama-chatbot-with-tools stream-text_ollama-completion stream-text_ollama-completion-chat stream-text_ollama-custom-fetch-inject-error stream-text_ollama-fullstream stream-text_ollama-ollama-on-chunk stream-text_ollama-ollama-on-chunk-tool-call-streaming stream-text_ollama-on-finish stream-text_ollama-reader stream-text_ollama-response-headers stream-text_ollama-tool-call-raw-json-schema
+stream-text-all: stream-text_ollama stream-text_ollama-abort stream-text_ollama-chatbot stream-text_ollama-chatbot-with-tools stream-text_ollama-completion stream-text_ollama-completion-chat stream-text_ollama-custom-fetch-inject-error stream-text_ollama-fullstream stream-text_ollama-fullstream-raw stream-text_ollama-ollama-on-chunk stream-text_ollama-ollama-on-chunk-tool-call-streaming stream-text_ollama-on-finish stream-text_ollama-reader stream-text_ollama-response stream-text_ollama-tool-call-raw-json-schema
stream-text_ollama:
$(call RUN_EXAMPLE_TARGET,$@)
stream-text_ollama-abort:
@@ -158,6 +178,8 @@ stream-text_ollama-custom-fetch-inject-error:
$(call RUN_EXAMPLE_TARGET,$@)
stream-text_ollama-fullstream:
$(call RUN_EXAMPLE_TARGET,$@)
+stream-text_ollama-fullstream-raw:
+$(call RUN_EXAMPLE_TARGET,$@)
stream-text_ollama-ollama-on-chunk:
$(call RUN_EXAMPLE_TARGET,$@)
stream-text_ollama-ollama-on-chunk-tool-call-streaming:
@@ -166,20 +188,24 @@ stream-text_ollama-on-finish:
$(call RUN_EXAMPLE_TARGET,$@)
stream-text_ollama-reader:
$(call RUN_EXAMPLE_TARGET,$@)
-stream-text_ollama-response-headers:
+stream-text_ollama-response:
$(call RUN_EXAMPLE_TARGET,$@)
stream-text_ollama-tool-call-raw-json-schema:
$(call RUN_EXAMPLE_TARGET,$@)

# telemetry
-.PHONY: telemetry telemetry-run telemetry-all telemetry_generate-text telemetry_generate-text-tool-call telemetry_stream-text
+.PHONY: telemetry telemetry-run telemetry-all telemetry_generate-object telemetry_generate-text telemetry_generate-text-tool-call telemetry_stream-object telemetry_stream-text
telemetry: telemetry-run telemetry-all
telemetry-run:
echo - examples/telemetry:
-telemetry-all: telemetry_generate-text telemetry_generate-text-tool-call telemetry_stream-text
+telemetry-all: telemetry_generate-object telemetry_generate-text telemetry_generate-text-tool-call telemetry_stream-object telemetry_stream-text
+telemetry_generate-object:
+$(call RUN_EXAMPLE_TARGET,$@)
telemetry_generate-text:
$(call RUN_EXAMPLE_TARGET,$@)
telemetry_generate-text-tool-call:
$(call RUN_EXAMPLE_TARGET,$@)
+telemetry_stream-object:
+$(call RUN_EXAMPLE_TARGET,$@)
telemetry_stream-text:
$(call RUN_EXAMPLE_TARGET,$@)
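Every target above delegates to RUN_EXAMPLE_TARGET, a macro defined earlier in this Makefile, outside the hunks shown here. A plausible definition (an assumption, not the repository's actual macro) splits the section_example target name on its underscore and runs the matching script under src/:

# Hypothetical sketch: "middleware_stream-text-rag-middleware" would run
# src/middleware/stream-text-rag-middleware.ts via pnpm tsx.
define RUN_EXAMPLE_TARGET
pnpm tsx src/$(word 1,$(subst _, ,$(1)))/$(word 2,$(subst _, ,$(1))).ts
endef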
6 changes: 4 additions & 2 deletions examples/ai-core/src/complex/math-agent/agent.ts
@@ -16,7 +16,7 @@ const problem =
async function main(model: OllamaChatModelId) {
console.log(`PROBLEM: ${problem}\n`)

-await generateText({
+const response = await generateText({
maxToolRoundtrips: 10,
model: ollama(model),
prompt: problem,
@@ -25,7 +25,7 @@ async function main(model: OllamaChatModelId) {
'Reason step by step. ' +
'Use the tool `calculate` when necessary. ' +
'The calculator can only do simple additions, subtractions, multiplications, and divisions. ' +
-'When you give the final answer, provide an explanation for how you got it.',
+'When you give the final answer, provide an explanation for how you got it using the `answer` tool.',
toolChoice: 'required',
tools: {
answer: tool({
@@ -45,6 +45,8 @@ }),
}),
},
})

+console.error(JSON.stringify(response, null, 2))
}

buildProgram('firefunction-v2', main).catch(console.error)
@@ -8,8 +8,8 @@ import { buildProgram } from '../tools/command'

async function main(model: Parameters<typeof ollama>[0]) {
const result = await generateObject({
-mode: 'json',
model: ollama(model),
+output: 'array',
prompt:
'Generate 3 character descriptions for a fantasy role playing game.',
schema: z.object({
27 changes: 27 additions & 0 deletions examples/ai-core/src/generate-object/ollama-full-result.ts
@@ -0,0 +1,27 @@
#! /usr/bin/env -S pnpm tsx

import { generateObject } from 'ai'
import { ollama } from 'ollama-ai-provider'
import { z } from 'zod'

import { buildProgram } from '../tools/command'

async function main(model: Parameters<typeof ollama>[0]) {
const result = await generateObject({
model: ollama(model),
prompt: 'Generate a lasagna recipe.',
schema: z.object({
recipe: z.object({
ingredients: z.array(
z.object({ amount: z.string(), name: z.string() }),
),
name: z.string(),
steps: z.array(z.string()),
}),
}),
})

console.log(JSON.stringify(result, null, 2))
}

buildProgram('llama3.1', main).catch(console.error)
17 changes: 17 additions & 0 deletions examples/ai-core/src/generate-text/ollama-full-result.ts
@@ -0,0 +1,17 @@
#! /usr/bin/env -S pnpm tsx

import { generateText } from 'ai'
import { ollama } from 'ollama-ai-provider'

import { buildProgram } from '../tools/command'

async function main(model: Parameters<typeof ollama>[0]) {
const result = await generateText({
model: ollama(model),
prompt: 'Invent a new holiday and describe its traditions.',
})

console.log(JSON.stringify(result, null, 2))
}

buildProgram('llama3.1', main).catch(console.error)
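Note that both ollama-full-result examples intentionally dump the entire result object (the generated object or text plus metadata such as finishReason and usage) rather than just the payload, which is what the new *-full-result Makefile targets exercise.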
28 changes: 28 additions & 0 deletions examples/ai-core/src/middleware/add-to-last-user-message.ts
@@ -0,0 +1,28 @@
import { LanguageModelV1CallOptions } from 'ai'

export function addToLastUserMessage({
params,
text,
}: {
params: LanguageModelV1CallOptions
text: string
}): LanguageModelV1CallOptions {
const { prompt, ...rest } = params

const lastMessage = prompt.at(-1)

if (lastMessage?.role !== 'user') {
return params
}

return {
...rest,
prompt: [
...prompt.slice(0, -1),
{
...lastMessage,
content: [{ text, type: 'text' }, ...lastMessage.content],
},
],
}
}
39 changes: 39 additions & 0 deletions examples/ai-core/src/middleware/generate-text-cache-middleware.ts
@@ -0,0 +1,39 @@
#! /usr/bin/env -S pnpm tsx

import {
experimental_wrapLanguageModel as wrapLanguageModel,
generateText,
} from 'ai'
import { ollama } from 'ollama-ai-provider'

import { buildProgram } from '../tools/command'
import { yourCacheMiddleware } from './your-cache-middleware'

async function main(model: Parameters<typeof ollama>[0]) {
const modelWithCaching = wrapLanguageModel({
middleware: yourCacheMiddleware,
model: ollama(model),
})

const start1 = Date.now()
const result1 = await generateText({
model: modelWithCaching,
prompt: 'What cities are in the United States?',
})

const end1 = Date.now()

const start2 = Date.now()
const result2 = await generateText({
model: modelWithCaching,
prompt: 'What cities are in the United States?',
})
const end2 = Date.now()

console.log(`Time taken for result1: ${end1 - start1}ms`)
console.log(`Time taken for result2: ${end2 - start2}ms`)

console.log(result1.text === result2.text)
}

buildProgram('llama3.1', main).catch(console.error)
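Since yourCacheMiddleware (shown at the end of this diff) memoizes doGenerate results in an in-memory Map keyed by the serialized params, the second call should return almost immediately and the final console.log should print true.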
22 changes: 22 additions & 0 deletions examples/ai-core/src/middleware/generate-text-log-middleware.ts
@@ -0,0 +1,22 @@
#! /usr/bin/env -S pnpm tsx

import {
experimental_wrapLanguageModel as wrapLanguageModel,
generateText,
} from 'ai'
import { ollama } from 'ollama-ai-provider'

import { buildProgram } from '../tools/command'
import { yourLogMiddleware } from './your-log-middleware'

async function main(model: Parameters<typeof ollama>[0]) {
const result = await generateText({
model: wrapLanguageModel({
middleware: yourLogMiddleware,
model: ollama(model),
}),
prompt: 'What cities are in the United States?',
})
}

buildProgram('llama3.1', main).catch(console.error)
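The yourLogMiddleware imported above comes from a sibling file that is not shown in this excerpt. A minimal sketch of what such a logging middleware might look like, following the same middleware shape as your-cache-middleware.ts below (an assumption, not the repository's actual file):

import type { Experimental_LanguageModelV1Middleware as LanguageModelV1Middleware } from 'ai'

export const yourLogMiddleware: LanguageModelV1Middleware = {
  wrapGenerate: async ({ doGenerate, params }) => {
    console.log(`doGenerate called with: ${JSON.stringify(params, null, 2)}`)
    const result = await doGenerate()
    console.log(`doGenerate produced: ${result.text}`)
    return result
  },

  wrapStream: async ({ doStream, params }) => {
    console.log(`doStream called with: ${JSON.stringify(params, null, 2)}`)
    const { stream, ...rest } = await doStream()
    // Log each text delta as it passes through, without altering the stream.
    const logger = new TransformStream({
      transform(part, controller) {
        if (part.type === 'text-delta') {
          console.log(`text delta: ${part.textDelta}`)
        }
        controller.enqueue(part)
      },
    })
    return { stream: stream.pipeThrough(logger), ...rest }
  },
}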
17 changes: 17 additions & 0 deletions examples/ai-core/src/middleware/get-last-user-message-text.ts
@@ -0,0 +1,17 @@
import { LanguageModelV1Prompt } from 'ai'

export function getLastUserMessageText({
prompt,
}: {
prompt: LanguageModelV1Prompt
}): string | undefined {
const lastMessage = prompt.at(-1)

if (lastMessage?.role !== 'user') {
return undefined
}

return lastMessage.content.length === 0
? undefined
: lastMessage.content.filter((c) => c.type === 'text').map((c) => c.text).join('\n')
}
26 changes: 26 additions & 0 deletions examples/ai-core/src/middleware/stream-text-log-middleware.ts
@@ -0,0 +1,26 @@
#! /usr/bin/env -S pnpm tsx

import {
experimental_wrapLanguageModel as wrapLanguageModel,
streamText,
} from 'ai'
import { ollama } from 'ollama-ai-provider'

import { buildProgram } from '../tools/command'
import { yourLogMiddleware } from './your-log-middleware'

async function main(model: Parameters<typeof ollama>[0]) {
const result = await streamText({
model: wrapLanguageModel({
middleware: yourLogMiddleware,
model: ollama(model),
}),
prompt: 'What cities are in the United States?',
})

for await (const textPart of result.textStream) {
// consume the stream
}
}

buildProgram('llama3.1', main).catch(console.error)
26 changes: 26 additions & 0 deletions examples/ai-core/src/middleware/stream-text-rag-middleware.ts
@@ -0,0 +1,26 @@
#! /usr/bin/env -S pnpm tsx

import {
experimental_wrapLanguageModel as wrapLanguageModel,
streamText,
} from 'ai'
import { ollama } from 'ollama-ai-provider'

import { buildProgram } from '../tools/command'
import { yourRagMiddleware } from './your-rag-middleware'

async function main(model: Parameters<typeof ollama>[0]) {
const result = await streamText({
model: wrapLanguageModel({
middleware: yourRagMiddleware,
model: ollama(model),
}),
prompt: 'What cities are in the United States?',
})

for await (const textPart of result.textStream) {
process.stdout.write(textPart)
}
}

buildProgram('llama3.1', main).catch(console.error)
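Similarly, yourRagMiddleware comes from a file not shown in this excerpt. Given the two helpers added in this commit (getLastUserMessageText and addToLastUserMessage), a plausible sketch would use the middleware's transformParams hook to inject retrieved context; findSources below is a hypothetical stub standing in for a real retrieval step:

import type { Experimental_LanguageModelV1Middleware as LanguageModelV1Middleware } from 'ai'

import { addToLastUserMessage } from './add-to-last-user-message'
import { getLastUserMessageText } from './get-last-user-message-text'

// Hypothetical stub; a real implementation would query a vector index.
function findSources({ text }: { text: string }): string[] {
  return [`(context retrieved for: ${text})`]
}

export const yourRagMiddleware: LanguageModelV1Middleware = {
  transformParams: async ({ params }) => {
    const lastUserMessageText = getLastUserMessageText({
      prompt: params.prompt,
    })

    // Only augment prompts whose last message comes from the user.
    if (lastUserMessageText === undefined) {
      return params
    }

    const instruction =
      'Use the following information to answer the question:\n' +
      findSources({ text: lastUserMessageText }).join('\n')

    return addToLastUserMessage({ params, text: instruction })
  },
}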
22 changes: 22 additions & 0 deletions examples/ai-core/src/middleware/your-cache-middleware.ts
@@ -0,0 +1,22 @@
import type { Experimental_LanguageModelV1Middleware as LanguageModelV1Middleware } from 'ai'

// eslint-disable-next-line @typescript-eslint/no-explicit-any
const cache = new Map<string, any>()

export const yourCacheMiddleware: LanguageModelV1Middleware = {
wrapGenerate: async ({ doGenerate, params }) => {
const cacheKey = JSON.stringify(params)

if (cache.has(cacheKey)) {
return cache.get(cacheKey)
}

const result = await doGenerate()

cache.set(cacheKey, result)

return result
},

// here you would implement the caching logic for streaming
}
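The closing comment above leaves streaming uncached. A sketch of a wrapStream hook that could sit next to wrapGenerate inside yourCacheMiddleware (an assumption, not part of this commit) would collect the emitted stream parts into the same Map and replay them on a hit:

wrapStream: async ({ doStream, params }) => {
  // Namespace the key so stream and generate entries cannot collide.
  const cacheKey = `stream:${JSON.stringify(params)}`

  const cached = cache.get(cacheKey)
  if (cached !== undefined) {
    const { parts, ...rest } = cached
    return {
      ...rest,
      // Replay the previously collected parts as a fresh stream.
      stream: new ReadableStream({
        start(controller) {
          for (const part of parts) controller.enqueue(part)
          controller.close()
        },
      }),
    }
  }

  const { stream, ...rest } = await doStream()
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const parts: any[] = []

  return {
    ...rest,
    stream: stream.pipeThrough(
      new TransformStream({
        transform(part, controller) {
          parts.push(part) // collect while forwarding to the consumer
          controller.enqueue(part)
        },
        flush() {
          cache.set(cacheKey, { ...rest, parts }) // cache once the stream completes
        },
      }),
    ),
  }
},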