Use a helper for creating openai contexts instead of a single global context.
jmoseley committed Jan 17, 2025
1 parent ad3af8b commit 13d6664
Showing 6 changed files with 147 additions and 74 deletions.
70 changes: 43 additions & 27 deletions examples/blogWriter/blogWriter.tsx
@@ -1,6 +1,14 @@
-import { ChatCompletion, OpenAIProvider } from "@gensx/openai";
 import { gsx } from "gensx";
 
+import {
+  ChatCompletion as OpenAIChatCompletion,
+  Provider as OpenAIProvider,
+} from "./openai.js";
+import {
+  ChatCompletion as PplxChatCompletion,
+  Provider as PplxProvider,
+} from "./perplexity.js";
+
 interface LLMResearchBrainstormProps {
   prompt: string;
 }
@@ -16,7 +24,7 @@ const LLMResearchBrainstorm = gsx.Component<
 Here is an example of the JSON output: { "topics": ["topic 1", "topic 2", "topic 3"] }`;
   return (
-    <ChatCompletion
+    <OpenAIChatCompletion
       model="gpt-4o-mini"
       temperature={0.5}
       messages={[
@@ -30,7 +38,7 @@ Here is an example of the JSON output: { "topics": ["topic 1", "topic 2", "topic 3"] }`;
       {(completion: string | null) =>
         JSON.parse(completion ?? '{ "topics": [] }')
       }
-    </ChatCompletion>
+    </OpenAIChatCompletion>
   );
 });

@@ -45,7 +53,7 @@ const LLMResearch = gsx.Component<LLMResearchProps, LLMResearchOutput>(
     const systemPrompt = `You are a helpful assistant that researches topics. The user will provide a topic and you will research the topic. You should return a summary of the research, summarizing the most important points in a few sentences at most.`;
 
     return (
-      <ChatCompletion
+      <OpenAIChatCompletion
        model="gpt-4o-mini"
        temperature={0}
        messages={[
@@ -71,7 +79,7 @@ Here is the research for the blog post: ${research.join("\n")}`;
 
     console.log("🚀 Writing blog post for:", { prompt, research });
     return (
-      <ChatCompletion
+      <OpenAIChatCompletion
        model="gpt-4o-mini"
        temperature={0}
        messages={[
@@ -93,7 +101,7 @@ const LLMEditor = gsx.StreamComponent<LLMEditorProps>(
     const systemPrompt = `You are a helpful assistant that edits blog posts. The user will provide a draft and you will edit it to make it more engaging and interesting.`;
 
     return (
-      <ChatCompletion
+      <OpenAIChatCompletion
        stream={true}
        model="gpt-4o-mini"
        temperature={0}
@@ -109,21 +117,27 @@ const LLMEditor = gsx.StreamComponent<LLMEditorProps>(
 interface WebResearcherProps {
   prompt: string;
 }
-type WebResearcherOutput = string[];
-export const WebResearcher = gsx.Component<
-  WebResearcherProps,
-  WebResearcherOutput
->("WebResearcher", async ({ prompt }) => {
+type WebResearcherOutput = string;
+function WebResearcher({
+  prompt,
+}: gsx.Args<WebResearcherProps, WebResearcherOutput>) {
   console.log("🌐 Researching web for:", prompt);
-  const results = await Promise.resolve([
-    "web result 1",
-    "web result 2",
-    "web result 3",
-  ]);
-  return results;
-});
+  const systemPrompt =
+    "You are an AI research assistant. Your job is to find relevant online information and provide detailed answers. A user will enter a prompt and you should respond with a brief research report on the topic.";
+
+  return (
+    <PplxChatCompletion
+      model="llama-3.1-sonar-small-128k-online"
+      temperature={0}
+      messages={[
+        { role: "system", content: systemPrompt },
+        { role: "user", content: prompt },
+      ]}
+    />
+  );
+}
 
-type ParallelResearchOutput = [string[], string[]];
+type ParallelResearchOutput = [string[], string];
 interface ParallelResearchComponentProps {
   prompt: string;
 }
@@ -151,14 +165,16 @@ export const BlogWritingWorkflow =
     "BlogWritingWorkflow",
     ({ prompt }) => {
       return (
-        <OpenAIProvider apiKey={process.env.OPENAI_API_KEY}>
-          <ParallelResearch prompt={prompt}>
-            {(research) => (
-              <LLMWriter prompt={prompt} research={research.flat()}>
-                {(draft) => <LLMEditor draft={draft} stream={true} />}
-              </LLMWriter>
-            )}
-          </ParallelResearch>
+        <OpenAIProvider>
+          <PplxProvider>
+            <ParallelResearch prompt={prompt}>
+              {(research) => (
+                <LLMWriter prompt={prompt} research={research.flat()}>
+                  {(draft) => <LLMEditor draft={draft} stream={true} />}
+                </LLMWriter>
+              )}
+            </ParallelResearch>
+          </PplxProvider>
         </OpenAIProvider>
       );
     },
10 changes: 10 additions & 0 deletions examples/blogWriter/openai.tsx
@@ -0,0 +1,10 @@
+import { createOpenAIClientContext } from "@gensx/openai";
+
+const { Provider, ChatCompletion } = createOpenAIClientContext(
+  {
+    apiKey: process.env.OPENAI_API_KEY,
+  },
+  "OpenAI",
+);
+
+export { Provider, ChatCompletion };
15 changes: 15 additions & 0 deletions examples/blogWriter/perplexity.tsx
@@ -0,0 +1,15 @@
+import { createOpenAIClientContext } from "@gensx/openai";
+
+const { Provider, ChatCompletion } = createOpenAIClientContext<
+  | "llama-3.1-sonar-small-128k-online"
+  | "llama-3.1-sonar-large-128k-online"
+  | "llama-3.1-sonar-huge-128k-online"
+>(
+  {
+    apiKey: process.env.PERPLEXITY_API_KEY,
+    baseURL: "https://api.perplexity.ai/chat/completions",
+  },
+  "Perplexity",
+);
+
+export { Provider, ChatCompletion };
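
Taken together, openai.tsx and perplexity.tsx give each OpenAI-compatible backend its own Provider and bound ChatCompletion. A rough sketch of consuming the pair side by side, mirroring the blogWriter.tsx changes above (the Answer component is hypothetical, not part of this commit):

  import { gsx } from "gensx";

  import { Provider as OpenAIProvider } from "./openai.js";
  import {
    ChatCompletion as PplxChatCompletion,
    Provider as PplxProvider,
  } from "./perplexity.js";

  // Each Provider places its own client in its own context, so nesting them
  // lets completions bound to different backends coexist in one tree.
  const Answer = gsx.Component<{ prompt: string }, string>(
    "Answer",
    ({ prompt }) => (
      <OpenAIProvider>
        <PplxProvider>
          <PplxChatCompletion
            model="llama-3.1-sonar-small-128k-online"
            temperature={0}
            messages={[{ role: "user", content: prompt }]}
          />
        </PplxProvider>
      </OpenAIProvider>
    ),
  );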
109 changes: 64 additions & 45 deletions packages/gensx-openai/src/index.tsx
@@ -8,58 +8,77 @@ import {
 } from "openai/resources/index.mjs";
 import { Stream } from "openai/streaming";
 
-// Create a context for OpenAI
-export const OpenAIContext = gsx.createContext<{
-  client?: OpenAI;
-}>({});
+/**
+ * Create a context for an OpenAI client and bind it to a ChatCompletion component. This allows you to have
+ * multiple OpenAI clients that target different OpenAI-compatible API endpoints.
+ *
+ * @param args - The client options
+ * @returns The OpenAI context and the ChatCompletion component
+ */
+export const createOpenAIClientContext = <
+  // eslint-disable-next-line @typescript-eslint/no-unnecessary-type-parameters
+  ModelName extends string = ChatCompletionCreateParams["model"],
+>(
+  args: ClientOptions,
+  namePrefix: string,
+) => {
+  const client = new OpenAI(args);
 
-export const OpenAIProvider = gsx.Component<ClientOptions, never>(
-  "OpenAIProvider",
-  (args) => {
-    const client = new OpenAI(args);
-    return <OpenAIContext.Provider value={{ client }} />;
-  },
-);
+  const OpenAIClientContext = gsx.createContext(client);
 
-// Create a component for chat completions
-export const ChatCompletion = gsx.StreamComponent<ChatCompletionCreateParams>(
-  "ChatCompletion",
-  async (props) => {
-    const context = gsx.useContext(OpenAIContext);
-
-    if (!context.client) {
-      throw new Error(
-        "OpenAI client not found in context. Please wrap your component with OpenAIProvider.",
-      );
+  const BoundChatCompletion = gsx.StreamComponent<
+    ChatCompletionCreateParams & {
+      model: ModelName;
     }
+  >(`${namePrefix}ChatCompletion`, (props) => {
+    const { stream, ...rest } = props;
+    return (
+      <ChatCompletion
+        context={OpenAIClientContext}
+        stream={stream ?? false}
+        {...rest}
+      />
+    );
+  });
 
-    if (props.stream) {
-      const stream = await context.client.chat.completions.create(props);
+  function OpenAIClientProvider(_: gsx.Args<{}, never>) {
+    return <OpenAIClientContext.Provider value={client} />;
+  }
 
-      async function* generateTokens(): AsyncGenerator<
-        string,
-        void,
-        undefined
-      > {
-        for await (const chunk of stream as Stream<ChatCompletionChunk>) {
-          const content = chunk.choices[0]?.delta?.content;
-          if (content) {
-            yield content;
-          }
-        }
-      }
+  return {
+    Provider: OpenAIClientProvider,
+    ChatCompletion: BoundChatCompletion,
+  };
+};
+
+export const ChatCompletion = gsx.StreamComponent<
+  ChatCompletionCreateParams & { context: gsx.Context<OpenAI> }
+>("ChatCompletion", async (props) => {
+  const { context, ...createParams } = props;
+  const client = gsx.useContext(context);
 
-      const streamable: Streamable = generateTokens();
-      return streamable;
-    } else {
-      const response = await context.client.chat.completions.create(props);
-      const content = response.choices[0]?.message?.content ?? "";
+  if (props.stream) {
+    const stream = await client.chat.completions.create(createParams);
 
-      function* generateTokens() {
-        yield content;
+    async function* generateTokens(): AsyncGenerator<string, void, undefined> {
+      for await (const chunk of stream as Stream<ChatCompletionChunk>) {
+        const content = chunk.choices[0]?.delta?.content;
+        if (content) {
+          yield content;
+        }
       }
+    }
 
-      return generateTokens();
+    const streamable: Streamable = generateTokens();
+    return streamable;
+  } else {
+    const response = await client.chat.completions.create(props);
+    const content = response.choices[0]?.message?.content ?? "";
+
+    function* generateTokens() {
+      yield content;
     }
-  },
-);
+
+    return generateTokens();
+  }
+});
5 changes: 5 additions & 0 deletions packages/gensx/src/index.ts
@@ -11,6 +11,8 @@ export type {
   StreamArgs,
   GsxStreamComponent,
   GsxComponent,
+  ExecutionContext,
+  ContextProvider,
 } from "./types";
 
 import { Component, StreamComponent } from "./component";
@@ -22,6 +24,9 @@ import * as types from "./types";
 export namespace gsx {
   export type Args<P, O> = types.Args<P, O>;
   export type StreamArgs<P> = types.StreamArgs<P>;
+  export type ExecutionContext = types.ExecutionContext;
+  export type Context<T> = types.Context<T>;
+  export type ContextProvider<T> = types.ContextProvider<T>;
 }
 
 export const gsx = {
12 changes: 10 additions & 2 deletions packages/gensx/src/types.ts
@@ -1,10 +1,16 @@
-import { ExecutionContext } from "./context";
 import { JSX } from "./jsx-runtime";
 
 export type MaybePromise<T> = T | Promise<T>;
 
 export type Element = JSX.Element;
 
+export type WorkflowContext = Record<symbol, unknown>;
+
+export interface ExecutionContext {
+  withContext(newContext: Partial<WorkflowContext>): ExecutionContext;
+  get<K extends keyof WorkflowContext>(key: K): WorkflowContext[K] | undefined;
+}
+
 export type Primitive = string | number | boolean | null | undefined;
 
 /**
@@ -88,9 +94,11 @@ export type StreamArgs<P> = P & {
     | ((output: Streamable) => Promise<void>);
 };
 
+export type ContextProvider<T> = GsxComponent<{ value: T }, ExecutionContext>;
+
 export interface Context<T> {
   readonly __type: "Context";
   readonly defaultValue: T;
   readonly symbol: symbol;
-  Provider: GsxComponent<{ value: T }, ExecutionContext>;
+  Provider: ContextProvider<T>;
 }
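
In this shape, gsx.createContext returns a Context<T> whose Provider field is a ContextProvider<T> (a component yielding an ExecutionContext), and gsx.useContext reads the value back via the context's symbol. A minimal sketch under those assumptions (the RequestId context is made up for illustration):

  import { gsx } from "gensx";

  // Context<string>; its Provider is typed ContextProvider<string>,
  // i.e. GsxComponent<{ value: string }, ExecutionContext>.
  const RequestIdContext = gsx.createContext("unknown");

  const EchoRequestId = gsx.Component<{}, string>("EchoRequestId", () => {
    // Resolves the nearest Provider's value, falling back to the default.
    const requestId = gsx.useContext(RequestIdContext);
    return requestId;
  });

  const workflow = (
    <RequestIdContext.Provider value="req-123">
      <EchoRequestId />
    </RequestIdContext.Provider>
  );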
