-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- 添加智谱、通义和豆包等新的 AI 模型提供商支持 - 改进 AI 提供商工厂的缓存管理机制,增加 30 分钟缓存清理 - 优化错误处理和模型信息展示 - 统一本地化消息的使用 - 重构 OpenAI Provider,提取基类以复用代码 - 改进各提供商的代码结构和异常处理
- Loading branch information
1 parent
5603b08
commit 1b36a48
Showing
9 changed files
with
620 additions
and
295 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,100 @@ | ||
import OpenAI from "openai"; | ||
import { ChatCompletionMessageParam } from "openai/resources"; | ||
import { AIProvider, AIRequestParams, AIResponse, AIModel } from "../types"; | ||
import { generateWithRetry, getSystemPrompt } from "../utils/generateHelper"; | ||
|
||
/**
 * Configuration consumed by {@link BaseOpenAIProvider} to build an
 * OpenAI-compatible chat client for a concrete provider.
 */
export interface OpenAIProviderConfig {
  /** API key sent to the provider (also forwarded as an `api-key` header when `baseURL` is set). */
  apiKey: string;
  /** Override endpoint root for OpenAI-compatible third-party services. */
  baseURL?: string;
  /** API version string; currently unused by `createClient` — TODO confirm whether it should be sent as a query param. */
  apiVersion?: string;
  /** Stable machine id of the provider (e.g. "dashscope"). */
  providerId: string;
  /** Human-readable provider name shown in UI. */
  providerName: string;
  /** Model id used when a request specifies none. */
  defaultModel?: string;
  /** Static catalogue of models this provider exposes. */
  models: AIModel[];
}
|
||
export abstract class BaseOpenAIProvider implements AIProvider { | ||
protected openai: OpenAI; | ||
protected config: OpenAIProviderConfig; | ||
protected provider: { id: string; name: string }; | ||
|
||
constructor(config: OpenAIProviderConfig) { | ||
this.config = config; | ||
this.provider = { | ||
id: config.providerId, | ||
name: config.providerName, | ||
}; | ||
this.openai = this.createClient(); | ||
} | ||
|
||
protected createClient(): OpenAI { | ||
const config: any = { | ||
apiKey: this.config.apiKey, | ||
}; | ||
|
||
if (this.config.baseURL) { | ||
config.baseURL = this.config.baseURL; | ||
if (this.config.apiKey) { | ||
// config.defaultQuery = { "api-version": this.config.apiVersion }; | ||
config.defaultHeaders = { "api-key": this.config.apiKey }; | ||
} | ||
} | ||
console.log("config", config); | ||
|
||
return new OpenAI(config); | ||
} | ||
|
||
async generateResponse(params: AIRequestParams): Promise<AIResponse> { | ||
return generateWithRetry( | ||
params, | ||
async (truncatedDiff) => { | ||
const messages: ChatCompletionMessageParam[] = [ | ||
{ | ||
role: "system", | ||
content: getSystemPrompt(params), | ||
}, | ||
{ | ||
role: "user", | ||
content: truncatedDiff, | ||
}, | ||
]; | ||
|
||
const completion = await this.openai.chat.completions.create({ | ||
model: | ||
(params.model && params.model.id) || | ||
this.config.defaultModel || | ||
"gpt-3.5-turbo", | ||
messages, | ||
}); | ||
|
||
return { | ||
content: completion.choices[0]?.message?.content || "", | ||
usage: { | ||
promptTokens: completion.usage?.prompt_tokens, | ||
completionTokens: completion.usage?.completion_tokens, | ||
totalTokens: completion.usage?.total_tokens, | ||
}, | ||
}; | ||
}, | ||
{ | ||
initialMaxLength: params.model?.maxTokens?.input || 16385, | ||
provider: this.getId(), | ||
} | ||
); | ||
} | ||
|
||
async getModels(): Promise<AIModel[]> { | ||
return Promise.resolve(this.config.models); | ||
} | ||
|
||
getName(): string { | ||
return this.provider.name; | ||
} | ||
|
||
getId(): string { | ||
return this.provider.id; | ||
} | ||
|
||
abstract isAvailable(): Promise<boolean>; | ||
abstract refreshModels(): Promise<string[]>; | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,145 @@ | ||
import { BaseOpenAIProvider } from "./BaseOpenAIProvider"; | ||
import { ConfigurationManager } from "../../config/ConfigurationManager"; | ||
import { AIModel } from "../types"; | ||
|
||
// Static catalogue of DashScope (Alibaba Tongyi Qianwen) models.
// Display names are user-facing and intentionally bilingual — do not edit.
// Costs are per-1K-token prices; maxTokens.input/output are the provider's
// documented context/generation limits — TODO confirm against current
// DashScope pricing pages, these change over time.
const dashscopeModels: AIModel[] = [
  // Flagship tier: strongest comprehension/generation, highest cost.
  {
    id: "qwen-max",
    name: "Qwen Max (稳定版) - 旗舰模型: 强大的理解和生成能力",
    maxTokens: { input: 30720, output: 8192 },
    provider: { id: "dashscope", name: "DashScope" },
    capabilities: {
      streaming: true,
      functionCalling: true,
    },
    cost: {
      input: 0.02,
      output: 0.06,
    },
  },
  {
    id: "qwen-max-latest",
    name: "Qwen Max (最新版) - 旗舰实验版: 最新的模型改进和优化",
    maxTokens: { input: 30720, output: 8192 },
    provider: { id: "dashscope", name: "DashScope" },
    capabilities: {
      streaming: true,
      functionCalling: true,
    },
    cost: {
      input: 0.02,
      output: 0.06,
    },
  },
  // Plus tier: larger context window, balanced price/performance.
  {
    id: "qwen-plus",
    name: "Qwen Plus (稳定版) - 增强版: 性能与成本的最佳平衡",
    maxTokens: { input: 129024, output: 8192 },
    provider: { id: "dashscope", name: "DashScope" },
    capabilities: {
      streaming: true,
      functionCalling: true,
    },
    cost: {
      input: 0.0008,
      output: 0.002,
    },
  },
  {
    id: "qwen-plus-latest",
    name: "Qwen Plus (最新版) - 增强实验版: 新特性和优化的测试版本",
    maxTokens: { input: 129024, output: 8192 },
    provider: { id: "dashscope", name: "DashScope" },
    capabilities: {
      streaming: true,
      functionCalling: true,
    },
    cost: {
      input: 0.0008,
      output: 0.002,
    },
  },
  // Turbo tier: cheapest; `qwen-turbo` is the provider default model.
  {
    id: "qwen-turbo",
    name: "Qwen Turbo (稳定版) - 快速版: 高性价比的日常对话模型",
    maxTokens: { input: 129024, output: 8192 },
    provider: { id: "dashscope", name: "DashScope" },
    default: true,
    capabilities: {
      streaming: true,
      functionCalling: true,
    },
    cost: {
      input: 0.0003,
      output: 0.0006,
    },
  },
  {
    id: "qwen-turbo-latest",
    name: "Qwen Turbo (最新版) - 快速实验版: 优化推理速度的最新版本",
    // NOTE: 1M-token input limit differs from the stable turbo entry — confirm intentional.
    maxTokens: { input: 1000000, output: 8192 },
    provider: { id: "dashscope", name: "DashScope" },
    capabilities: {
      streaming: true,
      functionCalling: true,
    },
    cost: {
      input: 0.0003,
      output: 0.0006,
    },
  },
  // Coder tier: code generation / analysis models.
  {
    id: "qwen-coder-turbo",
    name: "Qwen Coder Turbo (稳定版) - 编程专用: 代码生成和分析的专业模型",
    maxTokens: { input: 129024, output: 8192 },
    provider: { id: "dashscope", name: "DashScope" },
    capabilities: {
      streaming: true,
      functionCalling: true,
    },
    cost: {
      input: 0.002,
      output: 0.006,
    },
  },
  {
    id: "qwen-coder-turbo-latest",
    name: "Qwen Coder Turbo (最新版) - 编程实验版: 最新的代码辅助功能",
    maxTokens: { input: 129024, output: 8192 },
    provider: { id: "dashscope", name: "DashScope" },
    capabilities: {
      streaming: true,
      functionCalling: true,
    },
    cost: {
      input: 0.002,
      output: 0.006,
    },
  },
];
|
||
export class DashScopeProvider extends BaseOpenAIProvider { | ||
constructor() { | ||
const configManager = ConfigurationManager.getInstance(); | ||
super({ | ||
apiKey: configManager.getConfig<string>("DASHSCOPE_API_KEY", false), | ||
baseURL: "https://api.dashscope.com/v1/services/chat/completions", | ||
providerId: "dashscope", | ||
providerName: "DashScope", | ||
models: dashscopeModels, | ||
defaultModel: "qwen-turbo", | ||
}); | ||
} | ||
|
||
async isAvailable(): Promise<boolean> { | ||
try { | ||
return !!this.config.apiKey; | ||
} catch { | ||
return false; | ||
} | ||
} | ||
|
||
async refreshModels(): Promise<string[]> { | ||
return Promise.resolve(dashscopeModels.map((m) => m.id)); | ||
} | ||
} |
Oops, something went wrong.