diff --git a/app/api/cors/[...path]/route.ts b/app/api/cors/[...path]/route.ts index 58c2b8e09b9..586afe5b09f 100644 --- a/app/api/cors/[...path]/route.ts +++ b/app/api/cors/[...path]/route.ts @@ -1,24 +1,11 @@ import { NextRequest, NextResponse } from "next/server"; -import { DEFAULT_CORS_HOST } from "@/app/constant"; async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { if (req.method === "OPTIONS") { - // Set CORS headers for preflight requests - return NextResponse.json( - { body: "OK" }, - { - status: 200, - headers: { - "Access-Control-Allow-Origin": `${DEFAULT_CORS_HOST}`, // Replace * with the appropriate origin(s) - "Access-Control-Allow-Methods": "GET, POST, OPTIONS", // Add other allowed methods if needed - "Access-Control-Allow-Headers": "*", // Replace * with the appropriate headers - "Access-Control-Max-Age": "86400", // Adjust the max age value if needed - }, - }, - ); + return NextResponse.json({ body: "OK" }, { status: 200 }); } const [protocol, ...subpath] = params.path; @@ -29,19 +16,22 @@ async function handle( method?.toLowerCase() ?? "", ); - const fetchOptions: RequestInit = { - headers: { - authorization: req.headers.get("authorization") ?? "", - }, - body: shouldNotHaveBody ? 
null : req.body, - method, - // @ts-ignore - duplex: "half", - }; + function isRealDevicez(userAgent: string | null): boolean { + // Author : @H0llyW00dzZ + // Note : This just an experiment for a prevent suspicious bot + // Modify this function to define your logic for determining if the user-agent belongs to a real device + // For example, you can check if the user-agent contains certain keywords or patterns that indicate a real device + if (userAgent) { + return userAgent.includes("AppleWebKit") && !userAgent.includes("Headless"); + } + return false; + } + + + const userAgent = req.headers.get("User-Agent"); + const isRealDevice = isRealDevicez(userAgent); - const origin = req.headers.get("Origin"); - const referrer = req.headers.get("Referer"); - if (origin !== DEFAULT_CORS_HOST || (referrer && !referrer.includes(DEFAULT_CORS_HOST))) { + if (!isRealDevice) { return NextResponse.json( { error: true, @@ -53,6 +43,16 @@ async function handle( ); } + const fetchOptions: RequestInit = { + headers: { + authorization: req.headers.get("authorization") ?? "", + }, + body: shouldNotHaveBody ? null : req.body, + method, + // @ts-ignore + duplex: "half", + }; + const fetchResult = await fetch(targetUrl, fetchOptions); console.log("[Cloud Sync]", targetUrl, { diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 6fddd8eafd4..fb9f65c956e 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -17,6 +17,14 @@ import { import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; import { makeAzurePath } from "@/app/azure"; +/** + * Models Text-Moderations OpenAI + * Author: @H0llyW00dzZ + **/ +interface ModerationResponse { + flagged: boolean; + categories: Record; +} export interface OpenAIListModelResponse { object: string; @@ -66,10 +74,41 @@ export class ChatGPTApi implements LLMApi { return res.choices?.at(0)?.message?.content ?? 
""; } + /** System Fingerprint & Max Tokens + * Author : @H0llyW00dzZ + * This method should be a member of the ChatGPTApi class, not nested inside another method + **/ + private getNewStuff( + model: string, + max_tokens?: number, + system_fingerprint?: string + ): { max_tokens?: number; system_fingerprint?: string; isNewModel: boolean } { + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + }; + const isNewModel = model.endsWith("-preview"); + if (isNewModel) { + return { + max_tokens: max_tokens !== undefined ? max_tokens : modelConfig.max_tokens, + system_fingerprint: + system_fingerprint !== undefined + ? system_fingerprint + : modelConfig.system_fingerprint, + isNewModel: true, + }; + } else { + return { + isNewModel: false, + }; + } + } + async chat(options: ChatOptions) { + const textmoderation = useAppConfig.getState().textmoderation; const latest = OpenaiPath.TextModerationModels.latest; - if (OpenaiPath.TextModeration && options.whitelist !== true) { + if (textmoderation && options.whitelist !== true) { const messages = options.messages.map((v) => ({ role: v.role, content: v.content, @@ -86,84 +125,40 @@ export class ChatGPTApi implements LLMApi { }; try { - let moderationResponse = await fetch(moderationPath, { - method: "POST", - body: JSON.stringify(moderationPayload), - headers: getHeaders(), - }); - - let moderationJson = await moderationResponse.json(); - - if (moderationJson.results && moderationJson.results.length > 0) { - let moderationResult = moderationJson.results[0]; // Access the first element of the array - - if (!moderationResult.flagged) { - const stable = OpenaiPath.TextModerationModels.stable; // Fall back to "stable" if "latest" is still false - moderationPayload.model = stable; - moderationResponse = await fetch(moderationPath, { - method: "POST", - body: JSON.stringify(moderationPayload), - headers: getHeaders(), + const moderationResponse = await 
this.sendModerationRequest( + moderationPath, + moderationPayload + ); + + if (moderationResponse.flagged) { + const flaggedCategories = Object.entries( + moderationResponse.categories + ) + .filter(([category, flagged]) => flagged) + .map(([category]) => category); + + if (flaggedCategories.length > 0) { + const translatedReasons = flaggedCategories.map((category) => { + const translation = + (Locale.Error.Content_Policy.Reason as any)[category]; + return translation ? translation : category; // Use category name if translation is not available }); + const translatedReasonText = translatedReasons.join(", "); + const responseText = `${Locale.Error.Content_Policy.Title}\n${Locale.Error.Content_Policy.Reason.Title}: ${translatedReasonText}\n${Locale.Error.Content_Policy.SubTitle}\n`; - moderationJson = await moderationResponse.json(); - - if (moderationJson.results && moderationJson.results.length > 0) { - moderationResult = moderationJson.results[0]; // Access the first element of the array - } - } - - if (moderationResult && moderationResult.flagged) { - // Display a message indicating content policy violation - const contentPolicyViolations = moderationResult.categories; - const flaggedCategories = Object.entries(contentPolicyViolations) - .filter(([category, flagged]) => flagged) - .map(([category]) => category); - - if (flaggedCategories.length > 0) { - const translatedReasons = flaggedCategories.map((category) => { - const translation = ( - Locale.Error.Content_Policy.Reason as any - )[category]; - return translation ? 
translation : category; // Use category name if translation is not available - }); - const translatedReasonText = translatedReasons.join(", "); - const responseText = `${Locale.Error.Content_Policy.Title}\n${Locale.Error.Content_Policy.Reason.Title}: ${translatedReasonText}\n`; - - // Generate text-based graph for category scores - const categoryScores = moderationResult.category_scores; - const graphLines = flaggedCategories.map((category) => { - const score = categoryScores[category]; - const barLength = Math.round(score * 100); - const bar = "█".repeat(barLength / 10); - return `${category}: ${bar} [${barLength.toFixed(2)}%]`; - }); - const graphText = graphLines.join("\n"); - - const responseWithGraph = `${responseText}${graphText}`; - options.onFinish(responseWithGraph); - return; - } + const responseWithGraph = responseText; + options.onFinish(responseWithGraph); + return; } } } catch (e) { console.log("[Request] failed to make a moderation request", e); - options.onError?.(e as Error); - - // Show error response as JSON for 401 status code - if (e instanceof Response && e.status === 401) { - const errorResponse = { - error: "Unauthorized", - extraInfo: await e.text(), - }; - options.onFinish(JSON.stringify(errorResponse)); - } else { - const errorResponse = { - error: (e as Error).message, - stack: (e as Error).stack, - }; - options.onFinish(JSON.stringify(errorResponse)); - } + const error = { + error: (e as Error).message, + stack: (e as Error).stack, + }; + options.onFinish(JSON.stringify(error)); + return; } } } @@ -172,8 +167,6 @@ export class ChatGPTApi implements LLMApi { role: v.role, content: v.content, })); - const userMessages = messages.filter((msg) => msg.role === "user"); - const userMessage = userMessages[userMessages.length - 1]?.content; const modelConfig = { ...useAppConfig.getState().modelConfig, @@ -183,33 +176,102 @@ export class ChatGPTApi implements LLMApi { }, }; - let requestPayload: any = { - messages, - stream: 
options.config.stream, - model: modelConfig.model, - temperature: modelConfig.temperature, - presence_penalty: modelConfig.presence_penalty, - frequency_penalty: modelConfig.frequency_penalty, - top_p: modelConfig.top_p, - // max_tokens: Math.max(modelConfig.max_tokens, 1024), - // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. + const defaultModel = modelConfig.model; + + const userMessages = messages.filter((msg) => msg.role === "user"); + const userMessage = userMessages[userMessages.length - 1]?.content; + /** + * DALL·E Models + * Author: @H0llyW00dzZ + * Usage in this chat: prompt + * Example: A Best Picture of Andromeda Galaxy + **/ + const actualModel = this.getModelForInstructVersion(modelConfig.model); + const { max_tokens, system_fingerprint } = this.getNewStuff( + modelConfig.model, + modelConfig.max_tokens, + modelConfig.system_fingerprint + ); + + const requestPayloads = { + chat: { + messages, + stream: options.config.stream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + // beta test for new model's since it consumed much tokens + // max is 4096 + ...{ max_tokens }, // Spread the max_tokens value + // not yet ready + //...{ system_fingerprint }, // Spread the system_fingerprint value + }, + image: { + model: actualModel, + prompt: userMessage, + n: modelConfig.n, + quality: modelConfig.quality, + style: modelConfig.style, + size: modelConfig.size, + }, }; - if (OpenaiPath.TodoPath) { - requestPayload = { - input: userMessage, - model: latest, - }; + /** Magic TypeScript payload parameter 🎩 🪄 + * Author : @H0llyW00dzZ + **/ + const magicPayload = this.getNewStuff(defaultModel); + + if (defaultModel.startsWith("dall-e")) { + console.log("[Request] openai payload: ", { + image: requestPayloads.image, + }); + } else if 
(magicPayload.isNewModel) { + console.log("[Request] openai payload: ", { + chat: requestPayloads.chat, + }); + } else { + const { max_tokens, ...oldChatPayload } = requestPayloads.chat; + console.log("[Request] openai payload: ", { + chat: oldChatPayload, + }); } - console.log("[Request] openai payload: ", requestPayload); - const shouldStream = !!options.config.stream; const controller = new AbortController(); options.onController?.(controller); try { - const chatPath = this.path(OpenaiPath.ChatPath); + const dallemodels = + defaultModel.startsWith("dall-e"); + + let chatPath = dallemodels + ? this.path(OpenaiPath.ImageCreationPath) + : this.path(OpenaiPath.ChatPath); + + let requestPayload; + if (dallemodels) { + /** + * Author : @H0llyW00dzZ + * Use the image payload structure + */ + if (defaultModel.includes("dall-e-2")) { + /** + * Magic TypeScript payload parameter 🎩 🪄 + **/ + const { quality, style, ...imagePayload } = requestPayloads.image; + requestPayload = imagePayload; + } else { + requestPayload = requestPayloads.image; + } + } else { + /** + * Use the chat model payload structure + */ + requestPayload = requestPayloads.chat; + } + const chatPayload = { method: "POST", body: JSON.stringify(requestPayload), @@ -223,9 +285,8 @@ export class ChatGPTApi implements LLMApi { REQUEST_TIMEOUT_MS, ); - let responseText = ""; - if (shouldStream) { + let responseText = ""; let finished = false; const finish = () => { @@ -237,21 +298,116 @@ export class ChatGPTApi implements LLMApi { controller.signal.onabort = finish; + const isApp = !!getClientConfig()?.isApp; + const apiPath = "api/openai/"; + fetchEventSource(chatPath, { ...chatPayload, async onopen(res) { clearTimeout(requestTimeoutId); const contentType = res.headers.get("content-type"); - console.log( - "[OpenAI] request response content type: ", - contentType, - ); + console.log("[OpenAI] request response content type: ", contentType); if (contentType?.startsWith("text/plain")) { responseText = await 
res.clone().text(); - return finish(); - } + } else if (contentType?.startsWith("application/json")) { + const jsonResponse = await res.clone().json(); + const imageUrl = jsonResponse.data?.[0]?.url; + const prompt = requestPayloads.image.prompt; + const revised_prompt = jsonResponse.data?.[0]?.revised_prompt; + const index = requestPayloads.image.n - 1; + const size = requestPayloads.image.size; + const InstrucModel = defaultModel.endsWith("-vision"); + + if (defaultModel.includes("dall-e-3")) { + const imageDescription = `| ![${prompt}](${imageUrl}) |\n|---|\n| Size: ${size} |\n| [Download Here](${imageUrl}) |\n| 🎩 🪄 Revised Prompt (${index + 1}): ${revised_prompt} |\n| 🤖 AI Models: ${defaultModel} |`; + + responseText = `${imageDescription}`; + } else { + const imageDescription = `#### ${prompt} (${index + 1})\n\n\n | ![${prompt}](${imageUrl}) |\n|---|\n| Size: ${size} |\n| [Download Here](${imageUrl}) |\n| 🤖 AI Models: ${defaultModel} |`; + + responseText = `${imageDescription}`; + } + + if (InstrucModel) { + const instructx = await fetch( + (isApp ? 
DEFAULT_API_HOST : apiPath) + OpenaiPath.ChatPath, // Pass the path parameter + { + method: "POST", + body: JSON.stringify({ + messages: [ + ...messages, + ], + model: "gpt-4-vision-preview", + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + // have to add this max_tokens for dall-e instruct + max_tokens: modelConfig.max_tokens, + }), + headers: getHeaders(), + } + ); + clearTimeout(requestTimeoutId); + const instructxx = await instructx.json(); + + const instructionDelta = instructxx.choices?.[0]?.message?.content; + const instructionPayload = { + messages: [ + ...messages, + { + role: "system", + content: instructionDelta, + }, + ], + model: "gpt-4-vision-preview", + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + max_tokens: modelConfig.max_tokens, + }; + + const instructionResponse = await fetch( + (isApp ? 
DEFAULT_API_HOST : apiPath) + OpenaiPath.ChatPath, + { + method: "POST", + body: JSON.stringify(instructionPayload), + headers: getHeaders(), + } + ); + + const instructionJson = await instructionResponse.json(); + const instructionMessage = instructionJson.choices?.[0]?.message?.content; // Access the appropriate property containing the message + const imageDescription = `| ![${prompt}](${imageUrl}) |\n|---|\n| Size: ${size} |\n| [Download Here](${imageUrl}) |\n| 🤖 AI Models: ${defaultModel} |`; + + responseText = `${imageDescription}\n\n${instructionMessage}`; + } + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + let anyinfo = await res.clone().text(); + try { + const infJson = await res.clone().json(); + anyinfo = prettyObject(infJson); + } catch { } + if (res.status === 401) { + responseText = "\n\n" + Locale.Error.Unauthorized; + } + if (res.status !== 200) { + if (anyinfo) { + responseText += "\n\n" + anyinfo; + } + } + return; + } + } if ( !res.ok || !res.headers @@ -285,20 +441,14 @@ export class ChatGPTApi implements LLMApi { } const text = msg.data; try { - const json = JSON.parse(text) as { - choices: Array<{ - delta: { - content: string; - }; - }>; - }; - const delta = json.choices[0]?.delta?.content; + const json = JSON.parse(text); + const delta = json.choices[0].delta.content; if (delta) { responseText += delta; options.onUpdate?.(responseText, delta); } } catch (e) { - console.error("[Request] parse error", text); + console.error("[Request] parse error", text, msg); } }, onclose() { @@ -315,10 +465,9 @@ export class ChatGPTApi implements LLMApi { clearTimeout(requestTimeoutId); const resJson = await res.json(); - responseText = this.extractMessage(resJson); + const message = this.extractMessage(resJson); + options.onFinish(message); } - - options.onFinish(responseText); } catch (e) { console.log("[Request] failed to make a chat request", e); options.onError?.(e as Error); 
@@ -385,18 +534,19 @@ export class ChatGPTApi implements LLMApi { if (total.hard_limit_usd) { total.hard_limit_usd = Math.round(total.hard_limit_usd * 100) / 100; } - + if (total.system_hard_limit_usd) { - total.system_hard_limit_usd = Math.round(total.system_hard_limit_usd * 100) / 100; + total.system_hard_limit_usd = + Math.round(total.system_hard_limit_usd * 100) / 100; } - + return { used: response.total_usage, total: { hard_limit_usd: total.hard_limit_usd, system_hard_limit_usd: total.system_hard_limit_usd, }, - } as unknown as LLMUsage; + } as unknown as LLMUsage; } async models(): Promise { @@ -424,5 +574,105 @@ export class ChatGPTApi implements LLMApi { available: true, })); } + + /** + * Models Text-Moderations OpenAI + * Author: @H0llyW00dzZ + **/ + + private async sendModerationRequest( + moderationPath: string, + moderationPayload: any + ): Promise { + try { + const moderationResponse = await fetch(moderationPath, { + method: "POST", + body: JSON.stringify(moderationPayload), + headers: getHeaders(), + }); + + const moderationJson = await moderationResponse.json(); + + if (moderationJson.results && moderationJson.results.length > 0) { + let moderationResult = moderationJson.results[0]; // Access the first element of the array + + if (!moderationResult.flagged) { + const stable = OpenaiPath.TextModerationModels.stable; // Fall back to "stable" if "latest" is still false + moderationPayload.model = stable; + const fallbackModerationResponse = await fetch(moderationPath, { + method: "POST", + body: JSON.stringify(moderationPayload), + headers: getHeaders(), + }); + + const fallbackModerationJson = + await fallbackModerationResponse.json(); + + if ( + fallbackModerationJson.results && + fallbackModerationJson.results.length > 0 + ) { + moderationResult = fallbackModerationJson.results[0]; // Access the first element of the array + } + } + + console.log("[Text Moderation] flagged:", moderationResult.flagged); // Log the flagged result + + if 
(moderationResult.flagged) { + const flaggedCategories = Object.entries(moderationResult.categories) + .filter(([category, flagged]) => flagged) + .map(([category]) => category); + + console.log("[Text Moderation] flagged categories:", flaggedCategories); // Log the flagged categories + } + + return moderationResult as ModerationResponse; + } else { + console.error("Moderation response is empty"); + throw new Error("Failed to get moderation response"); + } + } catch (e) { + console.error("[Request] failed to make a moderation request", e); + return {} as ModerationResponse; + } + } + /** + * DALL·E Instruct + * Author : @H0llyW00dzZ + * Still WIP + */ + + private getModelForInstructVersion(inputModel: string): string { + const modelMap: Record = { + "dall-e-2-beta-instruct-vision": "dall-e-2", + "dall-e-3-beta-instruct-vision": "dall-e-3", + }; + return modelMap[inputModel] || inputModel; + } + /** + * DALL·E Models + * Author : @H0llyW00dzZ + * Todo : Function to save an image from a response json object and make it accessible locally + */ + + private async saveImageFromResponse(imageResponse: any, filename: string): Promise { + try { + const blob = await imageResponse.blob(); + + const url = URL.createObjectURL(blob); + + const link = document.createElement('a'); + link.href = url; + link.download = filename; + link.click(); + + URL.revokeObjectURL(url); + + console.log('Image saved successfully:', filename); + } catch (e) { + console.error('Failed to save image:', e); + } + } } + export { OpenaiPath }; diff --git a/app/components/button.tsx b/app/components/button.tsx index 99c0305e185..7a5633924c5 100644 --- a/app/components/button.tsx +++ b/app/components/button.tsx @@ -16,61 +16,36 @@ export function IconButton(props: { disabled?: boolean; tabIndex?: number; autoFocus?: boolean; - importData?: () => void; // Add importData prop - confirmDialogVisible?: boolean; // Add confirmDialogVisible prop }) { - const { - onClick, - icon, - type, - text, - bordered, - 
shadow, - className, - title, - disabled, - tabIndex, - autoFocus, - importData, // Destructure importData prop - confirmDialogVisible, // Destructure confirmDialogVisible prop - } = props; - - const handleClick = () => { - if (confirmDialogVisible) { - // Handle confirm dialog logic here - } else if (importData) { - importData(); - } else if (onClick) { - onClick(); - } - }; - return ( ); } diff --git a/app/components/changelog.tsx b/app/components/changelog.tsx index fd9a94f5d16..fa7f7314071 100644 --- a/app/components/changelog.tsx +++ b/app/components/changelog.tsx @@ -56,7 +56,7 @@ export function ChangeLog(props: { onClose?: () => void }) { change.replace(prLinkRegex, '[$&](https://github.com/H0llyW00dzZ/ChatGPT-Next-Web/pull/$1/commits)') ).join('\n\n\n'); - table += `\n\n\n![GitHub contributors](https://img.shields.io/github/contributors/Yidadaa/ChatGPT-Next-Web.svg) ![GitHub commits](https://badgen.net/github/commits/H0llyW00dzZ/ChatGPT-Next-Web) ![GitHub license](https://img.shields.io/github/license/H0llyW00dzZ/ChatGPT-Next-Web) [![GitHub forks](https://img.shields.io/github/forks/Yidadaa/ChatGPT-Next-Web.svg)](https://github.com/Yidadaa/ChatGPT-Next-Web/network/members) [![GitHub stars](https://img.shields.io/github/stars/Yidadaa/ChatGPT-Next-Web.svg)](https://github.com/Yidadaa/ChatGPT-Next-Web/stargazers) [![Github All Releases](https://img.shields.io/github/downloads/Yidadaa/ChatGPT-Next-Web/total.svg)](https://github.com/Yidadaa/ChatGPT-Next-Web/releases/)\n\n\n [![GitHub](https://img.shields.io/badge/--181717?logo=github&logoColor=ffffff)](https://github.com/${author}) ![${author.replace("[bot]", "")}](https://github.com/${author.replace("[bot]", "")}.png?size=25) ${authorSection} :\n\n${prLink}\n\n\n${descriptionWithLinks}\n\n\n\n\n\n`; + table += `\n\n\n![GitHub contributors](https://img.shields.io/github/contributors/Yidadaa/ChatGPT-Next-Web.svg) ![GitHub 
commits](https://badgen.net/github/commits/H0llyW00dzZ/ChatGPT-Next-Web) ![GitHub license](https://img.shields.io/github/license/H0llyW00dzZ/ChatGPT-Next-Web) [![GitHub forks](https://img.shields.io/github/forks/Yidadaa/ChatGPT-Next-Web.svg)](https://github.com/Yidadaa/ChatGPT-Next-Web/network/members) [![GitHub stars](https://img.shields.io/github/stars/Yidadaa/ChatGPT-Next-Web.svg)](https://github.com/Yidadaa/ChatGPT-Next-Web/stargazers) [![Github All Releases](https://img.shields.io/github/downloads/Yidadaa/ChatGPT-Next-Web/total.svg)](https://github.com/Yidadaa/ChatGPT-Next-Web/releases/) [![CI: CodeQL Unit Testing Advanced](https://github.com/H0llyW00dzZ/ChatGPT-Next-Web/actions/workflows/codeql.yml/badge.svg)](https://github.com/H0llyW00dzZ/ChatGPT-Next-Web/actions/workflows/codeql.yml) \n\n\n [![GitHub](https://img.shields.io/badge/--181717?logo=github&logoColor=ffffff)](https://github.com/${author}) ![${author.replace("[bot]", "")}](https://github.com/${author.replace("[bot]", "")}.png?size=25) ${authorSection} :\n\n${prLink}\n\n\n${descriptionWithLinks}\n\n\n\n\n\n`; } else { table += `###${commitInfo?.commitMessage.summary}###\nNo changes\n\n`; } diff --git a/app/components/exporter.tsx b/app/components/exporter.tsx index c3be4fc9dcb..60a2eca0498 100644 --- a/app/components/exporter.tsx +++ b/app/components/exporter.tsx @@ -560,14 +560,9 @@ export function ImagePreviewer(props: { }`; return ( -
+
- +
diff --git a/app/components/model-config.tsx b/app/components/model-config.tsx index 1c730e1449f..6c278953f73 100644 --- a/app/components/model-config.tsx +++ b/app/components/model-config.tsx @@ -11,6 +11,11 @@ export function ModelConfigList(props: { }) { const allModels = useAllModels(); + const sizeOptions = ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]; + const styleOptions = ["Vivid", "Natural"]; + + const isDalleModel = props.modelConfig.model.startsWith("dall-e"); + return ( <> @@ -19,9 +24,9 @@ export function ModelConfigList(props: { onChange={(e) => { props.updateConfig( (config) => - (config.model = ModalConfigValidator.model( - e.currentTarget.value, - )), + (config.model = ModalConfigValidator.model( + e.currentTarget.value, + )), ); }} > @@ -32,183 +37,261 @@ export function ModelConfigList(props: { ))} - - { - props.updateConfig( - (config) => - (config.temperature = ModalConfigValidator.temperature( - e.currentTarget.valueAsNumber, - )), - ); - }} - > - - - { - props.updateConfig( - (config) => - (config.top_p = ModalConfigValidator.top_p( - e.currentTarget.valueAsNumber, - )), - ); - }} - > - - - - props.updateConfig( - (config) => - (config.max_tokens = ModalConfigValidator.max_tokens( - e.currentTarget.valueAsNumber, - )), - ) - } - > - - - { - props.updateConfig( - (config) => - (config.presence_penalty = - ModalConfigValidator.presence_penalty( + {isDalleModel && ( + <> + + { + props.updateConfig((config) => { + config.n = ModalConfigValidator.n(e.currentTarget.valueAsNumber); + }); + }} + > + + + + + + + + + + + + )} + + {!isDalleModel && ( + <> + + { + props.updateConfig( + (config) => + (config.temperature = ModalConfigValidator.temperature( e.currentTarget.valueAsNumber, )), - ); - }} - > - - - - { - props.updateConfig( - (config) => - (config.frequency_penalty = - ModalConfigValidator.frequency_penalty( + ); + }} + > + + + { + props.updateConfig( + (config) => + (config.top_p = ModalConfigValidator.top_p( 
e.currentTarget.valueAsNumber, )), - ); - }} - > - + ); + }} + > + + + + props.updateConfig( + (config) => + (config.max_tokens = ModalConfigValidator.max_tokens( + e.currentTarget.valueAsNumber, + )), + ) + } + > + + + { + props.updateConfig( + (config) => + (config.presence_penalty = + ModalConfigValidator.presence_penalty( + e.currentTarget.valueAsNumber, + )), + ); + }} + > + - - - props.updateConfig( - (config) => - (config.enableInjectSystemPrompts = e.currentTarget.checked), - ) - } - > - + + { + props.updateConfig( + (config) => + (config.frequency_penalty = + ModalConfigValidator.frequency_penalty( + e.currentTarget.valueAsNumber, + )), + ); + }} + > + - - - props.updateConfig( - (config) => (config.template = e.currentTarget.value), - ) - } - > - + + + props.updateConfig( + (config) => + (config.enableInjectSystemPrompts = e.currentTarget.checked), + ) + } + > + - - - props.updateConfig( - (config) => (config.historyMessageCount = e.target.valueAsNumber), - ) - } - > - + + + props.updateConfig( + (config) => (config.template = e.currentTarget.value), + ) + } + > + - - - props.updateConfig( - (config) => - (config.compressMessageLengthThreshold = - e.currentTarget.valueAsNumber), - ) - } - > - - - - props.updateConfig( - (config) => (config.sendMemory = e.currentTarget.checked), - ) - } - > - + + + props.updateConfig( + (config) => (config.historyMessageCount = e.target.valueAsNumber), + ) + } + > + + + + + props.updateConfig( + (config) => + (config.compressMessageLengthThreshold = + e.currentTarget.valueAsNumber), + ) + } + > + + + + props.updateConfig( + (config) => (config.sendMemory = e.currentTarget.checked), + ) + } + > + + + )} ); } diff --git a/app/constant.ts b/app/constant.ts index 7008c4c6134..06f12e91209 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -72,13 +72,16 @@ export enum ServiceProvider { export const OpenaiPath = { ChatPath: "v1/chat/completions", // text moderation - TextModeration: true, ModerationPath: "v1/moderations", 
TextModerationModels: { latest: "text-moderation-latest", stable: "text-moderation-stable", }, - TodoPath: false, + // image creation (dalle models) + ImageCreationPath: "v1/images/generations", + // todo + ImageEditPath: "v1/images/edits", + ImageVariationPath: "v1/images/variations", UsagePath: "dashboard/billing/usage", SubsPath: "dashboard/billing/subscription", ListModelPath: "v1/models", @@ -105,6 +108,22 @@ export const KnowledgeCutOffDate: Record = { }; export const DEFAULT_MODELS = [ + { + name: "dall-e-2", + available: true, + }, + { + name: "dall-e-3", + available: true, + }, + { + name: "dall-e-2-beta-instruct-vision", + available: true, + }, + { + name: "dall-e-3-beta-instruct-vision", + available: true, + }, { name: "gpt-4", available: true, diff --git a/app/locales/cn.ts b/app/locales/cn.ts index ff4f94cca5d..f7726ab0c53 100644 --- a/app/locales/cn.ts +++ b/app/locales/cn.ts @@ -11,7 +11,9 @@ const cn = { : "访问密码不正确或为空,请前往[登录](/#/auth)页输入正确的访问密码,或者在[设置](/#/settings)页填入你自己的 OpenAI API Key。", Content_Policy: { Title: - "您的请求因违反内容政策而被标记。\n阅读详情:https://platform.openai.com/docs/guides/moderation/overview", + "您的请求因违反内容政策而被标记。", + SubTitle: + "阅读详情:https://platform.openai.com/docs/guides/moderation/overview", Reason: { Title: "理由", sexual: "性别", @@ -77,6 +79,7 @@ const cn = { UI: { MasksSuccess: "成功更新了掩码会话", MasksFail: "无法更新掩码会话", + Summarizing: "正在总结当前会话的内容", SummarizeSuccess: "成功总结此次聊天的会话内容", SummarizeFail: "无法总结此次聊天的会话内容", }, @@ -452,6 +455,26 @@ const cn = { Title: "频率惩罚度 (frequency_penalty)", SubTitle: "值越大,越有可能降低重复字词", }, + NumberOfImages: { + Title: "创建图片数量", + SubTitle: + "要生成的图像数量\n必须介于1和10之间。对于dall-e-3,仅支持1。", + }, + QualityOfImages: { + Title: "创建图片质量", + SubTitle: + "将要生成的图像的质量\n此配置仅适用于dall-e-3。", + }, + SizeOfImages: { + Title: "图片尺寸", + SubTitle: + "生成图像的尺寸\nDALL·E-2:必须是`256x256`、`512x512`或`1024x1024`之一。\nDALL-E-3:必须是`1024x1024`、`1792x1024`或`1024x1792`之一。", + }, + StyleOfImages: { + Title: "图片风格", + SubTitle: + 
"生成图像的风格\n必须是生动或自然之一\n此配置仅适用于dall-e-3", + }, }, Store: { DefaultTopic: "新的聊天", diff --git a/app/locales/en.ts b/app/locales/en.ts index 38a40a3e5c8..1b6e31282d1 100644 --- a/app/locales/en.ts +++ b/app/locales/en.ts @@ -13,20 +13,22 @@ const en: LocaleType = { : "Unauthorized access, please enter access code in [auth](/#/auth) page, or enter your OpenAI API Key.", Content_Policy: { Title: - "Your request got flagged because of a Content Policy Violation.\nRead Here: https://platform.openai.com/docs/guides/moderation/overview", + "Your request got flagged because of a Content Policy Violation.", + SubTitle: + "Read Here: https://platform.openai.com/docs/guides/moderation/overview", Reason: { - Title: "reason", - sexual: "sexual", - hate: "hate", - harassment: "harassment", - "self-harm": "self-harm", - "sexual/minors": "sexual/minors", - "hate/threatening": "hate/threatening", - "violence/graphic": "violence/graphic", - "self-harm/intent": "self-harm/intent", - "self-harm/instructions": "self-harm/instructions", - "harassment/threatening": "harassment/threatening", - violence: "violence", + Title: "Reason", + sexual: "Sexual", + hate: "Hate", + harassment: "Harassment", + "self-harm": "Self-harm", + "sexual/minors": "Sexual/minors", + "hate/threatening": "Hate/threatening", + "violence/graphic": "Violence/graphic", + "self-harm/intent": "Self-harm/intent", + "self-harm/instructions": "Self-harm/instructions", + "harassment/threatening": "Harassment/threatening", + violence: "Violence", }, }, }, @@ -79,6 +81,7 @@ const en: LocaleType = { UI: { MasksSuccess: "Successfully updated session of masks", MasksFail: "Failed to update session of masks", + Summarizing: "Summarizing the session of this chat", SummarizeSuccess: "Successfully summarize session of this chat", SummarizeFail: "Failed to summarize session of this chat", }, @@ -459,6 +462,26 @@ const en: LocaleType = { SubTitle: "A larger value decreasing the likelihood to repeat the same line", }, + NumberOfImages: { + Title: "Number Image 
Create", + SubTitle: + "A number of images to generate\nMust be between 1 and 10. For dall-e-3, only 1 is supported.", + }, + QualityOfImages: { + Title: "Quality Image Create", + SubTitle: + "A quality of the image that will be generated\nThis Configuration is only supported for dall-e-3.", + }, + SizeOfImages: { + Title: "Size Image", + SubTitle: + "A size of the generated images\nDALL·E-2 : Must be one of `256x256`, `512x512`, or `1024x1024`.\nDALL-E-3 : Must be one of `1024x1024`, `1792x1024`, or `1024x1792`.", + }, + StyleOfImages: { + Title: "Style Image", + SubTitle: + "A style of the generated images\nMust be one of vivid or natural\nThis Configuration is only supported for dall-e-3", + }, }, Store: { DefaultTopic: "New Conversation", diff --git a/app/locales/id.ts b/app/locales/id.ts index 42b18890e91..d82c524a752 100644 --- a/app/locales/id.ts +++ b/app/locales/id.ts @@ -7,20 +7,22 @@ const id: PartialLocaleType = { Unauthorized: "Akses tidak diizinkan, silakan masukkan kode akses atau masukkan kunci API OpenAI Anda. 
di halaman [autentikasi](/#/auth) atau di halaman [Pengaturan](/#/settings).", Content_Policy: { Title: - "Permintaan Anda ditandai karena Pelanggaran Kebijakan Konten.\nBaca di sini: https://platform.openai.com/docs/guides/moderation/overview", + "Permintaan Anda ditandai karena Pelanggaran Kebijakan Konten.", + SubTitle: + "Baca selengkapnya di sini: https://platform.openai.com/docs/guides/moderation/overview", Reason: { - Title: "alasan", - sexual: "seksual", - hate: "kebencian", - harassment: "pelecehan", - "self-harm": "melukai diri sendiri", - "sexual/minors": "seksual/anak-anak", - "hate/threatening": "kebencian/ancaman", - "violence/graphic": "kekerasan/grafis", - "self-harm/intent": "melukai diri sendiri/niat", - "self-harm/instructions": "melukai diri sendiri/instruksi", - "harassment/threatening": "pelecehan/ancaman", - violence: "kekerasan", + Title: "Alasan", + sexual: "Seksual", + hate: "Kebencian", + harassment: "Pelecehan", + "self-harm": "Melukai diri sendiri", + "sexual/minors": "Seksual/anak-anak", + "hate/threatening": "Kebencian/ancaman", + "violence/graphic": "Kekerasan/grafis", + "self-harm/intent": "Melukai diri sendiri/niat", + "self-harm/instructions": "Melukai diri sendiri/instruksi", + "harassment/threatening": "Pelecehan/ancaman", + violence: "Kekerasan", }, }, }, @@ -66,6 +68,7 @@ const id: PartialLocaleType = { UI: { MasksSuccess: "Berhasil memperbarui sesi Masks", MasksFail: "Gagal memperbarui sesi Masks", + Summarizing: "Meringkas sesi percakapan ini", SummarizeSuccess: "Berhasil merangkum sesi obrolan ini", SummarizeFail: "Gagal merangkum sesi obrolan ini", }, @@ -391,6 +394,26 @@ const id: PartialLocaleType = { SubTitle: "Semakin tinggi nilai, semakin rendah kemungkinan penggunaan ulang baris yang sama", }, + NumberOfImages: { + Title: "Buat Jumlah Gambar", + SubTitle: + "Sejumlah gambar yang akan dihasilkan\nHarus di antara 1 dan 10. 
Untuk dall-e-3, hanya 1 yang didukung.", + }, + QualityOfImages: { + Title: "Buat Kualitas Gambar", + SubTitle: + "Kualitas gambar yang akan dihasilkan\nKonfigurasi ini hanya didukung untuk dall-e-3.", + }, + SizeOfImages: { + Title: "Ukuran Gambar", + SubTitle: + "Ukuran gambar yang dihasilkan\nDALL·E-2: Harus menjadi salah satu dari `256x256`, `512x512`, atau `1024x1024`.\nDALL-E-3: Harus menjadi salah satu dari `1024x1024`, `1792x1024`, atau `1024x1792`.", + }, + StyleOfImages: { + Title: "Gaya Gambar", + SubTitle: + "Gaya gambar yang dihasilkan\nHarus menjadi salah satu dari cerah atau alami\nKonfigurasi ini hanya didukung untuk dall-e-3", + }, }, Store: { DefaultTopic: "Percakapan Baru", diff --git a/app/locales/index.ts b/app/locales/index.ts index 79e314facdd..8e456e7713f 100644 --- a/app/locales/index.ts +++ b/app/locales/index.ts @@ -89,9 +89,9 @@ function setItem(key: string, value: string) { } catch {} } -function getLanguage() { +function getLanguages() { try { - return navigator.language.toLowerCase(); + return navigator.languages; } catch { return DEFAULT_LANG; } @@ -104,11 +104,20 @@ export function getLang(): Lang { return savedLang as Lang; } - const lang = getLanguage(); - - for (const option of AllLangs) { - if (lang.includes(option)) { - return option; + const preferredLangs = getLanguages(); + if (typeof preferredLangs === "string") return preferredLangs; // no language list, return the only lang + + // loop for searching best language option based on user accepted language + let bestMatch: Lang | null = null; + for (let i = 0; i < preferredLangs.length; i++) { + for (const option of AllLangs) { + if (preferredLangs[i].toLowerCase().includes(option)) { + bestMatch = option; + break; + } + } + if (bestMatch) { + return bestMatch; } } diff --git a/app/store/chat.ts b/app/store/chat.ts index c2eb59f11d0..1e4a08cd3f2 100644 --- a/app/store/chat.ts +++ b/app/store/chat.ts @@ -496,12 +496,12 @@ export const useChatStore = createPersistStore( const 
sessionModelConfig = this.currentSession().mask.modelConfig; const topicModel = getSummarizeModel(session.mask.modelConfig.model, sessionModelConfig); - if (topicModel === "dall-e-2-beta-instruct-vision" || topicModel === "dall-e-3-beta-instruct-vision" || topicModel === "dall-e-2" || topicModel === "dall-e-3") { - // Summarize topic using gpt-3.5-turbo-0613 which is compatible with DALL-E-2 model + if (topicModel.startsWith("dall-e")) { api.llm.chat({ messages: topicMessages, config: { model: "gpt-4-vision-preview", + stream: false, }, whitelist: true, onFinish(message) { @@ -510,12 +510,11 @@ export const useChatStore = createPersistStore( session.topic = message.length > 0 ? trimTopic(message) : DEFAULT_TOPIC; // Add system message after summarizing the topic - // which is powerful based of fine-tuning const systemMessage: ChatMessage = { - date: new Date().toLocaleString(), - id: nanoid(), role: "system", content: `${Locale.FineTuned.Sysmessage} ${session.topic}`, + date: new Date().toLocaleString(), + id: nanoid(), }; session.messages = [systemMessage, ...session.messages]; }); @@ -527,6 +526,7 @@ export const useChatStore = createPersistStore( messages: topicMessages, config: { model: topicModel, + stream: false, }, whitelist: true, onFinish(message) { @@ -535,12 +535,11 @@ export const useChatStore = createPersistStore( session.topic = message.length > 0 ? 
trimTopic(message) : DEFAULT_TOPIC; // Add system message after summarizing the topic - // which is powerful based of fine-tuning const systemMessage: ChatMessage = { - date: new Date().toLocaleString(), - id: nanoid(), role: "system", content: `${Locale.FineTuned.Sysmessage} ${session.topic}`, + date: new Date().toLocaleString(), + id: nanoid(), }; session.messages = [systemMessage, ...session.messages]; }); @@ -570,6 +569,7 @@ export const useChatStore = createPersistStore( // add memory prompt toBeSummarizedMsgs.unshift(get().getMemoryPrompt()); + let isToastShown = false; const lastSummarizeIndex = session.messages.length; @@ -586,9 +586,9 @@ export const useChatStore = createPersistStore( ) { const sessionModelConfig = this.currentSession().mask.modelConfig; const summarizeModel = getSummarizeModel(session.mask.modelConfig.model, sessionModelConfig); + const { max_tokens, ...modelcfg } = modelConfig; - if (summarizeModel === "dall-e-2-beta-instruct-vision" || summarizeModel === "dall-e-3-beta-instruct-vision" || summarizeModel === "dall-e-2" || summarizeModel === "dall-e-3") { - // Summarize using gpt-3.5-turbo-0613 which is compatible with DALL-E-2 model + if (summarizeModel.startsWith("dall-e")) { api.llm.chat({ messages: toBeSummarizedMsgs.concat( createMessage({ @@ -597,14 +597,29 @@ export const useChatStore = createPersistStore( date: "", }), ), - config: { ...modelConfig, model: "gpt-4-vision-preview", stream: true }, + config: { ...modelcfg, model: "gpt-4-vision-preview", stream: true }, + onUpdate(message) { + session.memoryPrompt = message; + if (!isToastShown) { + showToast( + Locale.Chat.Commands.UI.Summarizing, + ); + isToastShown = true; + } + }, whitelist: true, onFinish(message) { console.log("[Memory] ", message); session.lastSummarizeIndex = lastSummarizeIndex; + showToast( + Locale.Chat.Commands.UI.SummarizeSuccess, + ); }, onError(err) { console.error("[Summarize] ", err); + showToast( + Locale.Chat.Commands.UI.SummarizeFail, + ); }, }); } 
else { @@ -617,19 +632,29 @@ export const useChatStore = createPersistStore( date: "", }), ), - config: { ...modelConfig, stream: true }, + config: { ...modelcfg, stream: true }, onUpdate(message) { session.memoryPrompt = message; + if (!isToastShown) { + showToast( + Locale.Chat.Commands.UI.Summarizing, + ); + isToastShown = true; + } }, whitelist: true, onFinish(message) { console.log("[Memory] ", message); session.lastSummarizeIndex = lastSummarizeIndex; - showToast(Locale.Chat.Commands.UI.SummarizeSuccess); + showToast( + Locale.Chat.Commands.UI.SummarizeSuccess, + ); }, onError(err) { console.error("[Summarize] ", err); - showToast(Locale.Chat.Commands.UI.SummarizeFail); + showToast( + Locale.Chat.Commands.UI.SummarizeFail, + ); }, }); } diff --git a/app/store/config.ts b/app/store/config.ts index 0218f52b7d6..44199a68f16 100644 --- a/app/store/config.ts +++ b/app/store/config.ts @@ -49,15 +49,46 @@ export const DEFAULT_CONFIG = { model: "gpt-3.5-turbo" as ModelType, temperature: 0.5, top_p: 1, - max_tokens: 2000, // this bad if keep 8192 as default lmao we are not only using other ai that 100% not stable hahaha + max_tokens: 2000, presence_penalty: 0, frequency_penalty: 0, + /** + * DALL·E Models + * Author: @H0llyW00dzZ + * + **/ + n: 1, // The number of images to generate. Must be between 1 and 10. For dall-e-3, only n=1 is supported. + /** Quality Only DALL·E-3 Models + * Author: @H0llyW00dzZ + * The quality of the image that will be generated. + * `hd` creates images with finer details and greater consistency across the image. + **/ + quality: "hd", // Only DALL·E-3 for DALL·E-2 not not really needed + /** SIZE ALL·E Models + * Author: @H0llyW00dzZ + * DALL·E-2 : Must be one of `256x256`, `512x512`, or `1024x1024`. + * DALL-E-3 : Must be one of `1024x1024`, `1792x1024`, or `1024x1792`. + **/ + size: "1024x1024", + /** Style DALL-E-3 Models + * Author: @H0llyW00dzZ + * Must be one of `vivid` or `natural`. 
+ * `Vivid` causes the model to lean towards generating hyper-real and dramatic images. + * `Natural` causes the model to produce more natural, less hyper-real looking images. + */ + style: "vivid", // Only DALL·E-3 for DALL·E-2 not not really needed + system_fingerprint: "", sendMemory: true, historyMessageCount: 4, compressMessageLengthThreshold: 1000, enableInjectSystemPrompts: true, template: DEFAULT_INPUT_TEMPLATE, }, + /** + * Text Moderation Open AI + * Author: @H0llyW00dzZ + **/ + textmoderation: true, // text moderation default is enabled }; export type ChatConfig = typeof DEFAULT_CONFIG; @@ -96,6 +127,25 @@ export const ModalConfigValidator = { top_p(x: number) { return limitNumber(x, 0, 1, 1); }, + n(x: number) { + return limitNumber(x, 1, 10, 1); + }, + quality(x: string) { + return ["hd"].includes(x) ? x : "hd"; + }, + size(x: string) { + const validSizes = ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]; + return validSizes.includes(x) ? x : "1024x1024"; + }, + style(x: string) { + const validStyles = ["vivid", "natural"]; + return validStyles.includes(x) ? x : "vivid"; + }, + system_fingerprint(x: string) { + // Example: Ensure the fingerprint matches the format "fp_XXXXXXXXXX" where X represents a hexadecimal digit + const regex = /^fp_[0-9a-fA-F]{10}$/; + return regex.test(x) ? x : ""; + }, }; export const useAppConfig = createPersistStore( @@ -132,7 +182,7 @@ export const useAppConfig = createPersistStore( }), { name: StoreKey.Config, - version: 3.8, + version: 4.2, // DALL·E Models switching version to 4.1 because in 4.0 @Yidadaa using it. 
migrate(persistedState, version) { const state = persistedState as ChatConfig; @@ -163,6 +213,29 @@ export const useAppConfig = createPersistStore( state.lastUpdate = Date.now(); } + if (version < 3.9) { + state.textmoderation = true; + } + + if (version < 4.1) { + state.modelConfig = { + ...state.modelConfig, + n: 1, + quality: "hd", + size: "1024x1024", + style: "vivid", + }; + } + + // In the wilds 🚀 + + if (version < 4.2) { + state.modelConfig = { + ...state.modelConfig, + system_fingerprint: "", + }; + } + return state as any; }, },