Commit

Merge branch 'main' into deploy
CNSeniorious000 committed Feb 22, 2025
2 parents 9c0df1c + 918a856, commit 71f2b8f
Showing 14 changed files with 101 additions and 101 deletions.
15 changes: 0 additions & 15 deletions .github/ISSUE_TEMPLATE/sweep-template.yml

This file was deleted.

6 changes: 6 additions & 0 deletions .github/renovate.json
@@ -0,0 +1,6 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "extends": [
+    "config:recommended"
+  ]
+}
8 changes: 4 additions & 4 deletions frontend/app/HeroPage.svelte
@@ -2,18 +2,18 @@
   import { TokenSquare } from "@promplate/pattern";
   import { onMount } from "svelte";
   import { sineInOut } from "svelte/easing";
-  import { tweened } from "svelte/motion";
+  import { Tween } from "svelte/motion";
   const length = 20;
-  const i = tweened(length * 1.3, { easing: sineInOut, duration: 700 });
+  const i = new Tween(length * 1.3, { easing: sineInOut, duration: 700 });
-  onMount(() => ($i = 0));
+  onMount(() => i.set(0));
 </script>

 <div class="grid h-screen select-none place-items-center bg-neutral-950">
   <div class="flex flex-col gap-2 text-neutral-2">
     <div class="text-sm">
-      <TokenSquare {length} isHighlighted={({ x, y }) => y - x >= $i} />
+      <TokenSquare {length} isHighlighted={({ x, y }) => y - x >= i.current} />
     </div>
     <h1 class="mx-0.5 w-fit text-white font-sans">
       Promplate
7 changes: 3 additions & 4 deletions frontend/app/main.ts
@@ -1,9 +1,8 @@
+import { mount } from "svelte";
 import App from "./App.svelte";
 import "@unocss/reset/tailwind-compat.css";
 import "uno.css";

-const app = new App({
-  target: document.getElementById("app")!,
+export default mount(App, {
+  target: document.body,
 });
-
-export default app;
24 changes: 12 additions & 12 deletions frontend/package.json
@@ -12,24 +12,24 @@
     "check": "svelte-check --tsconfig ./tsconfig.json"
   },
   "devDependencies": {
-    "@antfu/eslint-config": "^3.0.0",
-    "@promplate/pattern": "^0.0.1",
-    "@sveltejs/vite-plugin-svelte": "^3.1.1",
+    "@antfu/eslint-config": "^4.1.0",
+    "@promplate/pattern": "^0.0.3",
+    "@sveltejs/vite-plugin-svelte": "^5.0.0",
     "@tsconfig/svelte": "^5.0.4",
-    "@unocss/eslint-plugin": "^0.61.0",
-    "@unocss/extractor-svelte": "^0.61.0",
-    "@unocss/reset": "^0.61.0",
+    "@unocss/eslint-plugin": "^66.0.0",
+    "@unocss/extractor-svelte": "^66.0.0",
+    "@unocss/reset": "^66.0.0",
     "eslint": "^9.4.0",
     "eslint-plugin-format": "^1.0.1",
-    "eslint-plugin-svelte": "^2.39.3",
+    "eslint-plugin-svelte": "^2.45.1",
     "prettier": "^3.3.2",
-    "prettier-plugin-svelte": "^3.2.4",
-    "svelte": "^4.2.18",
+    "prettier-plugin-svelte": "^3.2.6",
+    "svelte": "^5.0.0",
     "svelte-check": "^4.0.0",
     "svelte-eslint-parser": "^0.43.0",
     "tslib": "^2.6.3",
-    "typescript": "^5.4.5",
-    "unocss": "^0.61.0",
-    "vite": "^5.3.1"
+    "typescript": "^5.5.0",
+    "unocss": "^66.0.0",
+    "vite": "^6.0.0"
   }
 }
4 changes: 2 additions & 2 deletions frontend/uno.config.ts
@@ -1,10 +1,10 @@
 import extractorSvelte from "@unocss/extractor-svelte";
-import { defineConfig, presetAttributify, presetUno, presetWebFonts, transformerDirectives, transformerVariantGroup } from "unocss";
+import { defineConfig, presetAttributify, presetWebFonts, presetWind3, transformerDirectives, transformerVariantGroup } from "unocss";

 const config = defineConfig({
   extractors: [extractorSvelte()],
   transformers: [transformerDirectives(), transformerVariantGroup()],
-  presets: [presetAttributify(), presetUno(), presetWebFonts({ provider: "bunny", fonts: { mono: "Fira Code" } })],
+  presets: [presetAttributify(), presetWind3({ preflight: "on-demand" }), presetWebFonts({ provider: "bunny", fonts: { mono: "Fira Code" } })],
 });

 export default config;
12 changes: 6 additions & 6 deletions pyproject.toml
@@ -11,26 +11,26 @@ dependencies = [
     "promplate[all]~=0.3.5.0",
     "promplate-trace[langfuse,langsmith]==0.3.0dev5",
     "python-box~=7.3.0",
-    "pydantic-settings~=2.7.0",
+    "pydantic-settings~=2.8.0",
     "httpx[http2]~=0.28.0",
     "promptools[stream,validation]~=0.1.3.3",
     "fake-useragent~=2.0.0",
     "html2text~=2024.2.26",
-    "beautifulsoup4~=4.12.3",
+    "beautifulsoup4~=4.13.1",
     "rich~=13.9.0",
     "zhipuai~=2.1.5.20241203",
-    "anthropic~=0.45.0",
+    "anthropic~=0.46.0",
     "dashscope~=1.22.0",
-    "logfire[fastapi,system-metrics,httpx]~=3.3.0",
+    "logfire[fastapi,system-metrics,httpx]~=3.6.0",
 ]

 [tool.pdm]
 distribution = false

 [dependency-groups]
 dev = [
-    "isort~=5.13.2",
-    "black~=24.4.2",
+    "isort~=6.0.0",
+    "black~=25.1.0",
 ]

 [tool.pdm.scripts]
33 changes: 29 additions & 4 deletions src/routes/openai.py
@@ -1,7 +1,10 @@
-from typing import AsyncIterable, cast
+from typing import AsyncIterable, Literal, cast, get_args

 from fastapi import APIRouter, Depends, Request
+from openai.types.chat import ChatCompletionContentPartTextParam
 from promplate import Message
+from pydantic import field_serializer
+from typing_extensions import TypedDict

 from ..utils.http import forward_headers
 from ..utils.llm import Model, openai_compatible_providers
@@ -13,23 +16,45 @@
 openai_router = APIRouter(tags=["openai"])


+class ModelItem(TypedDict):
+    id: Model
+    object: Literal["model"]
+
+
+class ModelList(TypedDict):
+    object: Literal["list"]
+    data: list[ModelItem]
+
+
 @openai_router.get("/models")
-async def get_models():
+async def get_models() -> ModelList:
     return {
         "object": "list",
         "data": [
             {
                 "id": name,
                 "object": "model",
             }
-            for name in Model.__args__
+            for name in get_args(Model)
         ],
     }


+class CompatibleMessage(Message):
+    content: str | list[ChatCompletionContentPartTextParam]  # type: ignore
+
+
 class ChatInput(ChainInput):
     stream: bool = False
-    messages: list[Message]  # type: ignore
+    messages: list[CompatibleMessage]  # type: ignore
+
+    @field_serializer("messages")
+    def serialize_messages(self, value: CompatibleMessage):
+        content = value["content"]
+        if isinstance(content, str):
+            return value
+        value["content"] = "".join(i["text"] for i in content)
+        return value

     @property
     def config(self):
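
The new CompatibleMessage type accepts OpenAI-style multipart text content, and the field_serializer flattens it back into a plain string before the messages are forwarded upstream. A minimal self-contained sketch of that flattening behavior (the simplified Message shape below is an assumption for illustration; the real types come from promplate and the openai SDK):

from pydantic import BaseModel, field_serializer
from typing_extensions import TypedDict


class Message(TypedDict):
    # Simplified stand-in for promplate's Message with OpenAI-style content parts.
    role: str
    content: str | list[dict]


class ChatInput(BaseModel):
    messages: list[Message]

    @field_serializer("messages")
    def serialize_messages(self, messages: list[Message]):
        # Join the text of each content part so providers that only
        # accept plain-string content still work.
        for message in messages:
            if not isinstance(message["content"], str):
                message["content"] = "".join(part["text"] for part in message["content"])
        return messages


chat = ChatInput(messages=[{"role": "user", "content": [{"type": "text", "text": "hi"}]}])
print(chat.model_dump())  # {'messages': [{'role': 'user', 'content': 'hi'}]}
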
30 changes: 24 additions & 6 deletions src/utils/llm/__init__.py
@@ -39,12 +39,14 @@
     "azure:gpt-4o-mini",
     "azure:o1",
     "azure:o1-mini",
+    "azure:o3-mini",
     "azure:o1-preview",
     "Mistral-Nemo",
     "Mistral-large",
     "Mistral-large-2407",
     "Mistral-large-2411",
     "Mistral-small",
+    "Codestral-2501",
     "Ministral-3B",
     "Meta-Llama-3.1-405B-Instruct",
     "Meta-Llama-3.1-70B-Instruct",
@@ -57,6 +59,7 @@
     "Cohere-command-r-08-2024",
     "AI21-Jamba-1.5-Large",
     "AI21-Jamba-1.5-Mini",
+    "Phi-4",
     "Phi-3.5-MoE-instruct",
     "Phi-3.5-mini-instruct",
     "Phi-3-medium-128k-instruct",
@@ -65,10 +68,15 @@
     "Phi-3-mini-4k-instruct",
     "Phi-3-small-128k-instruct",
     "Phi-3-small-8k-instruct",
+    "DeepSeek-R1",
     "chatglm_turbo",
     "claude-3-haiku-20240307",
     "claude-3-sonnet-20240229",
     "claude-3-opus-20240229",
+    "qwen-2.5-32b",
+    "qwen-2.5-coder-32b",
+    "deepseek-r1-distill-qwen-32b",
+    "deepseek-r1-distill-llama-70b",
+    "deepseek-r1-distill-llama-70b-specdec",
     "gemma-7b-it",
     "gemma2-9b-it",
     "llama3-8b-8192",
@@ -94,11 +102,6 @@
     "abab5.5-chat",
     "abab6-chat",
     "Qwen/QwQ-32B-Preview",
-    "Qwen/Qwen2-7B-Instruct",
-    "Qwen/Qwen2-1.5B-Instruct",
-    "Qwen/Qwen2-72B-Instruct",
-    "Qwen/Qwen2-57B-A14B-Instruct",
-    "Vendor-A/Qwen/Qwen2-72B-Instruct",
     "Qwen/Qwen2.5-Coder-7B-Instruct",
     "Qwen/Qwen2.5-7B-Instruct",
     "Qwen/Qwen2.5-14B-Instruct",
@@ -114,6 +117,14 @@
     "deepseek-ai/DeepSeek-V2-Chat",
     "deepseek-ai/DeepSeek-V2.5",
     "deepseek-ai/deepseek-llm-67b-chat",
+    "deepseek-ai/DeepSeek-V3",
+    "deepseek-ai/DeepSeek-R1",
+    "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+    "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
+    "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
+    "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+    "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
+    "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
     "internlm/internlm2_5-7b-chat",
     "internlm/internlm2_5-20b-chat",
     "yi-lightning",
@@ -122,5 +133,12 @@
     "Qwen2.5-Coder-32B-Instruct",
     "Qwen2.5-72B-Instruct",
     "QwQ-32B-Preview",
+    "Llama-3.1-Tulu-3-405B",
+    "Llama-3.2-11B-Vision-Instruct",
+    "Llama-3.2-90B-Vision-Instruct",
+    "Meta-Llama-3.2-1B-Instruct",
+    "Meta-Llama-3.2-3B-Instruct",
+    "Meta-Llama-3.3-70B-Instruct",
+    "DeepSeek-R1-Distill-Llama-70B",
     "deepseek-chat",
 ]
3 changes: 3 additions & 0 deletions src/utils/llm/azure.py
@@ -14,13 +14,16 @@


 @link_llm("azure:o1")
+@link_llm("azure:o3")
 @link_llm("azure:gpt")
 @link_llm("Ministral")
+@link_llm("Codestral")
 @link_llm("Mistral")
 @link_llm("Meta")
 @link_llm("Cohere")
 @link_llm("AI21")
 @link_llm("Phi")
+@link_llm("DeepSeek")
 class AzureOpenAI(AsyncChatOpenAI):
     @staticmethod
     async def generate(prompt, **kwargs):
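
Each decorator here registers a model-name prefix for the client class, which is how new catalog entries like "azure:o3-mini", "Codestral-2501", and "DeepSeek-R1" resolve to AzureOpenAI. A toy sketch of that prefix-registry pattern (the dispatch rule is an assumption for illustration; the real link_llm lives in this repo's llm utilities):

registry: dict[str, type] = {}


def link_llm(prefix: str):
    # Map a model-name prefix to the decorated client class.
    def decorate(cls: type) -> type:
        registry[prefix] = cls
        return cls
    return decorate


def find_llm(model: str) -> type:
    # Longest matching prefix wins, so "azure:o3-mini" would match "azure:o3"
    # even if a shorter "azure:" prefix were also registered.
    matches = [prefix for prefix in registry if model.startswith(prefix)]
    if not matches:
        raise KeyError(f"no provider registered for {model!r}")
    return registry[max(matches, key=len)]


@link_llm("azure:o3")
@link_llm("DeepSeek")
class FakeAzureOpenAI:  # stand-in for the real client class
    ...


assert find_llm("azure:o3-mini") is FakeAzureOpenAI
assert find_llm("DeepSeek-R1") is FakeAzureOpenAI
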
2 changes: 2 additions & 0 deletions src/utils/llm/groq.py
@@ -18,6 +18,8 @@
 @link_llm("llama-3.2")
 @link_llm("llama-3.3-70b-")
 @link_llm("mixtral")
+@link_llm("qwen-2.5")
+@link_llm("deepseek-r1-distill")
 class Groq(AsyncChatOpenAI):
     async def complete(self, prompt: str | list[Message], /, **config):
         config = self._run_config | config
4 changes: 4 additions & 0 deletions src/utils/llm/sambanova.py
@@ -8,6 +8,10 @@

 @link_llm("Qwen2.5")
 @link_llm("QwQ")
+@link_llm("Llama-3")
+@link_llm("Meta-Llama-3.2")
+@link_llm("Meta-Llama-3.3")
+@link_llm("DeepSeek-R1-Distill")
 class SambaNova(AsyncChatOpenAI):
     complete = staticmethod(
         patch.chat.acomplete(
11 changes: 6 additions & 5 deletions src/utils/openai_api.py
@@ -3,12 +3,12 @@
 from typing import AsyncIterable


-def format_chunk(id, content, model, stop=False):
+def format_chunk(t, id, content, model, stop=False):
     if stop:
         choice = {"index": 0, "delta": {}, "finish_reason": "stop"}
     else:
         choice = {"index": 0, "delta": {"content": content, "role": "assistant"}}
-    return {"id": id, "choices": [choice], "model": model, "object": "chat.completion.chunk"}
+    return {"id": id, "choices": [choice], "model": model, "object": "chat.completion.chunk", "created": t}


 def format_response(content, model: str):
@@ -21,9 +21,10 @@ def format_response(content, model: str):


 async def stream_output(stream: AsyncIterable[str], model: str):
-    response_id = f"chatcmpl-{int(time())}"
+    created = int(time())
+    response_id = f"chatcmpl-{created}"

     async for delta in stream:
-        yield f"data: {dumps(format_chunk(response_id, delta, model))}\n\n"
+        yield f"data: {dumps(format_chunk(created, response_id, delta, model))}\n\n"

-    yield f"data: {dumps(format_chunk(response_id, None, model, stop=True))}\n\ndata: [DONE]\n\n"
+    yield f"data: {dumps(format_chunk(created, response_id, None, model, stop=True))}\n\ndata: [DONE]\n\n"
43 changes: 0 additions & 43 deletions sweep.yaml

This file was deleted.
