diff --git a/.github/ISSUE_TEMPLATE/sweep-template.yml b/.github/ISSUE_TEMPLATE/sweep-template.yml
deleted file mode 100644
index 44116f53..00000000
--- a/.github/ISSUE_TEMPLATE/sweep-template.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-name: Sweep Issue
-title: 'Sweep: '
-description: For small bugs, features, refactors, and tests to be handled by Sweep, an AI-powered junior developer.
-labels: sweep
-body:
-  - type: textarea
-    id: description
-    attributes:
-      label: Details
-      description: Tell Sweep where and what to edit and provide enough context for a new developer to the codebase
-      placeholder: |
-        Unit Tests: Write unit tests for . Test each function in the file. Make sure to test edge cases.
-        Bugs: The bug might be in . Here are the logs: ...
-        Features: the new endpoint should use the ... class from because it contains ... logic.
-        Refactors: We are migrating this function to ... version because ...
\ No newline at end of file
diff --git a/.github/renovate.json b/.github/renovate.json
new file mode 100644
index 00000000..5db72dd6
--- /dev/null
+++ b/.github/renovate.json
@@ -0,0 +1,6 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "extends": [
+    "config:recommended"
+  ]
+}
diff --git a/frontend/app/HeroPage.svelte b/frontend/app/HeroPage.svelte
index c4402590..ed984190 100644
--- a/frontend/app/HeroPage.svelte
+++ b/frontend/app/HeroPage.svelte
@@ -2,18 +2,18 @@
   import { TokenSquare } from "@promplate/pattern";
   import { onMount } from "svelte";
   import { sineInOut } from "svelte/easing";
-  import { tweened } from "svelte/motion";
+  import { Tween } from "svelte/motion";
 
   const length = 20;
-  const i = tweened(length * 1.3, { easing: sineInOut, duration: 700 });
+  const i = new Tween(length * 1.3, { easing: sineInOut, duration: 700 });
 
-  onMount(() => ($i = 0));
+  onMount(() => i.set(0));
 
- y - x >= $i} />
+ y - x >= i.current} />
 
 Promplate
diff --git a/frontend/app/main.ts b/frontend/app/main.ts
index 61c6d6b1..128095de 100644
--- a/frontend/app/main.ts
+++ b/frontend/app/main.ts
@@ -1,9 +1,8 @@
+import { mount } from "svelte";
 import App from "./App.svelte";
 import "@unocss/reset/tailwind-compat.css";
 import "uno.css";
 
-const app = new App({
-  target: document.getElementById("app")!,
+export default mount(App, {
+  target: document.body,
 });
-
-export default app;
diff --git a/frontend/package.json b/frontend/package.json
index ef8133ae..88847e5c 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -12,24 +12,24 @@
     "check": "svelte-check --tsconfig ./tsconfig.json"
   },
   "devDependencies": {
-    "@antfu/eslint-config": "^3.0.0",
-    "@promplate/pattern": "^0.0.1",
-    "@sveltejs/vite-plugin-svelte": "^3.1.1",
+    "@antfu/eslint-config": "^4.1.0",
+    "@promplate/pattern": "^0.0.3",
+    "@sveltejs/vite-plugin-svelte": "^5.0.0",
     "@tsconfig/svelte": "^5.0.4",
-    "@unocss/eslint-plugin": "^0.61.0",
-    "@unocss/extractor-svelte": "^0.61.0",
-    "@unocss/reset": "^0.61.0",
+    "@unocss/eslint-plugin": "^66.0.0",
+    "@unocss/extractor-svelte": "^66.0.0",
+    "@unocss/reset": "^66.0.0",
     "eslint": "^9.4.0",
     "eslint-plugin-format": "^1.0.1",
-    "eslint-plugin-svelte": "^2.39.3",
+    "eslint-plugin-svelte": "^2.45.1",
     "prettier": "^3.3.2",
-    "prettier-plugin-svelte": "^3.2.4",
-    "svelte": "^4.2.18",
+    "prettier-plugin-svelte": "^3.2.6",
+    "svelte": "^5.0.0",
     "svelte-check": "^4.0.0",
     "svelte-eslint-parser": "^0.43.0",
     "tslib": "^2.6.3",
-    "typescript": "^5.4.5",
-    "unocss": "^0.61.0",
-    "vite": "^5.3.1"
+    "typescript": "^5.5.0",
+    "unocss": "^66.0.0",
+    "vite": "^6.0.0"
   }
 }
diff --git a/frontend/uno.config.ts b/frontend/uno.config.ts
index 46b9a2e5..48e4c3c2 100644
--- a/frontend/uno.config.ts
+++ b/frontend/uno.config.ts
@@ -1,10 +1,10 @@
 import extractorSvelte from "@unocss/extractor-svelte";
-import { defineConfig, presetAttributify, presetUno, presetWebFonts, transformerDirectives, transformerVariantGroup } from "unocss";
+import { defineConfig, presetAttributify, presetWebFonts, presetWind3, transformerDirectives, transformerVariantGroup } from "unocss";
 
 const config = defineConfig({
   extractors: [extractorSvelte()],
   transformers: [transformerDirectives(), transformerVariantGroup()],
-  presets: [presetAttributify(), presetUno(), presetWebFonts({ provider: "bunny", fonts: { mono: "Fira Code" } })],
+  presets: [presetAttributify(), presetWind3({ preflight: "on-demand" }), presetWebFonts({ provider: "bunny", fonts: { mono: "Fira Code" } })],
 });
 
 export default config;
diff --git a/pyproject.toml b/pyproject.toml
index 4cc369ef..d802826b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -11,17 +11,17 @@ dependencies = [
     "promplate[all]~=0.3.5.0",
     "promplate-trace[langfuse,langsmith]==0.3.0dev5",
     "python-box~=7.3.0",
-    "pydantic-settings~=2.7.0",
+    "pydantic-settings~=2.8.0",
     "httpx[http2]~=0.28.0",
     "promptools[stream,validation]~=0.1.3.3",
     "fake-useragent~=2.0.0",
     "html2text~=2024.2.26",
-    "beautifulsoup4~=4.12.3",
+    "beautifulsoup4~=4.13.1",
     "rich~=13.9.0",
     "zhipuai~=2.1.5.20241203",
-    "anthropic~=0.45.0",
+    "anthropic~=0.46.0",
     "dashscope~=1.22.0",
-    "logfire[fastapi,system-metrics,httpx]~=3.3.0",
+    "logfire[fastapi,system-metrics,httpx]~=3.6.0",
 ]
 
 [tool.pdm]
@@ -29,8 +29,8 @@ distribution = false
 
 [dependency-groups]
 dev = [
-    "isort~=5.13.2",
-    "black~=24.4.2",
+    "isort~=6.0.0",
+    "black~=25.1.0",
 ]
 
 [tool.pdm.scripts]
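Every Python pin above uses the `~=` compatible-release operator, so each of these bumps raises the floor while still allowing patch releases. A quick way to check what a given pin accepts, as a minimal sketch using the `packaging` library (the same specifier logic pip relies on):

```python
from packaging.specifiers import SpecifierSet

# "~=2.8.0" is shorthand for ">=2.8.0,<2.9.0"
spec = SpecifierSet("~=2.8.0")

print(spec.contains("2.8.5"))  # True: patch releases within 2.8.x are allowed
print(spec.contains("2.9.0"))  # False: the next minor version is excluded
```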
diff --git a/src/routes/openai.py b/src/routes/openai.py
index 6880deaf..d4e7774c 100644
--- a/src/routes/openai.py
+++ b/src/routes/openai.py
@@ -1,7 +1,10 @@
-from typing import AsyncIterable, cast
+from typing import AsyncIterable, Literal, cast, get_args
 
 from fastapi import APIRouter, Depends, Request
+from openai.types.chat import ChatCompletionContentPartTextParam
 from promplate import Message
+from pydantic import field_serializer
+from typing_extensions import TypedDict
 
 from ..utils.http import forward_headers
 from ..utils.llm import Model, openai_compatible_providers
@@ -13,8 +16,18 @@
 openai_router = APIRouter(tags=["openai"])
 
 
+class ModelItem(TypedDict):
+    id: Model
+    object: Literal["model"]
+
+
+class ModelList(TypedDict):
+    object: Literal["list"]
+    data: list[ModelItem]
+
+
 @openai_router.get("/models")
-async def get_models():
+async def get_models() -> ModelList:
     return {
         "object": "list",
         "data": [
@@ -22,14 +35,26 @@
                 "id": name,
                 "object": "model",
             }
-            for name in Model.__args__
+            for name in get_args(Model)
         ],
     }
 
 
+class CompatibleMessage(Message):
+    content: str | list[ChatCompletionContentPartTextParam]  # type: ignore
+
+
 class ChatInput(ChainInput):
     stream: bool = False
-    messages: list[Message]  # type: ignore
+    messages: list[CompatibleMessage]  # type: ignore
+
+    @field_serializer("messages")
+    def serialize_messages(self, value: CompatibleMessage):
+        content = value["content"]
+        if isinstance(content, str):
+            return value
+        value["content"] = "".join(i["text"] for i in content)
+        return value
 
     @property
     def config(self):
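The `CompatibleMessage` change lets this endpoint accept OpenAI-style multipart content (a list of text parts) while the serializer flattens it back to the plain string that `promplate`'s `Message` expects. A standalone sketch of that flattening with hypothetical input (note that a pydantic `field_serializer` on a `list[...]` field is handed the whole list, so a version that walks each message looks like this):

```python
# A hypothetical request body: one message whose content arrives as
# OpenAI-style text parts instead of a single string
messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Hello, "},
            {"type": "text", "text": "world!"},
        ],
    }
]

# Flatten multipart content back into the plain-string form
for message in messages:
    content = message["content"]
    if not isinstance(content, str):
        message["content"] = "".join(part["text"] for part in content)

assert messages == [{"role": "user", "content": "Hello, world!"}]
```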
"Llama-3.2-11B-Vision-Instruct", + "Llama-3.2-90B-Vision-Instruct", + "Meta-Llama-3.2-1B-Instruct", + "Meta-Llama-3.2-3B-Instruct", + "Meta-Llama-3.3-70B-Instruct", + "DeepSeek-R1-Distill-Llama-70B", "deepseek-chat", ] diff --git a/src/utils/llm/azure.py b/src/utils/llm/azure.py index d1c0a455..751c493a 100644 --- a/src/utils/llm/azure.py +++ b/src/utils/llm/azure.py @@ -14,13 +14,16 @@ @link_llm("azure:o1") +@link_llm("azure:o3") @link_llm("azure:gpt") @link_llm("Ministral") +@link_llm("Codestral") @link_llm("Mistral") @link_llm("Meta") @link_llm("Cohere") @link_llm("AI21") @link_llm("Phi") +@link_llm("DeepSeek") class AzureOpenAI(AsyncChatOpenAI): @staticmethod async def generate(prompt, **kwargs): diff --git a/src/utils/llm/groq.py b/src/utils/llm/groq.py index dadfd1c0..dd6be0bf 100644 --- a/src/utils/llm/groq.py +++ b/src/utils/llm/groq.py @@ -18,6 +18,8 @@ @link_llm("llama-3.2") @link_llm("llama-3.3-70b-") @link_llm("mixtral") +@link_llm("qwen-2.5") +@link_llm("deepseek-r1-distill") class Groq(AsyncChatOpenAI): async def complete(self, prompt: str | list[Message], /, **config): config = self._run_config | config diff --git a/src/utils/llm/sambanova.py b/src/utils/llm/sambanova.py index 80f613c0..1329e5e8 100644 --- a/src/utils/llm/sambanova.py +++ b/src/utils/llm/sambanova.py @@ -8,6 +8,10 @@ @link_llm("Qwen2.5") @link_llm("QwQ") +@link_llm("Llama-3") +@link_llm("Meta-Llama-3.2") +@link_llm("Meta-Llama-3.3") +@link_llm("DeepSeek-R1-Distill") class SambaNova(AsyncChatOpenAI): complete = staticmethod( patch.chat.acomplete( diff --git a/src/utils/openai_api.py b/src/utils/openai_api.py index 4cd43313..54178fd8 100644 --- a/src/utils/openai_api.py +++ b/src/utils/openai_api.py @@ -3,12 +3,12 @@ from typing import AsyncIterable -def format_chunk(id, content, model, stop=False): +def format_chunk(t, id, content, model, stop=False): if stop: choice = {"index": 0, "delta": {}, "finish_reason": "stop"} else: choice = {"index": 0, "delta": {"content": content, "role": "assistant"}} - return {"id": id, "choices": [choice], "model": model, "object": "chat.completion.chunk"} + return {"id": id, "choices": [choice], "model": model, "object": "chat.completion.chunk", "created": t} def format_response(content, model: str): @@ -21,9 +21,10 @@ def format_response(content, model: str): async def stream_output(stream: AsyncIterable[str], model: str): - response_id = f"chatcmpl-{int(time())}" + created = int(time()) + response_id = f"chatcmpl-{created}" async for delta in stream: - yield f"data: {dumps(format_chunk(response_id, delta, model))}\n\n" + yield f"data: {dumps(format_chunk(created, response_id, delta, model))}\n\n" - yield f"data: {dumps(format_chunk(response_id, None, model, stop=True))}\n\ndata: [DONE]\n\n" + yield f"data: {dumps(format_chunk(created, response_id, None, model, stop=True))}\n\ndata: [DONE]\n\n" diff --git a/sweep.yaml b/sweep.yaml deleted file mode 100644 index e2c3de33..00000000 --- a/sweep.yaml +++ /dev/null @@ -1,43 +0,0 @@ -# Sweep AI turns bugs & feature requests into code changes (https://sweep.dev) -# For details on our config file, check out our docs at https://docs.sweep.dev/usage/config - -# This setting contains a list of rules that Sweep will check for. If any of these rules are broken in a new commit, Sweep will create an pull request to fix the broken rule. -rules: -- "All new business logic should have corresponding unit tests." -- "Refactor large functions to be more modular." -- "Add docstrings to all functions and file headers." 
diff --git a/sweep.yaml b/sweep.yaml
deleted file mode 100644
index e2c3de33..00000000
--- a/sweep.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-# Sweep AI turns bugs & feature requests into code changes (https://sweep.dev)
-# For details on our config file, check out our docs at https://docs.sweep.dev/usage/config
-
-# This setting contains a list of rules that Sweep will check for. If any of these rules are broken in a new commit, Sweep will create an pull request to fix the broken rule.
-rules:
-- "All new business logic should have corresponding unit tests."
-- "Refactor large functions to be more modular."
-- "Add docstrings to all functions and file headers."
-
-# This is the branch that Sweep will develop from and make pull requests to. Most people use 'main' or 'master' but some users also use 'dev' or 'staging'.
-branch: 'main'
-
-# By default Sweep will read the logs and outputs from your existing Github Actions. To disable this, set this to false.
-gha_enabled: True
-
-# This is the description of your project. It will be used by sweep when creating PRs. You can tell Sweep what's unique about your project, what frameworks you use, or anything else you want.
-#
-# Example:
-#
-# description: sweepai/sweep is a python project. The main api endpoints are in sweepai/api.py. Write code that adheres to PEP8.
-description: ''
-
-# This sets whether to create pull requests as drafts. If this is set to True, then all pull requests will be created as drafts and GitHub Actions will not be triggered.
-draft: False
-
-# This is a list of directories that Sweep will not be able to edit.
-blocked_dirs: []
-
-# This is a list of documentation links that Sweep will use to help it understand your code. You can add links to documentation for any packages you use here.
-#
-# Example:
-#
-# docs:
-#   - PyGitHub: ["https://pygithub.readthedocs.io/en/latest/", "We use pygithub to interact with the GitHub API"]
-docs: []
-
-# Sandbox executes commands in a sandboxed environment to validate code changes after every edit to guarantee pristine code. For more details, see the [Sandbox](./sandbox) page.
-sandbox:
-  install:
-    - trunk init
-  check:
-    - trunk fmt {file_path} || return 0
-    - trunk check --fix --print-failures {file_path}
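Stepping back to the provider modules: the `@link_llm(...)` registrations added in `azure.py`, `groq.py`, and `sambanova.py` all route model names to a provider class by string prefix, which is why supporting `qwen-2.5` or `DeepSeek-R1-Distill` models is a single decorator line each. A hypothetical sketch of that mechanism (the real `link_llm` lives in this repo's utils and its matching rules may differ):

```python
# Hypothetical prefix registry; assumes link_llm maps a model-name
# prefix to the provider class that should serve it
llm_registry: dict[str, type] = {}


def link_llm(prefix: str):
    def decorator(cls: type) -> type:
        llm_registry[prefix] = cls
        return cls

    return decorator


def find_llm(model: str) -> type:
    # Prefer the longest matching prefix so "deepseek-r1-distill"
    # beats a shorter, more generic registration
    matches = [p for p in llm_registry if model.startswith(p)]
    if not matches:
        raise KeyError(f"no provider linked for {model!r}")
    return llm_registry[max(matches, key=len)]


@link_llm("qwen-2.5")
@link_llm("deepseek-r1-distill")
class Groq: ...


assert find_llm("qwen-2.5-coder-32b") is Groq
assert find_llm("deepseek-r1-distill-llama-70b") is Groq
```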