diff --git a/.changes/extensions/intellij/0.0.42.md b/.changes/extensions/intellij/0.0.42.md new file mode 100644 index 0000000000..dbcdd3fabb --- /dev/null +++ b/.changes/extensions/intellij/0.0.42.md @@ -0,0 +1,7 @@ +## 0.0.42 - 2024-04-12 +### Added +* Inline cmd/ctrl+I in JetBrains +### Fixed +* Fixed character encoding error causing display issues +* Fixed error causing input to constantly demand focus +* Fixed automatic reloading of config.json diff --git a/.changes/extensions/vscode/0.8.24.md b/.changes/extensions/vscode/0.8.24.md new file mode 100644 index 0000000000..62c32d5e50 --- /dev/null +++ b/.changes/extensions/vscode/0.8.24.md @@ -0,0 +1,5 @@ +## 0.8.24 - 2024-04-12 +### Added +* Support for improved retrieval models (Voyage embeddings/reranking) +* New @code context provider +* Personal usage analytics diff --git a/.changes/unreleased/Added-20240412-160513.yaml b/.changes/unreleased/Added-20240412-160513.yaml new file mode 100644 index 0000000000..a2cd945708 --- /dev/null +++ b/.changes/unreleased/Added-20240412-160513.yaml @@ -0,0 +1,4 @@ +project: extensions/vscode +kind: Added +body: Support for Gemini 1.5 Pro +time: 2024-04-12T16:05:13.251485-07:00 diff --git a/CHANGELOG.md b/CHANGELOG.md index 459ca71ab3..010e9d1e3a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,6 @@ # Changelog -Separate changelogs are kept for each part of the Continue repository: +Separate changelogs are kept for each extension: - [VS Code Extension](./extensions/vscode/CHANGELOG.md) - [Intellij Extension](./extensions/intellij/CHANGELOG.md) -- [Continue Server](./server/CHANGELOG.md) diff --git a/README.md b/README.md index 0e785357cb..17b2b953db 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,14 @@ -![Continue logo](media/c_d.png) +
<div align="center"> + +![Continue logo](media/readme.png) + +</div> <h1 align="center">Continue</h1> <div align="center">
-**[Continue](https://continue.dev/docs) is an open-source autopilot for [VS Code](https://marketplace.visualstudio.com/items?itemName=Continue.continue) and [JetBrains](https://plugins.jetbrains.com/plugin/22707-continue-extension)—the easiest way to code with any LLM** +**[Continue](https://continue.dev/docs) keeps developers in flow. Our open-source [VS Code](https://marketplace.visualstudio.com/items?itemName=Continue.continue) and [JetBrains](https://plugins.jetbrains.com/plugin/22707-continue-extension) extensions enable you to easily create your own modular AI software development system that you can improve.**
diff --git a/core/index.d.ts b/core/index.d.ts index fb7108a3ea..de35a97d9c 100644 --- a/core/index.d.ts +++ b/core/index.d.ts @@ -523,9 +523,10 @@ export type ModelName = | "gpt-4-turbo" | "gpt-4-turbo-preview" | "gpt-4-vision-preview" - // Open Source + // Mistral | "mistral-7b" | "mistral-8x7b" + // Llama 2 | "llama2-7b" | "llama2-13b" | "llama2-70b" @@ -533,6 +534,10 @@ export type ModelName = | "codellama-13b" | "codellama-34b" | "codellama-70b" + // Llama 3 + | "llama3-8b" + | "llama3-70b" + // Other Open-source | "phi2" | "phind-codellama-34b" | "wizardcoder-7b" diff --git a/core/llm/llms/Groq.ts b/core/llm/llms/Groq.ts index 6c95157954..9f5e6f896c 100644 --- a/core/llm/llms/Groq.ts +++ b/core/llm/llms/Groq.ts @@ -11,6 +11,8 @@ class Groq extends OpenAI { "llama2-70b": "llama2-70b-4096", "mistral-8x7b": "mixtral-8x7b-32768", gemma: "gemma-7b-it", + "llama3-8b": "llama3-8b-8192", + "llama3-70b": "llama3-70b-8192", }; protected _convertModelName(model: string): string { return Groq.modelConversion[model] ?? model; diff --git a/core/llm/llms/Ollama.ts b/core/llm/llms/Ollama.ts index ccc899a250..10ce18b2e6 100644 --- a/core/llm/llms/Ollama.ts +++ b/core/llm/llms/Ollama.ts @@ -76,6 +76,8 @@ class Ollama extends BaseLLM { "codellama-13b": "codellama:13b", "codellama-34b": "codellama:34b", "codellama-70b": "codellama:70b", + "llama3-8b": "llama3:8b", + "llama3-70b": "llama3:70b", "phi-2": "phi:2.7b", "phind-codellama-34b": "phind-codellama:34b-v2", "wizardcoder-7b": "wizardcoder:7b-python", diff --git a/core/llm/llms/Replicate.ts b/core/llm/llms/Replicate.ts index daca82a832..4ebe96332d 100644 --- a/core/llm/llms/Replicate.ts +++ b/core/llm/llms/Replicate.ts @@ -16,6 +16,8 @@ class Replicate extends BaseLLM { "meta/codellama-70b-instruct:a279116fe47a0f65701a8817188601e2fe8f4b9e04a518789655ea7b995851bf", "llama2-7b": "meta/llama-2-7b-chat" as any, "llama2-13b": "meta/llama-2-13b-chat" as any, + "llama3-8b": "meta/meta-llama-3-8b-instruct" as any, + "llama3-70b": "meta/meta-llama-3-70b-instruct" as any, "zephyr-7b": "nateraw/zephyr-7b-beta:b79f33de5c6c4e34087d44eaea4a9d98ce5d3f3a09522f7328eea0685003a931", "mistral-7b": diff --git a/core/llm/llms/Together.ts b/core/llm/llms/Together.ts index 62ce21e834..c15b6d1812 100644 --- a/core/llm/llms/Together.ts +++ b/core/llm/llms/Together.ts @@ -18,6 +18,8 @@ class Together extends OpenAI { "codellama-13b": "togethercomputer/CodeLlama-13b-Instruct", "codellama-34b": "togethercomputer/CodeLlama-34b-Instruct", "codellama-70b": "codellama/CodeLlama-70b-Instruct-hf", + "llama3-8b": "meta-llama/Llama-3-8b-chat-hf", + "llama3-70b": "meta-llama/Llama-3-70b-chat-hf", "llama2-7b": "togethercomputer/llama-2-7b-chat", "llama2-13b": "togethercomputer/llama-2-13b-chat", "llama2-70b": "togethercomputer/llama-2-70b-chat", diff --git a/core/llm/llms/index.ts b/core/llm/llms/index.ts index d1d28253bd..2e789ea05d 100644 --- a/core/llm/llms/index.ts +++ b/core/llm/llms/index.ts @@ -1,15 +1,15 @@ import Handlebars from "handlebars"; import { BaseLLM } from ".."; import { - BaseCompletionOptions, - ILLM, - LLMOptions, - ModelDescription, + BaseCompletionOptions, + ILLM, + LLMOptions, + ModelDescription, } from "../.."; import { DEFAULT_MAX_TOKENS } from "../constants"; import Anthropic from "./Anthropic"; -import Cohere from "./Cohere"; import Bedrock from "./Bedrock"; +import Cohere from "./Cohere"; import DeepInfra from "./DeepInfra"; import Flowise from "./Flowise"; import FreeTrial from "./FreeTrial"; @@ -29,122 +29,122 @@ import TextGenWebUI from "./TextGenWebUI"; 
import Together from "./Together"; function convertToLetter(num: number): string { - let result = ""; - while (num > 0) { - const remainder = (num - 1) % 26; - result = String.fromCharCode(97 + remainder) + result; - num = Math.floor((num - 1) / 26); - } - return result; + let result = ""; + while (num > 0) { + const remainder = (num - 1) % 26; + result = String.fromCharCode(97 + remainder) + result; + num = Math.floor((num - 1) / 26); + } + return result; } const getHandlebarsVars = ( - value: string, + value: string, ): [string, { [key: string]: string }] => { - const ast = Handlebars.parse(value); + const ast = Handlebars.parse(value); - let keysToFilepath: { [key: string]: string } = {}; - let keyIndex = 1; - for (let i in ast.body) { - if (ast.body[i].type === "MustacheStatement") { - const letter = convertToLetter(keyIndex); - keysToFilepath[letter] = (ast.body[i] as any).path.original; - value = value.replace( - new RegExp("{{\\s*" + (ast.body[i] as any).path.original + "\\s*}}"), - `{{${letter}}}`, - ); - keyIndex++; - } - } - return [value, keysToFilepath]; + let keysToFilepath: { [key: string]: string } = {}; + let keyIndex = 1; + for (let i in ast.body) { + if (ast.body[i].type === "MustacheStatement") { + const letter = convertToLetter(keyIndex); + keysToFilepath[letter] = (ast.body[i] as any).path.original; + value = value.replace( + new RegExp("{{\\s*" + (ast.body[i] as any).path.original + "\\s*}}"), + `{{${letter}}}`, + ); + keyIndex++; + } + } + return [value, keysToFilepath]; }; export async function renderTemplatedString( - template: string, - readFile: (filepath: string) => Promise, - inputData: any, + template: string, + readFile: (filepath: string) => Promise, + inputData: any, ): Promise { - const [newTemplate, vars] = getHandlebarsVars(template); - const data: any = { ...inputData }; - for (const key in vars) { - const fileContents = await readFile(vars[key]); - data[key] = fileContents || (inputData[vars[key]] ?? vars[key]); - } - const templateFn = Handlebars.compile(newTemplate); - const final = templateFn(data); - return final; + const [newTemplate, vars] = getHandlebarsVars(template); + const data: any = { ...inputData }; + for (const key in vars) { + const fileContents = await readFile(vars[key]); + data[key] = fileContents || (inputData[vars[key]] ?? 
vars[key]); + } + const templateFn = Handlebars.compile(newTemplate); + const final = templateFn(data); + return final; } const LLMs = [ - Anthropic, - Cohere, - FreeTrial, - Gemini, - Llamafile, - Ollama, - Replicate, - TextGenWebUI, - Together, - HuggingFaceTGI, - HuggingFaceInferenceAPI, - LlamaCpp, - OpenAI, - LMStudio, - Mistral, - Bedrock, - DeepInfra, - OpenAIFreeTrial, - Flowise, - Groq, + Anthropic, + Cohere, + FreeTrial, + Gemini, + Llamafile, + Ollama, + Replicate, + TextGenWebUI, + Together, + HuggingFaceTGI, + HuggingFaceInferenceAPI, + LlamaCpp, + OpenAI, + LMStudio, + Mistral, + Bedrock, + DeepInfra, + OpenAIFreeTrial, + Flowise, + Groq, ]; export async function llmFromDescription( - desc: ModelDescription, - readFile: (filepath: string) => Promise, - completionOptions?: BaseCompletionOptions, - systemMessage?: string, + desc: ModelDescription, + readFile: (filepath: string) => Promise, + completionOptions?: BaseCompletionOptions, + systemMessage?: string, ): Promise { - const cls = LLMs.find((llm) => llm.providerName === desc.provider); + const cls = LLMs.find((llm) => llm.providerName === desc.provider); - if (!cls) { - return undefined; - } + if (!cls) { + return undefined; + } - const finalCompletionOptions = { - ...completionOptions, - ...desc.completionOptions, - }; + const finalCompletionOptions = { + ...completionOptions, + ...desc.completionOptions, + }; - systemMessage = desc.systemMessage ?? systemMessage; - if (systemMessage !== undefined) { - systemMessage = await renderTemplatedString(systemMessage, readFile, {}); - } + systemMessage = desc.systemMessage ?? systemMessage; + if (systemMessage !== undefined) { + systemMessage = await renderTemplatedString(systemMessage, readFile, {}); + } - const options: LLMOptions = { - ...desc, - completionOptions: { - ...finalCompletionOptions, - model: (desc.model || cls.defaultOptions?.model) ?? "codellama-7b", - maxTokens: - finalCompletionOptions.maxTokens ?? - cls.defaultOptions?.completionOptions?.maxTokens ?? - DEFAULT_MAX_TOKENS, - }, - systemMessage, - }; + const options: LLMOptions = { + ...desc, + completionOptions: { + ...finalCompletionOptions, + model: (desc.model || cls.defaultOptions?.model) ?? "codellama-7b", + maxTokens: + finalCompletionOptions.maxTokens ?? + cls.defaultOptions?.completionOptions?.maxTokens ?? + DEFAULT_MAX_TOKENS, + }, + systemMessage, + }; - return new cls(options); + return new cls(options); } export function llmFromProviderAndOptions( - providerName: string, - llmOptions: LLMOptions, + providerName: string, + llmOptions: LLMOptions, ): ILLM { - const cls = LLMs.find((llm) => llm.providerName === providerName); + const cls = LLMs.find((llm) => llm.providerName === providerName); - if (!cls) { - throw new Error(`Unknown LLM provider type "${providerName}"`); - } + if (!cls) { + throw new Error(`Unknown LLM provider type "${providerName}"`); + } - return new cls(llmOptions); + return new cls(llmOptions); } diff --git a/docs/docs/customization/slash-commands.md b/docs/docs/customization/slash-commands.md index d521b09ebc..c4cbb9ae64 100644 --- a/docs/docs/customization/slash-commands.md +++ b/docs/docs/customization/slash-commands.md @@ -16,7 +16,7 @@ To use any of the built-in slash commands, open `~/.continue/config.json` and ad ### `/edit` -Select code with ctrl/cmd + M (VS Code) or ctrl/cmd + J (JetBrains), and then type "/edit", followed by instructions for the edit. Continue will stream the changes into a side-by-side diff editor. 
+Select code with ctrl/cmd + L (VS Code) or ctrl/cmd + J (JetBrains), and then type "/edit", followed by instructions for the edit. Continue will stream the changes into a side-by-side diff editor. ```json { @@ -118,7 +118,7 @@ You can add custom slash commands by adding to the `customCommands` property in - `name`: the name of the command, which will be invoked with `/name` - `description`: a short description of the command, which will appear in the dropdown -- `prompt`: a set of instructions to the LLM, which will be shown in the prompt +- `prompt`: a templated prompt to send to the LLM Custom commands are great when you are frequently reusing a prompt. For example, if you've crafted a great prompt and frequently ask the LLM to check for mistakes in your code, you could add a command like this: @@ -126,10 +126,17 @@ Custom commands are great when you are frequently reusing a prompt. For example, customCommands=[{ "name": "check", "description": "Check for mistakes in my code", - "prompt": "Please read the highlighted code and check for any mistakes. You should look for the following, and be extremely vigilant:\n- Syntax errors\n- Logic errors\n- Security vulnerabilities\n- Performance issues\n- Anything else that looks wrong\n\nOnce you find an error, please explain it as clearly as possible, but without using extra words. For example, instead of saying 'I think there is a syntax error on line 5', you should say 'Syntax error on line 5'. Give your answer as one bullet point per mistake found." + "prompt": "{{{ input }}}\n\nPlease read the highlighted code and check for any mistakes. You should look for the following, and be extremely vigilant:\n- Syntax errors\n- Logic errors\n- Security vulnerabilities\n- Performance issues\n- Anything else that looks wrong\n\nOnce you find an error, please explain it as clearly as possible, but without using extra words. For example, instead of saying 'I think there is a syntax error on line 5', you should say 'Syntax error on line 5'. Give your answer as one bullet point per mistake found." }] ``` +#### Templating + +The `prompt` property supports templating with Handlebars syntax. You can use the following variables: + +- `input` (used in the example above): any additional input entered with the slash command. For example, if you type `/test only write one test`, `input` will be `only write one test`. This will also include highlighted code blocks. +- File names: You can reference any file by providing an absolute path or a path relative to the current working directory. + ### Custom Slash Commands If you want to go a step further than writing custom commands with natural language, you can write a custom function that returns the response. This requires using `config.ts` instead of `config.json`. @@ -147,7 +154,7 @@ export function modifyConfig(config: Config): Config { `${diff}\n\nWrite a commit message for the above changes. Use no more than 20 tokens to give a brief description in the imperative mood (e.g. 'Add feature' not 'Added feature'):`, { maxTokens: 20, - } + }, )) { yield message; } diff --git a/docs/docs/walkthroughs/codebase-embeddings.md b/docs/docs/walkthroughs/codebase-embeddings.md index 701100a4de..7d7dd8ca51 100644 --- a/docs/docs/walkthroughs/codebase-embeddings.md +++ b/docs/docs/walkthroughs/codebase-embeddings.md @@ -94,11 +94,27 @@ We also support other methods of generating embeddings, which can be configured } ``` +### Voyage AI + +Voyage AI offers the best embeddings for code with their voyage-code-2 model. 
After obtaining an API key from [here](https://www.voyageai.com/), you can configure like this: + +```json title="~/.continue/config.json" +{ + "embeddingsProvider": { + "provider": "openai", + "model": "voyage-code-2", + "apiBase": " + https://api.voyageai.com/v1/", + "apiKey": "" + } +} +``` + ### OpenAI OpenAI's [embeddings](https://platform.openai.com/docs/guides/embeddings) are high dimensional embeddings that give great performance on both text and code. -Configuration for text-embedding-3-small Model. This is default. +Configuration for text-embedding-3-small Model. This is default. The text-embedding-3-small model offers an outstanding balance between performance and efficiency, suitable for a versatile range of applications. ```json title="~/.continue/config.json" @@ -106,7 +122,8 @@ The text-embedding-3-small model offers an outstanding balance between performan "embeddingsProvider": { "provider": "openai", "model": "text-embedding-3-small", - "apiBase": "" // optional, defaults to OpenAI's API + "apiBase": "", // optional, defaults to OpenAI's API + "apiKey": "" } } ``` @@ -119,7 +136,8 @@ For those requiring the highest level of embedding detail and precision, the tex "embeddingsProvider": { "provider": "openai", "model": "text-embedding-3-large", - "apiBase": "" // optional, defaults to OpenAI's API + "apiBase": "", // optional, defaults to OpenAI's API + "apiKey": "" } } ``` @@ -132,7 +150,8 @@ For certain scenarios, you may still find the text-embedding-ada-002 model relev "embeddingsProvider": { "provider": "openai", "model": "text-embedding-ada-002", - "apiBase": "" // optional, defaults to OpenAI's API + "apiBase": "", // optional, defaults to OpenAI's API + "apiKey": "" } } ``` @@ -153,7 +172,7 @@ export function modifyConfig(config: Config): Config { }); const data = await response.json(); return data.embedding; - }) + }), ); }, }; diff --git a/docs/docs/walkthroughs/tab-autocomplete.md b/docs/docs/walkthroughs/tab-autocomplete.md index 52a2f65c89..f82ed42ea6 100644 --- a/docs/docs/walkthroughs/tab-autocomplete.md +++ b/docs/docs/walkthroughs/tab-autocomplete.md @@ -112,9 +112,8 @@ Follow these steps to ensure that everything is set up correctly: 2. Make sure you have downloaded Ollama. 3. Run `ollama run starcoder2:3b` to verify that the model is downloaded. 4. Make sure that any other completion providers are disabled (e.g. Copilot), as they may interfere. -5. Make sure that you aren't also using another Ollama model for chat. This will cause Ollama to constantly load and unload the models from memory, resulting in slow responses (or none at all) for both. -6. Check the output of the logs to find any potential errors (cmd/ctrl+shift+p -> "Toggle Developer Tools" -> "Console" tab in VS Code, ~/.continue/core.log in JetBrains). -7. If you are still having issues, please let us know in our [Discord](https://discord.gg/vapESyrFmJ) and we'll help as soon as possible. +5. Check the output of the logs to find any potential errors (cmd/ctrl+shift+p -> "Toggle Developer Tools" -> "Console" tab in VS Code, ~/.continue/core.log in JetBrains). +6. If you are still having issues, please let us know in our [Discord](https://discord.gg/vapESyrFmJ) and we'll help as soon as possible. 
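The checklist above assumes tab-autocomplete is pointed at the local starcoder2:3b model it references. A minimal config.json sketch for that setup (key names follow the `tabAutocompleteModel` option documented in tab-autocomplete.md; the title string is illustrative):

```json title="~/.continue/config.json"
{
  "tabAutocompleteModel": {
    "title": "Starcoder2 3b",
    "provider": "ollama",
    "model": "starcoder2:3b" // the same Ollama tag verified in step 3 above
  }
}
```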
### Completions are slow diff --git a/docs/netlify.toml b/docs/netlify.toml index 490ac92ed7..048a32e812 100644 --- a/docs/netlify.toml +++ b/docs/netlify.toml @@ -1,4 +1,9 @@ [[redirects]] from = "/" to = "/docs/intro" + force = true + +[[redirects]] + from = "/docs" + to = "/docs/intro" force = true \ No newline at end of file diff --git a/docs/static/schemas/config.json b/docs/static/schemas/config.json index 47f1df2130..f94d3e5df1 100644 --- a/docs/static/schemas/config.json +++ b/docs/static/schemas/config.json @@ -403,6 +403,8 @@ "anyOf": [ { "enum": [ + "llama3-8b", + "llama3-70b", "codellama-7b", "codellama-13b", "codellama-34b", @@ -484,6 +486,8 @@ "codellama-13b", "codellama-34b", "codellama-70b", + "llama3-8b", + "llama3-70b", "phind-codellama-34b", "wizardcoder-7b", "wizardcoder-13b", @@ -628,6 +632,8 @@ "mistral-8x7b", "llama2-7b", "llama2-13b", + "llama3-8b", + "llama3-70b", "codellama-7b", "codellama-13b", "codellama-34b", @@ -690,6 +696,8 @@ "codellama-13b", "codellama-34b", "codellama-70b", + "llama3-8b", + "llama3-70b", "phind-codellama-34b", "wizardcoder-7b", "wizardcoder-13b", @@ -736,6 +744,8 @@ "codellama-13b", "codellama-34b", "codellama-70b", + "llama3-8b", + "llama3-70b", "phi-2", "phind-codellama-34b", "wizardcoder-7b", @@ -792,7 +802,13 @@ "then": { "properties": { "model": { - "enum": ["llama2-70b", "mistral-8x7b", "gemma"] + "enum": [ + "llama2-70b", + "mistral-8x7b", + "gemma", + "llama3-8b", + "llama3-70b" + ] } } } @@ -936,6 +952,8 @@ "codellama-13b", "codellama-34b", "codellama-70b", + "llama3-8b", + "llama3-70b", "phind-codellama-34b", "wizardcoder-7b", "wizardcoder-13b", diff --git a/extensions/intellij/CHANGELOG.md b/extensions/intellij/CHANGELOG.md index 2d2facf21d..e014cbbc41 100644 --- a/extensions/intellij/CHANGELOG.md +++ b/extensions/intellij/CHANGELOG.md @@ -6,7 +6,15 @@ adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html), and is generated by [Changie](https://github.com/miniscruff/changie). 
-Pre-release Changes +## 0.0.42 - 2024-04-12 +### Added +* Inline cmd/ctrl+I in JetBrains +### Fixed +* Fixed character encoding error causing display issues +* Fixed error causing input to constantly demand focus +* Fixed automatic reloading of config.json + +## 0.0.38 - 2024-03-15 ### Added * Remote config server support * Autocomplete support in JetBrains diff --git a/extensions/intellij/src/main/resources/config_schema.json b/extensions/intellij/src/main/resources/config_schema.json index 47f1df2130..f94d3e5df1 100644 --- a/extensions/intellij/src/main/resources/config_schema.json +++ b/extensions/intellij/src/main/resources/config_schema.json @@ -403,6 +403,8 @@ "anyOf": [ { "enum": [ + "llama3-8b", + "llama3-70b", "codellama-7b", "codellama-13b", "codellama-34b", @@ -484,6 +486,8 @@ "codellama-13b", "codellama-34b", "codellama-70b", + "llama3-8b", + "llama3-70b", "phind-codellama-34b", "wizardcoder-7b", "wizardcoder-13b", @@ -628,6 +632,8 @@ "mistral-8x7b", "llama2-7b", "llama2-13b", + "llama3-8b", + "llama3-70b", "codellama-7b", "codellama-13b", "codellama-34b", @@ -690,6 +696,8 @@ "codellama-13b", "codellama-34b", "codellama-70b", + "llama3-8b", + "llama3-70b", "phind-codellama-34b", "wizardcoder-7b", "wizardcoder-13b", @@ -736,6 +744,8 @@ "codellama-13b", "codellama-34b", "codellama-70b", + "llama3-8b", + "llama3-70b", "phi-2", "phind-codellama-34b", "wizardcoder-7b", @@ -792,7 +802,13 @@ "then": { "properties": { "model": { - "enum": ["llama2-70b", "mistral-8x7b", "gemma"] + "enum": [ + "llama2-70b", + "mistral-8x7b", + "gemma", + "llama3-8b", + "llama3-70b" + ] } } } @@ -936,6 +952,8 @@ "codellama-13b", "codellama-34b", "codellama-70b", + "llama3-8b", + "llama3-70b", "phind-codellama-34b", "wizardcoder-7b", "wizardcoder-13b", diff --git a/extensions/vscode/.continuerc.json b/extensions/vscode/.continuerc.json index 9f9190f709..7fbb047a6e 100644 --- a/extensions/vscode/.continuerc.json +++ b/extensions/vscode/.continuerc.json @@ -5,9 +5,5 @@ "prompt": "Write a comprehensive set of unit tests for the selected code. It should setup, run tests that check for correctness including important edge cases, and teardown. Ensure that the tests are complete and sophisticated. Give the tests just as chat output, don't edit any file.", "description": "This is an example custom command. Use /config to edit it and create more" } - ], - "disableIndexing": true, - "models": [ - ] } diff --git a/extensions/vscode/CHANGELOG.md b/extensions/vscode/CHANGELOG.md index 1ca13d7401..4597028abb 100644 --- a/extensions/vscode/CHANGELOG.md +++ b/extensions/vscode/CHANGELOG.md @@ -6,6 +6,16 @@ adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html), and is generated by [Changie](https://github.com/miniscruff/changie). 
+Pre-release Changes +### Added +* Support for Gemini 1.5 Pro + +## 0.8.24 - 2024-04-12 +### Added +* Support for improved retrieval models (Voyage embeddings/reranking) +* New @code context provider +* Personal usage analytics + ## 0.8.15 - 2024-03-05 ### Added * Tab-autocomplete in beta diff --git a/extensions/vscode/config_schema.json b/extensions/vscode/config_schema.json index f842d625dd..a14b8c5600 100644 --- a/extensions/vscode/config_schema.json +++ b/extensions/vscode/config_schema.json @@ -403,6 +403,8 @@ "anyOf": [ { "enum": [ + "llama3-8b", + "llama3-70b", "codellama-7b", "codellama-13b", "codellama-34b", @@ -484,6 +486,8 @@ "codellama-13b", "codellama-34b", "codellama-70b", + "llama3-8b", + "llama3-70b", "phind-codellama-34b", "wizardcoder-7b", "wizardcoder-13b", @@ -628,6 +632,8 @@ "mistral-8x7b", "llama2-7b", "llama2-13b", + "llama3-8b", + "llama3-70b", "codellama-7b", "codellama-13b", "codellama-34b", @@ -690,6 +696,8 @@ "codellama-13b", "codellama-34b", "codellama-70b", + "llama3-8b", + "llama3-70b", "phind-codellama-34b", "wizardcoder-7b", "wizardcoder-13b", @@ -736,6 +744,8 @@ "codellama-13b", "codellama-34b", "codellama-70b", + "llama3-8b", + "llama3-70b", "phi-2", "phind-codellama-34b", "wizardcoder-7b", @@ -792,7 +802,13 @@ "then": { "properties": { "model": { - "enum": ["llama2-70b", "mistral-8x7b", "gemma"] + "enum": [ + "llama2-70b", + "mistral-8x7b", + "gemma", + "llama3-8b", + "llama3-70b" + ] } } } @@ -936,6 +952,8 @@ "codellama-13b", "codellama-34b", "codellama-70b", + "llama3-8b", + "llama3-70b", "phind-codellama-34b", "wizardcoder-7b", "wizardcoder-13b", diff --git a/extensions/vscode/continue_rc_schema.json b/extensions/vscode/continue_rc_schema.json index 737e09eadd..ed9bfd7619 100644 --- a/extensions/vscode/continue_rc_schema.json +++ b/extensions/vscode/continue_rc_schema.json @@ -435,6 +435,8 @@ "anyOf": [ { "enum": [ + "llama3-8b", + "llama3-70b", "codellama-7b", "codellama-13b", "codellama-34b", @@ -524,6 +526,8 @@ "codellama-13b", "codellama-34b", "codellama-70b", + "llama3-8b", + "llama3-70b", "phind-codellama-34b", "wizardcoder-7b", "wizardcoder-13b", @@ -691,6 +695,8 @@ "mistral-8x7b", "llama2-7b", "llama2-13b", + "llama3-8b", + "llama3-70b", "codellama-7b", "codellama-13b", "codellama-34b", @@ -759,6 +765,8 @@ "codellama-13b", "codellama-34b", "codellama-70b", + "llama3-8b", + "llama3-70b", "phind-codellama-34b", "wizardcoder-7b", "wizardcoder-13b", @@ -809,6 +817,8 @@ "codellama-13b", "codellama-34b", "codellama-70b", + "llama3-8b", + "llama3-70b", "phi-2", "phind-codellama-34b", "wizardcoder-7b", @@ -880,7 +890,9 @@ "enum": [ "llama2-70b", "mistral-8x7b", - "gemma" + "gemma", + "llama3-8b", + "llama3-70b" ] } } @@ -1051,6 +1063,8 @@ "codellama-13b", "codellama-34b", "codellama-70b", + "llama3-8b", + "llama3-70b", "phind-codellama-34b", "wizardcoder-7b", "wizardcoder-13b", diff --git a/extensions/vscode/media/continue-dev-square.png b/extensions/vscode/media/continue-dev-square.png deleted file mode 100644 index e4b625568c..0000000000 Binary files a/extensions/vscode/media/continue-dev-square.png and /dev/null differ diff --git a/extensions/vscode/media/icon.png b/extensions/vscode/media/icon.png index 9ea7ec3fb4..d8f9f16c59 100644 Binary files a/extensions/vscode/media/icon.png and b/extensions/vscode/media/icon.png differ diff --git a/extensions/vscode/media/sidebar-icon.png b/extensions/vscode/media/sidebar-icon.png new file mode 100644 index 0000000000..4f41d9dfd1 Binary files /dev/null and b/extensions/vscode/media/sidebar-icon.png differ 
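The schema changes above register `llama3-8b` and `llama3-70b` as accepted model names for the `groq` provider. A hedged sketch of the resulting user-facing entry in `~/.continue/config.json` (title and placeholder API key are illustrative and not taken from this diff):

```json title="~/.continue/config.json"
{
  "models": [
    {
      "title": "Llama3 70b (Groq)",
      "provider": "groq",
      "model": "llama3-70b",
      "apiKey": "YOUR_GROQ_API_KEY" // placeholder, replace with a real key
    }
  ]
}
```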
diff --git a/extensions/vscode/package-lock.json b/extensions/vscode/package-lock.json index 2ea5de9a1e..1861ae0d92 100644 --- a/extensions/vscode/package-lock.json +++ b/extensions/vscode/package-lock.json @@ -1,12 +1,12 @@ { "name": "continue", - "version": "0.9.92", + "version": "0.8.22", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "continue", - "version": "0.9.92", + "version": "0.8.22", "license": "Apache-2.0", "dependencies": { "@electron/rebuild": "^3.2.10", diff --git a/extensions/vscode/package.json b/extensions/vscode/package.json index 799313459c..093974c8c3 100644 --- a/extensions/vscode/package.json +++ b/extensions/vscode/package.json @@ -405,7 +405,7 @@ { "id": "continue", "title": "Continue", - "icon": "media/continue-dev-square.png" + "icon": "media/sidebar-icon.png" } ] }, diff --git a/gui/src/components/markdown/CodeBlockToolbar.tsx b/gui/src/components/markdown/CodeBlockToolbar.tsx index 3c306a1185..fae235fecd 100644 --- a/gui/src/components/markdown/CodeBlockToolbar.tsx +++ b/gui/src/components/markdown/CodeBlockToolbar.tsx @@ -64,7 +64,7 @@ const commonTerminalCommands = [ function isTerminalCodeBlock(language: string | undefined, text: string) { return ( terminalLanguages.includes(language) || - (language.length === 0 && + (language?.length === 0 && (text.trim().split("\n").length === 1 || commonTerminalCommands.some((c) => text.trim().startsWith(c)))) ); diff --git a/gui/src/util/modelData.ts b/gui/src/util/modelData.ts index 429a1f2b83..99702f2962 100644 --- a/gui/src/util/modelData.ts +++ b/gui/src/util/modelData.ts @@ -231,6 +231,32 @@ const mixtralTrial: ModelPackage = { providerOptions: ["freetrial", "groq"], }; +const llama38bChat: ModelPackage = { + title: "Llama3 8b", + description: "The latest Llama model from Meta, fine-tuned for chat", + refUrl: "", + params: { + title: "Llama3-8b", + model: "llama3-8b", + contextLength: 8192, + }, + icon: "meta.png", + providerOptions: ["groq"], +}; + +const llama370bChat: ModelPackage = { + title: "Llama3 70b Chat", + description: "The latest Llama model from Meta, fine-tuned for chat", + refUrl: "", + params: { + title: "Llama3-70b", + model: "llama3-70b", + contextLength: 8192, + }, + icon: "meta.png", + providerOptions: ["groq"], +}; + const llama270bChat: ModelPackage = { title: "Llama2 70b Chat", description: "The latest Llama model from Meta, fine-tuned for chat", @@ -244,14 +270,14 @@ const llama270bChat: ModelPackage = { providerOptions: ["groq"], }; -const llama2Chat: ModelPackage = { - title: "Llama2 Chat", - description: "The latest Llama model from Meta, fine-tuned for chat", +const llama3Chat: ModelPackage = { + title: "Llama3 Chat", + description: "The latest model from Meta, fine-tuned for chat", refUrl: "", params: { - title: "Llama2-7b", - model: "llama2-7b", - contextLength: 4096, + title: "Llama3-8b", + model: "llama3-8b", + contextLength: 8192, }, icon: "meta.png", dimensions: [ @@ -259,17 +285,13 @@ const llama2Chat: ModelPackage = { name: "Parameter Count", description: "The number of parameters in the model", options: { - "7b": { - model: "llama2-7b", - title: "Llama2-7b", - }, - "13b": { - model: "llama2-13b", - title: "Llama2-13b", + "8b": { + model: "llama3-8b", + title: "Llama3-8b", }, - "34b": { - model: "llama2-34b", - title: "Llama2-34b", + "70b": { + model: "llama3-70b", + title: "Llama3-70b", }, }, }, @@ -531,12 +553,12 @@ const commandRPlus: ModelPackage = { }; const osModels = [ + llama3Chat, deepseek, wizardCoder, codeLlamaInstruct, mistral, phindCodeLlama, - 
llama2Chat, zephyr, neuralChat, ]; @@ -661,13 +683,13 @@ export const MODEL_INFO: (ModelPackage | string)[] = [ gemini15Pro, geminiPro, "Open Source", + llama3Chat, deepseek, mistral, codellama70bTrial, wizardCoder, codeLlamaInstruct, phindCodeLlama, - llama2Chat, zephyr, ]; @@ -801,7 +823,7 @@ export const PROVIDER_INFO: { [key: string]: ModelInfo } = { }, ...completionParamsInputs, ], - packages: [llama2Chat, codeLlamaInstruct, mistral].map((p) => { + packages: [llama3Chat, codeLlamaInstruct, mistral].map((p) => { p.params.contextLength = 4096; return p; }), @@ -828,6 +850,8 @@ export const PROVIDER_INFO: { [key: string]: ModelInfo } = { }, ], packages: [ + llama370bChat, + llama38bChat, { ...mixtralTrial, title: "Mixtral" }, llama270bChat, { @@ -948,7 +972,7 @@ export const PROVIDER_INFO: { [key: string]: ModelInfo } = { ModelProviderTag["Requires API Key"], ModelProviderTag["Open-Source"], ], - packages: [codeLlamaInstruct, llama2Chat, wizardCoder, mistral, zephyr], + packages: [llama3Chat, codeLlamaInstruct, wizardCoder, mistral, zephyr], }, llamacpp: { title: "llama.cpp", diff --git a/media/c_d.png b/media/c_d.png deleted file mode 100644 index 2aae4003a3..0000000000 Binary files a/media/c_d.png and /dev/null differ diff --git a/media/readme.png b/media/readme.png new file mode 100644 index 0000000000..64f65b845a Binary files /dev/null and b/media/readme.png differ
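Because Ollama.ts above maps `llama3-8b` to the local tag `llama3:8b`, the new Llama 3 package surfaced in modelData.ts can also be run fully locally. A minimal sketch, assuming the model has already been pulled with `ollama pull llama3:8b`; the 8192 context length mirrors the modelData.ts entries above:

```json title="~/.continue/config.json"
{
  "models": [
    {
      "title": "Llama3 8b (Ollama)",
      "provider": "ollama",
      "model": "llama3-8b",
      "contextLength": 8192 // matches the context window used by the GUI model package
    }
  ]
}
```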