diff --git a/src/components/FooterMenu/index.tsx b/src/components/FooterMenu/index.tsx
index ba5b0c14..fd8206ef 100644
--- a/src/components/FooterMenu/index.tsx
+++ b/src/components/FooterMenu/index.tsx
@@ -52,10 +52,6 @@ const menus = [
menu: 'Documentation',
path: '/docs',
},
- {
- menu: 'API Reference',
- path: '/api-reference',
- },
],
},
{
diff --git a/src/pages/_meta.json b/src/pages/_meta.json
index 54885285..c83c40ec 100644
--- a/src/pages/_meta.json
+++ b/src/pages/_meta.json
@@ -21,15 +21,6 @@
"title": "Integrations",
"display": "hidden"
},
- "api-reference": {
- "type": "page",
- "title": "API Reference",
- "href": "/api-reference",
- "theme": {
- "layout": "raw",
- "footer": false
- }
- },
"changelog": {
"type": "page",
"title": "Changelog",
diff --git a/src/pages/api-reference.mdx b/src/pages/api-reference.mdx
deleted file mode 100644
index f20b96ad..00000000
--- a/src/pages/api-reference.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
----
-title: API Refrence
-description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server.
-keywords:
- [
- Jan,
- Customizable Intelligence, LLM,
- local AI,
- privacy focus,
- free and open source,
- private and offline,
- conversational AI,
- no-subscription fee,
- large language models,
- architecture,
- ]
----
-
-import APIReference from "@/components/APIReference"
-
-<APIReference />
-
diff --git a/src/pages/docs/_assets/Anthropic-1.gif b/src/pages/docs/_assets/Anthropic-1.gif
new file mode 100644
index 00000000..594c5317
Binary files /dev/null and b/src/pages/docs/_assets/Anthropic-1.gif differ
diff --git a/src/pages/docs/_assets/Anthropic-2.gif b/src/pages/docs/_assets/Anthropic-2.gif
new file mode 100644
index 00000000..63e76cc3
Binary files /dev/null and b/src/pages/docs/_assets/Anthropic-2.gif differ
diff --git a/src/pages/docs/_assets/Cohere-1.gif b/src/pages/docs/_assets/Cohere-1.gif
new file mode 100644
index 00000000..6441bd8a
Binary files /dev/null and b/src/pages/docs/_assets/Cohere-1.gif differ
diff --git a/src/pages/docs/_assets/Cohere-2.gif b/src/pages/docs/_assets/Cohere-2.gif
new file mode 100644
index 00000000..9488040c
Binary files /dev/null and b/src/pages/docs/_assets/Cohere-2.gif differ
diff --git a/src/pages/docs/_assets/Groq-1.gif b/src/pages/docs/_assets/Groq-1.gif
new file mode 100644
index 00000000..d95e577b
Binary files /dev/null and b/src/pages/docs/_assets/Groq-1.gif differ
diff --git a/src/pages/docs/_assets/Groq-2.gif b/src/pages/docs/_assets/Groq-2.gif
new file mode 100644
index 00000000..46231696
Binary files /dev/null and b/src/pages/docs/_assets/Groq-2.gif differ
diff --git a/src/pages/docs/_assets/LM-Studio-v1.gif b/src/pages/docs/_assets/LM-Studio-v1.gif
new file mode 100644
index 00000000..65dfb5db
Binary files /dev/null and b/src/pages/docs/_assets/LM-Studio-v1.gif differ
diff --git a/src/pages/docs/_assets/LM-Studio-v2.gif b/src/pages/docs/_assets/LM-Studio-v2.gif
new file mode 100644
index 00000000..ad012df5
Binary files /dev/null and b/src/pages/docs/_assets/LM-Studio-v2.gif differ
diff --git a/src/pages/docs/_assets/LM-Studio-v3.gif b/src/pages/docs/_assets/LM-Studio-v3.gif
new file mode 100644
index 00000000..d5208c2b
Binary files /dev/null and b/src/pages/docs/_assets/LM-Studio-v3.gif differ
diff --git a/src/pages/docs/_assets/Martian-1.gif b/src/pages/docs/_assets/Martian-1.gif
new file mode 100644
index 00000000..3da994cc
Binary files /dev/null and b/src/pages/docs/_assets/Martian-1.gif differ
diff --git a/src/pages/docs/_assets/Martian-2.gif b/src/pages/docs/_assets/Martian-2.gif
new file mode 100644
index 00000000..27fa761b
Binary files /dev/null and b/src/pages/docs/_assets/Martian-2.gif differ
diff --git a/src/pages/docs/_assets/Mistral-1.gif b/src/pages/docs/_assets/Mistral-1.gif
new file mode 100644
index 00000000..7d1e8210
Binary files /dev/null and b/src/pages/docs/_assets/Mistral-1.gif differ
diff --git a/src/pages/docs/_assets/Mistral-2.gif b/src/pages/docs/_assets/Mistral-2.gif
new file mode 100644
index 00000000..25c8d2fc
Binary files /dev/null and b/src/pages/docs/_assets/Mistral-2.gif differ
diff --git a/src/pages/docs/_assets/Ollama-1.gif b/src/pages/docs/_assets/Ollama-1.gif
new file mode 100644
index 00000000..d9c7fa13
Binary files /dev/null and b/src/pages/docs/_assets/Ollama-1.gif differ
diff --git a/src/pages/docs/_assets/Ollama-2.gif b/src/pages/docs/_assets/Ollama-2.gif
new file mode 100644
index 00000000..eadc6afd
Binary files /dev/null and b/src/pages/docs/_assets/Ollama-2.gif differ
diff --git a/src/pages/docs/_assets/Ollama-3.gif b/src/pages/docs/_assets/Ollama-3.gif
new file mode 100644
index 00000000..6b09000b
Binary files /dev/null and b/src/pages/docs/_assets/Ollama-3.gif differ
diff --git a/src/pages/docs/_assets/OpenAi-1.gif b/src/pages/docs/_assets/OpenAi-1.gif
new file mode 100644
index 00000000..c98e2f2b
Binary files /dev/null and b/src/pages/docs/_assets/OpenAi-1.gif differ
diff --git a/src/pages/docs/_assets/OpenAi-2.gif b/src/pages/docs/_assets/OpenAi-2.gif
new file mode 100644
index 00000000..8cd449f7
Binary files /dev/null and b/src/pages/docs/_assets/OpenAi-2.gif differ
diff --git a/src/pages/docs/_assets/OpenRouter-1.gif b/src/pages/docs/_assets/OpenRouter-1.gif
new file mode 100644
index 00000000..cd6a7898
Binary files /dev/null and b/src/pages/docs/_assets/OpenRouter-1.gif differ
diff --git a/src/pages/docs/_assets/OpenRouter-2.gif b/src/pages/docs/_assets/OpenRouter-2.gif
new file mode 100644
index 00000000..307b07de
Binary files /dev/null and b/src/pages/docs/_assets/OpenRouter-2.gif differ
diff --git a/src/pages/docs/_assets/advance-set.png b/src/pages/docs/_assets/advance-set.png
new file mode 100644
index 00000000..41917ef8
Binary files /dev/null and b/src/pages/docs/_assets/advance-set.png differ
diff --git a/src/pages/docs/_assets/advance-settings2.png b/src/pages/docs/_assets/advance-settings2.png
new file mode 100644
index 00000000..151063dc
Binary files /dev/null and b/src/pages/docs/_assets/advance-settings2.png differ
diff --git a/src/pages/docs/_assets/appearance.png b/src/pages/docs/_assets/appearance.png
new file mode 100644
index 00000000..46ba12e7
Binary files /dev/null and b/src/pages/docs/_assets/appearance.png differ
diff --git a/src/pages/docs/_assets/asst.gif b/src/pages/docs/_assets/asst.gif
new file mode 100644
index 00000000..21797d77
Binary files /dev/null and b/src/pages/docs/_assets/asst.gif differ
diff --git a/src/pages/docs/_assets/browser1.png b/src/pages/docs/_assets/browser1.png
new file mode 100644
index 00000000..61b1d477
Binary files /dev/null and b/src/pages/docs/_assets/browser1.png differ
diff --git a/src/pages/docs/_assets/browser2.png b/src/pages/docs/_assets/browser2.png
new file mode 100644
index 00000000..fbf3bcdb
Binary files /dev/null and b/src/pages/docs/_assets/browser2.png differ
diff --git a/src/pages/docs/_assets/chat.gif b/src/pages/docs/_assets/chat.gif
new file mode 100644
index 00000000..94b8c271
Binary files /dev/null and b/src/pages/docs/_assets/chat.gif differ
diff --git a/src/pages/docs/_assets/clean.png b/src/pages/docs/_assets/clean.png
index b1a546c0..a4f58c37 100644
Binary files a/src/pages/docs/_assets/clean.png and b/src/pages/docs/_assets/clean.png differ
diff --git a/src/pages/docs/_assets/clear-logs.png b/src/pages/docs/_assets/clear-logs.png
new file mode 100644
index 00000000..348fc1d8
Binary files /dev/null and b/src/pages/docs/_assets/clear-logs.png differ
diff --git a/src/pages/docs/_assets/data-folder.gif b/src/pages/docs/_assets/data-folder.gif
deleted file mode 100644
index d2d0758d..00000000
Binary files a/src/pages/docs/_assets/data-folder.gif and /dev/null differ
diff --git a/src/pages/docs/_assets/data-folder.png b/src/pages/docs/_assets/data-folder.png
new file mode 100644
index 00000000..def0f38c
Binary files /dev/null and b/src/pages/docs/_assets/data-folder.png differ
diff --git a/src/pages/docs/_assets/default.gif b/src/pages/docs/_assets/default.gif
new file mode 100644
index 00000000..3bc6a68f
Binary files /dev/null and b/src/pages/docs/_assets/default.gif differ
diff --git a/src/pages/docs/_assets/delete-threads.png b/src/pages/docs/_assets/delete-threads.png
index d41a94c0..b134e8e1 100644
Binary files a/src/pages/docs/_assets/delete-threads.png and b/src/pages/docs/_assets/delete-threads.png differ
diff --git a/src/pages/docs/_assets/delete.png b/src/pages/docs/_assets/delete.png
index f3b9520e..6b3f669c 100644
Binary files a/src/pages/docs/_assets/delete.png and b/src/pages/docs/_assets/delete.png differ
diff --git a/src/pages/docs/_assets/download-button.png b/src/pages/docs/_assets/download-button.png
new file mode 100644
index 00000000..2e8088d1
Binary files /dev/null and b/src/pages/docs/_assets/download-button.png differ
diff --git a/src/pages/docs/_assets/download-button2.png b/src/pages/docs/_assets/download-button2.png
new file mode 100644
index 00000000..8f8c683c
Binary files /dev/null and b/src/pages/docs/_assets/download-button2.png differ
diff --git a/src/pages/docs/_assets/download-button3.png b/src/pages/docs/_assets/download-button3.png
new file mode 100644
index 00000000..cf05ec45
Binary files /dev/null and b/src/pages/docs/_assets/download-button3.png differ
diff --git a/src/pages/docs/_assets/download-icon.png b/src/pages/docs/_assets/download-icon.png
new file mode 100644
index 00000000..f553e19f
Binary files /dev/null and b/src/pages/docs/_assets/download-icon.png differ
diff --git a/src/pages/docs/_assets/download-model2.gif b/src/pages/docs/_assets/download-model2.gif
new file mode 100644
index 00000000..c69819a1
Binary files /dev/null and b/src/pages/docs/_assets/download-model2.gif differ
diff --git a/src/pages/docs/_assets/exp-mode.png b/src/pages/docs/_assets/exp-mode.png
new file mode 100644
index 00000000..14ecf6bf
Binary files /dev/null and b/src/pages/docs/_assets/exp-mode.png differ
diff --git a/src/pages/docs/_assets/extensions-page2.png b/src/pages/docs/_assets/extensions-page2.png
new file mode 100644
index 00000000..51863aa9
Binary files /dev/null and b/src/pages/docs/_assets/extensions-page2.png differ
diff --git a/src/pages/docs/_assets/gpu-accel.png b/src/pages/docs/_assets/gpu-accel.png
new file mode 100644
index 00000000..294013fc
Binary files /dev/null and b/src/pages/docs/_assets/gpu-accel.png differ
diff --git a/src/pages/docs/_assets/gpu2.gif b/src/pages/docs/_assets/gpu2.gif
new file mode 100644
index 00000000..fcacc5e9
Binary files /dev/null and b/src/pages/docs/_assets/gpu2.gif differ
diff --git a/src/pages/docs/_assets/history.png b/src/pages/docs/_assets/history.png
index 62b1bbd5..7112a0d2 100644
Binary files a/src/pages/docs/_assets/history.png and b/src/pages/docs/_assets/history.png differ
diff --git a/src/pages/docs/_assets/http.png b/src/pages/docs/_assets/http.png
new file mode 100644
index 00000000..afa0aeae
Binary files /dev/null and b/src/pages/docs/_assets/http.png differ
diff --git a/src/pages/docs/_assets/hub.png b/src/pages/docs/_assets/hub.png
new file mode 100644
index 00000000..75b0f575
Binary files /dev/null and b/src/pages/docs/_assets/hub.png differ
diff --git a/src/pages/docs/_assets/import.png b/src/pages/docs/_assets/import.png
new file mode 100644
index 00000000..5ca7fff4
Binary files /dev/null and b/src/pages/docs/_assets/import.png differ
diff --git a/src/pages/docs/_assets/import2.png b/src/pages/docs/_assets/import2.png
new file mode 100644
index 00000000..caf2c022
Binary files /dev/null and b/src/pages/docs/_assets/import2.png differ
diff --git a/src/pages/docs/_assets/inf.gif b/src/pages/docs/_assets/inf.gif
new file mode 100644
index 00000000..aaef4438
Binary files /dev/null and b/src/pages/docs/_assets/inf.gif differ
diff --git a/src/pages/docs/_assets/install-ext.png b/src/pages/docs/_assets/install-ext.png
new file mode 100644
index 00000000..ee2ff845
Binary files /dev/null and b/src/pages/docs/_assets/install-ext.png differ
diff --git a/src/pages/docs/_assets/install-tensor.png b/src/pages/docs/_assets/install-tensor.png
new file mode 100644
index 00000000..050075d8
Binary files /dev/null and b/src/pages/docs/_assets/install-tensor.png differ
diff --git a/src/pages/docs/_assets/local-api1.png b/src/pages/docs/_assets/local-api1.png
index f232960f..d636d37a 100644
Binary files a/src/pages/docs/_assets/local-api1.png and b/src/pages/docs/_assets/local-api1.png differ
diff --git a/src/pages/docs/_assets/local-api2.png b/src/pages/docs/_assets/local-api2.png
index 53ba1f26..58652775 100644
Binary files a/src/pages/docs/_assets/local-api2.png and b/src/pages/docs/_assets/local-api2.png differ
diff --git a/src/pages/docs/_assets/local-api3.png b/src/pages/docs/_assets/local-api3.png
index e2d2a9ee..aa837264 100644
Binary files a/src/pages/docs/_assets/local-api3.png and b/src/pages/docs/_assets/local-api3.png differ
diff --git a/src/pages/docs/_assets/local-api4.png b/src/pages/docs/_assets/local-api4.png
index 1f2e0553..2a66c9c1 100644
Binary files a/src/pages/docs/_assets/local-api4.png and b/src/pages/docs/_assets/local-api4.png differ
diff --git a/src/pages/docs/_assets/local-api5.png b/src/pages/docs/_assets/local-api5.png
new file mode 100644
index 00000000..2dc80c6c
Binary files /dev/null and b/src/pages/docs/_assets/local-api5.png differ
diff --git a/src/pages/docs/_assets/model-parameters.png b/src/pages/docs/_assets/model-parameters.png
new file mode 100644
index 00000000..ce608ac5
Binary files /dev/null and b/src/pages/docs/_assets/model-parameters.png differ
diff --git a/src/pages/docs/_assets/model-tab.png b/src/pages/docs/_assets/model-tab.png
new file mode 100644
index 00000000..ea595bf1
Binary files /dev/null and b/src/pages/docs/_assets/model-tab.png differ
diff --git a/src/pages/docs/_assets/mymodels.png b/src/pages/docs/_assets/mymodels.png
index 2bf082d7..d12664b8 100644
Binary files a/src/pages/docs/_assets/mymodels.png and b/src/pages/docs/_assets/mymodels.png differ
diff --git a/src/pages/docs/_assets/reset-jan.png b/src/pages/docs/_assets/reset-jan.png
new file mode 100644
index 00000000..2c20ca66
Binary files /dev/null and b/src/pages/docs/_assets/reset-jan.png differ
diff --git a/src/pages/docs/_assets/retrieval1.png b/src/pages/docs/_assets/retrieval1.png
new file mode 100644
index 00000000..8b5d63bb
Binary files /dev/null and b/src/pages/docs/_assets/retrieval1.png differ
diff --git a/src/pages/docs/_assets/retrieval2.png b/src/pages/docs/_assets/retrieval2.png
new file mode 100644
index 00000000..2c9443c4
Binary files /dev/null and b/src/pages/docs/_assets/retrieval2.png differ
diff --git a/src/pages/docs/_assets/scheme.png b/src/pages/docs/_assets/scheme.png
new file mode 100644
index 00000000..fe50d4b6
Binary files /dev/null and b/src/pages/docs/_assets/scheme.png differ
diff --git a/src/pages/docs/_assets/search-bar.png b/src/pages/docs/_assets/search-bar.png
new file mode 100644
index 00000000..b6e55977
Binary files /dev/null and b/src/pages/docs/_assets/search-bar.png differ
diff --git a/src/pages/docs/_assets/server-openai2.gif b/src/pages/docs/_assets/server-openai2.gif
new file mode 100644
index 00000000..b4338d12
Binary files /dev/null and b/src/pages/docs/_assets/server-openai2.gif differ
diff --git a/src/pages/docs/_assets/settings.png b/src/pages/docs/_assets/settings.png
index e79b5f3f..e53f71d1 100644
Binary files a/src/pages/docs/_assets/settings.png and b/src/pages/docs/_assets/settings.png differ
diff --git a/src/pages/docs/_assets/shortcut.png b/src/pages/docs/_assets/shortcut.png
new file mode 100644
index 00000000..b7912f83
Binary files /dev/null and b/src/pages/docs/_assets/shortcut.png differ
diff --git a/src/pages/docs/_assets/ssl.png b/src/pages/docs/_assets/ssl.png
new file mode 100644
index 00000000..97005174
Binary files /dev/null and b/src/pages/docs/_assets/ssl.png differ
diff --git a/src/pages/docs/_assets/system-mili2.png b/src/pages/docs/_assets/system-mili2.png
new file mode 100644
index 00000000..6135c5a4
Binary files /dev/null and b/src/pages/docs/_assets/system-mili2.png differ
diff --git a/src/pages/docs/_assets/system-monitor2.png b/src/pages/docs/_assets/system-monitor2.png
new file mode 100644
index 00000000..e03dfbf6
Binary files /dev/null and b/src/pages/docs/_assets/system-monitor2.png differ
diff --git a/src/pages/docs/_assets/system-slider2.png b/src/pages/docs/_assets/system-slider2.png
new file mode 100644
index 00000000..9114d32c
Binary files /dev/null and b/src/pages/docs/_assets/system-slider2.png differ
diff --git a/src/pages/docs/_assets/tensor.png b/src/pages/docs/_assets/tensor.png
new file mode 100644
index 00000000..812640de
Binary files /dev/null and b/src/pages/docs/_assets/tensor.png differ
diff --git a/src/pages/docs/_assets/theme.png b/src/pages/docs/_assets/theme.png
new file mode 100644
index 00000000..ae14f205
Binary files /dev/null and b/src/pages/docs/_assets/theme.png differ
diff --git a/src/pages/docs/_assets/title.png b/src/pages/docs/_assets/title.png
new file mode 100644
index 00000000..b06f7712
Binary files /dev/null and b/src/pages/docs/_assets/title.png differ
diff --git a/src/pages/docs/_assets/tools.png b/src/pages/docs/_assets/tools.png
new file mode 100644
index 00000000..8104f633
Binary files /dev/null and b/src/pages/docs/_assets/tools.png differ
diff --git a/src/pages/docs/_assets/turn-off.png b/src/pages/docs/_assets/turn-off.png
new file mode 100644
index 00000000..a16a26a5
Binary files /dev/null and b/src/pages/docs/_assets/turn-off.png differ
diff --git a/src/pages/docs/_meta.json b/src/pages/docs/_meta.json
index a65dea1c..f28a74dd 100644
--- a/src/pages/docs/_meta.json
+++ b/src/pages/docs/_meta.json
@@ -19,7 +19,10 @@
"type": "separator"
},
"models": "Models",
- "assistants": "Assistants",
+ "assistants": {
+ "display": "hidden",
+ "title": "Assistants"
+ },
"tools": "Tools",
"threads": "Threads",
"settings": "Settings",
diff --git a/src/pages/docs/built-in/tensorrt-llm.mdx b/src/pages/docs/built-in/tensorrt-llm.mdx
index 1582279f..f360d293 100644
--- a/src/pages/docs/built-in/tensorrt-llm.mdx
+++ b/src/pages/docs/built-in/tensorrt-llm.mdx
@@ -50,10 +50,17 @@ This guide walks you through installing Jan's official [TensorRT-LLM Extension](
### Step 1: Install TensorRT-Extension
-1. Go to **Settings** > **Extensions**.
-2. Select the TensorRT-LLM Extension and click the **Install** button.
+1. Click the **Gear Icon (⚙️)** on the bottom left of your screen.
-![Install Extension](../_assets/install-tensor.gif)
+![Settings](../_assets/settings.png)
+
+2. Select **TensorRT-LLM** under the **Model Provider** section.
+
+![Click Tensor](../_assets/tensor.png)
+
+3. Click **Install** to install the dependencies required to run TensorRT-LLM.
+
+![Install Extension](../_assets/install-tensor.png)
-3. Check that files are correctly downloaded.
+4. Check that the files are correctly downloaded.
@@ -91,7 +98,7 @@ We offer a handful of precompiled models for Ampere and Ada cards that you can i
Please see [here](/docs/models/model-parameters) for more detailed model parameters.
-![Configure Model](../_assets/set-tensor.gif)
+![Model Parameters](../_assets/model-parameters.png)
diff --git a/src/pages/docs/extensions.mdx b/src/pages/docs/extensions.mdx
index 3b651257..75d54965 100644
--- a/src/pages/docs/extensions.mdx
+++ b/src/pages/docs/extensions.mdx
@@ -29,19 +29,24 @@ The default extensions are in the `Settings` > `Extensions`.
## List of Default Extensions
-| Extension Name | Version | Description | Source Code Link |
-| ---------------------------------- | --------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
-| Assistant Extension | `v1.0.0` | This extension enables assistants, including Jan, a default assistant that can call all downloaded models. | [Source](https://github.com/janhq/jan/tree/dev/extensions/assistant-extension) |
-| Conversational Extension | `v1.0.0` | This extension enables conversations and state persistence via your filesystem. | [Source](https://github.com/janhq/jan/tree/dev/extensions/conversational-extension) |
-| Inference Engine Nitro Extension | `v1.0.0` | This extension embeds Nitro, a lightweight (3 MB) inference engine in C++. See https://nitro.jan.ai. | [Source](https://github.com/janhq/jan/tree/dev/extensions/inference-nitro-extension) |
-| Inference Engine OpenAI Extension | `v1.0.0` | This extension enables OpenAI chat completion API calls. | [Source](https://github.com/janhq/jan/tree/dev/extensions/inference-openai-extension) |
-| Inference Engine Triton TRT-LLM Extension | `v1.0.0` | This extension enables Nvidia's TensorRT-LLM as an inference engine option. | [Source](https://github.com/janhq/jan/tree/dev/extensions/inference-triton-trtllm-extension) |
-| Inference Engine Tensor RT Llm Extension | `v0.0.3` | This extension enables Nvidia's TensorRT-LLM for the fastest GPU acceleration. | [Source](https://github.com/janhq/jan/tree/dev/extensions/tensorrt-llm-extension) |
-| Inference Engine MistralAI Extension | `v1.0.0` |This extension enables Mistral chat completion API calls. | [Source](https://github.com/janhq/jan/tree/dev/extensions/inference-mistral-extension) |
-| Inference Engine Groq Extension | `v1.0.0` |This extension enables Groq chat completion API calls. | [Source](https://github.com/janhq/jan/tree/dev/extensions/inference-groq-extension) |
-| HuggingFace Extension | `v1.0.0` |This extension converts HF models to GGUF. | [Source](https://github.com/janhq/jan/tree/dev/extensions/huggingface-extension) |
-| Model Management Extension | `v1.0.30` | Model Management Extension provides model exploration and seamless downloads. | [Source](https://github.com/janhq/jan/tree/dev/extensions/model-extension) |
-| System Monitoring Extension | `v1.0.10` | This extension offers system health and OS-level data. | [Source](https://github.com/janhq/jan/tree/dev/extensions/monitoring-extension) |
+| Extension Name | Version | Description | Source Code Link |
+| ------------------------------------------- | --------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
+| Assistant Extension | `v1.0.1` | This extension enables assistants, including Jan, a default assistant that can call all downloaded models. | [Source](https://github.com/janhq/jan/tree/dev/extensions/assistant-extension) |
+| Conversational Extension | `v1.0.0` | This extension enables conversations and state persistence via your filesystem. | [Source](https://github.com/janhq/jan/tree/dev/extensions/conversational-extension) |
+| Inference Engine Nitro Extension | `v1.0.0` | This extension embeds Nitro, a lightweight (3 MB) inference engine in C++. See https://nitro.jan.ai. | [Source](https://github.com/janhq/jan/tree/dev/extensions/inference-nitro-extension) |
+| Inference Engine OpenAI Extension | `v1.0.2` | This extension enables OpenAI chat completion API calls. | [Source](https://github.com/janhq/jan/tree/dev/extensions/inference-openai-extension) |
+| Inference Engine Triton TRT-LLM Extension | `v1.0.0` | This extension enables Nvidia's TensorRT-LLM as an inference engine option. | [Source](https://github.com/janhq/jan/tree/dev/extensions/inference-triton-trtllm-extension) |
+| Inference Engine TensorRT-LLM Extension      | `v0.0.3`  | This extension enables Nvidia's TensorRT-LLM for the fastest GPU acceleration.                               | [Source](https://github.com/janhq/jan/tree/dev/extensions/tensorrt-llm-extension)                      |
+| Inference Engine MistralAI Extension | `v1.0.1` | This extension enables Mistral chat completion API calls. | [Source](https://github.com/janhq/jan/tree/dev/extensions/inference-mistral-extension) |
+| Inference Engine Groq Extension | `v1.0.1` | This extension enables fast Groq chat completion API calls. | [Source](https://github.com/janhq/jan/tree/dev/extensions/inference-groq-extension) |
+| HuggingFace Extension | `v1.0.0` | This extension converts HF models to GGUF. | [Source](https://github.com/janhq/jan/tree/dev/extensions/huggingface-extension) |
+| Model Management Extension | `v1.0.30` | Model Management Extension provides model exploration and seamless downloads. | [Source](https://github.com/janhq/jan/tree/dev/extensions/model-extension) |
+| System Monitoring Extension | `v1.0.10` | This extension offers system health and OS-level data. | [Source](https://github.com/janhq/jan/tree/dev/extensions/monitoring-extension) |
+| Inference Engine Cortex Extension | `v1.0.10` | This extension embeds cortex.cpp, a lightweight inference engine written in C++. | [Source](https://github.com/janhq/jan/tree/dev/extensions/inference-cortex-extension) |
+| Inference Engine Martian Extension | `v1.0.1` | This extension enables Martian chat completion API calls. | [Source](https://github.com/janhq/jan/tree/dev/extensions/inference-martian-extension) |
+| Inference Engine Anthropic Extension | `v1.0.0` | This extension enables Anthropic chat completion API calls. | [Source](https://github.com/janhq/jan/tree/dev/extensions/inference-anthropic-extension) |
+| Inference Engine Cohere Extension | `v1.0.0` | This extension enables Cohere chat completion API calls. | [Source](https://github.com/janhq/jan/tree/dev/extensions/inference-cohere-extension) |
+
## Configure Extension Default Settings
@@ -81,32 +86,47 @@ To configure extension default settings:
"description": "This extension enables conversations and state persistence via your filesystem",
"url": "extension://@janhq/conversational-extension/dist/index.js"
},
- "@janhq/model-extension": {
+ "@janhq/inference-openai-extension": {
"_active": true,
"listeners": {},
- "origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-model-extension-1.0.30.tgz",
+ "origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-openai-extension-1.0.2.tgz",
"installOptions": {
"version": false,
"fullMetadata": true
},
- "name": "@janhq/model-extension",
- "productName": "Model Management",
- "version": "1.0.30",
+ "name": "@janhq/inference-openai-extension",
+ "productName": "OpenAI Inference Engine",
+ "version": "1.0.2",
"main": "dist/index.js",
- "description": "Model Management Extension provides model exploration and seamless downloads",
- "url": "extension://@janhq/model-extension/dist/index.js"
+ "description": "This extension enables OpenAI chat completion API calls",
+ "url": "extension://@janhq/inference-openai-extension/dist/index.js"
+ },
+ "@janhq/inference-cohere-extension": {
+ "_active": true,
+ "listeners": {},
+ "origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-cohere-extension-1.0.0.tgz",
+ "installOptions": {
+ "version": false,
+ "fullMetadata": true
+ },
+ "name": "@janhq/inference-cohere-extension",
+ "productName": "Cohere Inference Engine",
+ "version": "1.0.0",
+ "main": "dist/index.js",
+ "description": "This extension enables Cohere chat completion API calls",
+ "url": "extension://@janhq/inference-cohere-extension/dist/index.js"
},
"@janhq/inference-mistral-extension": {
- "_active": false,
+ "_active": true,
"listeners": {},
- "origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-mistral-extension-1.0.0.tgz",
+ "origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-mistral-extension-1.0.1.tgz",
"installOptions": {
"version": false,
"fullMetadata": true
},
"name": "@janhq/inference-mistral-extension",
"productName": "MistralAI Inference Engine",
- "version": "1.0.0",
+ "version": "1.0.1",
"main": "dist/index.js",
"description": "This extension enables Mistral chat completion API calls",
"url": "extension://@janhq/inference-mistral-extension/dist/index.js"
@@ -114,32 +134,62 @@ To configure extension default settings:
"@janhq/inference-groq-extension": {
"_active": true,
"listeners": {},
- "origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-groq-extension-1.0.0.tgz",
+ "origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-groq-extension-1.0.1.tgz",
"installOptions": {
"version": false,
"fullMetadata": true
},
"name": "@janhq/inference-groq-extension",
"productName": "Groq Inference Engine",
- "version": "1.0.0",
+ "version": "1.0.1",
"main": "dist/index.js",
"description": "This extension enables fast Groq chat completion API calls",
"url": "extension://@janhq/inference-groq-extension/dist/index.js"
},
- "@janhq/inference-openai-extension": {
+ "@janhq/inference-martian-extension": {
"_active": true,
"listeners": {},
- "origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-openai-extension-1.0.0.tgz",
+ "origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-martian-extension-1.0.1.tgz",
"installOptions": {
"version": false,
"fullMetadata": true
},
- "name": "@janhq/inference-openai-extension",
- "productName": "OpenAI Inference Engine",
+ "name": "@janhq/inference-martian-extension",
+ "productName": "Martian Inference Engine",
+ "version": "1.0.1",
+ "main": "dist/index.js",
+ "description": "This extension enables Martian chat completion API calls",
+ "url": "extension://@janhq/inference-martian-extension/dist/index.js"
+ },
+ "@janhq/inference-openrouter-extension": {
+ "_active": true,
+ "listeners": {},
+ "origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-openrouter-extension-1.0.0.tgz",
+ "installOptions": {
+ "version": false,
+ "fullMetadata": true
+ },
+ "name": "@janhq/inference-openrouter-extension",
+ "productName": "OpenRouter Inference Engine",
"version": "1.0.0",
"main": "dist/index.js",
- "description": "This extension enables OpenAI chat completion API calls",
- "url": "extension://@janhq/inference-openai-extension/dist/index.js"
+ "description": "This extension enables Open Router chat completion API calls",
+ "url": "extension://@janhq/inference-openrouter-extension/dist/index.js"
+ },
+ "@janhq/inference-anthropic-extension": {
+ "_active": true,
+ "listeners": {},
+ "origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-anthropic-extension-1.0.0.tgz",
+ "installOptions": {
+ "version": false,
+ "fullMetadata": true
+ },
+ "name": "@janhq/inference-anthropic-extension",
+ "productName": "Anthropic Inference Engine",
+ "version": "1.0.0",
+ "main": "dist/index.js",
+ "description": "This extension enables Anthropic chat completion API calls",
+ "url": "extension://@janhq/inference-anthropic-extension/dist/index.js"
},
"@janhq/inference-triton-trt-llm-extension": {
"_active": true,
@@ -156,20 +206,20 @@ To configure extension default settings:
"description": "This extension enables Nvidia's TensorRT-LLM as an inference engine option",
"url": "extension://@janhq/inference-triton-trt-llm-extension/dist/index.js"
},
- "@janhq/huggingface-extension": {
+ "@janhq/model-extension": {
"_active": true,
"listeners": {},
- "origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-huggingface-extension-1.0.0.tgz",
+ "origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-model-extension-1.0.30.tgz",
"installOptions": {
"version": false,
"fullMetadata": true
},
- "name": "@janhq/huggingface-extension",
- "productName": "HuggingFace",
- "version": "1.0.0",
+ "name": "@janhq/model-extension",
+ "productName": "Model Management",
+ "version": "1.0.30",
"main": "dist/index.js",
- "description": "Hugging Face extension for converting HF models to GGUF",
- "url": "extension://@janhq/huggingface-extension/dist/index.js"
+ "description": "Model Management Extension provides model exploration and seamless downloads",
+ "url": "extension://@janhq/model-extension/dist/index.js"
},
"@janhq/monitoring-extension": {
"_active": true,
@@ -216,20 +266,20 @@ To configure extension default settings:
"description": "This extension enables Nvidia's TensorRT-LLM for the fastest GPU acceleration. See the [setup guide](https://jan.ai/guides/providers/tensorrt-llm/) for next steps.",
"url": "extension://@janhq/tensorrt-llm-extension/dist/index.js"
},
- "@janhq/inference-nitro-extension": {
+ "@janhq/inference-cortex-extension": {
"_active": true,
"listeners": {},
- "origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-nitro-extension-1.0.0.tgz",
+ "origin": "C:\\Users\\ACER\\AppData\\Local\\Programs\\jan\\resources\\app.asar.unpacked\\pre-install\\janhq-inference-cortex-extension-1.0.10.tgz",
"installOptions": {
"version": false,
"fullMetadata": true
},
- "name": "@janhq/inference-nitro-extension",
- "productName": "Nitro Inference Engine",
- "version": "1.0.0",
+ "name": "@janhq/inference-cortex-extension",
+ "productName": "Cortex Inference Engine",
+ "version": "1.0.10",
"main": "dist/index.js",
- "description": "This extension embeds Nitro, a lightweight (3mb) inference engine written in C++. See https://nitro.jan.ai.\nAdditional dependencies could be installed to run without Cuda Toolkit installation.",
- "url": "extension://@janhq/inference-nitro-extension/dist/index.js"
+ "description": "This extension embeds cortex.cpp, a lightweight inference engine written in C++. See https://nitro.jan.ai.\nAdditional dependencies could be installed to run without Cuda Toolkit installation.",
+ "url": "extension://@janhq/inference-cortex-extension/dist/index.js"
}
}
```
@@ -240,9 +290,9 @@ Jan offers an Extensions settings menu for configuring extensions that have regi
![Settings](./_assets/settings.png)
-3. The registered extension settings can be customized under the **Extensions** section.
+3. Click **Extensions**.
-![Extensions](./_assets/extensions-page.png)
+![Extensions](./_assets/extensions-page2.png)
## System Monitor Extension Feature
The System Monitor extension now offers enhanced customization for app logging. Users can toggle the application logging feature on or off and set a custom interval for clearing the app logs. To configure the app log feature, follow these steps:
@@ -251,17 +301,17 @@ The System Monitor extension now offers enhanced customization for app logging.
![Settings](./_assets/settings.png)
-3. Under the **Extensions** section, select the **System Monitoring** extension.
+3. Under the **Model Provider** section, select the **System Monitoring** extension.
-![System Monitoring extension](./_assets/system-monitor.png)
+![System Monitoring extension](./_assets/system-monitor2.png)
4. Use the **slider** to turn the app logging feature on or off.
-![System Monitoring Enable](./_assets/system-slider.png)
+![System Monitoring Enable](./_assets/system-slider2.png)
5. Specify the log cleaning interval in milliseconds.
-![System Monitoring Interval](./_assets/system-mili.png)
+![System Monitoring Interval](./_assets/system-mili2.png)
- You can clear the app logs manually by clicking the **Clear logs** button in the advanced settings.
diff --git a/src/pages/docs/installing-extension.mdx b/src/pages/docs/installing-extension.mdx
index 8f2fc6a2..b1442d8a 100644
--- a/src/pages/docs/installing-extension.mdx
+++ b/src/pages/docs/installing-extension.mdx
@@ -29,36 +29,36 @@ Here are the steps to install a custom extension:
Jan only accepts the `.tgz` file format for installing a custom extension.
-1. Navigate to **Settings** > **Extensions**.
-2. Click Select under **Manual Installation**.
+1. Click the **Gear Icon (⚙️)** on the bottom left of your screen.
+
+![Settings](./_assets/settings.png)
+
+2. Click the **Extensions** button.
+
+![Extensions](./_assets/extensions-page2.png)
+
+3. Select **Install Extension** in the top right corner.
+
+![Install Extension](./_assets/install-ext.png)
+
-3. Select a `.tgz` extension file.
-4. Restart the Jan application.
-5. Then, the `~/jan/extensions/extensions.json` file will be updated automatically.
+4. Select a `.tgz` extension file.
+5. Restart the Jan application.
+6. The `~/jan/extensions/extensions.json` file will then be updated automatically.
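+
+After the restart, the new entry appears alongside the defaults in `extensions.json`. A minimal sketch of what such an entry can look like, using a hypothetical `@janhq/my-extension` package (the field layout follows the entries shown on the [Extensions](/docs/extensions) page):
+
+```json
+"@janhq/my-extension": {
+  "_active": true,
+  "name": "@janhq/my-extension",
+  "productName": "My Extension",
+  "version": "1.0.0",
+  "main": "dist/index.js",
+  "url": "extension://@janhq/my-extension/dist/index.js"
+}
+```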
-
-![Install Extension](./_assets/install-ext.gif)
-## Disable an Extension
+## Turn Off an Extension
-To disable the extension, follow the steps below:
+To turn off the extension, follow the steps below:
-1. Navigate to the **Settings** > **Advanced Settings**.
-2. On the **Jan Data Folder** click the **folder icon (📁)** to access the data folder.
-3. Navigate to the `~/jan/extensions` folder.
-4. Open the `extensions.json` and change the `_active` value of the TensorRT-LLM to `false`
-5. Restart the app to see that the TensorRT-LLM settings page has been removed.
+1. Click the **Gear Icon (⚙️)** on the bottom left of your screen.
-![Disable Extension](./_assets/disable-tensor.gif)
-
-
-## Uninstall an Extension
-
-To uninstall the extension, follow the steps below:
-
-1. Quit the app.
-2. Navigate to the **Settings** > **Advanced Settings**.
-3. On the **Jan Data Folder** click the **folder icon (📁)** to access the data folder.
-4. Navigate to the `~/jan/extensions/@janhq` folder.
-5. Delete the **tensorrt-llm-extension** folder.
-4. Reopen the app.
+![Settings](./_assets/settings.png)
+
+2. Click the **Extensions** button.
+
+![Extensions](./_assets/extensions-page2.png)
+
+3. Click the slider button to turn off the extension.
+
+![Turn Off Extension](./_assets/turn-off.png)
-![Delete Extension](./_assets/delete-tensor.gif)
\ No newline at end of file
+4. Restart the app to confirm that the extension has been turned off.
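+
+If you prefer the previous manual method, you can still open `~/jan/extensions/extensions.json` from the Jan Data Folder, set the extension's `_active` value to `false`, and restart the app. A minimal sketch of the relevant field, using the Groq extension as an example:
+
+```json
+"@janhq/inference-groq-extension": {
+  "_active": false
+}
+```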
\ No newline at end of file
diff --git a/src/pages/docs/local-api.mdx b/src/pages/docs/local-api.mdx
index e500006c..2a466ac6 100644
--- a/src/pages/docs/local-api.mdx
+++ b/src/pages/docs/local-api.mdx
@@ -105,12 +105,12 @@ To start the local server, follow the steps below:
- Replace `$PORT` with your server Port number.
-![Local API Server](./_assets/set-url.gif)
+![Local API Server](./_assets/local-api4.png)
### Step 3: Start Chatting with the Model
1. Go to the **Threads** tab.
2. Create a new chat.
-3. Select **Remote** in the Model dropdown menu and choose the **Local Model** name.
+3. Select the **Model** tab, then choose the **local test** model under the **OpenAI** section.
4. Chat with the model.
-![Local API Server](./_assets/set-model.gif)
+![Local API Server](./_assets/local-api5.png)
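+
+You can also verify the server from the command line before chatting in the app. A minimal sketch, assuming the default server address `http://localhost:1337` and a placeholder model ID (replace both with the values you configured):
+
+```bash
+curl http://localhost:1337/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{
+    "model": "mistral-ins-7b-q4",
+    "messages": [{"role": "user", "content": "Hello!"}]
+  }'
+```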
\ No newline at end of file
diff --git a/src/pages/docs/local-models/lmstudio.mdx b/src/pages/docs/local-models/lmstudio.mdx
index ab62f149..8624c2d6 100644
--- a/src/pages/docs/local-models/lmstudio.mdx
+++ b/src/pages/docs/local-models/lmstudio.mdx
@@ -41,10 +41,10 @@ To integrate LM Studio with Jan, follow the steps below:
2. Select your desired model.
3. Start the server after configuring the port and options.
4. Navigate back to Jan.
-5. Navigate to the **Settings** > **Extensions**.
-6. In the **OpenAI Inference Engine** section, add the full web address of the LM Studio server.
+5. Navigate to the **Settings** > **Model Provider**.
+6. In the **OpenAI** section, add the full web address of the LM Studio server.
-![Server Setup](../_assets/server-phi.gif)
+![Server Setup](../_assets/LM-Studio-v1.gif)
- Replace `(port)` with your chosen port number. The default is 1234.
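+
+To confirm the LM Studio server is reachable before pointing Jan at it, you can send a quick test request. A minimal sketch, assuming the default port `1234` and a model already loaded in LM Studio:
+
+```bash
+curl http://localhost:1234/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{"messages": [{"role": "user", "content": "Hello!"}]}'
+```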
@@ -58,15 +58,16 @@ To integrate LM Studio with Jan, follow the steps below:
2. we will use the `phi-2` model in this example. Insert the `https://huggingface.co/TheBloke/phi-2-GGUF` link into the search bar.
3. Select and download the model you want to use.
-![Download Model](../_assets/download-phi.gif)
+![Download Model](../_assets/LM-Studio-v2.gif)
### Step 3: Start the Model
1. Proceed to the **Threads**.
-2. Select the `phi-2` model and configure the model parameters.
+2. Click the **Model** tab.
+3. Select the `phi-2` model and configure the model parameters.
-3. Start chatting with the model.
+4. Start chatting with the model.
-![Start Model](../_assets/phi.gif)
+![Start Model](../_assets/LM-Studio-v3.gif)
\ No newline at end of file
diff --git a/src/pages/docs/local-models/ollama.mdx b/src/pages/docs/local-models/ollama.mdx
index 1b6ddea8..73a8fa27 100644
--- a/src/pages/docs/local-models/ollama.mdx
+++ b/src/pages/docs/local-models/ollama.mdx
@@ -38,10 +38,10 @@ To integrate Ollama with Jan, follow the steps below:
### Step 1: Server Setup
According to the [Ollama documentation on OpenAI compatibility](https://github.com/ollama/ollama/blob/main/docs/openai.md), you can connect to the Ollama server using the web address `http://localhost:11434/v1/chat/completions`. To do this, follow the steps below:
-1. Navigate to the **Settings** > **Extensions**.
-2. In the **OpenAI Inference Engine** section, add the full web address of the Ollama server.
+1. Navigate to the **Settings** > **Model Provider**.
+2. In the **OpenAI** section, add the full web address of the Ollama server.
-![Server Setup](../_assets/server-llama2.gif)
+![Server Setup](../_assets/Ollama-1.gif)
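+
+You can verify the Ollama endpoint before connecting Jan to it. A minimal sketch, assuming Ollama's default port `11434` and a pulled `llama2` model:
+
+```bash
+curl http://localhost:11434/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{"model": "llama2", "messages": [{"role": "user", "content": "Hello!"}]}'
+```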
@@ -53,14 +53,15 @@ According to the [Ollama documentation on OpenAI compatibility](https://github.c
1. Navigate to the **Hub**.
2. Download the Ollama model, for example, `Llama 2 Chat 7B Q4`.
-![Download Model](../_assets/download-llama2.gif)
+![Download Model](../_assets/Ollama-2.gif)
### Step 3: Start the Model
1. Navigate to the **Threads**.
-2. Select the `Llama 2 Chat 7B Q4` model and configure the model parameters.
-3. Start chatting with the model.
+2. Click the **Model** tab.
+3. Select the `Llama 2 Chat 7B Q4` model and configure the model parameters.
+4. Start chatting with the model.
-![Start Model](../_assets/llama2.gif)
+![Start Model](../_assets/Ollama-3.gif)
\ No newline at end of file
diff --git a/src/pages/docs/models/manage-models.mdx b/src/pages/docs/models/manage-models.mdx
index 52668ac8..bd8094d6 100644
--- a/src/pages/docs/models/manage-models.mdx
+++ b/src/pages/docs/models/manage-models.mdx
@@ -37,10 +37,13 @@ Jan Hub provides three convenient methods to access machine learning models. Her
The Recommended List is a great starting point if you're looking for popular and pre-configured models that work well and quickly on most computers.
1. Open the Jan app and navigate to the Hub.
+
+![Jan Hub](../_assets/hub.png)
+
2. Select models, clicking the `v` dropdown for more information. Models with the `Recommended` label will likely run faster on your computer.
3. Click **Download** to download the model.
-![Download Model](../_assets/hub.gif)
+![Download Model](../_assets/download-button.png)
#### 2. Download with HuggingFace Model's ID or URL
If you need a specific model from [Hugging Face](https://huggingface.co/models), Jan Hub lets you download it directly using the modelโs ID or URL.
@@ -51,11 +54,17 @@ Only `GGUF` models are supported for this feature.
2. Select the model you want to use.
3. Copy the Model's ID or URL, for example: `MaziyarPanahi/Mixtral-8x22B-Instruct-v0.1-GGUF` or `https://huggingface.co/MaziyarPanahi/Mixtral-8x22B-Instruct-v0.1-GGUF`.
4. Return to the Jan app and click on the Hub tab.
+
+![Jan Hub](../_assets/hub.png)
+
5. Paste the **URL** or the **model ID** you have copied into the search bar.
+
+![Search Bar](../_assets/search-bar.png)
+
6. The app will show all available versions of the model.
7. Click **Download** to download the model.
-![Import Model](../_assets/import-hf.gif)
+![Download Model](../_assets/download-button2.png)
#### 3. Download with Deep Link
You can also use Jan's deep link feature to download a specific model from [Hugging Face](https://huggingface.co/models). The deep link format is: `jan://models/huggingface/`.
@@ -70,11 +79,17 @@ You will need to download such models manually.
2. Select the model you want to use.
3. Copy the Model's ID or URL, for example: `TheBloke/Magicoder-S-DS-6.7B-GGUF`.
4. Enter the deep link URL with your chosen model's ID in your browser. For example: `jan://models/huggingface/TheBloke/Magicoder-S-DS-6.7B-GGUF`
+
+![Paste the URL](../_assets/browser1.png)
+
5. A prompt will appear, click **Open** to open the Jan app.
+
+![Click Open](../_assets/browser2.png)
+
6. The app will show all available versions of the model.
7. Click **Download** to download the model.
-![Import Model](../_assets/deeplink.gif)
+![Download Model](../_assets/download-button3.png)
### Import or Symlink Local Models
@@ -82,12 +97,23 @@ You can also point to existing model binary files on your local filesystem.
This is the easiest and most space-efficient way if you have already used other local AI applications.
1. Navigate to the Hub.
+
+![Jan Hub](../_assets/hub.png)
+
2. Click on `Import Model` at the top.
-3. Select to import using `.GGUF` file or a folder.
-3. Select the model or the folder containing multiple models.
-4. Optionally, check the box to symlink the model files instead of copying them over the Jan Data Folder. This saves disk space.
-![Import Folder](../_assets/import-folder.gif)
+![Import Model](../_assets/import.png)
+
+3. Click the download icon button.
+
+![Download Icon](../_assets/download-icon.png)
+
+4. Choose whether to import a `.GGUF` file or a folder.
+
+![Import Model](../_assets/import2.png)
+
+5. Select the model or the folder containing multiple models.
+6. Optionally, check the box to symlink the model files instead of copying them into the Jan Data Folder. This saves disk space.
Windows users should drag and drop the model file, as **Click to Upload** might not show the model files in Folder Preview.
@@ -97,10 +123,16 @@ Windows users should drag and drop the model file, as **Click to Upload** might
You can also add a specific model that is not available within the **Hub** section by following the steps below:
1. Open the Jan app.
2. Click the **gear icon (⚙️)** on the bottom left of your screen.
+
+![Settings](../_assets/settings.png)
+
3. Under the **Settings screen**, click **Advanced Settings**.
+
+![Settings](../_assets/advance-set.png)
+
4. Open the **Jan Data folder**.
-![Jan Data Folder](../_assets/data-folder.gif)
+![Jan Data Folder](../_assets/data-folder.png)
5. Head to the `~/jan/models/`.
6. Make a new model folder and put a file named `model.json` in it.
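+
+A minimal `model.json` sketch to illustrate the structure. The values below (model ID, source URL, parameters) are placeholders; replace them with your model's details:
+
+```json
+{
+  "id": "my-custom-model",
+  "object": "model",
+  "name": "My Custom Model",
+  "version": "1.0",
+  "format": "gguf",
+  "sources": [
+    {
+      "filename": "my-custom-model.gguf",
+      "url": "https://huggingface.co/username/repo/resolve/main/my-custom-model.gguf"
+    }
+  ],
+  "parameters": { "temperature": 0.7, "max_tokens": 2048 },
+  "settings": { "ctx_len": 4096, "prompt_template": "{prompt}" },
+  "engine": "nitro"
+}
+```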
diff --git a/src/pages/docs/models/model-parameters.mdx b/src/pages/docs/models/model-parameters.mdx
index 42903666..d778721c 100644
--- a/src/pages/docs/models/model-parameters.mdx
+++ b/src/pages/docs/models/model-parameters.mdx
@@ -54,25 +54,15 @@ By default, Jan sets the **Context Length** to the maximum supported by your mod
## Customize the Model Settings
-Adjust model settings for a specific conversation or across all conversations:
+Adjust model settings for a specific conversation:
-### A Specific Conversation
-To customize model settings for a specific conversation only:
-
-1. Create a new thread.
-2. Expand the right panel.
-3. Change settings under the `model` dropdown.
+1. Navigate to a **thread**.
+2. Click the **Model** tab.
-![Specific Conversation](../_assets/specific-model.gif)
-
-
-### All Conversations
-To customize default model settings for all conversations:
-
-1. Open any thread.
-2. Select the three dots next to the `model` dropdown.
-3. Select `Edit global defaults for [model]`.
-4. Edit the default settings directly in the `model.json`.
-5. Save the file and refresh the app.
+![Model Tab](../_assets/model-tab.png)
+3. You can customize the following parameters:
+ - Inference parameters
+ - Model parameters
+ - Engine parameters
-![Customize model settings for all conversations](../_assets/modelparam.gif)
+![Model Parameters](../_assets/model-parameters.png)
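+
+A rough illustration of how the three groups differ (example values only; this is not a file Jan reads, it simply groups the kinds of settings shown in the tab):
+
+```json
+{
+  "inference": { "temperature": 0.7, "max_tokens": 2048 },
+  "model": { "ctx_len": 4096, "prompt_template": "{prompt}" },
+  "engine": { "ngl": 100 }
+}
+```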
diff --git a/src/pages/docs/quickstart.mdx b/src/pages/docs/quickstart.mdx
index ac4d0add..9efc840e 100644
--- a/src/pages/docs/quickstart.mdx
+++ b/src/pages/docs/quickstart.mdx
@@ -34,7 +34,7 @@ You can run Jan either on your desktop using the Jan desktop app or on a server
Once you have installed Jan, you should see the Jan application as shown below without any local model installed:
-![Default State](./_assets/models.gif)
+![Default State](./_assets/default.gif)
@@ -47,6 +47,9 @@ If you have a graphics card, boost model performance by enabling GPU acceleratio
Ensure you have installed your GPU driver. Please see [Desktop](/docs/desktop) for more information on activating the GPU acceleration.
+
+![Turn on GPU acceleration](./_assets/gpu2.gif)
### Step 3: Download a Model
@@ -61,7 +64,7 @@ Ensure you select the appropriate model size by balancing performance, cost, and
4. Click the **Download** button.
-![Download a Model](./_assets/download-model.gif)
+![Download a Model](./_assets/download-model2.gif)
@@ -75,9 +78,8 @@ Ensure you select the appropriate model size by balancing performance, cost, and
-![Configure a Model](./_assets/configure.gif)
+![Parameters](./_assets/inf.gif)
-
### Step 4: Customize the Assistant Instruction
Customize Jan's assistant behavior by specifying queries, commands, or requests in the Assistant Instructions field to get the most responses from your assistant. To customize, follow the steps below:
@@ -86,7 +88,7 @@ Customize Jan's assistant behavior by specifying queries, commands, or requests
3. Provide a specific guideline under the **Instructions** field.
-![Assistant Instruction](./_assets/instructions.gif)
+![Assistant Instruction](./_assets/asst.gif)
### Step 5: Start Thread
@@ -95,20 +97,19 @@ Once you have downloaded a model and customized your assistant instruction, you
-![Chat with a Model](./_assets/model.gif)
+![Chat with a Model](./_assets/chat.gif)
### Step 6: Connect to a Remote API
Jan also offers access to remote models hosted on external servers. You can link up with any Remote AI APIs compatible with OpenAI. Jan comes with numerous extensions that facilitate connections to various remote AI APIs. To explore and connect to Remote APIs, follow these steps:
1. Click the **Gear Icon (⚙️)** on the bottom left of your screen.
-2. Under the **Settings screen**, click the **Extensions**.
-3. Ensure that you have installed the extensions.
-4. Fill in the API **URL** and **Keys** in the OpenAI Inference Engine section.
+2. Click **OpenAI** under the **Model Provider** section.
+3. Fill in the API **URL** and **Key** in their respective fields.
-![Connect Remote API](./_assets/server-openai.gif)
+![Connect Remote API](./_assets/server-openai2.gif)
diff --git a/src/pages/docs/remote-models/anthropic.mdx b/src/pages/docs/remote-models/anthropic.mdx
index 9db97453..3f89bb89 100644
--- a/src/pages/docs/remote-models/anthropic.mdx
+++ b/src/pages/docs/remote-models/anthropic.mdx
@@ -35,34 +35,26 @@ Before proceeding, ensure you have the following:
1. Obtain Anthropic API Keys from your [Anthropic Console](https://console.anthropic.com/).
2. Copy your Anthropic API Key and the endpoint URL you want.
3. Navigate to the **Jan app** > **Settings**.
-4. Select the **Anthropic Inference Engine**.
+4. Select **Anthropic**.
5. Insert the **API Key** and the **endpoint URL** into their respective fields.
-![Server Setup](../_assets/server-anthropic.gif)
+![Server Setup](../_assets/Anthropic-2.gif)
You can also manually edit the JSON file in `~/jan/settings/@janhq/inference-anthropic-extension`.
-### Step 2: Select Model
+### Step 2: Start Chatting with the Model
1. Navigate to the **Hub** section.
-2. Ensure you have downloaded the Anthropic model you want to use.
-
-![Select Model](../_assets/model-anthropic.gif)
-
+2. Select the Anthropic model you want to use.
- The Anthropic Inference Engine is the default extension for the Jan application. All the Anthropic models are automatically installed when you install the Jan application.
+Anthropic is a default extension in the Jan application. All Anthropic models are automatically installed when you install Jan.
-
-### Step 3: Start the Model
-
-1. Navigate to the **Thread** section.
-2. Under the **Model** section, click **Remote**.
-3. Select the Anthropic model you want to use.
-3. Start the conversation with the Anthropic model.
+3. Specify the model's parameters.
+4. Start the conversation with the Anthropic model.
-![Start Model](../_assets/start-anthropic.gif)
+![Start Model](../_assets/Anthropic-1.gif)
diff --git a/src/pages/docs/remote-models/azure.mdx b/src/pages/docs/remote-models/azure.mdx
index 7a4c0d04..a3a6653d 100644
--- a/src/pages/docs/remote-models/azure.mdx
+++ b/src/pages/docs/remote-models/azure.mdx
@@ -29,37 +29,29 @@ This guide provides step-by-step instructions for integrating the Azure OpenAI A
## Integration Steps
### Step 1: Configure OpenAI API Key
-1. Obtain Azure OpenAI API Key from your [Azure OpenAI Platform](https://oai.azure.com/portal).
-2. Copy your Azure OpenAI Key and the endpoint URL you want to use.
+1. Obtain an OpenAI API Key from your [OpenAI Platform](https://platform.openai.com/api-keys) dashboard.
+2. Copy your OpenAI API Key and the endpoint URL you want to use.
3. Navigate to the **Jan app** > **Settings**.
-4. Select the **OpenAI Inference Engine**.
+4. Select **OpenAI**.
-The **OpenAI Inference Engine** fields can be used for any OpenAI-compatible API.
+The **OpenAI** fields can be used for any OpenAI-compatible API.
5. Insert the **API Key** and the **endpoint URL** into their respective fields.
-![Server Setup](../_assets/server-openai.gif)
+![Server Setup](../_assets/OpenAi-1.gif)
You can also manually edit the JSON file in `~/jan/settings/@janhq/inference-openai-extension`.
-### Step 2: Select Model
+### Step 2: Start Chatting with the Model
1. Navigate to the **Hub** section.
-2. Ensure you have downloaded the model compatible with your selected OpenAI-compatible API.
+2. Select the OpenAI model you want to use.
+3. Specify the model's parameters.
+4. Start the conversation with the OpenAI model.
-![Select Model](../_assets/model-openai.gif)
-
-
-### Step 3: Start the Model
-
-1. Navigate to the **Thread** section.
-2. Under the **Model** section, click **Remote**.
-3. Select the model you want to use.
-4. Start the conversation with the model.
-
-![Start Model](../_assets/select-openai.gif)
+![Start Model](../_assets/OpenAi-2.gif)
## Troubleshooting
diff --git a/src/pages/docs/remote-models/cohere.mdx b/src/pages/docs/remote-models/cohere.mdx
index ea0d25b4..e31d3af2 100644
--- a/src/pages/docs/remote-models/cohere.mdx
+++ b/src/pages/docs/remote-models/cohere.mdx
@@ -35,34 +35,26 @@ Before proceeding, ensure you have the following:
1. Obtain Cohere API Keys from your [Cohere Dashboard](https://dashboard.cohere.com/).
2. Copy your Cohere API Key and the endpoint URL you want.
3. Navigate to the **Jan app** > **Settings**.
-4. Select the **Cohere Inference Engine**.
+4. Select **Cohere**.
5. Insert the **API Key** and the **endpoint URL** into their respective fields.
-![Server Setup](../_assets/server-cohere.gif)
+![Server Setup](../_assets/Cohere-1.gif)
You can also manually edit the JSON file in `~/jan/settings/@janhq/inference-cohere-extension`.
-### Step 2: Select Model
+### Step 2: Start Chatting with the Model
1. Navigate to the **Hub** section.
-2. Ensure you have downloaded the Cohere model you want to use.
-
-![Select Model](../_assets/model-cohere.gif)
-
+2. Select the Cohere model you want to use.
- The Cohere Inference Engine is the default extension for the Jan application. All the Cohere models are automatically installed when you install the Jan application.
+Cohere is a default extension in the Jan application. All Cohere models are automatically installed when you install Jan.
-
-### Step 3: Start the Model
-
-1. Navigate to the **Thread** section.
-2. Under the **Model** section, click **Remote**.
-3. Select the Cohere model you want to use.
-3. Start the conversation with the Cohere model.
+3. Specify the model's parameters.
+4. Start the conversation with the Cohere model.
-![Start Model](../_assets/start-cohere.gif)
+![Start Model](../_assets/Cohere-2.gif)
diff --git a/src/pages/docs/remote-models/generic-openai.mdx b/src/pages/docs/remote-models/generic-openai.mdx
index c8b2d433..828159de 100644
--- a/src/pages/docs/remote-models/generic-openai.mdx
+++ b/src/pages/docs/remote-models/generic-openai.mdx
@@ -31,10 +31,10 @@ This guide outlines the process for configuring Jan as a client for both remote
### Step 1: Configure a Client Connection
1. Navigate to the **Jan app** > **Settings**.
-2. Select the **OpenAI Inference Engine**.
+2. Select **OpenAI**.
-The **OpenAI Inference Engine** fields can be used for any OpenAI-compatible API.
+The **OpenAI** fields can be used for any OpenAI-compatible API.
3. Insert the **API Key** and the **endpoint URL** into their respective fields. For example, if you're going to communicate to Jan's API server, you can configure it as follows:
@@ -45,22 +45,16 @@ The **OpenAI Inference Engine** fields can be used for any OpenAI-compatible API
Please note that, currently, the code that supports any OpenAI-compatible endpoint only reads the `~/jan/settings/@janhq/inference-openai-extension/settings.json` file (the OpenAI Inference Engine entry on the extensions page); it will not read any other files in this directory.
-![Server Setup](../_assets/server-openai.gif)
+![Server Setup](../_assets/OpenAi-1.gif)
-### Step 2: Select Model
+### Step 2: Start Chatting with the Model
1. Navigate to the **Hub** section.
-2. Ensure you have downloaded the model compatible with your selected OpenAI-compatible API.
+2. Select the model you want to use.
+3. Specify the model's parameters.
+4. Start the conversation with the model.
-![Select Model](../_assets/model-openai.gif)
-
-### Step 3: Start the Model
-
-1. Restart Jan and go to the **Hub**.
-2. Under the **Model** section, click **Remote**.
-3. Select the model you want to use.
-
-![Start Model](../_assets/select-openai.gif)
+![Start Model](../_assets/OpenAi-2.gif)
If you have questions or want more preconfigured GGUF models, please join our [Discord community](https://discord.gg/Dt7MxDyNNZ) for support, updates, and discussions.
diff --git a/src/pages/docs/remote-models/groq.mdx b/src/pages/docs/remote-models/groq.mdx
index 64313dbd..bbeca68a 100644
--- a/src/pages/docs/remote-models/groq.mdx
+++ b/src/pages/docs/remote-models/groq.mdx
@@ -35,34 +35,27 @@ Before proceeding, ensure you have the following:
1. Obtain Groq API Keys from your [Groq Console](https://console.groq.com/keys) dashboard.
2. Copy your Groq API Key and the endpoint URL you want.
3. Navigate to the **Jan app** > **Settings**.
-4. Select the **Groq Inference Engine**.
+4. Select **Groq**.
5. Insert the **API Key** and the **endpoint URL** into their respective fields.
-![Server Setup](../_assets/server-groq.gif)
+![Server Setup](../_assets/Groq-1.gif)
You can also manually edit the JSON file in `~/jan/settings/@janhq/inference-groq-extension`.
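+
+As with the other extensions, a hedged sketch of what such an edit might look like (key names invented for illustration; keep the fields your file already uses):
+
+```json title="~/jan/settings/@janhq/inference-groq-extension/settings.json"
+{
+  "_note": "illustrative only; field names are assumptions",
+  "api_key": "gsk_...",
+  "endpoint": "https://api.groq.com/openai/v1/chat/completions"
+}
+```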
-### Step 2: Select Model
+### Step 2: Start Chatting with the Model
1. Navigate to the **Hub** section.
-2. Ensure you have downloaded the Groq model you want to use.
-
-![Select Model](../_assets/model-groq.gif)
+2. Select the Groq model you want to use.
-The Groq Inference Engine is the default extension for the Jan application. All the Groq models are automatically installed when you install the Jan application.
+Groq is the default extension for the Jan application. All Groq models are installed automatically with the Jan application.
-
-### Step 3: Start the Model
-
-1. Navigate to the **Thread** section.
-2. Under the **Model** section, click **Remote**.
-3. Select the Groq model you want to use.
-3. Start the conversation with the Groq API.
+3. Specify the model's parameters.
+4. Start the conversation with the Groq model.
-![Start Model](../_assets/start-groq.gif)
+![Start Model](../_assets/Groq-2.gif)
diff --git a/src/pages/docs/remote-models/martian.mdx b/src/pages/docs/remote-models/martian.mdx
index aad9b4cb..1a452fc8 100644
--- a/src/pages/docs/remote-models/martian.mdx
+++ b/src/pages/docs/remote-models/martian.mdx
@@ -35,30 +35,26 @@ Before proceeding, ensure you have the following:
1. Obtain Martian API Keys from your [Martian Dashboard](https://auth.withmartian.com/).
2. Copy your Martian API Key and the endpoint URL you want.
3. Navigate to the **Jan app** > **Settings**.
-4. Select the **Martian Inference Engine**.
+4. Select **Martian**.
5. Insert the **API Key** and the **endpoint URL** into their respective fields.
-![Server Setup](../_assets/server-martian.gif)
+![Server Setup](../_assets/Martian-1.gif)
You can also manually edit the JSON file in `~/jan/settings/@janhq/inference-martian-extension`.
-### Step 2: Select Model
+### Step 2: Start Chatting with the Model
1. Navigate to the **Hub** section.
-2. Ensure you have downloaded the model you want to use.
-
-![Select Model](../_assets/model-martian.gif)
-
-### Step 3: Start the Model
-
-1. Navigate to the **Thread** section.
-2. Under the **Model** section, click **Remote**.
-3. Select the Martian model you want to use.
-3. Start the conversation with the model.
+2. Select the Martian model you want to use.
+
+Martian is the default extension for the Jan application. All Martian models are installed automatically with the Jan application.
+
+3. Specify the model's parameters.
+4. Start the conversation with the Martian model.
-![Start Model](../_assets/start-martian.gif)
+![Start Model](../_assets/Martian-2.gif)
diff --git a/src/pages/docs/remote-models/mistralai.mdx b/src/pages/docs/remote-models/mistralai.mdx
index 55e7ed59..09631650 100644
--- a/src/pages/docs/remote-models/mistralai.mdx
+++ b/src/pages/docs/remote-models/mistralai.mdx
@@ -35,34 +35,28 @@ Before proceeding, ensure you have the following:
1. Obtain the Mistral API Key from your [Mistral](https://console.mistral.ai/user/api-keys/) dashboard.
2. Copy your Mistral API Key and the endpoint URL you want.
3. Navigate to the **Jan app** > **Settings**.
-4. Select the **MistralAI Inference Engine**.
+4. Select **MistralAI**.
5. Insert the **API Key** and the **endpoint URL** into their respective fields.
-![Start Server](../_assets/server-mistral.gif)
+![Start Server](../_assets/Mistral-1.gif)
- You can also manually edit the JSON file in `~/jan/settings/@janhq/inference-mistral-extension`.
- Mistral AI offers various endpoints. Refer to their [endpoint documentation](https://docs.mistral.ai/platform/endpoints/) to select the one that fits your requirements.
-### Step 2: Select Model
+### Step 2: Start Chatting with the Model
1. Navigate to the **Hub** section.
-2. Ensure you have downloaded the Mistral model you want to use.
-![Select Model](../_assets/model-mistral.gif)
+2. Select the Mistral model you want to use.
- The MistralAI Inference Engine is the default extension for the Jan application. All the Mistral models are automatically installed when you install the Jan application.
+ MistralAI is the default extension for the Jan application. All Mistral models are installed automatically with the Jan application.
-
-### Step 3: Start the Model
-
-1. Navigate to the **Thread** section.
-2. Under the **Model** section, click **Remote**.
-3. Select the Mistral model you want to use.
-3. Start the conversation with the Mistral API.
+3. Specify the model's parameters.
+4. Start the conversation with the Mistral model.
-![Start Model](../_assets/start-mistral.gif)
+![Start Model](../_assets/Mistral-2.gif)
diff --git a/src/pages/docs/remote-models/openai.mdx b/src/pages/docs/remote-models/openai.mdx
index 39b93844..dbc54611 100644
--- a/src/pages/docs/remote-models/openai.mdx
+++ b/src/pages/docs/remote-models/openai.mdx
@@ -32,37 +32,29 @@ This guide provides step-by-step instructions for integrating the OpenAI API wit
1. Obtain OpenAI API Key from your [OpenAI Platform](https://platform.openai.com/api-keys) dashboard.
2. Copy your OpenAI Key and the endpoint URL you want to use.
3. Navigate to the **Jan app** > **Settings**.
-4. Select the **OpenAI Inference Engine**.
+4. Select **OpenAI**.
-The **OpenAI Inference Engine** fields can be used for any OpenAI-compatible API.
+The **OpenAI** fields can be used for any OpenAI-compatible API.
5. Insert the **API Key** and the **endpoint URL** into their respective fields.
-![Server Setup](../_assets/server-openai.gif)
+![Server Setup](../_assets/OpenAi-1.gif)
You can also manually edit the JSON file in `~/jan/settings/@janhq/inference-openai-extension`.
-### Step 2: Select Model
+### Step 2: Start Chatting with the Model
1. Navigate to the **Hub** section.
-2. Ensure you have downloaded the OpenAI model you want to use.
-
-![Select Model](../_assets/model-openai.gif)
-
+2. Select the OpenAI model you want to use.
-The OpenAI Inference Engine is the default extension for the Jan application. All the OpenAI models are automatically installed when you install the Jan application.
+OpenAI is the default extension for the Jan application. All OpenAI models are installed automatically with the Jan application.
-
-### Step 3: Start the Model
-
-1. Navigate to the **Thread** section.
-2. Under the **Model** section, click **Remote**.
-3. Select the OpenAI model you want to use.
+3. Specify the model's parameters.
4. Start the conversation with the OpenAI model.
-![Start Model](../_assets/select-openai.gif)
+![Start Model](../_assets/OpenAi-2.gif)
### OpenAI Models
diff --git a/src/pages/docs/remote-models/openrouter.mdx b/src/pages/docs/remote-models/openrouter.mdx
index 10cf3be3..7d7b252c 100644
--- a/src/pages/docs/remote-models/openrouter.mdx
+++ b/src/pages/docs/remote-models/openrouter.mdx
@@ -33,56 +33,19 @@ To connect Jan with OpenRouter for accessing remote Large Language Models (LLMs)
1. Find your API Key in the [OpenRouter API Key](https://openrouter.ai/keys).
-. Copy your OpenAI Key and the endpoint URL you want to use.
+2. Copy your OpenRouter API Key and the endpoint URL you want to use.
3. Navigate to the **Jan app** > **Settings**.
-4. Select the **OpenAI Inference Engine**.
+4. Select **OpenRouter**.
5. Insert the **API Key** and the **endpoint URL** into their respective fields.
-![Server Setup](../_assets/server-openai.gif)
+![Server Setup](../_assets/OpenRouter-1.gif)
-### Step 2: Model Configuration
+### Step 2: Start Chatting with the Model
-1. Go to the directory `~/jan/models`.
-2. Make a new folder called `openrouter-(modelname)`, like `openrouter-dolphin-mixtral-8x7b`.
-3. Inside the folder, create a `model.json` file with the following settings:
-
-- Set the `id` property to the model ID obtained from OpenRouter.
-- Set the `format` property to `api`.
-- Set the `engine` property to `openai`.
-- Ensure the `state` property is set to `ready`.
-
-```json title="~/jan/models/openrouter-dolphin-mixtral-8x7b/model.json"
-{
- "sources": [
- {
- "filename": "openrouter",
- "url": "https://openrouter.ai/"
- }
- ],
- "id": "cognitivecomputations/dolphin-mixtral-8x7b",
- "object": "model",
- "name": "Dolphin 2.6 Mixtral 8x7B",
- "version": "1.0",
- "description": "This is a 16k context fine-tune of Mixtral-8x7b. It excels in coding tasks due to extensive training with coding data and is known for its obedience, although it lacks DPO tuning. The model is uncensored and is stripped of alignment and bias. It requires an external alignment layer for ethical use. Users are cautioned to use this highly compliant model responsibly, as detailed in a blog post about uncensored models at erichartford.com/uncensored-models.",
- "format": "api",
- "settings": {},
- "parameters": {},
- "metadata": {
- "tags": ["General", "Big Context Length"]
- },
- "engine": "openai"
-}
-```
-
-
- For more details regarding the `model.json` settings and parameters fields, please see [here](/docs/models).
-
-
-### Step 3: Start the Model
-
-1. Restart Jan and go to the **Hub**.
-2. Under the **Model** section, click **Remote**.
-3. Select the OpenRouter model you want to use.
+1. Navigate to the **Hub** section.
+2. Select the OpenRouter model you want to use.
+3. Specify the model's parameters.
+4. Start the conversation with the OpenRouter model.
-![Start Model](../_assets/start-router.gif)
+![Start Model](../_assets/OpenRouter-2.gif)
## Troubleshooting
diff --git a/src/pages/docs/settings.mdx b/src/pages/docs/settings.mdx
index b7a793b8..c971a331 100644
--- a/src/pages/docs/settings.mdx
+++ b/src/pages/docs/settings.mdx
@@ -32,18 +32,27 @@ Settings for the Jan application are stored in a `settings.json` file located at
My Settings is where you can customize the color of Jan's desktop app UI. Here's how to personalize its color scheme:
1. Navigate to the main dashboard.
2. Click the **Gear Icon (⚙️)** on the bottom left of your screen.
-3. Under the **Settings screen**, click the **Advanced Settings**.
-4. Click **My Settings**.
-5. Pick the **Base Color Scheme** for your Jan desktop app. Options include:
- - Light
- - Dark
- - System color scheme.
-6. Choose the **Accent Color** for your Jan desktop app. Options include:
- - Blue
- - Purple
- - Green
-
-![Customize UI](./_assets/ui.gif)
+
+![Settings](./_assets/settings.png)
+
+3. Select the **Appearance** section.
+
+![Settings](./_assets/appearance.png)
+
+4. Pick the **Appearance Scheme** for your Jan desktop app. Options include:
+ - Joi Light
+ - Joi Dark
+ - Dark Dimmed
+ - Night Blue
+
+![Settings](./_assets/scheme.png)
+
+5. Choose the **Interface theme** for your Jan desktop app. Options include:
+ - Solid
+ - Transparent
+
+![Settings](./_assets/theme.png)
+
## Access Advanced Settings
Advanced Settings is the GUI version of the `settings.json`. To access Jan's advanced settings, follow the steps below:
@@ -53,16 +62,22 @@ Whenever you make changes in the Jan application's Settings screen, they are aut
1. Navigate to the main dashboard.
2. Click the **Gear Icon (⚙️)** on the bottom left of your screen.
-3. Under the **Settings screen**, click the **Advanced Settings**.
+
+![Settings](./_assets/settings.png)
+
+3. Click **Advanced Settings**.
+
+![Settings](./_assets/advance-settings2.png)
+
4. You can configure the following settings:
| Feature | Description |
| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| **Keyboard Shortcuts** | Keyboard shortcuts speed up your workflow. For a quick overview of useful keyboard shortcuts, refer to the list [below](settings#keyboard-shortcuts). |
| **Experimental Mode** | Enables experimental features that may be unstable. |
| **GPU Acceleration** | Enables boosting your model performance by using your GPU devices for acceleration. |
| **Jan Data Folder** | Location for messages, model configurations, and user data. Changeable to a different location. |
| **HTTPS Proxy & Ignore SSL Certificate** | Use a proxy server for internet connections and ignore SSL certificates for self-signed certificates. Please check out the guide on setting up your HTTPS proxy server [here](settings#https-proxy). |
+| **Ignore SSL Certificates** | Allows self-signed or unverified SSL certificates. |
| **Clear Logs** | Removes all logs from the Jan application. |
| **Reset To Factory Default** | Resets the application to its original state, deleting all data, including model customizations and conversation history. |
@@ -70,12 +85,10 @@ Whenever you make changes in the Jan application's Settings screen, they are aut
To try out new features that are still in the testing phase, follow the steps below:
-1. Navigate to the main dashboard.
-2. Click the **gear icon (⚙️)** on the bottom left of your screen.
-3. Under the **Settings screen**, click the **Advanced Settings**.
-4. On the **Experimental Mode**, click the slider to enable.
+1. Navigate to the **Advanced Settings**.
+2. On the **Experimental Mode**, click the slider to enable.
-![Experimental](./_assets/experimental.gif)
+![Experimental](./_assets/exp-mode.png)
## Enable the GPU Acceleration
@@ -87,23 +100,23 @@ To enhance your model performance, follow the steps below:
assistance.
-1. Navigate to the main dashboard.
-2. Click the **gear icon (⚙️)** on the bottom left of your screen.
-3. Under the **Settings screen**, click the **Advanced Settings**.
-4. On the **GPU Acceleration**, click the slider to enable.
+1. Navigate to the **Advanced Settings**.
+2. On the **GPU Acceleration**, click the slider to enable.
-![Enable GPU](./_assets/gpu.gif)
+![Enable GPU](./_assets/gpu-accel.png)
## Access the Jan Data Folder
To access the folder where messages, model configurations, and user data are stored, follow the steps below:
-1. Navigate to the main dashboard.
-2. Click the **gear icon (⚙️)** on the bottom left of your screen.
-3. Under the **Settings screen**, click the **Advanced Settings**.
-4. On the **Jan Data Folder** click the **folder icon (📂)** to access the data or the **pencil icon (✏️)** to change the folder where you keep your data.
+1. Navigate to the **Advanced Settings**.
+
+![Settings](./_assets/advance-set.png)
+
+2. On the **Jan Data Folder**, click the **folder icon (📂)** to access the data or the **pencil icon (✏️)** to change the folder where you keep your data.
+
+![Jan Data Folder](./_assets/data-folder.png)
-![Jan Data Folder](./_assets/data-folder.gif)
- Uninstalling Jan will delete the default Jan Data Folder.
- If you have moved the Jan Data Folder from its original location, this relocated folder will remain intact while the default folder is deleted upon uninstallation.
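+
+For orientation, a rough sketch of what the Jan Data Folder typically holds (illustrative only; the exact layout can vary between versions):
+
+```
+~/jan
+├── models/     # downloaded models and their model.json configurations
+├── threads/    # thread history and per-thread settings
+├── settings/   # extension settings, e.g. @janhq/inference-openai-extension
+└── logs/       # application logs (removed by Clear Logs)
+```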
@@ -222,7 +235,7 @@ Once you set up your HTTPS proxy server, you can configure it for Jan.
2. On the **HTTPS Proxy**, click the slider to enable.
3. Input your domain in the blank field.
-![HTTPS Proxy](./_assets/https.gif)
+![HTTPS Proxy](./_assets/http.png)
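+
+A proxy address typically follows the standard URL form shown below (placeholders, not real values; the credentials part applies only if your proxy requires authentication):
+
+```
+http://<username>:<password>@<proxy-host>:<port>
+```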
@@ -230,24 +243,11 @@ Once you set up your HTTPS proxy server, you can configure it for Jan.
To allow self-signed or unverified certificates, follow the steps below:
-1. Navigate to the main dashboard.
-2. Click the **gear icon (⚙️)** on the bottom left of your screen.
-3. Under the **Settings screen**, click the **Advanced Settings**.
-4. On the **Ignore SSL Certificates**, click the slider to enable.
+1. Navigate to the **Advanced Settings**.
+2. On the **Ignore SSL Certificates**, click the slider to enable.
-![Ignore SSL](./_assets/ssl.gif)
-
-## Enable Quick Ask
+![Ignore SSL](./_assets/ssl.png)
-To enable the Jan quick ask feature, follow the steps below:
-
-
-1. Navigate to the main dashboard.
-2. Click the **gear icon (⚙️)** on the bottom left of your screen.
-3. Under the **Settings screen**, click the **Advanced Settings**.
-4. On the **Jan Quick Ask**, click the slider to enable.
-
-![Jan Quick Ask](./_assets/quick-ask.gif)
## Clear Logs
@@ -257,12 +257,10 @@ To clear all logs on your Jan app, follow the steps below:
This feature clears all the data in your **Jan Data Folder**.
-1. Navigate to the main dashboard.
-2. Click the **gear icon (⚙️)** on the bottom left of your screen.
-3. Under the **Settings screen**, click the **Advanced Settings**.
-4. On the **Clear Logs** click the the **Clear** button.
+1. Navigate to the **Advanced Settings**.
+2. On the **Clear Logs**, click the **Clear** button.
-![Clear Logs](./_assets/clear.gif)
+![Clear Logs](./_assets/clear-logs.png)
## Reset To Factory Default
@@ -273,9 +271,7 @@ To reset the Jan app to its original state, follow the steps below:
-1. Navigate to the main dashboard.
-2. Click the **gear icon (⚙️)** on the bottom left of your screen.
-3. Under the **Settings screen**, click the **Advanced Settings**.
-4. On the **Reset To Factory Default** click the the **Reset** button.
+1. Navigate to the **Advanced Settings**.
+2. On the **Reset To Factory Default**, click the **Reset** button.
-![Reset](./_assets/reset.gif)
+![Reset](./_assets/reset-jan.png)
diff --git a/src/pages/docs/shortcuts.mdx b/src/pages/docs/shortcuts.mdx
index 432ce071..9f8ae12f 100644
--- a/src/pages/docs/shortcuts.mdx
+++ b/src/pages/docs/shortcuts.mdx
@@ -23,6 +23,16 @@ keywords:
import { Tabs, Steps, Callout } from 'nextra/components'
## Keyboard Shortcuts
+To find the list of all available shortcuts in the Jan app, follow the steps below:
+1. Navigate to the main dashboard.
2. Click the **Gear Icon (⚙️)** on the bottom left of your screen.
+
+![Settings](./_assets/settings.png)
+
+3. Click **Hotkey & Shortcut**.
+
+![Keyboard Shortcut](./_assets/shortcut.png)
+
Here are some of the keyboard shortcuts that you can use in Jan.
@@ -30,45 +40,40 @@ Here are some of the keyboard shortcuts that you can use in Jan.
| Combination | Description |
| --------------- | -------------------------------------------------- |
- | `⌘ E` | Show list your models |
- | `⌘ K` | Show list navigation pages |
- | `⌘ B` | Toggle collapsible left panel |
- | `⌘ ,` | Navigate to setting page |
- | `Enter` | Send a message |
- | `Shift + Enter` | Insert new line in input box |
- | `Arrow Up` | Navigate to the previous option (within the search dialog) |
- | `Arrow Down` | Navigate to the next option (within the search dialog) |
 + | `⌘ N` | Create a new thread. |
 + | `⌘ B` | Toggle collapsible left panel. |
 + | `⌘ ,` | Navigate to the setting page. |
+ | `Enter` | Send a message. |
+ | `Shift + Enter` | Insert new line in input box. |
+ | `Arrow Up` | Navigate to the previous option (within the search dialog). |
+ | `Arrow Down` | Navigate to the next option (within the search dialog). |
| Combination | Description |
| --------------- | ---------------------------------------------------------- |
- | `Ctrl E` | Show list your models |
- | `Ctrl K` | Show list navigation pages |
- | `Ctrl B` | Toggle collapsible left panel |
- | `Ctrl ,` | Navigate to setting page |
- | `Enter` | Send a message |
- | `Shift + Enter` | Insert new line in input box |
- | `Arrow Up` | Navigate to the previous option (within the search dialog) |
- | `Arrow Down` | Navigate to the next option (within the search dialog) |
+ | `Ctrl N` | Create a new thread. |
+ | `Ctrl B` | Toggle collapsible left panel. |
+ | `Ctrl ,` | Navigate to the setting page. |
+ | `Enter` | Send a message. |
+ | `Shift + Enter` | Insert new line in input box. |
+ | `Arrow Up` | Navigate to the previous option (within the search dialog). |
+ | `Arrow Down` | Navigate to the next option (within the search dialog). |
| Combination | Description |
| --------------- | ---------------------------------------------------------- |
- | `Ctrl E` | Show list your models |
- | `Ctrl K` | Show list navigation pages |
- | `Ctrl B` | Toggle collapsible left panel |
- | `Ctrl ,` | Navigate to setting page |
- | `Enter` | Send a message |
- | `Shift + Enter` | Insert new line in input box |
- | `Arrow Up` | Navigate to the previous option (within the search dialog) |
- | `Arrow Down` | Navigate to the next option (within the search dialog) |
+ | `Ctrl N` | Create a new thread. |
+ | `Ctrl B` | Toggle collapsible left panel. |
+ | `Ctrl ,` | Navigate to the setting page. |
+ | `Enter` | Send a message. |
+ | `Shift + Enter` | Insert new line in input box. |
+ | `Arrow Up` | Navigate to the previous option (within the search dialog). |
+ | `Arrow Down` | Navigate to the next option (within the search dialog). |
-
-The keyboard shortcuts are customizable.
diff --git a/src/pages/docs/threads.mdx b/src/pages/docs/threads.mdx
index 712cbf3c..9fea52d6 100644
--- a/src/pages/docs/threads.mdx
+++ b/src/pages/docs/threads.mdx
@@ -25,7 +25,7 @@ import { Callout } from 'nextra/components'
Jan provides a straightforward and private solution for managing your threads with AI on your device. As you interact with AI using Jan, you'll accumulate a history of threads.
Jan offers easy tools to organize, delete, or review your past threads with AI. This guide will show you how to keep your threads private and well-organized.
-### View Thread History
+## View Thread History
To view your thread history, follow the steps below:
@@ -35,22 +35,16 @@ To view your thread history, follow the steps below:
![History](./_assets/history.png)
-### Manage the Threads via Folder
+## Change the Thread's Title
+To change a thread's title, follow the steps below:
-To manage your thread history and configurations, follow the steps below:
-
-1. Navigate to the Thread you want to manage via the list of threads on the left side of the dashboard.
-2. Click on the **three dots (⋮)** in the Thread section.
-
-![Thread Settings](./_assets/thread-settings.png)
-
-3. There are two available options to select:
- - **Show in File Explorer**: Opens the thread history and configurations folder.
- - **Edit Threads Settings**: Opens the thread.json file in your default code editor.
+1. Navigate to the Thread that you want to edit.
+2. Hover over the thread and click the **three dots (⋮)** in the Thread section.
+3. Select the **Edit Title** button.
-![Thread Settings](./_assets/thread-settings2.png)
+![Edit Title](./_assets/title.png)
-### Clean Threads History
+## Clean Threads History
To clean all the messages from a thread, follow the steps below:
diff --git a/src/pages/docs/tools/retrieval.mdx b/src/pages/docs/tools/retrieval.mdx
index 94643c34..c0276d8e 100644
--- a/src/pages/docs/tools/retrieval.mdx
+++ b/src/pages/docs/tools/retrieval.mdx
@@ -32,8 +32,14 @@ To access this feature, please enable Experimental mode in the [Advanced Setting
To chat with PDFs using RAG in Jan, follow these steps:
1. Create a **new thread**.
-2. Click the **Tools** dropdown menu.
-3. Ensure that **Retrieval** is toggled on.
+2. Click the **Tools** tab.
+
+![Retrieval](../_assets/tools.png)
+
+3. Enable **Retrieval**.
+
+![Retrieval](../_assets/retrieval1.png)
+
4. Adjust the **Retrieval** settings as needed. These settings include the following:
| Feature | Description |
@@ -52,4 +58,4 @@ To upload an image or GIF, ensure that you are using a multimodal model. If not,
6. Click on the 📎 icon in the chat input field.
7. Select **Document** to upload a document file.
-![Retrieval](../_assets/retrieval.gif)
+![Retrieval](../_assets/retrieval2.png)