From 25f404bdf8152f97e16cfe0fed2b81ffed01d1cb Mon Sep 17 00:00:00 2001
From: Isotr0py <2037008807@qq.com>
Date: Fri, 20 Sep 2024 03:15:55 +0800
Subject: [PATCH] [Doc] Add documentation for GGUF quantization (#8618)

Signed-off-by: Alvant <alvasian@yandex.ru>
---
 docs/source/index.rst             |  1 +
 docs/source/quantization/gguf.rst | 73 +++++++++++++++++++++++++++++++
 2 files changed, 74 insertions(+)
 create mode 100644 docs/source/quantization/gguf.rst

diff --git a/docs/source/index.rst b/docs/source/index.rst
index 4b817c4ba9498..79f723eace762 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -107,6 +107,7 @@ Documentation
    quantization/supported_hardware
    quantization/auto_awq
    quantization/bnb
+   quantization/gguf
    quantization/int8
    quantization/fp8
    quantization/fp8_e5m2_kvcache
diff --git a/docs/source/quantization/gguf.rst b/docs/source/quantization/gguf.rst
new file mode 100644
index 0000000000000..9f00dc5563909
--- /dev/null
+++ b/docs/source/quantization/gguf.rst
@@ -0,0 +1,73 @@
+.. _gguf:
+
+GGUF
+==================
+
+.. warning::
+
+   Please note that GGUF support in vLLM is highly experimental and under-optimized at the moment; it might be incompatible with other features. Currently, you can use GGUF as a way to reduce the memory footprint. If you encounter any issues, please report them to the vLLM team.
+
+.. warning::
+
+   Currently, vLLM only supports loading single-file GGUF models. If you have a multi-file GGUF model, you can use the `gguf-split <https://github.com/ggerganov/llama.cpp/pull/6135>`_ tool to merge the files into a single-file model (see the sketch below).
+
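+The shard file names in the following sketch are placeholders, and the exact ``gguf-split`` options may vary between llama.cpp versions, so check ``gguf-split --help`` before running it:
+
+.. code-block:: console
+
+   $ # Hypothetical shard names; adjust them to match your checkpoint.
+   $ ./gguf-split --merge ./tinyllama-00001-of-00003.gguf ./tinyllama-merged.gguf
+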
+To run a GGUF model with vLLM, you can download and use a local GGUF file from `TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF <https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF>`_ with the following commands:
+
+.. code-block:: console
+
+   $ wget https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf
+   $ # We recommend using the tokenizer from the base model to avoid a lengthy and potentially buggy tokenizer conversion.
+   $ vllm serve ./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf --tokenizer TinyLlama/TinyLlama-1.1B-Chat-v1.0
+
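+Once the server is running, you can query it through the OpenAI-compatible API. The request below is a sketch that assumes the default port 8000 and that the served model name defaults to the model path passed to ``vllm serve``:
+
+.. code-block:: console
+
+   $ curl http://localhost:8000/v1/chat/completions \
+       -H "Content-Type: application/json" \
+       -d '{"model": "./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf", "messages": [{"role": "user", "content": "Hello!"}]}'
+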
+You can also add ``--tensor-parallel-size 2`` to enable tensor-parallel inference with 2 GPUs:
+
+.. code-block:: console
+
+   $ # We recommend using the tokenizer from the base model to avoid a lengthy and potentially buggy tokenizer conversion.
+   $ vllm serve ./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf --tokenizer TinyLlama/TinyLlama-1.1B-Chat-v1.0 --tensor-parallel-size 2
+
+.. warning::
+
+   We recommend using the tokenizer from the base model instead of the GGUF model, because the tokenizer conversion from GGUF is time-consuming and unstable, especially for models with a large vocabulary.
+
+You can also use the GGUF model directly through the ``LLM`` entrypoint:
+
+.. code-block:: python
+
+   from vllm import LLM, SamplingParams
+
+   # In this script, we demonstrate how to pass input to the chat method:
+   conversation = [
+      {
+         "role": "system",
+         "content": "You are a helpful assistant"
+      },
+      {
+         "role": "user",
+         "content": "Hello"
+      },
+      {
+         "role": "assistant",
+         "content": "Hello! How can I assist you today?"
+      },
+      {
+         "role": "user",
+         "content": "Write an essay about the importance of higher education.",
+      },
+   ]
+
+   # Create a sampling params object.
+   sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
+
+   # Create an LLM.
+   llm = LLM(model="./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
+             tokenizer="TinyLlama/TinyLlama-1.1B-Chat-v1.0")
+   # Generate a response for the conversation. The output is a list of RequestOutput
+   # objects that contain the prompt, generated text, and other information.
+   outputs = llm.chat(conversation, sampling_params)
+
+   # Print the outputs.
+   for output in outputs:
+      prompt = output.prompt
+      generated_text = output.outputs[0].text
+      print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")