diff --git a/configs/dbgpt-graphrag.toml b/configs/dbgpt-graphrag.toml
index f20e893c6..ce4980bce 100644
--- a/configs/dbgpt-graphrag.toml
+++ b/configs/dbgpt-graphrag.toml
@@ -53,5 +53,3 @@ name = "${env:EMBEDDING_MODEL_NAME:-text-embedding-3-small}"
provider = "${env:EMBEDDING_MODEL_PROVIDER:-proxy/openai}"
api_url = "${env:EMBEDDING_MODEL_API_URL:-https://api.openai.com/v1/embeddings}"
api_key = "${env:OPENAI_API_KEY}"
-
-
diff --git a/configs/dbgpt-local-glm.toml b/configs/dbgpt-local-glm.toml
index d130aa8aa..b4cd15284 100644
--- a/configs/dbgpt-local-glm.toml
+++ b/configs/dbgpt-local-glm.toml
@@ -26,6 +26,7 @@ provider = "hf"
# If not provided, the model will be downloaded from the Hugging Face model hub
# uncomment the following line to specify the model path in the local file system
# path = "the-model-path-in-the-local-file-system"
+path = "models/THUDM/glm-4-9b-chat-hf"
[[models.embeddings]]
name = "BAAI/bge-large-zh-v1.5"
@@ -33,4 +34,4 @@ provider = "hf"
# If not provided, the model will be downloaded from the Hugging Face model hub
# uncomment the following line to specify the model path in the local file system
# path = "the-model-path-in-the-local-file-system"
-
+path = "models/BAAI/glm-4-9b-chat-hf"
diff --git a/configs/dbgpt-local-llama-cpp-server.toml b/configs/dbgpt-local-llama-cpp-server.toml
index 999a94c25..8467c3538 100644
--- a/configs/dbgpt-local-llama-cpp-server.toml
+++ b/configs/dbgpt-local-llama-cpp-server.toml
@@ -21,20 +21,13 @@ persist_path = "pilot/data"
# Model Configurations
[models]
[[models.llms]]
-name = "Qwen2.5-Coder-0.5B-Instruct"
-# Please make sure install DB-GPT with '--extra llama_cpp_server' extra
+name = "DeepSeek-R1-Distill-Qwen-1.5B"
provider = "llama.cpp.server"
# If not provided, the model will be downloaded from the Hugging Face model hub
# uncomment the following line to specify the model path in the local file system
-# path = "the-model-path-in-the-local-file-system"
-path = "/data/models/qwen2.5-coder-0.5b-instruct-q4_k_m.gguf"
-
-[[models.llms]]
-name = "DeepSeek-R1-Distill-Qwen-1.5B"
-provider = "llama.cpp.server"
# https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-1.5B-GGUF
-path = "/data/models/DeepSeek-R1-Distill-Qwen-1.5B-Q4_K_M.gguf"
-
+# path = "the-model-path-in-the-local-file-system"
+path = "models/DeepSeek-R1-Distill-Qwen-1.5B-Q4_K_M.gguf"
[[models.embeddings]]
name = "BAAI/bge-large-zh-v1.5"
@@ -42,5 +35,4 @@ provider = "hf"
# If not provided, the model will be downloaded from the Hugging Face model hub
# uncomment the following line to specify the model path in the local file system
# path = "the-model-path-in-the-local-file-system"
-path = "/data/models/bge-large-zh-v1.5"
-
+path = "models/BAAI/bge-large-zh-v1.5"
diff --git a/configs/dbgpt-local-llama-cpp.toml b/configs/dbgpt-local-llama-cpp.toml
index 0f6b88071..ac59a47f0 100644
--- a/configs/dbgpt-local-llama-cpp.toml
+++ b/configs/dbgpt-local-llama-cpp.toml
@@ -21,19 +21,13 @@ persist_path = "pilot/data"
# Model Configurations
[models]
[[models.llms]]
-name = "Qwen2.5-Coder-0.5B-Instruct"
-# Please make sure install DB-GPT with '--extra llama_cpp' extra
+name = "DeepSeek-R1-Distill-Qwen-1.5B"
provider = "llama.cpp"
# If not provided, the model will be downloaded from the Hugging Face model hub
# uncomment the following line to specify the model path in the local file system
-# path = "the-model-path-in-the-local-file-system"
-path = "/data/models/qwen2.5-coder-0.5b-instruct-q4_k_m.gguf"
-
-[[models.llms]]
-name = "DeepSeek-R1-Distill-Qwen-1.5B"
-provider = "llama.cpp"
# https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-1.5B-GGUF
-path = "/data/models/DeepSeek-R1-Distill-Qwen-1.5B-Q4_K_M.gguf"
+# path = "the-model-path-in-the-local-file-system"
+path = "models/DeepSeek-R1-Distill-Qwen-1.5B-Q4_K_M.gguf"
[[models.embeddings]]
@@ -42,5 +36,5 @@ provider = "hf"
# If not provided, the model will be downloaded from the Hugging Face model hub
# uncomment the following line to specify the model path in the local file system
# path = "the-model-path-in-the-local-file-system"
-path = "/data/models/bge-large-zh-v1.5"
+path = "models/BAAI/bge-large-zh-v1.5"
diff --git a/configs/dbgpt-local-qwen.toml b/configs/dbgpt-local-qwen.toml
index 86b0698e0..7701b8826 100644
--- a/configs/dbgpt-local-qwen.toml
+++ b/configs/dbgpt-local-qwen.toml
@@ -26,7 +26,7 @@ provider = "hf"
# If not provided, the model will be downloaded from the Hugging Face model hub
# uncomment the following line to specify the model path in the local file system
# path = "the-model-path-in-the-local-file-system"
-path = "/data/models/Qwen2.5-Coder-0.5B-Instruct"
+path = "models/Qwen2.5-Coder-0.5B-Instruct"
[[models.embeddings]]
name = "BAAI/bge-large-zh-v1.5"
@@ -34,5 +34,5 @@ provider = "hf"
# If not provided, the model will be downloaded from the Hugging Face model hub
# uncomment the following line to specify the model path in the local file system
# path = "the-model-path-in-the-local-file-system"
-path = "/data/models/bge-large-zh-v1.5"
+path = "models/BAAI/bge-large-zh-v1.5"
diff --git a/configs/dbgpt-local-vllm.toml b/configs/dbgpt-local-vllm.toml
index eb0442897..e58712819 100644
--- a/configs/dbgpt-local-vllm.toml
+++ b/configs/dbgpt-local-vllm.toml
@@ -21,21 +21,13 @@ persist_path = "pilot/data"
# Model Configurations
[models]
[[models.llms]]
-name = "Qwen2.5-Coder-0.5B-Instruct"
+name = "DeepSeek-R1-Distill-Qwen-1.5B"
provider = "vllm"
# If not provided, the model will be downloaded from the Hugging Face model hub
# uncomment the following line to specify the model path in the local file system
# path = "the-model-path-in-the-local-file-system"
-path = "/data/models/Qwen2.5-Coder-0.5B-Instruct"
-# dtype="float32"
-
-# [[models.llms]]
-# name = "DeepSeek-R1-Distill-Qwen-1.5B"
-# provider = "vllm"
-# # If not provided, the model will be downloaded from the Hugging Face model hub
-# # uncomment the following line to specify the model path in the local file system
-# # path = "the-model-path-in-the-local-file-system"
-# path = "/data/models/DeepSeek-R1-Distill-Qwen-1.5B"
+path = "models/DeepSeek-R1-Distill-Qwen-1.5B"
+# dtype = "float32"
[[models.embeddings]]
name = "BAAI/bge-large-zh-v1.5"
diff --git a/configs/dbgpt-proxy-deepseek.toml b/configs/dbgpt-proxy-deepseek.toml
index 4eb2fdfed..1ca4ba539 100644
--- a/configs/dbgpt-proxy-deepseek.toml
+++ b/configs/dbgpt-proxy-deepseek.toml
@@ -24,13 +24,9 @@ persist_path = "pilot/data"
[models]
[[models.llms]]
name = "deepseek-reasoner"
+# name = "deepseek-chat"
provider = "proxy/deepseek"
-api_key = "${env:DEEPSEEK_API_KEY}"
-
-[[models.llms]]
-name = "deepseek-chat"
-provider = "proxy/deepseek"
-api_key = "${env:DEEPSEEK_API_KEY}"
+api_key = "your_deepseek_api_key"
[[models.embeddings]]
name = "BAAI/bge-large-zh-v1.5"
@@ -38,4 +34,4 @@ provider = "hf"
# If not provided, the model will be downloaded from the Hugging Face model hub
# uncomment the following line to specify the model path in the local file system
# path = "the-model-path-in-the-local-file-system"
-path = "/data/models/bge-large-zh-v1.5"
+path = "models/bge-large-zh-v1.5"
diff --git a/docs/README.md b/docs/README.md
index b36db76ba..dac5995d3 100755
--- a/docs/README.md
+++ b/docs/README.md
@@ -7,7 +7,7 @@
- Install docusaurus dependencies, generate node_modules folder.
```
-sudo yarn install
+yarn install
```
### launch
diff --git a/docs/docs/quickstart.md b/docs/docs/quickstart.md
index 110b329e3..7ab4b0863 100644
--- a/docs/docs/quickstart.md
+++ b/docs/docs/quickstart.md
@@ -74,6 +74,19 @@ uv --version
```
## Deploy DB-GPT
+:::tip
+If you are in the China region, you can append `--index-url=https://pypi.tuna.tsinghua.edu.cn/simple` to the end of the command, like this:
+```bash
+uv sync --all-packages \
+--extra "base" \
+--extra "proxy_openai" \
+--extra "rag" \
+--extra "storage_chromadb" \
+--extra "dbgpts" \
+--index-url=https://pypi.tuna.tsinghua.edu.cn/simple
+```
+This tutorial assumes that you can reach the dependency download sources over the network.
+:::
### Install Dependencies
@@ -83,13 +96,15 @@ uv --version
{label: 'OpenAI (proxy)', value: 'openai'},
{label: 'DeepSeek (proxy)', value: 'deepseek'},
{label: 'GLM4 (local)', value: 'glm-4'},
+ {label: 'VLLM (local)', value: 'vllm'},
+ {label: 'LLAMA_CPP (local)', value: 'llama_cpp'},
]}>
```bash
# Use uv to install dependencies needed for OpenAI proxy
-uv sync --all-packages --frozen \
+uv sync --all-packages \
--extra "base" \
--extra "proxy_openai" \
--extra "rag" \
@@ -129,7 +144,7 @@ uv run python packages/dbgpt-app/src/dbgpt_app/dbgpt_server.py --config configs/
```bash
# Use uv to install dependencies needed for OpenAI proxy
-uv sync --all-packages --frozen \
+uv sync --all-packages \
--extra "base" \
--extra "proxy_openai" \
--extra "rag" \
@@ -141,8 +156,17 @@ uv sync --all-packages --frozen \
To run DB-GPT with DeepSeek proxy, you must provide the DeepSeek API key in the `configs/dbgpt-proxy-deepseek.toml`.
-And you can specify your embedding model in the `configs/dbgpt-proxy-deepseek.toml` configuration file, the default embedding model is `BAAI/bge-large-zh-v1.5`. If you want to use other embedding models, you can modify the `configs/dbgpt-proxy-deepseek.toml` configuration file and specify the `name` and `provider` of the embedding model in the `[[models.embeddings]]` section. The provider can be `hf`.
-
+You can also specify your embedding model in the `configs/dbgpt-proxy-deepseek.toml` configuration file; the default embedding model is `BAAI/bge-large-zh-v1.5`. If you want to use another embedding model, modify `configs/dbgpt-proxy-deepseek.toml` and specify the `name` and `provider` of the embedding model in the `[[models.embeddings]]` section. The provider can be `hf`. Finally, append `--extra "hf"` to the end of the dependency installation command. Here's the updated command:
+```bash
+uv sync --all-packages \
+--extra "base" \
+--extra "proxy_openai" \
+--extra "rag" \
+--extra "storage_chromadb" \
+--extra "dbgpts" \
+--extra "hf"
+```
+**Model Configurations**:
```toml
# Model Configurations
[models]
@@ -178,7 +202,7 @@ uv run python packages/dbgpt-app/src/dbgpt_app/dbgpt_server.py --config configs/
```bash
# Use uv to install dependencies needed for GLM4
# Install core dependencies and select desired extensions
-uv sync --all-packages --frozen \
+uv sync --all-packages \
--extra "base" \
--extra "hf" \
--extra "rag" \
@@ -214,6 +238,94 @@ Then run the following command to start the webserver:
```bash
uv run dbgpt start webserver --config configs/dbgpt-local-glm.toml
+```
+
+
+
+
+```bash
+# Use uv to install dependencies needed for vllm
+# Install core dependencies and select desired extensions
+uv sync --all-packages \
+--extra "base" \
+--extra "vllm" \
+--extra "rag" \
+--extra "storage_chromadb" \
+--extra "quant_bnb" \
+--extra "dbgpts"
+```
+
+### Run Webserver
+
+To run DB-GPT with a local model, you can modify the `configs/dbgpt-local-vllm.toml` configuration file to specify the model path and other parameters.
+
+```toml
+# Model Configurations
+[models]
+[[models.llms]]
+name = "THUDM/glm-4-9b-chat-hf"
+provider = "vllm"
+# If not provided, the model will be downloaded from the Hugging Face model hub
+# uncomment the following line to specify the model path in the local file system
+# path = "the-model-path-in-the-local-file-system"
+
+[[models.embeddings]]
+name = "BAAI/bge-large-zh-v1.5"
+provider = "hf"
+# If not provided, the model will be downloaded from the Hugging Face model hub
+# uncomment the following line to specify the model path in the local file system
+# path = "the-model-path-in-the-local-file-system"
+```
+In the above configuration file, `[[models.llms]]` specifies the LLM model, and `[[models.embeddings]]` specifies the embedding model. If you do not provide the `path` parameter, the model will be downloaded from the Hugging Face model hub according to the `name` parameter.
+
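+For example, if you have already downloaded the model into a local `models/` directory (the directory name below matches the default in `configs/dbgpt-local-vllm.toml` and is only an illustration; point it at wherever your model actually lives), you can uncomment and set the `path` line:
+
+```toml
+[[models.llms]]
+name = "DeepSeek-R1-Distill-Qwen-1.5B"
+provider = "vllm"
+# Local directory containing the model weights
+path = "models/DeepSeek-R1-Distill-Qwen-1.5B"
+```
+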
+Then run the following command to start the webserver:
+
+```bash
+uv run dbgpt start webserver --config configs/dbgpt-local-vllm.toml
+```
+
+
+
+
+```bash
+# Use uv to install dependencies needed for llama-cpp
+# Install core dependencies and select desired extensions
+uv sync --all-packages \
+--extra "base" \
+--extra "llama_cpp" \
+--extra "rag" \
+--extra "storage_chromadb" \
+--extra "quant_bnb" \
+--extra "dbgpts"
+```
+
+### Run Webserver
+
+To run DB-GPT with a local model, you can modify the `configs/dbgpt-local-llama-cpp.toml` configuration file to specify the model path and other parameters.
+
+```toml
+# Model Configurations
+[models]
+[[models.llms]]
+name = "DeepSeek-R1-Distill-Qwen-1.5B"
+provider = "llama.cpp"
+# If not provided, the model will be downloaded from the Hugging Face model hub
+# uncomment the following line to specify the model path in the local file system
+# path = "the-model-path-in-the-local-file-system"
+
+[[models.embeddings]]
+name = "BAAI/bge-large-zh-v1.5"
+provider = "hf"
+# If not provided, the model will be downloaded from the Hugging Face model hub
+# uncomment the following line to specify the model path in the local file system
+# path = "the-model-path-in-the-local-file-system"
+```
+In the above configuration file, `[[models.llms]]` specifies the LLM model, and `[[models.embeddings]]` specifies the embedding model. If you do not provide the `path` parameter, the model will be downloaded from the Hugging Face model hub according to the `name` parameter.
+
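+For example, with a GGUF file downloaded into a local `models/` directory (the file name below matches the default in `configs/dbgpt-local-llama-cpp.toml` and is only an illustration; point it at your actual file), you can uncomment and set the `path` line:
+
+```toml
+[[models.llms]]
+name = "DeepSeek-R1-Distill-Qwen-1.5B"
+provider = "llama.cpp"
+# Local GGUF file for llama.cpp
+path = "models/DeepSeek-R1-Distill-Qwen-1.5B-Q4_K_M.gguf"
+```
+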
+Then run the following command to start the webserver:
+
+```bash
+uv run dbgpt start webserver --config configs/dbgpt-local-llama-cpp.toml
```