diff --git a/.env.template b/.env.template
index f53bf31f7..69152e7fa 100644
--- a/.env.template
+++ b/.env.template
@@ -22,7 +22,7 @@ WEB_SERVER_PORT=7860
#** LLM MODELS **#
#*******************************************************************#
# LLM_MODEL, see /pilot/configs/model_config.LLM_MODEL_CONFIG
-LLM_MODEL=vicuna-13b
+LLM_MODEL=vicuna-13b-v1.5
MODEL_SERVER=http://127.0.0.1:8000
LIMIT_MODEL_CONCURRENCY=5
MAX_POSITION_EMBEDDINGS=4096
@@ -37,6 +37,14 @@ QUANTIZE_8bit=True
## "PROXYLLM_BACKEND" is the model they actually deploy. We can use "PROXYLLM_BACKEND" to load the prompt of the corresponding scene.
# PROXYLLM_BACKEND=
+### You can configure parameters for a specific model with {model name}_{config key}=xxx
+### See /pilot/model/parameter.py
+## prompt template for current model
+# llama_cpp_prompt_template=vicuna_v1.1
+## llama-2-70b must be 8
+# llama_cpp_n_gqa=8
+## Model path
+# llama_cpp_model_path=/data/models/TheBloke/vicuna-7B-v1.5-GGML/vicuna-7b-v1.5.ggmlv3.q4_0.bin
#*******************************************************************#
#** EMBEDDING SETTINGS **#
@@ -65,18 +73,6 @@ LOCAL_DB_TYPE=sqlite
# LOCAL_DB_HOST=127.0.0.1
# LOCAL_DB_PORT=3306
-### MILVUS
-## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530)
-## MILVUS_USERNAME - username for your Milvus database
-## MILVUS_PASSWORD - password for your Milvus database
-## MILVUS_SECURE - True to enable TLS. (Default: False)
-## Setting MILVUS_ADDR to a `https://` URL will override this setting.
-## MILVUS_COLLECTION - Milvus collection, change it if you want to start a new memory and retain the old memory.
-# MILVUS_ADDR=localhost:19530
-# MILVUS_USERNAME=
-# MILVUS_PASSWORD=
-# MILVUS_SECURE=
-# MILVUS_COLLECTION=dbgpt
#*******************************************************************#
#** COMMANDS **#
diff --git a/README.md b/README.md
index 0076ccbc1..eaaf983a5 100644
--- a/README.md
+++ b/README.md
@@ -145,19 +145,19 @@ The core capabilities mainly consist of the following parts:
## Image
🌐 [AutoDL Image](https://www.codewithgpu.com/i/csunny/DB-GPT/dbgpt-0.3.1-v2)
+
## Install
-[Quickstart](https://db-gpt.readthedocs.io/en/latest/getting_started/getting_started.html)
+
+
+
+
+[**Quickstart**](https://db-gpt.readthedocs.io/en/latest/getting_started/install/deploy/deploy.html)
### Language Switching
In the .env configuration file, modify the LANGUAGE parameter to switch to different languages. The default is English (Chinese: zh, English: en, other languages to be added later).
-### Platform Deployment
-- autodl
- [autodl image](https://www.codewithgpu.com/i/csunny/DB-GPT/csunny-db-gpt). You can refer to the image instructions to build from scratch, or use `docker pull` to obtain the shared image, follow the instructions in the document to operate. If you have any questions, please leave a comment.
## Usage Instructions
-- [Multi LLMs Usage](https://db-gpt.readthedocs.io/en/latest/modules/llms.html)
-- [Create your own knowledge repository](https://db-gpt.readthedocs.io/en/latest/modules/knowledge.html)
If nltk-related errors occur during the use of the knowledge base, you need to install the nltk toolkit. For more details, please refer to: [nltk documents](https://www.nltk.org/data.html)
Run the Python interpreter and type the commands:
diff --git a/README.zh.md b/README.zh.md
index 35333eb3b..df4bb77f6 100644
--- a/README.zh.md
+++ b/README.zh.md
@@ -148,25 +148,29 @@ DB-GPT基于 [FastChat](https://github.com/lm-sys/FastChat) 构建大模型运
- [DB-GPT-Web](https://github.com/csunny/DB-GPT-Web) 多端交互前端界面
## Image
+
🌐 [AutoDL镜像](https://www.codewithgpu.com/i/csunny/DB-GPT/dbgpt-0.3.1-v2)
🌐 [阿里云镜像](http://dbgpt.site/web/#/p/dc4bb97e0bc15302dbf3a5d5571142dd)
## 安装
-[快速开始](https://db-gpt.readthedocs.io/projects/db-gpt-docs-zh-cn/zh_CN/latest/getting_started/getting_started.html)
+
+
+
+
+
+[**快速开始**](https://db-gpt.readthedocs.io/projects/db-gpt-docs-zh-cn/zh_CN/latest/getting_started/install/deploy/deploy.html)
### 多语言切换
- 在.env 配置文件当中,修改LANGUAGE参数来切换使用不同的语言,默认是英文(中文zh, 英文en, 其他语言待补充)
-### 平台部署
-- autodl
- [autodl镜像](https://www.codewithgpu.com/i/csunny/DB-GPT/csunny-db-gpt),从头搭建可参考镜像说明,或通过`docker pull`获取共享镜像,按照文档中的说明操作即可,若有问题,欢迎评论。
+在.env 配置文件当中,修改LANGUAGE参数来切换使用不同的语言,默认是英文(中文zh, 英文en, 其他语言待补充)
## 使用说明
### 多模型使用
- [使用指南](https://db-gpt.readthedocs.io/projects/db-gpt-docs-zh-cn/zh_CN/latest/modules/llms.html)
+[使用指南](https://db-gpt.readthedocs.io/projects/db-gpt-docs-zh-cn/zh_CN/latest/modules/llms.html)
如果在使用知识库时遇到与nltk相关的错误,您需要安装nltk工具包。更多详情,请参见:[nltk文档](https://www.nltk.org/data.html)
Run the Python interpreter and type the commands:
diff --git a/assets/DB-GPT-Product.jpg b/assets/DB-GPT-Product.jpg
new file mode 100644
index 000000000..9a6979328
Binary files /dev/null and b/assets/DB-GPT-Product.jpg differ
diff --git a/assets/chat_data/add_datasource.png b/assets/chat_data/add_datasource.png
new file mode 100644
index 000000000..6b008895b
Binary files /dev/null and b/assets/chat_data/add_datasource.png differ
diff --git a/assets/chat_data/chat_data.jpg b/assets/chat_data/chat_data.jpg
new file mode 100644
index 000000000..15181d2cb
Binary files /dev/null and b/assets/chat_data/chat_data.jpg differ
diff --git a/assets/chat_data/chatdata_eg.png b/assets/chat_data/chatdata_eg.png
new file mode 100644
index 000000000..44b6033bf
Binary files /dev/null and b/assets/chat_data/chatdata_eg.png differ
diff --git a/assets/chat_data/chatdb_eg.png b/assets/chat_data/chatdb_eg.png
new file mode 100644
index 000000000..2f547db25
Binary files /dev/null and b/assets/chat_data/chatdb_eg.png differ
diff --git a/assets/chat_data/db_connect.png b/assets/chat_data/db_connect.png
new file mode 100644
index 000000000..25684b0c7
Binary files /dev/null and b/assets/chat_data/db_connect.png differ
diff --git a/assets/chat_data/db_entry.png b/assets/chat_data/db_entry.png
new file mode 100644
index 000000000..69cc863a7
Binary files /dev/null and b/assets/chat_data/db_entry.png differ
diff --git a/assets/chat_data/img.png b/assets/chat_data/img.png
new file mode 100644
index 000000000..62b652059
Binary files /dev/null and b/assets/chat_data/img.png differ
diff --git a/assets/kbqa/begin_chat.jpg b/assets/kbqa/begin_chat.jpg
new file mode 100644
index 000000000..89f5034d6
Binary files /dev/null and b/assets/kbqa/begin_chat.jpg differ
diff --git a/assets/kbqa/create_space.png b/assets/kbqa/create_space.png
new file mode 100644
index 000000000..3ce374673
Binary files /dev/null and b/assets/kbqa/create_space.png differ
diff --git a/assets/kbqa/document.jpg b/assets/kbqa/document.jpg
new file mode 100644
index 000000000..56d34f4cf
Binary files /dev/null and b/assets/kbqa/document.jpg differ
diff --git a/assets/kbqa/embedding.png b/assets/kbqa/embedding.png
new file mode 100644
index 000000000..041126575
Binary files /dev/null and b/assets/kbqa/embedding.png differ
diff --git a/assets/kbqa/prompt.png b/assets/kbqa/prompt.png
new file mode 100644
index 000000000..24b9284c1
Binary files /dev/null and b/assets/kbqa/prompt.png differ
diff --git a/assets/kbqa/tasks.jpg b/assets/kbqa/tasks.jpg
new file mode 100644
index 000000000..6fd30337e
Binary files /dev/null and b/assets/kbqa/tasks.jpg differ
diff --git a/assets/kbqa/upload.jpg b/assets/kbqa/upload.jpg
new file mode 100644
index 000000000..59091d1fe
Binary files /dev/null and b/assets/kbqa/upload.jpg differ
diff --git a/assets/llm/dbgpt_llm.jpg b/assets/llm/dbgpt_llm.jpg
new file mode 100644
index 000000000..742c8b922
Binary files /dev/null and b/assets/llm/dbgpt_llm.jpg differ
diff --git a/assets/wechat.jpg b/assets/wechat.jpg
index 40ab434b4..1d56229f0 100644
Binary files a/assets/wechat.jpg and b/assets/wechat.jpg differ
diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile
index 7df9e2d4e..3cb063e47 100644
--- a/docker/base/Dockerfile
+++ b/docker/base/Dockerfile
@@ -11,38 +11,48 @@ ARG LANGUAGE="en"
ARG PIP_INDEX_URL="https://pypi.org/simple"
ENV PIP_INDEX_URL=$PIP_INDEX_URL
-# COPY only requirements.txt first to leverage Docker cache
-COPY ./requirements.txt /tmp/requirements.txt
-
-WORKDIR /app
-
RUN pip3 install --upgrade pip -i $PIP_INDEX_URL \
- && (if [ "${BUILD_LOCAL_CODE}" = "false" ]; \
- # if not build local code, clone latest code from git, and rename to /app, TODO: download by version, like: https://github.com/eosphoros-ai/DB-GPT/archive/refs/tags/$DBGPT_VERSION.zip
- then git clone https://github.com/eosphoros-ai/DB-GPT.git /app \
- && cp /app/requirements.txt /tmp/requirements.txt; \
- fi;) \
- && pip3 install -r /tmp/requirements.txt -i $PIP_INDEX_URL --no-cache-dir \
- && rm /tmp/requirements.txt
-
-RUN (if [ "${LANGUAGE}" = "zh" ]; \
+ && (if [ "${LANGUAGE}" = "zh" ]; \
# language is zh, download zh_core_web_sm from github
then wget https://github.com/explosion/spacy-models/releases/download/zh_core_web_sm-3.5.0/zh_core_web_sm-3.5.0-py3-none-any.whl -O /tmp/zh_core_web_sm-3.5.0-py3-none-any.whl \
&& pip3 install /tmp/zh_core_web_sm-3.5.0-py3-none-any.whl -i $PIP_INDEX_URL \
&& rm /tmp/zh_core_web_sm-3.5.0-py3-none-any.whl; \
# not zh, download directly
else python3 -m spacy download zh_core_web_sm; \
- fi;) \
+ fi;)
+
+RUN mkdir -p /app
+
+# COPY only requirements.txt first to leverage Docker cache
+COPY ./requirements.txt /app/requirements.txt
+COPY ./setup.py /app/setup.py
+COPY ./README.md /app/README.md
+
+WORKDIR /app
+
+# RUN pip3 install --upgrade pip -i $PIP_INDEX_URL \
+# && (if [ "${BUILD_LOCAL_CODE}" = "false" ]; \
+# # if not build local code, clone latest code from git, and rename to /app, TODO: download by version, like: https://github.com/eosphoros-ai/DB-GPT/archive/refs/tags/$DBGPT_VERSION.zip
+# then git clone https://github.com/eosphoros-ai/DB-GPT.git /app \
+# && cp /app/requirements.txt /tmp/requirements.txt; \
+# fi;) \
+# && pip3 install -r /tmp/requirements.txt -i $PIP_INDEX_URL --no-cache-dir \
+# && rm /tmp/requirements.txt
+
+RUN pip3 install -i $PIP_INDEX_URL .
+
+# ENV CMAKE_ARGS="-DLLAMA_CUBLAS=ON -DLLAMA_AVX2=OFF -DLLAMA_F16C=OFF -DLLAMA_FMA=OFF"
+# ENV FORCE_CMAKE=1
+RUN pip3 install -i $PIP_INDEX_URL .[llama_cpp] \
&& rm -rf `pip3 cache dir`
ARG BUILD_LOCAL_CODE="false"
# COPY the rest of the app
-COPY . /tmp/app
+COPY . /app
# TODO:Need to find a better way to determine whether to build docker image with local code.
RUN (if [ "${BUILD_LOCAL_CODE}" = "true" ]; \
- then mv /tmp/app / && rm -rf /app/logs && rm -rf /app/pilot/data && rm -rf /app/pilot/message; \
- else rm -rf /tmp/app; \
+ then rm -rf /app/logs && rm -rf /app/pilot/data && rm -rf /app/pilot/message; \
fi;)
ARG LOAD_EXAMPLES="true"
diff --git a/docs/getting_started/application.rst b/docs/getting_started/application.rst
new file mode 100644
index 000000000..19494d1a1
--- /dev/null
+++ b/docs/getting_started/application.rst
@@ -0,0 +1,20 @@
+Applications
+==================================
+The DB-GPT product is a web application in which you can chat with databases, chat with knowledge bases, and turn text into dashboards.
+
+.. image:: https://github.com/eosphoros-ai/DB-GPT/assets/13723926/f6564da2-bed3-43ad-9659-d00e0eeb255b
+
+
+- Chat DB
+- Chat Knowledge
+- Dashboard
+- Plugins
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Application
+ :name: chatdb
+ :hidden:
+
+ ./application/chatdb/chatdb.md
+ ./application/kbqa/kbqa.md
\ No newline at end of file
diff --git a/docs/getting_started/application/chatdb/chatdb.md b/docs/getting_started/application/chatdb/chatdb.md
new file mode 100644
index 000000000..5dca2aeb0
--- /dev/null
+++ b/docs/getting_started/application/chatdb/chatdb.md
@@ -0,0 +1,19 @@
+ChatData & ChatDB
+==================================
+ChatData generates SQL from natural language and executes it. ChatDB involves conversing with metadata from the Database, including metadata about databases, tables, and fields.
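+
+For example, a ChatData request such as "show the top 10 users by order count" might be turned into SQL like the following and executed against the connected datasource (a hypothetical sketch; the schema names are made up):
+
+```bash
+# SQL that ChatData might generate for the question above, run here via the mysql CLI
+mysql -h127.0.0.1 -uroot -p -e "
+SELECT user_id, COUNT(*) AS order_cnt
+FROM orders
+GROUP BY user_id
+ORDER BY order_cnt DESC
+LIMIT 10;"
+```
+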
+### 1.Choose Datasource
+If you are using DB-GPT for the first time, you need to add a data source and set the relevant connection information for the data source.
+#### 1.1 Datasource management
+
+#### 1.2 Connection management
+
+#### 1.3 Add Datasource
+
+### 2.ChatData
+After successfully setting up the data source, you can start conversing with the database. You can ask it to generate SQL for you or inquire about relevant information on the database's metadata.
+
+
+### 3.ChatDB
+
+
+
diff --git a/docs/getting_started/application/dashboard/dashboard.md b/docs/getting_started/application/dashboard/dashboard.md
new file mode 100644
index 000000000..e69de29bb
diff --git a/docs/getting_started/application/kbqa/kbqa.md b/docs/getting_started/application/kbqa/kbqa.md
new file mode 100644
index 000000000..eb1e4acd8
--- /dev/null
+++ b/docs/getting_started/application/kbqa/kbqa.md
@@ -0,0 +1,80 @@
+KBQA
+==================================
+DB-GPT supports a knowledge question-answering module, which aims to create an intelligent expert in the field of databases and provide professional knowledge-based answers to database practitioners.
+
+
+
+## KBQA abilities
+
+
+```{admonition} KBQA abilities
+* Knowledge Space
+* Multi-Source Knowledge Embedding
+* Embedding Argument Adjustment
+* Chat Knowledge
+* Multi Vector DB
+```
+
+
+## Steps to use KBQA in DB-GPT
+
+#### 1.Create Knowledge Space
+If you are using Knowledge Space for the first time, you need to create a Knowledge Space and set its name, owner, and description.
+
+
+
+
+#### 2.Create Knowledge Document
+DB-GPT now supports multiple knowledge sources, including Text, WebUrl, and Document (PDF, Markdown, Word, PPT, HTML, and CSV).
+After a document is uploaded successfully, the backend automatically reads, splits, and chunks it, and then imports it into the vector database. Alternatively, you can synchronize the document manually. You can also click on details to view the specific document slices.
+##### 2.1 Choose Knowledge Type:
+
+
+##### 2.2 Upload Document:
+
+
+
+#### 3.Chat With Knowledge
+
+
+#### 4.Adjust Space arguments
+Each knowledge space supports argument customization, including the relevant arguments for vector retrieval and the arguments for knowledge question-answering prompts.
+##### 4.1 Embedding
+Embedding Argument
+
+
+```{tip} Embedding arguments
+* topk: the top k vectors based on similarity score.
+* recall_score: a threshold score for retrieving similar vectors.
+* recall_type: the recall type.
+* model: the model used to create vector representations of text or other data.
+* chunk_size: the size of the data chunks used in processing.
+* chunk_overlap: the amount of overlap between adjacent data chunks.
+```
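+
+For reference, the corresponding global defaults live in `.env` (a sketch; the values shown are the documented defaults, so adjust them to your data):
+
+```bash
+# Global knowledge-embedding defaults in .env
+KNOWLEDGE_CHUNK_SIZE=500
+KNOWLEDGE_CHUNK_OVERLAP=100
+KNOWLEDGE_SEARCH_TOP_SIZE=5
+```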
+
+##### 4.2 Prompt
+Prompt Argument
+
+
+```{tip} Prompt arguments
+* scene: a contextual parameter used to define the setting or environment in which the prompt is being used.
+* template: a pre-defined structure or format for the prompt, which helps ensure that the AI system generates responses consistent with the desired style or tone.
+* max_token: the maximum number of tokens or words allowed in a prompt.
+```
+
+#### 5.Change Vector Database
+
+```{admonition} Vector Store SETTINGS
+#### Chroma
+* VECTOR_STORE_TYPE=Chroma
+#### MILVUS
+* VECTOR_STORE_TYPE=Milvus
+* MILVUS_URL=127.0.0.1
+* MILVUS_PORT=19530
+* MILVUS_USERNAME
+* MILVUS_PASSWORD
+* MILVUS_SECURE=
+
+#### WEAVIATE
+* WEAVIATE_URL=https://kt-region-m8hcy0wc.weaviate.network
+```
\ No newline at end of file
diff --git a/docs/getting_started/faq.rst b/docs/getting_started/faq.rst
new file mode 100644
index 000000000..0d758c013
--- /dev/null
+++ b/docs/getting_started/faq.rst
@@ -0,0 +1,22 @@
+FAQ
+==================================
+The DB-GPT product is a web application in which you can chat with databases, chat with knowledge bases, and turn text into dashboards.
+
+.. image:: ./assets/DB-GPT-Product.jpg
+
+
+- deploy
+- llm
+- chatdb
+- kbqa
+
+.. toctree::
+ :maxdepth: 2
+ :caption: FAQ
+ :name: deploy
+ :hidden:
+
+ ./faq/deploy/deploy_faq.md
+ ./faq/llm/llm_faq.md
+ ./faq/chatdb/chatdb_faq.md
+ ./faq/kbqa/kbqa_faq.md
\ No newline at end of file
diff --git a/docs/getting_started/faq/chatdb/chatdb_faq.md b/docs/getting_started/faq/chatdb/chatdb_faq.md
new file mode 100644
index 000000000..108db9ebf
--- /dev/null
+++ b/docs/getting_started/faq/chatdb/chatdb_faq.md
@@ -0,0 +1,10 @@
+Chat DB FAQ
+==================================
+##### Q1: What is the difference between ChatData and ChatDB?
+ChatData generates SQL from natural language and executes it. ChatDB involves conversing with metadata from the Database, including metadata about databases, tables, and fields.
+
+##### Q2: Which supported LLMs are currently suitable for text-to-SQL?
+Currently, vicuna-13b-v1.5 and llama-2-70b are the most suitable for text-to-SQL.
+
+##### Q3: How to fine-tune Text-to-SQL in DB-GPT
+There is a companion GitHub project for Text-to-SQL fine-tuning: https://github.com/eosphoros-ai/DB-GPT-Hub
diff --git a/docs/getting_started/faq/deploy/deploy_faq.md b/docs/getting_started/faq/deploy/deploy_faq.md
new file mode 100644
index 000000000..0c3091042
--- /dev/null
+++ b/docs/getting_started/faq/deploy/deploy_faq.md
@@ -0,0 +1,33 @@
+Installation FAQ
+==================================
+
+
+##### Q1: `pip install -r requirements.txt` fails because some packages cannot be found at a suitable version.
+Change the pip source:
+
+```bash
+# pypi
+$ pip install -r requirements.txt -i https://pypi.python.org/simple
+```
+
+or
+
+```bash
+# tsinghua
+$ pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple/
+```
+
+or
+
+```bash
+# aliyun
+$ pip install -r requirements.txt -i http://mirrors.aliyun.com/pypi/simple/
+```
+
+##### Q2: sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) unable to open database file
+
+Make sure you have pulled the latest code, or create the directory manually with `mkdir pilot/data`.
+
+##### Q3: The model keeps getting killed.
+Your GPU does not have enough VRAM. Upgrade your hardware, switch to a smaller LLM, or enable quantization.
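+
+If you still want to run a larger model, one mitigation is to enable quantization in your `.env` (a sketch; actual VRAM savings depend on the model):
+
+```bash
+# .env: use 4-bit quantization to reduce VRAM usage
+QUANTIZE_4bit=True
+```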
+
diff --git a/docs/getting_started/faq/kbqa/kbqa_faq.md b/docs/getting_started/faq/kbqa/kbqa_faq.md
new file mode 100644
index 000000000..adf667a3c
--- /dev/null
+++ b/docs/getting_started/faq/kbqa/kbqa_faq.md
@@ -0,0 +1,58 @@
+KBQA FAQ
+==================================
+
+##### Q1: text2vec-large-chinese not found
+
+Make sure you have downloaded the text2vec-large-chinese embedding model correctly:
+
+```tip
+centos: yum install git-lfs
+ubuntu: apt-get install git-lfs -y
+macos: brew install git-lfs
+```
+```bash
+cd models
+git lfs clone https://huggingface.co/GanymedeNil/text2vec-large-chinese
+```
+
+##### Q2: How to change the vector DB type in DB-GPT?
+
+Update the .env file and set VECTOR_STORE_TYPE.
+
+DB-GPT currently supports the Chroma (default), Milvus (>2.1), and Weaviate vector databases.
+To change the vector DB, update your .env and set VECTOR_STORE_TYPE, e.g. VECTOR_STORE_TYPE=Chroma (currently only Chroma and Milvus (>2.1) are supported; if you set Milvus, please also set MILVUS_URL and MILVUS_PORT).
+If you want to support another vector DB, you can integrate it yourself: [how to integrate](https://db-gpt.readthedocs.io/en/latest/modules/vector.html)
+```commandline
+#*******************************************************************#
+#** VECTOR STORE SETTINGS **#
+#*******************************************************************#
+VECTOR_STORE_TYPE=Chroma
+#MILVUS_URL=127.0.0.1
+#MILVUS_PORT=19530
+#MILVUS_USERNAME
+#MILVUS_PASSWORD
+#MILVUS_SECURE=
+
+#WEAVIATE_URL=https://kt-region-m8hcy0wc.weaviate.network
+```
+##### Q3: When I use vicuna-13b, the answers contain illegal characters like this.
+
+
+
+
+Set KNOWLEDGE_SEARCH_TOP_SIZE or KNOWLEDGE_CHUNK_SIZE to a smaller value and reboot the server.
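+
+For example (illustrative values only; the documented defaults are KNOWLEDGE_SEARCH_TOP_SIZE=5 and KNOWLEDGE_CHUNK_SIZE=500):
+
+```commandline
+KNOWLEDGE_SEARCH_TOP_SIZE=3
+KNOWLEDGE_CHUNK_SIZE=300
+```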
+
+##### Q4: space add error (pymysql.err.OperationalError) (1054, "Unknown column 'knowledge_space.context' in 'field list'")
+
+1. Shut down dbgpt_server (Ctrl+C).
+
+2. Add the column `context` to the table `knowledge_space`. Connect to MySQL:
+```commandline
+mysql -h127.0.0.1 -uroot -paa12345678
+```
+3. Execute the SQL DDL:
+```commandline
+mysql> use knowledge_management;
+mysql> ALTER TABLE knowledge_space ADD COLUMN context TEXT COMMENT "arguments context";
+```
+4. Restart the dbgpt server.
\ No newline at end of file
diff --git a/docs/getting_started/faq/llm/llm_faq.md b/docs/getting_started/faq/llm/llm_faq.md
new file mode 100644
index 000000000..62b09c665
--- /dev/null
+++ b/docs/getting_started/faq/llm/llm_faq.md
@@ -0,0 +1,40 @@
+LLM USE FAQ
+==================================
+##### Q1: How to use the OpenAI ChatGPT service
+Change your LLM_MODEL:
+````shell
+LLM_MODEL=proxyllm
+````
+
+Set your OpenAI API key:
+````shell
+PROXY_API_KEY={your-openai-sk}
+PROXY_SERVER_URL=https://api.openai.com/v1/chat/completions
+````
+
+Make sure your OpenAI API key is valid.
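+
+One quick way to verify the key (an illustrative check, assuming outbound access to api.openai.com):
+
+````shell
+# Should return a JSON list of models if the key is valid
+curl -s https://api.openai.com/v1/models \
+  -H "Authorization: Bearer $PROXY_API_KEY"
+````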
+
+##### Q2: How to use multiple GPUs
+DB-GPT uses all available GPUs by default. You can modify the setting `CUDA_VISIBLE_DEVICES=0,1` in the `.env` file to use specific GPU IDs.
+
+Optionally, you can also specify the GPU IDs to use before the start command, as shown below:
+
+````shell
+# Specify 1 gpu
+CUDA_VISIBLE_DEVICES=0 python3 pilot/server/dbgpt_server.py
+
+# Specify 4 gpus
+CUDA_VISIBLE_DEVICES=3,4,5,6 python3 pilot/server/dbgpt_server.py
+````
+
+You can modify the setting `MAX_GPU_MEMORY=xxGib` in the `.env` file to configure the maximum memory used by each GPU.
+
+##### Q3: Not enough memory
+
+DB-GPT supports 8-bit and 4-bit quantization.
+
+You can modify the setting `QUANTIZE_8bit=True` or `QUANTIZE_4bit=True` in the `.env` file to use quantization (8-bit quantization is enabled by default).
+
+Llama-2-70b with 8-bit quantization can run with 80 GB of VRAM, and 4-bit quantization can run with 48 GB of VRAM.
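+
+For example, to run llama-2-70b on a 48 GB GPU you might combine settings like this (a sketch; it assumes the llama-2-70b weights are in your `models` directory):
+
+````shell
+# .env: load llama-2-70b with 4-bit quantization
+LLM_MODEL=llama-2-70b
+QUANTIZE_4bit=True
+````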
+
+Note: you need to install the latest dependencies according to [requirements.txt](https://github.com/eosphoros-ai/DB-GPT/blob/main/requirements.txt).
\ No newline at end of file
diff --git a/docs/getting_started/install.rst b/docs/getting_started/install.rst
new file mode 100644
index 000000000..6a188e85e
--- /dev/null
+++ b/docs/getting_started/install.rst
@@ -0,0 +1,25 @@
+Install
+==================================
+The DB-GPT product is a web application in which you can chat with databases, chat with knowledge bases, and turn text into dashboards.
+
+.. image:: ./assets/DB-GPT-Product.jpg
+
+
+- deploy
+- docker
+- docker_compose
+- environment
+- deploy_faq
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Install
+ :name: deploy
+ :hidden:
+
+ ./install/deploy/deploy.md
+ ./install/docker/docker.md
+ ./install/docker_compose/docker_compose.md
+ ./install/llm/llm.rst
+ ./install/environment/environment.md
+ ./install/faq/deploy_faq.md
\ No newline at end of file
diff --git a/docs/getting_started/install/deploy/deploy.md b/docs/getting_started/install/deploy/deploy.md
new file mode 100644
index 000000000..a2595cdff
--- /dev/null
+++ b/docs/getting_started/install/deploy/deploy.md
@@ -0,0 +1,146 @@
+# Installation From Source
+
+This tutorial gives you a quick walkthrough of using DB-GPT with your own environment and data.
+
+## Installation
+
+To get started, install DB-GPT with the following steps.
+
+### 1. Hardware Requirements
+Because our project can achieve over 85% of ChatGPT's performance, there are certain hardware requirements. Overall, though, the project can be deployed and used on consumer-grade graphics cards. The specific hardware requirements for deployment are as follows:
+
+| GPU | VRAM Size | Performance |
+|----------|-----------| ------------------------------------------- |
+| RTX 4090 | 24 GB | Smooth conversation inference |
+| RTX 3090 | 24 GB | Smooth conversation inference, better than V100 |
+| V100 | 16 GB | Conversation inference possible, noticeable stutter |
+| T4 | 16 GB | Conversation inference possible, noticeable stutter |
+
+If your VRAM size is not enough, DB-GPT supports 8-bit and 4-bit quantization.
+
+Here is the VRAM usage of the models we tested in some common scenarios.
+
+| Model | Quantize | VRAM Size |
+| --------- | --------- | --------- |
+| vicuna-7b-v1.5 | 4-bit | 8 GB |
+| vicuna-7b-v1.5 | 8-bit | 12 GB |
+| vicuna-13b-v1.5 | 4-bit | 12 GB |
+| vicuna-13b-v1.5 | 8-bit | 20 GB |
+| llama-2-7b | 4-bit | 8 GB |
+| llama-2-7b | 8-bit | 12 GB |
+| llama-2-13b | 4-bit | 12 GB |
+| llama-2-13b | 8-bit | 20 GB |
+| llama-2-70b | 4-bit | 48 GB |
+| llama-2-70b | 8-bit | 80 GB |
+| baichuan-7b | 4-bit | 8 GB |
+| baichuan-7b | 8-bit | 12 GB |
+| baichuan-13b | 4-bit | 12 GB |
+| baichuan-13b | 8-bit | 20 GB |
+
+### 2. Install
+```bash
+git clone https://github.com/eosphoros-ai/DB-GPT.git
+```
+
+We use SQLite as the default database, so no database installation is needed. If you choose to connect to another database, you can follow our tutorial for installation and configuration.
+For the entire DB-GPT installation process we use a miniconda3 virtual environment. Create the virtual environment and install the Python dependencies.
+[How to install Miniconda](https://docs.conda.io/en/latest/miniconda.html)
+```bash
+# Requires Python >= 3.10
+conda create -n dbgpt_env python=3.10
+conda activate dbgpt_env
+pip install -r requirements.txt
+```
+Before using the DB-GPT knowledge module, download the spaCy model:
+```bash
+python -m spacy download zh_core_web_sm
+
+```
+
+Once the environment is installed, create a new folder "models" in the DB-GPT project, then put all the models downloaded from Hugging Face in this directory.
+
+```{tip}
+Note: make sure you have installed git-lfs.
+
+centos: yum install git-lfs
+
+ubuntu: apt-get install git-lfs
+
+macos: brew install git-lfs
+```
+
+```bash
+cd DB-GPT
+mkdir models && cd models
+#### llm model
+git clone https://huggingface.co/lmsys/vicuna-13b-v1.5
+# or
+git clone https://huggingface.co/THUDM/chatglm2-6b
+
+#### embedding model
+git clone https://huggingface.co/GanymedeNil/text2vec-large-chinese
+# or
+git clone https://huggingface.co/moka-ai/m3e-large
+```
+
+The model files are large and will take a long time to download. During the download, let's configure the .env file, which needs to be created by copying the `.env.template`:
+
+If you want to use the OpenAI LLM service, see [LLM Use FAQ](https://db-gpt.readthedocs.io/en/latest/getting_started/faq/llm/llm_faq.html).
+
+```{tip}
+cp .env.template .env
+```
+
+You can configure basic parameters in the .env file, for example setting LLM_MODEL to the model to be used.
+
+([Vicuna-v1.5](https://huggingface.co/lmsys/vicuna-13b-v1.5), based on llama-2, has been released; we recommend setting `LLM_MODEL=vicuna-13b-v1.5` to try this model.)
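+
+A minimal `.env` sketch (the values mirror `.env.template`):
+
+```bash
+LLM_MODEL=vicuna-13b-v1.5
+MODEL_SERVER=http://127.0.0.1:8000
+```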
+
+### 3. Run
+
+1. Run the db-gpt server:
+
+```bash
+$ python pilot/server/dbgpt_server.py
+```
+
+Open http://localhost:5000 with your browser to see the product.
+
+```tip
+If you want to access an external LLM service, you need to:
+
+1. Set the variables LLM_MODEL=YOUR_MODEL_NAME and MODEL_SERVER=YOUR_MODEL_SERVER (e.g. http://localhost:5000) in the .env file.
+
+2. Execute dbgpt_server.py in light mode, as shown below.
+```
+
+```bash
+$ python pilot/server/dbgpt_server.py --light
+```
+
+If you want to learn about dbgpt-webui, read https://github.com/csunny/DB-GPT/tree/new-page-framework/datacenter
+
+### Multiple GPUs
+
+DB-GPT uses all available GPUs by default. You can modify the setting `CUDA_VISIBLE_DEVICES=0,1` in the `.env` file to use specific GPU IDs.
+
+Optionally, you can also specify the GPU IDs to use before the start command, as shown below:
+
+````shell
+# Specify 1 gpu
+CUDA_VISIBLE_DEVICES=0 python3 pilot/server/dbgpt_server.py
+
+# Specify 4 gpus
+CUDA_VISIBLE_DEVICES=3,4,5,6 python3 pilot/server/dbgpt_server.py
+````
+
+You can modify the setting `MAX_GPU_MEMORY=xxGib` in the `.env` file to configure the maximum memory used by each GPU.
+
+### Not Enough Memory
+
+DB-GPT supports 8-bit and 4-bit quantization.
+
+You can modify the setting `QUANTIZE_8bit=True` or `QUANTIZE_4bit=True` in the `.env` file to use quantization (8-bit quantization is enabled by default).
+
+Llama-2-70b with 8-bit quantization can run with 80 GB of VRAM, and 4-bit quantization can run with 48 GB of VRAM.
+
+Note: you need to install the latest dependencies according to [requirements.txt](https://github.com/eosphoros-ai/DB-GPT/blob/main/requirements.txt).
diff --git a/docs/getting_started/install/docker/docker.md b/docs/getting_started/install/docker/docker.md
new file mode 100644
index 000000000..07aae5349
--- /dev/null
+++ b/docs/getting_started/install/docker/docker.md
@@ -0,0 +1,87 @@
+Docker Install
+==================================
+
+### Docker (Experimental)
+
+#### 1. Building Docker image
+
+```bash
+$ bash docker/build_all_images.sh
+```
+
+Review images by listing them:
+
+```bash
+$ docker images|grep db-gpt
+```
+
+Output should look something like the following:
+
+```
+db-gpt-allinone latest e1ffd20b85ac 45 minutes ago 14.5GB
+db-gpt latest e36fb0cca5d9 3 hours ago 14GB
+```
+
+You can pass some parameters to docker/build_all_images.sh.
+```bash
+$ bash docker/build_all_images.sh \
+--base-image nvidia/cuda:11.8.0-devel-ubuntu22.04 \
+--pip-index-url https://pypi.tuna.tsinghua.edu.cn/simple \
+--language zh
+```
+
+You can run `bash docker/build_all_images.sh --help` to see more usage options.
+
+#### 2. Run all in one docker container
+
+**Run with local model**
+
+```bash
+$ docker run --gpus "device=0" -d -p 3306:3306 \
+ -p 5000:5000 \
+ -e LOCAL_DB_HOST=127.0.0.1 \
+ -e LOCAL_DB_PASSWORD=aa123456 \
+ -e MYSQL_ROOT_PASSWORD=aa123456 \
+ -e LLM_MODEL=vicuna-13b \
+ -e LANGUAGE=zh \
+ -v /data/models:/app/models \
+ --name db-gpt-allinone \
+ db-gpt-allinone
+```
+
+Open http://localhost:5000 with your browser to see the product.
+
+
+- `-e LLM_MODEL=vicuna-13b` means we use vicuna-13b as the LLM model; see /pilot/configs/model_config.LLM_MODEL_CONFIG.
+- `-v /data/models:/app/models` means we mount the local model file directory `/data/models` to the docker container directory `/app/models`; please replace it with your model file directory.
+
+You can view the logs with this command:
+
+```bash
+$ docker logs db-gpt-allinone -f
+```
+
+**Run with openai interface**
+
+```bash
+$ PROXY_API_KEY="Your api key"
+$ PROXY_SERVER_URL="https://api.openai.com/v1/chat/completions"
+$ docker run --gpus "device=0" -d -p 3306:3306 \
+ -p 5000:5000 \
+ -e LOCAL_DB_HOST=127.0.0.1 \
+ -e LOCAL_DB_PASSWORD=aa123456 \
+ -e MYSQL_ROOT_PASSWORD=aa123456 \
+ -e LLM_MODEL=proxyllm \
+ -e PROXY_API_KEY=$PROXY_API_KEY \
+ -e PROXY_SERVER_URL=$PROXY_SERVER_URL \
+ -e LANGUAGE=zh \
+ -v /data/models/text2vec-large-chinese:/app/models/text2vec-large-chinese \
+ --name db-gpt-allinone \
+ db-gpt-allinone
+```
+
+- `-e LLM_MODEL=proxyllm` means we use a proxy LLM (OpenAI interface, FastChat interface, etc.).
+- `-v /data/models/text2vec-large-chinese:/app/models/text2vec-large-chinese` means we mount the local text2vec model into the docker container.
+
+
+Open http://localhost:5000 with your browser to see the product.
diff --git a/docs/getting_started/install/docker_compose/docker_compose.md b/docs/getting_started/install/docker_compose/docker_compose.md
new file mode 100644
index 000000000..49741c94c
--- /dev/null
+++ b/docs/getting_started/install/docker_compose/docker_compose.md
@@ -0,0 +1,26 @@
+Docker Compose
+==================================
+
+#### Run with docker compose
+
+```bash
+$ docker compose up -d
+```
+
+Output should look something like the following:
+```
+[+] Building 0.0s (0/0)
+[+] Running 2/2
+ ✔ Container db-gpt-db-1 Started 0.4s
+ ✔ Container db-gpt-webserver-1 Started
+```
+
+You can view the logs with this command:
+
+```bash
+$ docker logs db-gpt-webserver-1 -f
+```
+
+Open http://localhost:5000 with your browser to see the product.
+
+You can open docker-compose.yml in the project root directory to see more details.
diff --git a/docs/getting_started/install/environment/environment.md b/docs/getting_started/install/environment/environment.md
new file mode 100644
index 000000000..4c8b23751
--- /dev/null
+++ b/docs/getting_started/install/environment/environment.md
@@ -0,0 +1,122 @@
+Environment Parameters
+==================================
+
+```{admonition} LLM MODEL Config
+LLM Model Name, see /pilot/configs/model_config.LLM_MODEL_CONFIG
+* LLM_MODEL=vicuna-13b
+
+MODEL_SERVER_ADDRESS
+
+* MODEL_SERVER=http://127.0.0.1:8000
+
+LIMIT_MODEL_CONCURRENCY
+
+* LIMIT_MODEL_CONCURRENCY=5
+
+MAX_POSITION_EMBEDDINGS
+
+* MAX_POSITION_EMBEDDINGS=4096
+
+QUANTIZE_QLORA
+
+* QUANTIZE_QLORA=True
+
+QUANTIZE_8bit
+
+* QUANTIZE_8bit=True
+```
+
+```{admonition} LLM PROXY Settings
+OPENAI Key
+
+* PROXY_API_KEY={your-openai-sk}
+* PROXY_SERVER_URL=https://api.openai.com/v1/chat/completions
+
+From https://bard.google.com/: press F12 -> Application -> __Secure-1PSID
+
+* BARD_PROXY_API_KEY={your-bard-token}
+```
+
+```{admonition} DATABASE SETTINGS
+### SQLite database (Current default database)
+* LOCAL_DB_PATH=data/default_sqlite.db
+* LOCAL_DB_TYPE=sqlite # Database Type default:sqlite
+
+### MYSQL database
+* LOCAL_DB_TYPE=mysql
+* LOCAL_DB_USER=root
+* LOCAL_DB_PASSWORD=aa12345678
+* LOCAL_DB_HOST=127.0.0.1
+* LOCAL_DB_PORT=3306
+```
+
+```{admonition} EMBEDDING SETTINGS
+EMBEDDING MODEL Name, see /pilot/configs/model_config.LLM_MODEL_CONFIG
+* EMBEDDING_MODEL=text2vec
+
+Embedding Chunk size, default 500
+
+* KNOWLEDGE_CHUNK_SIZE=500
+
+Embedding Chunk Overlap, default 100
+* KNOWLEDGE_CHUNK_OVERLAP=100
+
+Embedding recall top k, default 5
+
+* KNOWLEDGE_SEARCH_TOP_SIZE=5
+
+Embedding recall max token, default 2000
+
+* KNOWLEDGE_SEARCH_MAX_TOKEN=2000
+```
+
+```{admonition} Vector Store SETTINGS
+#### Chroma
+* VECTOR_STORE_TYPE=Chroma
+#### MILVUS
+* VECTOR_STORE_TYPE=Milvus
+* MILVUS_URL=127.0.0.1
+* MILVUS_PORT=19530
+* MILVUS_USERNAME
+* MILVUS_PASSWORD
+* MILVUS_SECURE=
+
+#### WEAVIATE
+* VECTOR_STORE_TYPE=Weaviate
+* WEAVIATE_URL=https://kt-region-m8hcy0wc.weaviate.network
+```
+
+```{admonition} Multi-GPU Setting
+See https://developer.nvidia.com/blog/cuda-pro-tip-control-gpu-visibility-cuda_visible_devices/
+If CUDA_VISIBLE_DEVICES is not configured, all available GPUs will be used.
+
+* CUDA_VISIBLE_DEVICES=0
+
+Optionally, you can also specify the GPU IDs to use before the start command:
+
+* CUDA_VISIBLE_DEVICES=3,4,5,6
+
+You can configure the maximum memory used by each GPU.
+
+* MAX_GPU_MEMORY=16Gib
+```
+
+```{admonition} Other Setting
+#### Language Settings(influence prompt language)
+* LANGUAGE=en
+* LANGUAGE=zh
+```
+
diff --git a/docs/getting_started/install/llm/llama/llama_cpp.md b/docs/getting_started/install/llm/llama/llama_cpp.md
new file mode 100644
index 000000000..7def97d2a
--- /dev/null
+++ b/docs/getting_started/install/llm/llama/llama_cpp.md
@@ -0,0 +1,80 @@
+### llama.cpp
+
+DB-GPT now supports [llama.cpp](https://github.com/ggerganov/llama.cpp) via [llama-cpp-python](https://github.com/abetlen/llama-cpp-python).
+
+## Running llama.cpp
+
+### Preparing Model Files
+
+To use llama.cpp, you need to prepare a ggml-format model file. There are two common ways to obtain one; choose either:
+
+1. Download a pre-converted model file.
+
+Suppose you want to use [Vicuna 7B v1.5](https://huggingface.co/lmsys/vicuna-7b-v1.5). You can download the already-converted file from [TheBloke/vicuna-7B-v1.5-GGML](https://huggingface.co/TheBloke/vicuna-7B-v1.5-GGML); only one file is needed. Download it to the `models` directory and rename it to `ggml-model-q4_0.bin`.
+
+```bash
+wget https://huggingface.co/TheBloke/vicuna-7B-v1.5-GGML/resolve/main/vicuna-7b-v1.5.ggmlv3.q4_K_M.bin -O models/ggml-model-q4_0.bin
+```
+
+2. Convert It Yourself
+
+You can convert the model file yourself according to the instructions in [llama.cpp#prepare-data--run](https://github.com/ggerganov/llama.cpp#prepare-data--run), then put the converted file in the `models` directory and rename it to `ggml-model-q4_0.bin`. A sketch of the flow follows.
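+
+A rough sketch of that flow, following the llama.cpp README (script names and flags vary between llama.cpp versions, so treat this as illustrative only):
+
+```bash
+# Convert the Hugging Face weights to ggml f16, then quantize to q4_0
+python3 convert.py /path/to/vicuna-7b-v1.5 --outfile models/ggml-model-f16.bin
+./quantize models/ggml-model-f16.bin models/ggml-model-q4_0.bin q4_0
+```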
+
+### Installing Dependencies
+
+llama.cpp is an optional dependency in DB-GPT, and you can manually install it using the following command:
+
+```bash
+pip install -e ".[llama_cpp]"
+```
+
+### Modifying the Configuration File
+
+Next, you can directly modify your `.env` file to enable llama.cpp.
+
+```env
+LLM_MODEL=llama-cpp
+llama_cpp_prompt_template=vicuna_v1.1
+```
+
+Then you can run it according to [Run](https://db-gpt.readthedocs.io/en/latest/getting_started/install/deploy/deploy.html#run).
+
+
+### More Configurations
+
+In DB-GPT, the model configuration can be done through `{model name}_{config key}`.
+
+| Environment Variable Key | default | Prompt Template Name|
+|----------|-----------| ----------- |
+| llama_cpp_prompt_template | None | Prompt template name; now supported: `zero_shot, vicuna_v1.1, llama-2, baichuan-chat`. If None, the prompt template is automatically determined from the model path. |
+| llama_cpp_model_path | None | Model path |
+| llama_cpp_n_gpu_layers | 1000000000 | Number of layers to offload to the GPU. Set this to 1000000000 to offload all layers. If your GPU VRAM is not enough, set a lower number, e.g. `10`. |
+| llama_cpp_n_threads | None | Number of threads to use. If None, the number of threads is determined automatically. |
+| llama_cpp_n_batch | 512 | Maximum number of prompt tokens to batch together when calling llama_eval. |
+| llama_cpp_n_gqa | None | Grouped-query attention. Must be 8 for llama-2 70b. |
+| llama_cpp_rms_norm_eps | 5e-06 | 5e-6 is a good value for llama-2 models. |
+| llama_cpp_cache_capacity | None | Maximum cache capacity. Examples: 2000MiB, 2GiB |
+| llama_cpp_prefer_cpu | False | If a GPU is available, it is preferred by default unless `llama_cpp_prefer_cpu=True` is configured. |
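+
+For example, a `.env` fragment using these keys might look like the following (the model path is the example from `.env.template`; point it at your own file):
+
+```env
+LLM_MODEL=llama-cpp
+llama_cpp_prompt_template=vicuna_v1.1
+llama_cpp_model_path=/data/models/TheBloke/vicuna-7B-v1.5-GGML/vicuna-7b-v1.5.ggmlv3.q4_0.bin
+llama_cpp_n_gpu_layers=10
+```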
+
+## GPU Acceleration
+
+GPU acceleration is supported by default. If you encounter any issues, you can uninstall the dependent packages with the following command:
+```bash
+pip uninstall -y llama-cpp-python llama_cpp_python_cuda
+```
+
+Then install `llama-cpp-python` according to the instructions in [llama-cpp-python](https://github.com/abetlen/llama-cpp-python/blob/main/README.md).
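+
+For example, a CUDA-enabled build might look like the following (the `CMAKE_ARGS` mirror the commented-out flags in `docker/base/Dockerfile`; adjust them for your hardware):
+
+```bash
+CMAKE_ARGS="-DLLAMA_CUBLAS=ON" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir
+```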
+
+
+### Mac Usage
+
+Note: if you are using an Apple Silicon (M1) Mac, it is highly recommended to install an arm64-architecture Python build, for example:
+
+```bash
+wget https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-MacOSX-arm64.sh
+bash Miniforge3-MacOSX-arm64.sh
+```
+
+### Windows Usage
+
+Usage on Windows has not been rigorously tested and verified; you are welcome to try it. If you have any problems, you can create an [issue](https://github.com/eosphoros-ai/DB-GPT/issues) or [contact us](https://github.com/eosphoros-ai/DB-GPT/tree/main#contact-information) directly.
diff --git a/docs/getting_started/install/llm/llm.rst b/docs/getting_started/install/llm/llm.rst
new file mode 100644
index 000000000..1eca893e2
--- /dev/null
+++ b/docs/getting_started/install/llm/llm.rst
@@ -0,0 +1,30 @@
+LLM Usage
+==================================
+DB-GPT provides a management and deployment solution for multiple models. This chapter mainly discusses how to deploy different models.
+
+.. image:: https://github.com/eosphoros-ai/DB-GPT/assets/13723926/a0d0840f-570a-4409-bb7d-bec6535a5f62
+
+
+Multi-LLM support: DB-GPT supports multiple large language models, currently including:
+ - 🔥 Vicuna-v1.5(7b,13b)
+ - 🔥 llama-2(7b,13b,70b)
+ - WizardLM-v1.2(13b)
+ - Vicuna (7b,13b)
+ - ChatGLM-6b (int4,int8)
+ - ChatGLM2-6b (int4,int8)
+ - guanaco(7b,13b,33b)
+ - Gorilla(7b,13b)
+ - Baichuan(7b,13b)
+ - OpenAI
+
+- llama_cpp
+- quantization
+
+.. toctree::
+ :maxdepth: 2
+ :caption: LLM Usage
+ :name: llama_cpp
+ :hidden:
+
+ ./llama/llama_cpp.md
+ ./quantization/quantization.md
diff --git a/docs/getting_started/install/llm/quantization/quantization.md b/docs/getting_started/install/llm/quantization/quantization.md
new file mode 100644
index 000000000..58f0acd8a
--- /dev/null
+++ b/docs/getting_started/install/llm/quantization/quantization.md
@@ -0,0 +1,2 @@
+Quantization
+==================================
\ No newline at end of file
diff --git a/docs/getting_started/installation.md b/docs/getting_started/installation.md
index 5b575e61a..9ee7c3a7c 100644
--- a/docs/getting_started/installation.md
+++ b/docs/getting_started/installation.md
@@ -1,4 +1,4 @@
-# Installation
+# Python SDK
DB-GPT provides a third-party Python API package that you can integrate into your own code.
### Installation from Pip
diff --git a/docs/getting_started/tutorials.md b/docs/getting_started/tutorials.md
index 2c5ef87c3..1e0ced59c 100644
--- a/docs/getting_started/tutorials.md
+++ b/docs/getting_started/tutorials.md
@@ -6,19 +6,18 @@ This is a collection of DB-GPT tutorials on Medium.
DB-GPT is divided into several functions, including chat with knowledge base, execute SQL, chat with database, and execute plugins.
### Introduction
+
+#### YouTube
[What is DB-GPT](https://www.youtube.com/watch?v=QszhVJerc0I)
-### Knowledge
+[How to deploy DB-GPT step by step](https://www.youtube.com/watch?v=OJGU4fQCqPs)
-[How to Create your own knowledge repository](https://db-gpt.readthedocs.io/en/latest/modules/knownledge.html)
-
+#### Bilibili
+[What is DB-GPT](https://www.bilibili.com/video/BV1SM4y1a7Nj/?spm_id_from=333.788&vd_source=7792e22c03b7da3c556a450eb42c8a0f)
+
+[How to deploy DB-GPT step by step](https://www.bilibili.com/video/BV1mu411Y7ve/?spm_id_from=pageDriver&vd_source=7792e22c03b7da3c556a450eb42c8a0f)
+
-### SQL Generation
-
-### SQL Execute
-
-### Plugins
-
\ No newline at end of file
diff --git a/docs/index.rst b/docs/index.rst
index 94beea632..b343dc9f7 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -47,10 +47,12 @@ Getting Started
:caption: Getting Started
:hidden:
- getting_started/getting_started.md
+ getting_started/install.rst
+ getting_started/application.rst
getting_started/installation.md
getting_started/concepts.md
getting_started/tutorials.md
+ getting_started/faq.rst
Modules
@@ -130,32 +132,6 @@ Reference
./reference.md
-FAQ
------------
-| DB-GPT FAQ.
-
-.. toctree::
- :maxdepth: 1
- :caption: FAQ
- :name: FAQ
- :hidden:
-
- ./faq.md
-
-Ecosystem
-----------
-
-| Guides for how other companies/products can be used with DB-GPT
-
-.. toctree::
- :maxdepth: 1
- :glob:
- :caption: Ecosystem
- :name: ecosystem
- :hidden
-
- ./ecosystem.md
-
Resources
----------
diff --git a/docs/locales/zh_CN/LC_MESSAGES/faq.po b/docs/locales/zh_CN/LC_MESSAGES/faq.po
index 88347a269..958b9c3d2 100644
--- a/docs/locales/zh_CN/LC_MESSAGES/faq.po
+++ b/docs/locales/zh_CN/LC_MESSAGES/faq.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: DB-GPT 👏👏 0.3.0\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-08-10 16:40+0800\n"
+"POT-Creation-Date: 2023-08-17 21:23+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language: zh_CN\n"
@@ -19,85 +19,84 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.12.1\n"
-#: ../../faq.md:1 c4b5b298c447462ba7aaffd954549def
+#: ../../faq.md:1 a39cbc25271841d79095c1557a817a76
msgid "FAQ"
-msgstr "FAQ"
+msgstr ""
-#: ../../faq.md:2 533e388b78594244aa0acbf2b0263f60
+#: ../../faq.md:2 b08ce199a11b4d309142866a637bc3d0
msgid "Q1: text2vec-large-chinese not found"
-msgstr "Q1: text2vec-large-chinese not found"
+msgstr ""
-#: ../../faq.md:4 e1c7e28c60f24f7a983f30ee43bad32e
+#: ../../faq.md:4 754a61fa05a846f4847bd988c4049ceb
msgid ""
"A1: make sure you have download text2vec-large-chinese embedding model in"
" right way"
-msgstr "按照正确的姿势下载text2vec-large-chinese模型"
+msgstr ""
-#: ../../faq.md:16 7cbfd6629267423a879735dd0dbba24e
+#: ../../faq.md:16 5a3d32eacdd94f59bb4039c3d6380fc9
msgid ""
"Q2: execute `pip install -r requirements.txt` error, found some package "
"cannot find correct version."
-msgstr "执行`pip install -r requirements.txt`报错"
+msgstr ""
-#: ../../faq.md:19 a2bedf7cf2984d35bc9e3edffd9cb991
+#: ../../faq.md:19 54b322726a074d6d9c1a957310774aba
msgid "A2: change the pip source."
-msgstr "修改pip源"
+msgstr ""
-#: ../../faq.md:26 ../../faq.md:33 307385dc581841148bad0dfa95541722
-#: 55a9cd0665e74b47bac71e652a80c8bd
+#: ../../faq.md:26 ../../faq.md:33 0c238f86900243e5b5e9a49e4ef37063
+#: 245e48f636524172b1b9ba4144946007
msgid "or"
-msgstr "或"
+msgstr ""
-#: ../../faq.md:41 8ec34a9f22744313825bd53e90e86695
+#: ../../faq.md:41 f2f7025f324c4065abf244a3adb4e4f6
msgid "Q3:Access denied for user 'root@localhost'(using password :NO)"
-msgstr "或"
+msgstr ""
-#: ../../faq.md:43 85d67326827145859f5af707c96c852f
+#: ../../faq.md:43 f14fd1c2d2ed454491e0a876fd2971a4
msgid "A3: make sure you have installed mysql instance in right way"
-msgstr "按照正确姿势安装mysql"
+msgstr ""
-#: ../../faq.md:45 c51cffb2777c49c6b15485e6151f8b70
+#: ../../faq.md:45 6d499bfb6c0142ec838f68696f793c3d
msgid "Docker:"
-msgstr "Docker:"
+msgstr ""
-#: ../../faq.md:49 09f16d57dbeb4f34a1274b2ac52aacf5
+#: ../../faq.md:49 71137fe8a30d42e7943dd2a4402b2094
msgid "Normal: [download mysql instance](https://dev.mysql.com/downloads/mysql/)"
-msgstr "[download mysql instance](https://dev.mysql.com/downloads/mysql/)"
+msgstr ""
-#: ../../faq.md:52 ab730de8f02f4cb895510a52ca90c02f
+#: ../../faq.md:52 ec5d5f79cbe54328902e6e9b820276e7
msgid "Q4:When I use openai(MODEL_SERVER=proxyllm) to chat"
-msgstr "使用openai-chatgpt模型时(MODEL_SERVER=proxyllm)"
+msgstr ""
-#: ../../faq.md:57 8308cf21e0654e5798c9a2638b8565ec
+#: ../../faq.md:58 d2cbee8bbfd54b4b853ccbdbf1c30c97
msgid "A4: make sure your openapi API_KEY is available"
-msgstr "确认openapi API_KEY是否可用"
+msgstr ""
-#: ../../faq.md:59 ff6361a5438943c2b9e613aa9e2322bf
+#: ../../faq.md:60 c506819975c841468af1899730df3ed1
msgid "Q5:When I Chat Data and Chat Meta Data, I found the error"
msgstr "Chat Data and Chat Meta Data报如下错"
-#: ../../faq.md:64 1a89acc3bf074b77b9e44d20f9b9c3cb
+#: ../../faq.md:67 af52123acad74c28a50f93d53da6afa9
msgid "A5: you have not create your database and table"
msgstr "需要创建自己的数据库"
-#: ../../faq.md:65 e7d63e2aa0d24c5d90dd1865753c7d52
+#: ../../faq.md:68 05bf6d858df44157bfb5480f9e8759fb
msgid "1.create your database."
msgstr "1.先创建数据库"
-#: ../../faq.md:71 8b0de08106a144b98538b12bc671cbb2
+#: ../../faq.md:74 363d4fbb2a474c64a54c2659844596b5
msgid "2.create table {$your_table} and insert your data. eg:"
msgstr "然后创建数据表,模拟数据"
-#: ../../faq.md:85 abb226843354448a925de4deb52db555
+#: ../../faq.md:88 5f3a9b9d7e6f444a87deb17b5a1a45af
msgid "Q6:How to change Vector DB Type in DB-GPT."
msgstr ""
-#: ../../faq.md:87 cc80165e2f6b46a28e53359ea7b3e0af
+#: ../../faq.md:90 ee1d4dfa813942e1a3d1219f21bc041f
msgid "A6: Update .env file and set VECTOR_STORE_TYPE."
msgstr ""
-#: ../../faq.md:88 d30fa1e2fdd94e00a72ef247af41fb43
-#, fuzzy
+#: ../../faq.md:91 71e9e9905bdb46e1925f66a6c12a6afd
msgid ""
"DB-GPT currently support Chroma(Default), Milvus(>2.1), Weaviate vector "
"database. If you want to change vector db, Update your .env, set your "
@@ -106,48 +105,144 @@ msgid ""
"If you want to support more vector db, you can integrate yourself.[how to"
" integrate](https://db-gpt.readthedocs.io/en/latest/modules/vector.html)"
msgstr ""
-"DB-GPT当前支持Chroma(默认),如果你想替换向量数据库,需要更新.env文件,VECTOR_STORE_TYPE=Chroma (now"
-" only support Chroma, Milvus Weaviate, if you set Milvus(>2.1), please "
-"set MILVUS_URL and "
-"MILVUS_PORT)。如果当前支持向量数据库无法满足你的需求,可以集成使用自己的向量数据库。[怎样集成](https://db-"
-"gpt.readthedocs.io/en/latest/modules/vector.html)"
-#: ../../faq.md:104 bed01b4f96e04f61aa536cf615254c87
-#, fuzzy
+#: ../../faq.md:107 d6c4ed8ff8244aa8aef6ea8d8f0a5555
msgid "Q7:When I use vicuna-13b, found some illegal character like this."
-msgstr "使用vicuna-13b,知识库问答出现乱码"
+msgstr ""
-#: ../../faq.md:109 1ea68292da6d46a89944212948d1719d
-#, fuzzy
+#: ../../faq.md:112 911a5051c37244e1b6ea9d3b1bd1fd97
msgid ""
"A7: set KNOWLEDGE_SEARCH_TOP_SIZE smaller or set KNOWLEDGE_CHUNK_SIZE "
"smaller, and reboot server."
-msgstr "将KNOWLEDGE_SEARCH_TOP_SIZE和KNOWLEDGE_CHUNK_SIZE设置小点然后重启"
+msgstr ""
-#: ../../faq.md:111 256b58e3749a4a17acbc456c71581597
+#: ../../faq.md:114 0566430bbc0541709ed60b81c7372175
msgid ""
"Q8:space add error (pymysql.err.OperationalError) (1054, \"Unknown column"
" 'knowledge_space.context' in 'field list'\")"
-msgstr "Q8:space add error (pymysql.err.OperationalError) (1054, \"Unknown column"
-" 'knowledge_space.context' in 'field list'\")"
+msgstr ""
-#: ../../faq.md:114 febe9ec0c51b4e10b3a2bb0aece94e7c
+#: ../../faq.md:117 37419da934b44575bd39bcffffa81482
msgid "A8:"
-msgstr "A8:"
+msgstr ""
-#: ../../faq.md:115 d713a368fe8147feb6db8f89938f3dcd
+#: ../../faq.md:118 1c5f46dbccc342329b544ac174a79994
msgid "1.shutdown dbgpt_server(ctrl c)"
msgstr ""
-#: ../../faq.md:117 4c6ad307caff4b57bb6c7085cc42fb64
+#: ../../faq.md:120 35bb76e9d9ec4230a8fab9aed475a4d7
msgid "2.add column context for table knowledge_space"
-msgstr "2.add column context for table knowledge_space"
+msgstr ""
-#: ../../faq.md:121 a3f955c090be4163bdb934eb32c25fd5
+#: ../../faq.md:124 e198605e42d4452680359487abc349a3
msgid "3.execute sql ddl"
-msgstr "3.执行 sql ddl"
+msgstr ""
-#: ../../faq.md:126 1861332d1c0342e2b968732fba55fa54
+#: ../../faq.md:129 88495f3e66c448faab9f06c4c5cd27ef
msgid "4.restart dbgpt server"
-msgstr "4.重启 dbgpt server"
+msgstr ""
+
+#~ msgid "FAQ"
+#~ msgstr "FAQ"
+
+#~ msgid "Q1: text2vec-large-chinese not found"
+#~ msgstr "Q1: text2vec-large-chinese not found"
+
+#~ msgid ""
+#~ "A1: make sure you have download "
+#~ "text2vec-large-chinese embedding model in"
+#~ " right way"
+#~ msgstr "按照正确的姿势下载text2vec-large-chinese模型"
+
+#~ msgid ""
+#~ "Q2: execute `pip install -r "
+#~ "requirements.txt` error, found some package"
+#~ " cannot find correct version."
+#~ msgstr "执行`pip install -r requirements.txt`报错"
+
+#~ msgid "A2: change the pip source."
+#~ msgstr "修改pip源"
+
+#~ msgid "or"
+#~ msgstr "或"
+
+#~ msgid "Q3:Access denied for user 'root@localhost'(using password :NO)"
+#~ msgstr "或"
+
+#~ msgid "A3: make sure you have installed mysql instance in right way"
+#~ msgstr "按照正确姿势安装mysql"
+
+#~ msgid "Docker:"
+#~ msgstr "Docker:"
+
+#~ msgid ""
+#~ "Normal: [download mysql "
+#~ "instance](https://dev.mysql.com/downloads/mysql/)"
+#~ msgstr "[download mysql instance](https://dev.mysql.com/downloads/mysql/)"
+
+#~ msgid "Q4:When I use openai(MODEL_SERVER=proxyllm) to chat"
+#~ msgstr "使用openai-chatgpt模型时(MODEL_SERVER=proxyllm)"
+
+#~ msgid "A4: make sure your openapi API_KEY is available"
+#~ msgstr "确认openapi API_KEY是否可用"
+
+#~ msgid "Q6:How to change Vector DB Type in DB-GPT."
+#~ msgstr ""
+
+#~ msgid "A6: Update .env file and set VECTOR_STORE_TYPE."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "DB-GPT currently support Chroma(Default), "
+#~ "Milvus(>2.1), Weaviate vector database. If "
+#~ "you want to change vector db, "
+#~ "Update your .env, set your vector "
+#~ "store type, VECTOR_STORE_TYPE=Chroma (now only"
+#~ " support Chroma and Milvus(>2.1), if "
+#~ "you set Milvus, please set MILVUS_URL"
+#~ " and MILVUS_PORT) If you want to "
+#~ "support more vector db, you can "
+#~ "integrate yourself.[how to integrate](https://db-"
+#~ "gpt.readthedocs.io/en/latest/modules/vector.html)"
+#~ msgstr ""
+#~ "DB-"
+#~ "GPT当前支持Chroma(默认),如果你想替换向量数据库,需要更新.env文件,VECTOR_STORE_TYPE=Chroma "
+#~ "(now only support Chroma, Milvus "
+#~ "Weaviate, if you set Milvus(>2.1), "
+#~ "please set MILVUS_URL and "
+#~ "MILVUS_PORT)。如果当前支持向量数据库无法满足你的需求,可以集成使用自己的向量数据库。[怎样集成](https://db-"
+#~ "gpt.readthedocs.io/en/latest/modules/vector.html)"
+
+#~ msgid "Q7:When I use vicuna-13b, found some illegal character like this."
+#~ msgstr "使用vicuna-13b,知识库问答出现乱码"
+
+#~ msgid ""
+#~ "A7: set KNOWLEDGE_SEARCH_TOP_SIZE smaller or"
+#~ " set KNOWLEDGE_CHUNK_SIZE smaller, and "
+#~ "reboot server."
+#~ msgstr "将KNOWLEDGE_SEARCH_TOP_SIZE和KNOWLEDGE_CHUNK_SIZE设置小点然后重启"
+
+#~ msgid ""
+#~ "Q8:space add error (pymysql.err.OperationalError)"
+#~ " (1054, \"Unknown column "
+#~ "'knowledge_space.context' in 'field list'\")"
+#~ msgstr ""
+#~ "Q8:space add error (pymysql.err.OperationalError)"
+#~ " (1054, \"Unknown column "
+#~ "'knowledge_space.context' in 'field list'\")"
+
+#~ msgid "A8:"
+#~ msgstr "A8:"
+
+#~ msgid "1.shutdown dbgpt_server(ctrl c)"
+#~ msgstr ""
+
+#~ msgid "2.add column context for table knowledge_space"
+#~ msgstr "2.add column context for table knowledge_space"
+
+#~ msgid "3.execute sql ddl"
+#~ msgstr "3.执行 sql ddl"
+
+#~ msgid "4.restart dbgpt server"
+#~ msgstr "4.重启 dbgpt server"
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/application.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/application.po
new file mode 100644
index 000000000..04cf08b74
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/application.po
@@ -0,0 +1,51 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR , 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-16 18:31+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../getting_started/application.rst:13
+msgid "Application"
+msgstr "应用"
+
+#: ../../getting_started/application.rst:2 4de9609785bc406aa6f3965f057f90d8
+msgid "Applications"
+msgstr "应用"
+
+#: ../../getting_started/application.rst:3 1ef4f6d81f7c4a2f9ee330fc890017dd
+msgid ""
+"DB-GPT product is a Web application that you can chat database, chat "
+"knowledge, text2dashboard."
+msgstr "DB-GPT是一个可以和数据库对话,知识问答,智能报表的web应用"
+
+#: ../../getting_started/application.rst:8 0451376d435b47bfa7b3f81c87683610
+msgid "Chat DB"
+msgstr "Chat DB"
+
+#: ../../getting_started/application.rst:9 4b1372dd4ae34881891c1dbcd83b92bf
+msgid "Chat Knowledge"
+msgstr "知识问答"
+
+#: ../../getting_started/application.rst:10 d8807a357f1144ccba933338cd0f619a
+msgid "Dashboard"
+msgstr "Dashboard"
+
+#: ../../getting_started/application.rst:11 f6a5806d4f0d4271bb0964935d9c2ff3
+msgid "Plugins"
+msgstr "Plugins"
+
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/application/chatdb/chatdb.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/application/chatdb/chatdb.po
new file mode 100644
index 000000000..7fb225754
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/application/chatdb/chatdb.po
@@ -0,0 +1,118 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR , 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-16 18:31+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../getting_started/application/chatdb/chatdb.md:1
+#: e3984d7305214fc59fc356bd2e382543
+msgid "ChatData & ChatDB"
+msgstr "ChatData & ChatDB"
+
+#: ../../getting_started/application/chatdb/chatdb.md:3
+#: e74daab738984be990aaa21ab5af7046
+msgid ""
+"ChatData generates SQL from natural language and executes it. ChatDB "
+"involves conversing with metadata from the Database, including metadata "
+"about databases, tables, and fields."
+msgstr "ChatData 是会将自然语言生成SQL,并将其执行。ChatDB是与Database里面的元数据,包括库、表、字段的元数据进行对话."
+
+#: ../../getting_started/application/chatdb/chatdb.md:3
+#: ../../getting_started/application/chatdb/chatdb.md:7
+#: ../../getting_started/application/chatdb/chatdb.md:9
+#: ../../getting_started/application/chatdb/chatdb.md:11
+#: ../../getting_started/application/chatdb/chatdb.md:13
+#: ../../getting_started/application/chatdb/chatdb.md:17
+#: 109d4292c91f4ca5a6b682189f10620a 23b1865780a24ad2b765196faff15550
+#: 3c87a8907e854e5d9a61b1ef658e5935 40eefe75a51e4444b1e4b67bdddc6da9
+#: 73eee6111545418e8f9ae533a255d589 876ee62e4e404da6b5d6c5cba0259e5c
+msgid "db plugins demonstration"
+msgstr "db plugins demonstration"
+
+#: ../../getting_started/application/chatdb/chatdb.md:4
+#: a760078b833e434681da72c87342083f
+msgid "1.Choose Datasource"
+msgstr "1.Choose Datasource"
+
+#: ../../getting_started/application/chatdb/chatdb.md:5
+#: b68ac469c7d34026b1cb8b587a446737
+msgid ""
+"If you are using DB-GPT for the first time, you need to add a data source"
+" and set the relevant connection information for the data source."
+msgstr "如果你是第一次使用DB-GPT, 首先需要添加数据源,设置数据源的相关连接信息"
+
+#: ../../getting_started/application/chatdb/chatdb.md:6
+#: 8edecf9287ce4077a984617f0a06e30c
+msgid "1.1 Datasource management"
+msgstr "1.1 Datasource management"
+
+#: ../../getting_started/application/chatdb/chatdb.md:7
+#: af7b5d2f6c9b4666a3466fed05968258
+msgid ""
+msgstr ""
+
+#: ../../getting_started/application/chatdb/chatdb.md:8
+#: f7d49ffe448d49169713452f54b3437c
+msgid "1.2 Connection management"
+msgstr "1.2 Connection管理"
+
+#: ../../getting_started/application/chatdb/chatdb.md:9
+#: 109a8c9da05d4ec7a80ae1665b8d48bb
+msgid ""
+msgstr ""
+
+#: ../../getting_started/application/chatdb/chatdb.md:10
+#: ac283fff85bf41efbd2cfeeda5980f9a
+msgid "1.3 Add Datasource"
+msgstr "1.3 添加Datasource"
+
+#: ../../getting_started/application/chatdb/chatdb.md:11
+#: a25dec01f4824bfe88cc801c93bb3e3b
+msgid ""
+""
+msgstr ""
+
+#: ../../getting_started/application/chatdb/chatdb.md:12
+#: 4751d6d3744148cab130a901594f673a
+msgid "2.ChatData"
+msgstr "2.ChatData"
+
+#: ../../getting_started/application/chatdb/chatdb.md:13
+#: bad82b572ba34ffb8669f0f8ff9b0a05
+msgid ""
+"After successfully setting up the data source, you can start conversing "
+"with the database. You can ask it to generate SQL for you or inquire "
+"about relevant information on the database's metadata. "
+msgstr "设置数据源成功后就可以和数据库进行对话了。你可以让它帮你生成SQL,也可以和问它数据库元数据的相关信息。 "
+
+#: ../../getting_started/application/chatdb/chatdb.md:16
+#: 0bf9c5a2f3764b36abc82102c53d3cd4
+msgid "3.ChatDB"
+msgstr "3.ChatDB"
+
+#: ../../getting_started/application/chatdb/chatdb.md:17
+#: f06e7583c2484a959c1681b1db0acfaa
+msgid ""
+msgstr ""
+
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/application/dashboard/dashboard.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/application/dashboard/dashboard.po
new file mode 100644
index 000000000..cd964f520
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/application/dashboard/dashboard.po
@@ -0,0 +1,21 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-16 18:31+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/application/kbqa/kbqa.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/application/kbqa/kbqa.po
new file mode 100644
index 000000000..577979d80
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/application/kbqa/kbqa.po
@@ -0,0 +1,318 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-16 18:31+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../getting_started/application/kbqa/kbqa.md:1
+#: a06d9329c98f44ffaaf6fc09ba53d97e
+msgid "KBQA"
+msgstr ""
+
+#: ../../getting_started/application/kbqa/kbqa.md:3
+#: bb6943c115754b1fafb7467313100753
+msgid ""
+"DB-GPT supports a knowledge question-answering module, which aims to "
+"create an intelligent expert in the field of databases and provide "
+"professional knowledge-based answers to database practitioners."
+msgstr " DB-GPT支持知识问答模块,知识问答的初衷是打造DB领域的智能专家,为数据库从业人员解决专业的知识问题回答"
+
+#: ../../getting_started/application/kbqa/kbqa.md:5
+#: a13ac963479e4d4fbcbcc4fec7863274
+msgid ""
+msgstr ""
+
+#: ../../getting_started/application/kbqa/kbqa.md:5
+#: f4db3a8d04634059a74be1b2b3c948ef
+msgid "chat_knowledge"
+msgstr "chat_knowledge"
+
+#: ../../getting_started/application/kbqa/kbqa.md:7
+#: ../../getting_started/application/kbqa/kbqa.md:10
+#: 86856cce95c845eb83ed44d8713b0ef6 f787e9ad9f7444a5923fe3476ab4d287
+msgid "KBQA abilities"
+msgstr "KBQA现有能力"
+
+#: ../../getting_started/application/kbqa/kbqa.md:11
+#: df1c88b28a2d46b2b3f5fae1caec000e
+msgid "Knowledge Space."
+msgstr "知识空间"
+
+#: ../../getting_started/application/kbqa/kbqa.md:12
+#: 7f6c99d0c1394f08b246daa1343c24b2
+msgid "Multi Source Knowledge Source Embedding."
+msgstr "多数据源Embedding"
+
+#: ../../getting_started/application/kbqa/kbqa.md:13
+#: 3c1274c3e65f426bbee219227f681e27
+msgid "Embedding Argument Adjust"
+msgstr "Embedding参数自定义"
+
+#: ../../getting_started/application/kbqa/kbqa.md:14
+#: 52fb29f7ee8745e6b78145f5be23b8ce
+msgid "Chat Knowledge"
+msgstr "知识问答"
+
+#: ../../getting_started/application/kbqa/kbqa.md:15
+#: 83e427c46d844c6a9bb5954342ae7c42
+msgid "Multi Vector DB"
+msgstr "多向量数据库管理"
+
+#: ../../getting_started/application/kbqa/kbqa.md:19
+#: 07c1e189d0f84db69b6a51cf23ede7dc
+msgid "Steps to KBQA In DB-GPT"
+msgstr "怎样一步一步使用KBQA"
+
+#: ../../getting_started/application/kbqa/kbqa.md:21
+#: 36e4195e5c3a4bedb7d11243e5705f0a
+msgid "1.Create Knowledge Space"
+msgstr "1.首先创建知识空间"
+
+#: ../../getting_started/application/kbqa/kbqa.md:22
+#: 5c92d41df2b04374a6c7ce40308f738b
+msgid ""
+"If you are using Knowledge Space for the first time, you need to create a"
+" Knowledge Space and set your name, owner, description. "
+""
+msgstr "如果你是第一次使用,先创建知识空间,指定名字,拥有者和描述信息"
+
+#: ../../getting_started/application/kbqa/kbqa.md:22
+#: c5f6e842be384977be1bd667b1e0ab5d
+msgid "create_space"
+msgstr "create_space"
+
+#: ../../getting_started/application/kbqa/kbqa.md:27
+#: c98434920955416ca5273892f9086bc5
+msgid "2.Create Knowledge Document"
+msgstr "2.上传知识"
+
+#: ../../getting_started/application/kbqa/kbqa.md:28
+#: d63f798e8270455587d1afd98c72e995
+msgid ""
+"DB-GPT now support Multi Knowledge Source, including Text, WebUrl, and "
+"Document(PDF, Markdown, Word, PPT, HTML and CSV). After successfully "
+"uploading a document for translation, the backend system will "
+"automatically read and split and chunk the document, and then import it "
+"into the vector database. Alternatively, you can manually synchronize the"
+" document. You can also click on details to view the specific document "
+"slicing content."
+msgstr "DB-GPT支持多数据源,包括Text纯文本, WebUrl和Document(PDF, Markdown, Word, PPT, HTML and CSV)。上传文档成功后后台会自动将文档内容进行读取,切片,然后导入到向量数据库中,当然你也可以手动进行同步,你也可以点击详情查看具体的文档切片内容"
+
+#: ../../getting_started/application/kbqa/kbqa.md:30
+#: 18becde7bdc34e9cb7017ff7711d1634
+msgid "2.1 Choose Knowledge Type:"
+msgstr "2.1 选择知识类型"
+
+#: ../../getting_started/application/kbqa/kbqa.md:31
+#: 755dfd18812249e591647d233ec9253b
+msgid ""
+msgstr ""
+
+#: ../../getting_started/application/kbqa/kbqa.md:31
+#: 10faa2236fa84dc284f7ce6d68acc43c
+msgid "document"
+msgstr "document"
+
+#: ../../getting_started/application/kbqa/kbqa.md:33
+#: 04449bfa78b14d1cb42c1259f67531d1
+msgid "2.2 Upload Document:"
+msgstr "2.2上传文档"
+
+#: ../../getting_started/application/kbqa/kbqa.md:34
+#: de14e2d7a6c54623bfc8af2fc6e20c62
+msgid ""
+msgstr ""
+
+#: ../../getting_started/application/kbqa/kbqa.md:34
+#: ../../getting_started/application/kbqa/kbqa.md:38
+#: ../../getting_started/application/kbqa/kbqa.md:43
+#: ../../getting_started/application/kbqa/kbqa.md:56
+#: 0e3ab13b9d064b238fba283d6f466051 65ea0b9b43ef4c64897a6e65781129c7
+#: cb65cb968a91492d9526f47e74b179e1 f0ff29911159497fb542942b6deb6972
+msgid "upload"
+msgstr "upload"
+
+#: ../../getting_started/application/kbqa/kbqa.md:37
+#: 77d936d2502f41feb2721da86cc1ebb1
+msgid "3.Chat With Knowledge"
+msgstr "3.知识问答"
+
+#: ../../getting_started/application/kbqa/kbqa.md:38
+#: ae72c3236e564949b142a945f3474425
+msgid ""
+msgstr ""
+
+#: ../../getting_started/application/kbqa/kbqa.md:40
+#: b9d701985d404a9196bb896718f41e36
+msgid "4.Adjust Space arguments"
+msgstr "4.调整知识参数"
+
+#: ../../getting_started/application/kbqa/kbqa.md:41
+#: 6155bbf401ef44ea8d761c83ae5ceb9a
+msgid ""
+"Each knowledge space supports argument customization, including the "
+"relevant arguments for vector retrieval and the arguments for knowledge "
+"question-answering prompts."
+msgstr "每一个知识空间都支持参数自定义, 包括向量召回的相关参数以及知识问答Promp参数"
+
+#: ../../getting_started/application/kbqa/kbqa.md:42
+#: b9ac70b5ee99435a962dcb040b5ba4fc
+msgid "4.1 Embedding"
+msgstr "4.1 Embedding"
+
+#: ../../getting_started/application/kbqa/kbqa.md:43
+#: 0ca2398756924828a904921456b863b5
+msgid "Embedding Argument "
+msgstr "Embedding Argument "
+
+#: ../../getting_started/application/kbqa/kbqa.md:47
+#: e8b3e973e3b940e4bbf3180b7d6057ec
+msgid "Embedding arguments"
+msgstr "Embedding arguments"
+
+#: ../../getting_started/application/kbqa/kbqa.md:48
+#: 1cff4790d546455abeca725ae8b53c0d
+msgid "topk:the top k vectors based on similarity score."
+msgstr "topk:相似性检索出tok条文档"
+
+#: ../../getting_started/application/kbqa/kbqa.md:49
+#: 525aaa02028b4900919526290b6da9ef
+msgid "recall_score:set a threshold score for the retrieval of similar vectors."
+msgstr "recall_score:向量检索相关度衡量指标分数"
+
+#: ../../getting_started/application/kbqa/kbqa.md:50
+#: dead3b559b134e52a3cf38e29b1982e1
+msgid "recall_type:recall type."
+msgstr "recall_type:召回类型"
+
+#: ../../getting_started/application/kbqa/kbqa.md:51
+#: 7c48de3186a14fcf96ee2bc6d715bd4c
+msgid "model:A model used to create vector representations of text or other data."
+msgstr "model:embdding模型"
+
+#: ../../getting_started/application/kbqa/kbqa.md:52
+#: 875ba3d051b847a5b3357a4eef583c0a
+msgid "chunk_size:The size of the data chunks used in processing."
+msgstr "chunk_size:文档切片阈值大小"
+
+#: ../../getting_started/application/kbqa/kbqa.md:53
+#: e43f2bb091594532ba69ed3e3a385cdd
+msgid "chunk_overlap:The amount of overlap between adjacent data chunks."
+msgstr "chunk_overlap:文本块之间的最大重叠量。保留一些重叠可以保持文本块之间的连续性(例如使用滑动窗口)"
+
+#: ../../getting_started/application/kbqa/kbqa.md:55
+#: e21e19688fa042a4b60860ffa6bcf119
+msgid "4.2 Prompt"
+msgstr "4.2 Prompt"
+
+#: ../../getting_started/application/kbqa/kbqa.md:56
+#: 00f0bddcd9174b7a84a25b7fe6d286e9
+msgid "Prompt Argument "
+msgstr "Prompt Argument "
+
+#: ../../getting_started/application/kbqa/kbqa.md:60
+#: d5ff2b76a04949708b3c9050db647927
+msgid "Prompt arguments"
+msgstr "Prompt arguments"
+
+#: ../../getting_started/application/kbqa/kbqa.md:61
+#: 817a8744378546a2b049f44793b4554b
+msgid ""
+"scene:A contextual parameter used to define the setting or environment in"
+" which the prompt is being used."
+msgstr "scene:上下文环境的场景定义"
+
+#: ../../getting_started/application/kbqa/kbqa.md:62
+#: 5330c26a4fc34ff6ba5fea1910cfbdc0
+msgid ""
+"template:A pre-defined structure or format for the prompt, which can help"
+" ensure that the AI system generates responses that are consistent with "
+"the desired style or tone."
+msgstr ""
+"template:预定义的提示结构或格式,可以帮助确保AI系统生成与所期望的风格或语气一致的回复。"
+
+#: ../../getting_started/application/kbqa/kbqa.md:63
+#: a34cd8248e6b44228868c2a02e12466f
+msgid "max_token:The maximum number of tokens or words allowed in a prompt."
+msgstr "max_token: prompt token最大值"
+
+#: ../../getting_started/application/kbqa/kbqa.md:65
+#: 7da49aac293f462b9e3968fa1493dba1
+msgid "5.Change Vector Database"
+msgstr "5.Change Vector Database"
+
+#: ../../getting_started/application/kbqa/kbqa.md:67
+#: 680bfb451eb040e0aa55f8faa12bb75a
+msgid "Vector Store SETTINGS"
+msgstr "Vector Store SETTINGS"
+
+#: ../../getting_started/application/kbqa/kbqa.md:68
+#: 32820ae6807840119a786e92124ad209
+msgid "Chroma"
+msgstr "Chroma"
+
+#: ../../getting_started/application/kbqa/kbqa.md:69
+#: 7573aa05bc914fc2993b2531475b3b99
+msgid "VECTOR_STORE_TYPE=Chroma"
+msgstr "VECTOR_STORE_TYPE=Chroma"
+
+#: ../../getting_started/application/kbqa/kbqa.md:70
+#: cc2c5521ba6e417eb5b3cf96375c2a08
+msgid "MILVUS"
+msgstr "MILVUS"
+
+#: ../../getting_started/application/kbqa/kbqa.md:71
+#: fc5dfa54c8a24b069e2709c309e43faa
+msgid "VECTOR_STORE_TYPE=Milvus"
+msgstr "VECTOR_STORE_TYPE=Milvus"
+
+#: ../../getting_started/application/kbqa/kbqa.md:72
+#: 8661384f0c4a40a0b79fad94bbee1792
+msgid "MILVUS_URL=127.0.0.1"
+msgstr "MILVUS_URL=127.0.0.1"
+
+#: ../../getting_started/application/kbqa/kbqa.md:73
+#: 2035d343d34f47fa9fa84eb841afa2f8
+msgid "MILVUS_PORT=19530"
+msgstr "MILVUS_PORT=19530"
+
+#: ../../getting_started/application/kbqa/kbqa.md:74
+#: e0d8bd8d1dfb425bb50d5c8bf4a134ab
+msgid "MILVUS_USERNAME"
+msgstr "MILVUS_USERNAME"
+
+#: ../../getting_started/application/kbqa/kbqa.md:75
+#: dec547e6b4354c71857fa7c92c08bfa6
+msgid "MILVUS_PASSWORD"
+msgstr "MILVUS_PASSWORD"
+
+#: ../../getting_started/application/kbqa/kbqa.md:76
+#: 9ad3f325f79e49309bd3419f48015e90
+msgid "MILVUS_SECURE="
+msgstr "MILVUS_SECURE="
+
+#: ../../getting_started/application/kbqa/kbqa.md:78
+#: f6e15bf479da496fbcdf7621bad4dda8
+msgid "WEAVIATE"
+msgstr "WEAVIATE"
+
+#: ../../getting_started/application/kbqa/kbqa.md:79
+#: f7838d5de0a9452486adceb3a26369ee
+msgid "WEAVIATE_URL=https://kt-region-m8hcy0wc.weaviate.network"
+msgstr "WEAVIATE_URL=https://kt-region-m8hcy0wc.weaviate.networkc"
+
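+# A consolidated .env sketch of the vector-store settings listed above
+# (values are illustrative; the MILVUS_* entries are read only when
+# VECTOR_STORE_TYPE=Milvus, and the Weaviate URL above is a sample
+# endpoint):
+#
+#   VECTOR_STORE_TYPE=Milvus
+#   MILVUS_URL=127.0.0.1
+#   MILVUS_PORT=19530
+#   MILVUS_USERNAME=
+#   MILVUS_PASSWORD=
+#   MILVUS_SECURE=False
+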
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/faq.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/faq.po
new file mode 100644
index 000000000..ed7fadc10
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/faq.po
@@ -0,0 +1,51 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-16 18:31+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../getting_started/faq.rst:13
+msgid "Deloy"
+msgstr "Deloy"
+
+#: ../../getting_started/faq.rst:2 136e86216b564bcb91709d97ae03013c
+msgid "FAQ"
+msgstr "FAQ"
+
+#: ../../getting_started/faq.rst:3 9350e190f83648248a4e53a4703529a0
+msgid ""
+"DB-GPT product is a Web application that you can chat database, chat "
+"knowledge, text2dashboard."
+msgstr ""
+
+#: ../../getting_started/faq.rst:8 c4d7887a869f48ddb1819a5df9206005
+msgid "deploy"
+msgstr "deploy"
+
+#: ../../getting_started/faq.rst:9 3859638a380043dfa466d6f9c593ff14
+msgid "llm"
+msgstr "llm"
+
+#: ../../getting_started/faq.rst:10 2039dce8389d4f9fb8fb7a8afa393809
+msgid "chatdb"
+msgstr "chatdb"
+
+#: ../../getting_started/faq.rst:11 24a115044f984a71a83dea6000a98de8
+msgid "kbqa"
+msgstr "kbqa"
+
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/faq/chatdb/chatdb_faq.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/faq/chatdb/chatdb_faq.po
new file mode 100644
index 000000000..dbc03b315
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/faq/chatdb/chatdb_faq.po
@@ -0,0 +1,61 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-16 23:15+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../getting_started/faq/chatdb/chatdb_faq.md:1
+#: ffd1abb6b8f34e53a8ed83ede845b141
+msgid "Chat DB FAQ"
+msgstr ""
+
+#: ../../getting_started/faq/chatdb/chatdb_faq.md:3
+#: 2467f6603ab341bbb1ce17f75dc06e5e
+msgid "Q1: What difference between ChatData and ChatDB"
+msgstr ""
+
+#: ../../getting_started/faq/chatdb/chatdb_faq.md:4
+#: 6e47803e92064e379a1bb74e2b3d347a
+msgid ""
+"ChatData generates SQL from natural language and executes it. ChatDB "
+"involves conversing with metadata from the Database, including metadata "
+"about databases, tables, and fields."
+msgstr ""
+
+#: ../../getting_started/faq/chatdb/chatdb_faq.md:6
+#: fa3eef15697f43db963f6c875e85323b
+msgid "Q2: The suitable llm model currently supported for text-to-SQL is?"
+msgstr ""
+
+#: ../../getting_started/faq/chatdb/chatdb_faq.md:7
+#: 47f36a0a48c045269bcf790873d55f8c
+msgid "Now vicunna-13b-1.5 and llama2-70b is more suitable for text-to-SQL."
+msgstr ""
+
+#: ../../getting_started/faq/chatdb/chatdb_faq.md:9
+#: 9f5ef9a05fac4eb3b27875171ec4e763
+msgid "Q3: How to fine-tune Text-to-SQL in DB-GPT"
+msgstr ""
+
+#: ../../getting_started/faq/chatdb/chatdb_faq.md:10
+#: c9b6f6e969e04805a59d9ccabc03c0b8
+msgid ""
+"there is another github project for Text-to-SQL fine-tune "
+"(https://github.com/eosphoros-ai/DB-GPT-Hub)"
+msgstr ""
+
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/faq/deploy/deploy_faq.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/faq/deploy/deploy_faq.po
new file mode 100644
index 000000000..6aa6fd5f9
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/faq/deploy/deploy_faq.po
@@ -0,0 +1,86 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-16 23:15+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../getting_started/faq/deploy/deploy_faq.md:1
+#: 6aa73265a43d4e6ea287e6265ef4efe5
+msgid "Installation FAQ"
+msgstr "Installation FAQ"
+
+#: ../../getting_started/faq/deploy/deploy_faq.md:5
+#: 4efe241d5e724db5ab22548cfb88f8b6
+msgid ""
+"Q1: execute `pip install -r requirements.txt` error, found some package "
+"cannot find correct version."
+msgstr "Q1: execute `pip install -r requirements.txt` error, found some package "
+"cannot find correct version."
+
+#: ../../getting_started/faq/deploy/deploy_faq.md:6
+#: e837e10bdcfa49cebb71b32eece4831b
+msgid "change the pip source."
+msgstr "替换pip源."
+
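+# For example, with a commonly used mirror (any PyPI mirror works; the URL
+# below is illustrative):
+#
+#   pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
+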
+#: ../../getting_started/faq/deploy/deploy_faq.md:13
+#: ../../getting_started/faq/deploy/deploy_faq.md:20
+#: 84310ec0c54e4a02949da2e0b35c8c7d e8a7a8b38b7849b88c14fb6d647f9b63
+msgid "or"
+msgstr "或者"
+
+#: ../../getting_started/faq/deploy/deploy_faq.md:27
+#: 87797a5dafef47c8884f6f1be9a1fbd2
+msgid ""
+"Q2: sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) unable to"
+" open database file"
+msgstr "Q2: sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) unable to"
+" open database file"
+
+#: ../../getting_started/faq/deploy/deploy_faq.md:29
+#: bc96b22e201c47ec999c8d98227a956d
+msgid "make sure you pull latest code or create directory with mkdir pilot/data"
+msgstr "make sure you pull latest code or create directory with mkdir pilot/data"
+
+#: ../../getting_started/faq/deploy/deploy_faq.md:31
+#: d7938f1c70a64efa9948080a6d416964
+msgid "Q3: The model keeps getting killed."
+msgstr "Q3: The model keeps getting killed."
+
+#: ../../getting_started/faq/deploy/deploy_faq.md:32
+#: b072386586a64b2289c0fcdf6857b2b7
+msgid ""
+"your GPU VRAM size is not enough, try replace your hardware or replace "
+"other llms."
+msgstr "GPU显存不够, 增加显存或者换一个显存小的模型"
+
+#~ msgid ""
+#~ "Q2: When use Mysql, Access denied "
+#~ "for user 'root@localhost'(using password :NO)"
+#~ msgstr ""
+
+#~ msgid "A3: make sure you have installed mysql instance in right way"
+#~ msgstr ""
+
+#~ msgid "Docker:"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Normal: [download mysql "
+#~ "instance](https://dev.mysql.com/downloads/mysql/)"
+#~ msgstr ""
+
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/faq/kbqa/kbqa_faq.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/faq/kbqa/kbqa_faq.po
new file mode 100644
index 000000000..1b352e972
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/faq/kbqa/kbqa_faq.po
@@ -0,0 +1,98 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-16 18:31+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../getting_started/faq/kbqa/kbqa_faq.md:1
+#: bce4fd8751dd49bb8ab89a651094cfe1
+msgid "KBQA FAQ"
+msgstr ""
+
+#: ../../getting_started/faq/kbqa/kbqa_faq.md:4
+#: c9714f70a03f422fae7bdb2b3b3c8be5
+msgid "Q1: text2vec-large-chinese not found"
+msgstr ""
+
+#: ../../getting_started/faq/kbqa/kbqa_faq.md:6
+#: 1c56b9081adb46b29063947659e67083
+msgid ""
+"make sure you have download text2vec-large-chinese embedding model in "
+"right way"
+msgstr ""
+
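+# A sketch of one way to download it (assuming git-lfs is installed and
+# that the model is the GanymedeNil/text2vec-large-chinese repository on
+# Hugging Face, which the project uses by default):
+#
+#   cd models
+#   git lfs install
+#   git clone https://huggingface.co/GanymedeNil/text2vec-large-chinese
+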
+#: ../../getting_started/faq/kbqa/kbqa_faq.md:18
+#: 39763bdbc6824675b293f0f219fc05bb
+msgid "Q2:How to change Vector DB Type in DB-GPT."
+msgstr ""
+
+#: ../../getting_started/faq/kbqa/kbqa_faq.md:20
+#: 225cc6e5b6944ce3b675845aa93afb66
+msgid "Update .env file and set VECTOR_STORE_TYPE."
+msgstr ""
+
+#: ../../getting_started/faq/kbqa/kbqa_faq.md:22
+#: 5ad12db4aed44f4890911b5aea9529ce
+msgid ""
+"DB-GPT currently support Chroma(Default), Milvus(>2.1), Weaviate vector "
+"database. If you want to change vector db, Update your .env, set your "
+"vector store type, VECTOR_STORE_TYPE=Chroma (now only support Chroma and "
+"Milvus(>2.1), if you set Milvus, please set MILVUS_URL and MILVUS_PORT) "
+"If you want to support more vector db, you can integrate yourself.[how to"
+" integrate](https://db-gpt.readthedocs.io/en/latest/modules/vector.html)"
+msgstr ""
+
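+# A minimal .env sketch of the Milvus switch described above (host and
+# port are illustrative defaults):
+#
+#   VECTOR_STORE_TYPE=Milvus
+#   MILVUS_URL=127.0.0.1
+#   MILVUS_PORT=19530
+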
+#: ../../getting_started/faq/kbqa/kbqa_faq.md:38
+#: f8dd19ff86244cc7be91b7ab9190baeb
+msgid "Q3:When I use vicuna-13b, found some illegal character like this."
+msgstr ""
+
+#: ../../getting_started/faq/kbqa/kbqa_faq.md:43
+#: ec4a69476d4e4e4d92619ade829c2b39
+msgid ""
+"Set KNOWLEDGE_SEARCH_TOP_SIZE smaller or set KNOWLEDGE_CHUNK_SIZE "
+"smaller, and reboot server."
+msgstr ""
+
+#: ../../getting_started/faq/kbqa/kbqa_faq.md:45
+#: 5b039a860c0b4cb088d042467ad2a49c
+msgid ""
+"Q4:space add error (pymysql.err.OperationalError) (1054, \"Unknown column"
+" 'knowledge_space.context' in 'field list'\")"
+msgstr ""
+
+#: ../../getting_started/faq/kbqa/kbqa_faq.md:47
+#: bafac5e248894263a77ab688922df520
+msgid "1.shutdown dbgpt_server(ctrl c)"
+msgstr ""
+
+#: ../../getting_started/faq/kbqa/kbqa_faq.md:49
+#: 8ff5ca901f684e8ebe951f8c053ab925
+msgid "2.add column context for table knowledge_space"
+msgstr ""
+
+#: ../../getting_started/faq/kbqa/kbqa_faq.md:53
+#: ea9f945678aa426894f0a12013f8fe5c
+msgid "3.execute sql ddl"
+msgstr ""
+
+#: ../../getting_started/faq/kbqa/kbqa_faq.md:58
+#: 1a7941f2f1e94d359095e92b470ee02c
+msgid "4.restart dbgpt serve"
+msgstr ""
+
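+# A sketch of the DDL implied by steps 2 and 3 (the exact column type is
+# an assumption; TEXT is used here):
+#
+#   ALTER TABLE knowledge_space ADD COLUMN context TEXT NULL;
+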
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/faq/llm/llm_faq.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/faq/llm/llm_faq.po
new file mode 100644
index 000000000..260d2dd09
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/faq/llm/llm_faq.po
@@ -0,0 +1,92 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-16 18:31+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../getting_started/faq/llm/llm_faq.md:1 f79c82f385904385b08618436e600d9f
+msgid "LLM USE FAQ"
+msgstr ""
+
+#: ../../getting_started/faq/llm/llm_faq.md:3 1fc802fa69224062b02403bc35084c18
+msgid "Q1:how to use openai chatgpt service"
+msgstr ""
+
+#: ../../getting_started/faq/llm/llm_faq.md:4 9094902d148a4cc99fe72aa0e41062ae
+msgid "change your LLM_MODEL"
+msgstr ""
+
+#: ../../getting_started/faq/llm/llm_faq.md:9 07073eb8d9eb4988a3b035666c63d3fb
+msgid "set your OPENAPI KEY"
+msgstr ""
+
+#: ../../getting_started/faq/llm/llm_faq.md:15 a71bb0d1181e47368a286b5694a00056
+msgid "make sure your openapi API_KEY is available"
+msgstr ""
+
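+# A minimal .env sketch for proxy mode (the key is a placeholder; the
+# PROXY_* names follow the project's .env.template, and the URL assumes
+# the official OpenAI endpoint):
+#
+#   LLM_MODEL=proxyllm
+#   PROXY_API_KEY=sk-xxx
+#   PROXY_SERVER_URL=https://api.openai.com/v1/chat/completions
+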
+#: ../../getting_started/faq/llm/llm_faq.md:17 789b003864824970923bac474a9ab0cd
+msgid "Q2 how to use MultiGPUs"
+msgstr ""
+
+#: ../../getting_started/faq/llm/llm_faq.md:18 4be3dd71a8654202a210bcb11c50cc79
+msgid ""
+"DB-GPT will use all available gpu by default. And you can modify the "
+"setting `CUDA_VISIBLE_DEVICES=0,1` in `.env` file to use the specific gpu"
+" IDs."
+msgstr ""
+
+#: ../../getting_started/faq/llm/llm_faq.md:20 3a00c5fff666451cacda7f9af37564b9
+msgid ""
+"Optionally, you can also specify the gpu ID to use before the starting "
+"command, as shown below:"
+msgstr ""
+
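+# For example (GPU IDs are illustrative; the dbgpt_server.py path assumes
+# the project layout described in the deploy docs):
+#
+#   # in .env: restrict DB-GPT to GPUs 0 and 1
+#   CUDA_VISIBLE_DEVICES=0,1
+#   # or per-run, before the start command:
+#   CUDA_VISIBLE_DEVICES=0 python pilot/server/dbgpt_server.py
+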
+#: ../../getting_started/faq/llm/llm_faq.md:30 7fef386f3a3443569042e7d8b9a3ff15
+msgid ""
+"You can modify the setting `MAX_GPU_MEMORY=xxGib` in `.env` file to "
+"configure the maximum memory used by each GPU."
+msgstr ""
+
+#: ../../getting_started/faq/llm/llm_faq.md:32 d75d440da8ab49a3944c3a456db25bee
+msgid "Q3 Not Enough Memory"
+msgstr ""
+
+#: ../../getting_started/faq/llm/llm_faq.md:34 2a2cd59382e149ffb623cb3d42754dca
+msgid "DB-GPT supported 8-bit quantization and 4-bit quantization."
+msgstr ""
+
+#: ../../getting_started/faq/llm/llm_faq.md:36 755131812baa4f5a99b706849459e10a
+msgid ""
+"You can modify the setting `QUANTIZE_8bit=True` or `QUANTIZE_4bit=True` "
+"in `.env` file to use quantization(8-bit quantization is enabled by "
+"default)."
+msgstr ""
+
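+# A sketch of the corresponding .env toggles (enable at most one; 8-bit is
+# the default per the note above):
+#
+#   QUANTIZE_8bit=True
+#   # QUANTIZE_4bit=True
+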
+#: ../../getting_started/faq/llm/llm_faq.md:38 b85424bc11134af985f687d8ee8d2c9f
+msgid ""
+"Llama-2-70b with 8-bit quantization can run with 80 GB of VRAM, and 4-bit"
+" quantization can run with 48 GB of VRAM."
+msgstr ""
+
+#: ../../getting_started/faq/llm/llm_faq.md:40 b6e4db679636492c9e4170a33fd6f638
+msgid ""
+"Note: you need to install the latest dependencies according to "
+"[requirements.txt](https://github.com/eosphoros-ai/DB-"
+"GPT/blob/main/requirements.txt)."
+msgstr ""
+
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/getting_started.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/getting_started.po
index a4151e29e..f7bd2cfff 100644
--- a/docs/locales/zh_CN/LC_MESSAGES/getting_started/getting_started.po
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/getting_started.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: DB-GPT 0.3.0\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-08-10 16:38+0800\n"
+"POT-Creation-Date: 2023-08-16 18:31+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language: zh_CN\n"
@@ -19,29 +19,29 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.12.1\n"
-#: ../../getting_started/getting_started.md:1 b5ce6e0075114e669c78cda63f22dfe6
+#: ../../getting_started/getting_started.md:1 70e40ad608d54bcfae6faf0437c09b6f
msgid "Quickstart Guide"
msgstr "使用指南"
-#: ../../getting_started/getting_started.md:3 caf6a35c0ddc43199750b2faae2bf95d
+#: ../../getting_started/getting_started.md:3 c22ff099d6e940f7938dcea0e2265f11
msgid ""
"This tutorial gives you a quick walkthrough about use DB-GPT with you "
"environment and data."
msgstr "本教程为您提供了关于如何使用DB-GPT的使用指南。"
-#: ../../getting_started/getting_started.md:5 338aae168e63461f84c8233c4cbf4bcc
+#: ../../getting_started/getting_started.md:5 dc717e76b3194a85ac5b9e8a4479b197
msgid "Installation"
msgstr "安装"
-#: ../../getting_started/getting_started.md:7 62519550906f4fdb99aad61502e7a5e6
+#: ../../getting_started/getting_started.md:7 a1d30c3d01b94310b89fae16ac581157
msgid "To get started, install DB-GPT with the following steps."
msgstr "请按照以下步骤安装DB-GPT"
-#: ../../getting_started/getting_started.md:9 f9072092ebf24ebc893604ca86116cd3
+#: ../../getting_started/getting_started.md:9 bab013745cb24538ac568b97045b72cc
msgid "1. Hardware Requirements"
msgstr "1. 硬件要求"
-#: ../../getting_started/getting_started.md:10 071c54f9a38e4ef5bb5dcfbf7d4a6004
+#: ../../getting_started/getting_started.md:10 7dd84870db394338a5c7e63b171207e0
msgid ""
"As our project has the ability to achieve ChatGPT performance of over "
"85%, there are certain hardware requirements. However, overall, the "
@@ -49,86 +49,76 @@ msgid ""
"specific hardware requirements for deployment are as follows:"
msgstr "由于我们的项目有能力达到85%以上的ChatGPT性能,所以对硬件有一定的要求。但总体来说,我们在消费级的显卡上即可完成项目的部署使用,具体部署的硬件说明如下:"
-#: ../../getting_started/getting_started.md 4757df7812c64eb4b511aa3b8950c899
+#: ../../getting_started/getting_started.md 055d07f830c54303a0b5596601c58870
msgid "GPU"
msgstr "GPU"
#: ../../getting_started/getting_started.md
-#: ../../getting_started/getting_started.md:60 8f316fbd032f474b88d823978d134328
-#: cb3d34d1ed074e1187b60c6f74cf1468
+#: ../../getting_started/getting_started.md:50 7323cee42940438b8a0752d3c2355e59
+#: e3fc2c10f81b4fe2ac0bfd9fe78feed9
msgid "VRAM Size"
msgstr "显存大小"
-#: ../../getting_started/getting_started.md ac41e6ccf820424b9c497156a317287d
+#: ../../getting_started/getting_started.md 68831daf63f14dcd92088dd6c866f110
msgid "Performance"
msgstr "显存大小"
-#: ../../getting_started/getting_started.md 3ca06ce116754dde958ba21beaedac6f
+#: ../../getting_started/getting_started.md 8a7197a5e92c40a9ba160b43656983d2
msgid "RTX 4090"
msgstr "RTX 4090"
-#: ../../getting_started/getting_started.md 54e3410cc9a241098ae4412356a89f02
-#: e3f31b30c4c84b45bc63b0ea193d0ed1
+#: ../../getting_started/getting_started.md 69b2bfdec17e43fbb57f47e3e10a5f5a
+#: 81d0c2444bbb4a2fb834a628689e4b68
msgid "24 GB"
msgstr "24 GB"
-#: ../../getting_started/getting_started.md ff0c37e041a848899b1c3a63bab43404
+#: ../../getting_started/getting_started.md 49b65b0eaffb41d78a1227bcc3e836e0
msgid "Smooth conversation inference"
msgstr "可以流畅的进行对话推理,无卡顿"
-#: ../../getting_started/getting_started.md 09120e60cfc640e182e8069a372038cc
+#: ../../getting_started/getting_started.md 527def44c8b24e4bbe589d110bd43e91
msgid "RTX 3090"
msgstr "RTX 3090"
-#: ../../getting_started/getting_started.md 0545dfec3b6d47e1bffc87301a0f05b3
+#: ../../getting_started/getting_started.md ad9666e0521c4ef0ad19c2e224daecda
msgid "Smooth conversation inference, better than V100"
msgstr "可以流畅进行对话推理,有卡顿感,但好于V100"
-#: ../../getting_started/getting_started.md d41078c0561f47a981a488eb4b9c7a54
+#: ../../getting_started/getting_started.md c090521969f042398236f6d04b017295
msgid "V100"
msgstr "V100"
-#: ../../getting_started/getting_started.md 6aefb19420f4452999544a0da646f067
+#: ../../getting_started/getting_started.md 60605e0de3fc494cbb7199cef8f831ad
msgid "16 GB"
msgstr "16 GB"
-#: ../../getting_started/getting_started.md 8e06966b88234f4898729750d724e272
+#: ../../getting_started/getting_started.md b82747aee44e444984565aab0faa2a64
msgid "Conversation inference possible, noticeable stutter"
msgstr "可以进行对话推理,有明显卡顿"
-#: ../../getting_started/getting_started.md:18 113957ae638e4799a47e58a2ca259ce8
+#: ../../getting_started/getting_started.md:18 b9df8ec7b2c34233b28db81f3c90f8c7
msgid "2. Install"
msgstr "2. 安装"
-#: ../../getting_started/getting_started.md:20 e068b950171f4c48a9e125ece31f75b9
+#: ../../getting_started/getting_started.md:20 bd0f0159a49b4493ab387e124fef15fa
#, fuzzy
msgid ""
-"1.This project relies on a local MySQL database service, which you need "
-"to install locally. We recommend using Docker for installation."
-msgstr "本项目依赖一个本地的 MySQL 数据库服务,你需要本地安装,推荐直接使用 Docker 安装。"
-
-#: ../../getting_started/getting_started.md:24 f115b61438ad403aaaae0fc4970a982b
-msgid "prepare server sql script"
-msgstr "准备db-gpt server sql脚本"
-
-#: ../../getting_started/getting_started.md:29 c5f707269582464e92e5fd57365f5a51
-msgid ""
"We use [Chroma embedding database](https://github.com/chroma-core/chroma)"
-" as the default for our vector database, so there is no need for special "
-"installation. If you choose to connect to other databases, you can follow"
-" our tutorial for installation and configuration. For the entire "
-"installation process of DB-GPT, we use the miniconda3 virtual "
-"environment. Create a virtual environment and install the Python "
-"dependencies."
+" as the default for our vector database and use SQLite as the default for"
+" our database, so there is no need for special installation. If you "
+"choose to connect to other databases, you can follow our tutorial for "
+"installation and configuration. For the entire installation process of "
+"DB-GPT, we use the miniconda3 virtual environment. Create a virtual "
+"environment and install the Python dependencies."
msgstr ""
"向量数据库我们默认使用的是Chroma内存数据库,所以无需特殊安装,如果有需要连接其他的同学,可以按照我们的教程进行安装配置。整个DB-"
"GPT的安装过程,我们使用的是miniconda3的虚拟环境。创建虚拟环境,并安装python依赖包"
-#: ../../getting_started/getting_started.md:38 377381c76f32409298f4184f23653205
+#: ../../getting_started/getting_started.md:29 bc8f2ee7894b4cad858d5e2cfbee10a9
msgid "Before use DB-GPT Knowledge Management"
msgstr "使用知识库管理功能之前"
-#: ../../getting_started/getting_started.md:44 62e7e96a18ca495595b3dfab97ca387e
+#: ../../getting_started/getting_started.md:34 03e1652f724946ec8d01a97811883b5f
msgid ""
"Once the environment is installed, we have to create a new folder "
"\"models\" in the DB-GPT project, and then we can put all the models "
@@ -137,40 +127,40 @@ msgstr ""
"环境安装完成后,我们必须在DB-"
"GPT项目中创建一个新文件夹\"models\",然后我们可以把从huggingface下载的所有模型放到这个目录下。"
-#: ../../getting_started/getting_started.md:47 e34d946ab06e47dba81cf50348987594
+#: ../../getting_started/getting_started.md:37 e26a827f1d5b4d2cbf92b42bce461082
#, fuzzy
msgid "Notice make sure you have install git-lfs"
msgstr "确保你已经安装了git-lfs"
-#: ../../getting_started/getting_started.md:58 07149cfac5314daabbd37dc1cef82905
+#: ../../getting_started/getting_started.md:48 af91349fe332472ca7a2e592a0c582f7
msgid ""
"The model files are large and will take a long time to download. During "
"the download, let's configure the .env file, which needs to be copied and"
" created from the .env.template"
msgstr "模型文件很大,需要很长时间才能下载。在下载过程中,让我们配置.env文件,它需要从。env.template中复制和创建。"
-#: ../../getting_started/getting_started.md:61 b8da240286e04573a2d731d228fbdb1e
+#: ../../getting_started/getting_started.md:51 84d2d93130034c2f94f4d0bebcc2b0d2
msgid "cp .env.template .env"
msgstr "cp .env.template .env"
-#: ../../getting_started/getting_started.md:64 c2b05910bba14e5b9b49f8ad8c436412
+#: ../../getting_started/getting_started.md:54 b3adfec002354f00baba9c43a1a3e381
msgid ""
"You can configure basic parameters in the .env file, for example setting "
"LLM_MODEL to the model to be used"
msgstr "您可以在.env文件中配置基本参数,例如将LLM_MODEL设置为要使用的模型。"
-#: ../../getting_started/getting_started.md:66 4af57c4cbf4744a8af83083d051098b3
+#: ../../getting_started/getting_started.md:56 1b11268133c0440cb3c26981f5d0c1fe
msgid ""
"([Vicuna-v1.5](https://huggingface.co/lmsys/vicuna-13b-v1.5) based on "
"llama-2 has been released, we recommend you set `LLM_MODEL=vicuna-"
"13b-v1.5` to try this model)"
msgstr ""
-#: ../../getting_started/getting_started.md:68 62287c3828ba48c4871dd831af7d819a
+#: ../../getting_started/getting_started.md:58 511d47f08bab42e0bd3f34df58c5a822
msgid "3. Run"
msgstr "3. 运行"
-#: ../../getting_started/getting_started.md:69 7a04f6c514c34d87a03b105de8a03f0b
+#: ../../getting_started/getting_started.md:59 eba1922d852b4d548370884987237c09
msgid ""
"You can refer to this document to obtain the Vicuna weights: "
"[Vicuna](https://github.com/lm-sys/FastChat/blob/main/README.md#model-"
@@ -179,7 +169,7 @@ msgstr ""
"关于基础模型, 可以根据[Vicuna](https://github.com/lm-"
"sys/FastChat/blob/main/README.md#model-weights) 合成教程进行合成。"
-#: ../../getting_started/getting_started.md:71 72c9ad9d95c34b42807ed0a1e46adb73
+#: ../../getting_started/getting_started.md:61 bb51e0506f4e44c3a4daf1d0fbd5a4ef
msgid ""
"If you have difficulty with this step, you can also directly use the "
"model from [this link](https://huggingface.co/Tribbiani/vicuna-7b) as a "
@@ -188,7 +178,7 @@ msgstr ""
"如果此步有困难的同学,也可以直接使用[此链接](https://huggingface.co/Tribbiani/vicuna-"
"7b)上的模型进行替代。"
-#: ../../getting_started/getting_started.md:73 889cf1a4dbd2422580fe59d9f3eab6bf
+#: ../../getting_started/getting_started.md:63 0d532891bd754e78a6390fcc97d0f59d
msgid ""
"set .env configuration set your vector store type, "
"eg:VECTOR_STORE_TYPE=Chroma, now we support Chroma and Milvus(version > "
@@ -197,21 +187,21 @@ msgstr ""
"在.env文件设置向量数据库环境变量,eg:VECTOR_STORE_TYPE=Chroma, 目前我们支持了 Chroma and "
"Milvus(version >2.1) "
-#: ../../getting_started/getting_started.md:76 28a2ec60d35c43edab71bac103ccb0e0
+#: ../../getting_started/getting_started.md:66 8beb3199650e4d2b8f567d0de20e2cb6
#, fuzzy
msgid "1.Run db-gpt server"
msgstr "运行模型服务"
-#: ../../getting_started/getting_started.md:81
-#: ../../getting_started/getting_started.md:140
-#: ../../getting_started/getting_started.md:194
-#: 04ed6e8729604ee59edd16995301f0d5 225139430ee947cb8a191f1641f581b3
-#: b33473644c124fdf9fe9912307cb6a9d
+#: ../../getting_started/getting_started.md:71
+#: ../../getting_started/getting_started.md:131
+#: ../../getting_started/getting_started.md:200
+#: 41984cf5289f4cefbead6b84fb011e92 5c3eb807af36425a8b0620705fc9c4e9
+#: 7c420ffa305d4eb1a052b25101e80011
#, fuzzy
msgid "Open http://localhost:5000 with your browser to see the product."
msgstr "打开浏览器访问http://localhost:5000"
-#: ../../getting_started/getting_started.md:83 8068aef8a84e46b59caf59de32ad6e2b
+#: ../../getting_started/getting_started.md:73 802ca13fbe0542a19258d40c08da509e
msgid ""
"If you want to access an external LLM service, you need to 1.set the "
"variables LLM_MODEL=YOUR_MODEL_NAME "
@@ -219,7 +209,7 @@ msgid ""
"file. 2.execute dbgpt_server.py in light mode"
msgstr "如果你想访问外部的大模型服务,1.需要在.env文件设置模型名和外部模型服务地址。2.使用light模式启动服务"
-#: ../../getting_started/getting_started.md:86 4a11e53950ac489a891ca8dc67c26ca0
+#: ../../getting_started/getting_started.md:76 7808d30d1642422fbed42f1929e37343
#, fuzzy
msgid ""
"If you want to learn about dbgpt-webui, read https://github./csunny/DB-"
@@ -228,268 +218,304 @@ msgstr ""
"如果你想了解DB-GPT前端服务,访问https://github.com/csunny/DB-GPT/tree/new-page-"
"framework/datacenter"
-#: ../../getting_started/getting_started.md:92 6a2b09ea9ba64c879d3e7ca154d95095
+#: ../../getting_started/getting_started.md:82 094ed036720c406c904c8a9ca6e7b8ae
msgid "4. Docker (Experimental)"
msgstr "4. Docker (Experimental)"
-#: ../../getting_started/getting_started.md:94 5cef89f829ce4da299f9b430fd52c446
+#: ../../getting_started/getting_started.md:84 9428d68d1eef42c28bf33d9bd7022c85
msgid "4.1 Building Docker image"
msgstr "4.1 Building Docker image"
-#: ../../getting_started/getting_started.md:100
-#: b3999a9169694b209bd4848c6458ff41
+#: ../../getting_started/getting_started.md:90 16bfb175a85c455e847c3cafe2d94ce1
msgid "Review images by listing them:"
msgstr "Review images by listing them:"
-#: ../../getting_started/getting_started.md:106
-#: ../../getting_started/getting_started.md:180
-#: 10486e7f872f4c13a384b75e564e37fe 5b142d0a103848fa82fafd40a557f998
+#: ../../getting_started/getting_started.md:96
+#: ../../getting_started/getting_started.md:186
+#: 46438a8f73f344ca84f5c77c8fe7434b eaca8cbd8bdd4c03863ef5ea218218b1
msgid "Output should look something like the following:"
msgstr "Output should look something like the following:"
-#: ../../getting_started/getting_started.md:113
-#: 83dd5525d0f7481fac57e5898605e7f3
+#: ../../getting_started/getting_started.md:103
+#: 47d72ad65ce0455ca0d85580eb0db619
+msgid ""
+"`eosphorosai/dbgpt` is the base image, which contains the project's base "
+"dependencies and a sqlite database. `eosphorosai/dbgpt-allinone` build "
+"from `eosphorosai/dbgpt`, which contains a mysql database."
+msgstr ""
+
+#: ../../getting_started/getting_started.md:105
+#: 980f39c088564de288cab120a4004d53
msgid "You can pass some parameters to docker/build_all_images.sh."
msgstr "You can pass some parameters to docker/build_all_images.sh."
-#: ../../getting_started/getting_started.md:121
-#: d5b916efd7b84972b68a4aa50ebc047c
+#: ../../getting_started/getting_started.md:113
+#: fad33767622f4b639881c4124bdc92cc
msgid ""
"You can execute the command `bash docker/build_all_images.sh --help` to "
"see more usage."
-msgstr "You can execute the command `bash docker/build_all_images.sh --help` to "
+msgstr ""
+"You can execute the command `bash docker/build_all_images.sh --help` to "
"see more usage."
-#: ../../getting_started/getting_started.md:123
-#: 0d01cafcfde04c92b4115d804dd9e912
+#: ../../getting_started/getting_started.md:115
+#: 878bb7ecfbe74a96bf42adef951bec44
msgid "4.2. Run all in one docker container"
msgstr "4.2. Run all in one docker container"
-#: ../../getting_started/getting_started.md:125
-#: 90d49baea1d34f01a0db696fd567bfba
-msgid "**Run with local model**"
+#: ../../getting_started/getting_started.md:117
+#: 2e8279c88e814e8db5dc16d4b05c62e1
+#, fuzzy
+msgid "**Run with local model and SQLite database**"
msgstr "**Run with local model**"
-#: ../../getting_started/getting_started.md:143
-#: 422a237241014cac9917832ac053aed5
+#: ../../getting_started/getting_started.md:134
+#: fb87d01dc56c48ac937b537dcc29627c
msgid ""
"`-e LLM_MODEL=vicuna-13b`, means we use vicuna-13b as llm model, see "
"/pilot/configs/model_config.LLM_MODEL_CONFIG"
-msgstr "`-e LLM_MODEL=vicuna-13b`, means we use vicuna-13b as llm model, see /pilot/configs/model_config.LLM_MODEL_CONFIG"
+msgstr ""
+"`-e LLM_MODEL=vicuna-13b`, means we use vicuna-13b as llm model, see "
+"/pilot/configs/model_config.LLM_MODEL_CONFIG"
-#: ../../getting_started/getting_started.md:144
-#: af5fb303a0c64567b9c82a3ce3b00aaa
+#: ../../getting_started/getting_started.md:135
+#: b2517db39b0b473ab4f302a925d86879
msgid ""
"`-v /data/models:/app/models`, means we mount the local model file "
"directory `/data/models` to the docker container directory `/app/models`,"
" please replace it with your model file directory."
-msgstr "`-v /data/models:/app/models`, means we mount the local model file "
+msgstr ""
+"`-v /data/models:/app/models`, means we mount the local model file "
"directory `/data/models` to the docker container directory `/app/models`,"
" please replace it with your model file directory."
-#: ../../getting_started/getting_started.md:146
-#: ../../getting_started/getting_started.md:188
-#: 05652e117c3f4d3aaea0269f980cf975 3844ae7eeec54804b05bc8aea01c3e36
+#: ../../getting_started/getting_started.md:137
+#: ../../getting_started/getting_started.md:194
+#: 140b925a0e5b4daeb6880fef503d6aac 67636495385943e3b81af74b5433c74b
msgid "You can see log with command:"
msgstr "You can see log with command:"
-#: ../../getting_started/getting_started.md:152
-#: 665aa47936aa40458fe33dd73c76513d
+#: ../../getting_started/getting_started.md:143
+#: 4ba6f304021746f6aa63fe196e752092
+#, fuzzy
+msgid "**Run with local model and MySQL database**"
+msgstr "**Run with local model**"
+
+#: ../../getting_started/getting_started.md:158
+#: 4f2fc76b150b4f248356418cf0183c83
msgid "**Run with openai interface**"
msgstr "**Run with openai interface**"
-#: ../../getting_started/getting_started.md:171
-#: 98d627b0f7f149dfb9c7365e3ba082a1
+#: ../../getting_started/getting_started.md:177
+#: ed6edd5016444324a98200f054c7d2a5
msgid ""
"`-e LLM_MODEL=proxyllm`, means we use proxy llm(openai interface, "
"fastchat interface...)"
-msgstr "`-e LLM_MODEL=proxyllm`, means we use proxy llm(openai interface, "
+msgstr ""
+"`-e LLM_MODEL=proxyllm`, means we use proxy llm(openai interface, "
"fastchat interface...)"
-#: ../../getting_started/getting_started.md:172
-#: b293dedb88034aaab9a3b740e82d7adc
+#: ../../getting_started/getting_started.md:178
+#: 7bc17c46292f43c488dcec172c835d49
msgid ""
"`-v /data/models/text2vec-large-chinese:/app/models/text2vec-large-"
"chinese`, means we mount the local text2vec model to the docker "
"container."
-msgstr "`-v /data/models/text2vec-large-chinese:/app/models/text2vec-large-"
+msgstr ""
+"`-v /data/models/text2vec-large-chinese:/app/models/text2vec-large-"
"chinese`, means we mount the local text2vec model to the docker "
"container."
-#: ../../getting_started/getting_started.md:174
-#: e48085324a324749af8a21bad8e7ca0e
+#: ../../getting_started/getting_started.md:180
+#: 257860df855843eb85383478d2e8baca
msgid "4.3. Run with docker compose"
msgstr ""
-#: ../../getting_started/getting_started.md:196
-#: a356d9ae292f4dc3913c5999e561d533
+#: ../../getting_started/getting_started.md:202
+#: 143be856423e412c8605488c0e50d2dc
msgid ""
"You can open docker-compose.yml in the project root directory to see more"
" details."
-msgstr "You can open docker-compose.yml in the project root directory to see more"
+msgstr ""
+"You can open docker-compose.yml in the project root directory to see more"
" details."
-#: ../../getting_started/getting_started.md:199
-#: 0c644409bce34bf4b898c26e24f7a1ac
+#: ../../getting_started/getting_started.md:205
+#: 0dd3dc0d7a7f4565a041ab5855808fd3
msgid "5. Multiple GPUs"
msgstr "5. Multiple GPUs"
-#: ../../getting_started/getting_started.md:201
-#: af435b17984341e5abfd2d9b64fbde20
+#: ../../getting_started/getting_started.md:207
+#: 65b1d32a91634c069a80269c44728727
msgid ""
"DB-GPT will use all available gpu by default. And you can modify the "
"setting `CUDA_VISIBLE_DEVICES=0,1` in `.env` file to use the specific gpu"
" IDs."
-msgstr "DB-GPT will use all available gpu by default. And you can modify the "
+msgstr ""
+"DB-GPT will use all available gpu by default. And you can modify the "
"setting `CUDA_VISIBLE_DEVICES=0,1` in `.env` file to use the specific gpu"
" IDs."
-#: ../../getting_started/getting_started.md:203
-#: aac3e9cec74344fd963e2136a26384af
+#: ../../getting_started/getting_started.md:209
+#: 1ef71b0a976640319211a245943d99d5
msgid ""
"Optionally, you can also specify the gpu ID to use before the starting "
"command, as shown below:"
-msgstr "Optionally, you can also specify the gpu ID to use before the starting "
+msgstr ""
+"Optionally, you can also specify the gpu ID to use before the starting "
"command, as shown below:"
-#: ../../getting_started/getting_started.md:213
-#: 3351daf4f5a04b76bae2e9cd8740549e
+#: ../../getting_started/getting_started.md:219
+#: efbc9ca4b8f44b219460577b691d2c0a
msgid ""
"You can modify the setting `MAX_GPU_MEMORY=xxGib` in `.env` file to "
"configure the maximum memory used by each GPU."
msgstr ""
-#: ../../getting_started/getting_started.md:215
-#: fabf9090b7c649b59b91742ba761339c
+#: ../../getting_started/getting_started.md:221
+#: c80f5e6d2a6042ad9ac0ef320cc6d987
msgid "6. Not Enough Memory"
msgstr ""
-#: ../../getting_started/getting_started.md:217
-#: d23322783fd34364acebe1679d7ca554
+#: ../../getting_started/getting_started.md:223
+#: b8151a333f804489ad6a2b59e739a8ed
msgid "DB-GPT supported 8-bit quantization and 4-bit quantization."
msgstr ""
-#: ../../getting_started/getting_started.md:219
-#: 8699aa0b5fe049099fb1e8038f9ad1ee
+#: ../../getting_started/getting_started.md:225
+#: be32dacd8bb84a19985417bd1b78db0f
msgid ""
"You can modify the setting `QUANTIZE_8bit=True` or `QUANTIZE_4bit=True` "
"in `.env` file to use quantization(8-bit quantization is enabled by "
"default)."
msgstr ""
-#: ../../getting_started/getting_started.md:221
-#: b115800c207b49b39de5e1a548feff58
+#: ../../getting_started/getting_started.md:227
+#: 247509e1392e4b4a8d8cdc59f2f94d37
msgid ""
"Llama-2-70b with 8-bit quantization can run with 80 GB of VRAM, and 4-bit"
" quantization can run with 48 GB of VRAM."
-msgstr "Llama-2-70b with 8-bit quantization can run with 80 GB of VRAM, and 4-bit"
+msgstr ""
+"Llama-2-70b with 8-bit quantization can run with 80 GB of VRAM, and 4-bit"
" quantization can run with 48 GB of VRAM."
-#: ../../getting_started/getting_started.md:223
-#: ae7146f0995d4eca9f775c39898d5313
+#: ../../getting_started/getting_started.md:229
+#: 4e8def991b8a491c83c29214e5f80669
msgid ""
"Note: you need to install the latest dependencies according to "
"[requirements.txt](https://github.com/eosphoros-ai/DB-"
"GPT/blob/main/requirements.txt)."
-msgstr "Note: you need to install the latest dependencies according to "
+msgstr ""
+"Note: you need to install the latest dependencies according to "
"[requirements.txt](https://github.com/eosphoros-ai/DB-"
"GPT/blob/main/requirements.txt)."
-#: ../../getting_started/getting_started.md:226
-#: 58987c486d2641cda3180a77af26c56d
+#: ../../getting_started/getting_started.md:232
+#: 85f7bb08aa714f5db6db00d905dd9dc8
msgid ""
"Here are some of the VRAM size usage of the models we tested in some "
"common scenarios."
-msgstr "Here are some of the VRAM size usage of the models we tested in some "
+msgstr ""
+"Here are some of the VRAM size usage of the models we tested in some "
"common scenarios."
-#: ../../getting_started/getting_started.md:60 4ca1a7e65f7e46c08ef12a1357f68817
+#: ../../getting_started/getting_started.md:50 165d0902ed064bbaa8b0bbe84befe139
msgid "Model"
msgstr "Model"
-#: ../../getting_started/getting_started.md:60 21ab5b38f81a49aeacadbcde55082adf
+#: ../../getting_started/getting_started.md:50 d077bca9cada4a9b89037ef5ab494c26
msgid "Quantize"
msgstr "Quantize"
-#: ../../getting_started/getting_started.md:60 a2ad84e7807c4d3ebc9d2f9aec323962
-#: b4e2d7a107074e58b91b263d815b2936
+#: ../../getting_started/getting_started.md:50 42723863614e42b1aa6c664bfd197474
+#: b71ce0c19de7471787cbbc09d6137a4b
msgid "vicuna-7b-v1.5"
msgstr "vicuna-7b-v1.5"
-#: ../../getting_started/getting_started.md:60 0134b7564c3f4cd9a605c3e7dabf5c78
-#: 12a931b938ee4e938a4ed166532946e9 9cc80bc860c7430cbc2b4aff3c4c67d2
-#: b34fb93b644a419d8073203a1a0091fc bb61c197b1c74eb2bf4e7e42ad05850d
-#: e3ab5cabd6f54faa9de0ec6c334c0d44 f942635d6f5e483293833ac5f92b6ab9
+#: ../../getting_started/getting_started.md:50 2654d358528a48e38bec445644ffd20a
+#: 2ad34cd14f54422491464967331d83fc 3b42a055e72847ec99d8d131fa9f5f84
+#: 6641e98d9bed44ee853fe7a11263a88b a69801bc92f143aa9e0818886ef6eade
+#: daa418b33705484ba944b0d92981a501 fc26bd1dace2432ca64b99ca16c2c80f
msgid "4-bit"
msgstr "4-bit"
-#: ../../getting_started/getting_started.md:60 a8556b0ec7b3488585efcdc2ba5e8565
-#: afb4365dab1e4be18f486fafbd5c6256 b50b8bc0e8084e6ebe4f9678d1f2af9d
+#: ../../getting_started/getting_started.md:50 1688f8a0972c4026abf69b5186d92137
+#: 683de09d524a4084bcdad1a184206f87 74586e40b480444591dc884e7f3f683f
#, fuzzy
msgid "8 GB"
msgstr "24 GB"
-#: ../../getting_started/getting_started.md:60 259b43952ea5424a970dfa29c84cc83d
-#: 64c6a8282eaf4e229d4196ccdbc2527f 7a426b8af2a6406fa4144142450e392e
-#: 8e5742b0c4b44bf6bd0536aade037671 a96071a0c713493fae2b381408ceaad3
-#: dcd6861abd2049a38f32957dd16408ab f334903a72934a2a86c3b24920f8cfdb
+#: ../../getting_started/getting_started.md:50 13aa8cac7a784ad1a1b58b166be99711
+#: 32633a38c1f44aac92f9647ee7867cd1 3b7fea4236174e2bb33894fd8234eddb
+#: 3b8104af560e41f285d9c433e19f6cb7 5862f0c57e2c411dada47fe71f6a74bd
+#: 6425f53f197742c8b3153a79cf4a220a d1fa83af3a884714a72f7a0af5f3be23
msgid "8-bit"
msgstr "8-bit"
-#: ../../getting_started/getting_started.md:60 09c7435ba68248bd885a41366a08f557
-#: 328ad98c8cb94f9083e8b5f158d7c14e 633e1f6d17404732a0c2f1cdf04f6591
-#: c351117229304ae88561cdd284a77f55 c77a5a73cde34e3195a6389fdd81deda
-#: f28704154fad48a8a08e16e136877ca7
+#: ../../getting_started/getting_started.md:50 107ed4fb15c44cb5bb7020a8092f7341
+#: 1d1791894226418ea623abdecd3107ba 1fdc42ef874f4022be31df7696e6a5de
+#: 718c49ddac8e4086a75cdbba166dc3cb 99404c8333974ae7a1e68885f4471b32
+#: da59674e4418488583cf4865545ad752
#, fuzzy
msgid "12 GB"
msgstr "24 GB"
-#: ../../getting_started/getting_started.md:60 63564bdcfa4249438d0245922068924f
-#: 9ad1bc9af45b4f52a1a0a9e7c8d14cce
+#: ../../getting_started/getting_started.md:50 79bf82a6dc9f4c22af779be4b1d2d13c
+#: b5bde8f01fc343baa567806fd53070dc
msgid "vicuna-13b-v1.5"
msgstr "vicuna-13b-v1.5"
-#: ../../getting_started/getting_started.md:60 0e05753ff4ec4c1a94fc68b63254115b
-#: 68a9e76f2089461692c8f396774e7634 a3136e5685cf49c4adbf79224cb0eb7d
+#: ../../getting_started/getting_started.md:50 362720115dd64143a214b3b9d3069512
+#: e0545557159b44dea0522a1848170216 e3c6ab8f25bc4adbae3b3087716a1efe
#, fuzzy
msgid "20 GB"
msgstr "24 GB"
-#: ../../getting_started/getting_started.md:60 173b58152fde49f4b3e03103323c4dd7
-#: 7ab8309661024d1cb656911acb7b0d58
+#: ../../getting_started/getting_started.md:50 295fd7d6e6c846748a1f2c82f8c79ba0
+#: 55efa8e8e4e74efc865df87fcfade84d
msgid "llama-2-7b"
msgstr "llama-2-7b"
-#: ../../getting_started/getting_started.md:60 069459a955274376a3d9a4021a032424
-#: 619257e4671141488feb6e71bd002880
+#: ../../getting_started/getting_started.md:50 15b1e541bdac43fda1dcccf2aaeaa40f
+#: 5785954810bc45369ed1745f5c503c9c
msgid "llama-2-13b"
msgstr "llama-2-13b"
-#: ../../getting_started/getting_started.md:60 ce39526cfbcc4c8c910928dc69293720
-#: f5e3fb53bd964e328aac908ae6fc06a4
+#: ../../getting_started/getting_started.md:50 185892d421684c1b903c04ea9b6653d7
+#: d7970a5fe574434798d72427436c82d5
msgid "llama-2-70b"
msgstr "llama-2-70b"
-#: ../../getting_started/getting_started.md:60 c09fb995952048d290dde484a0e09478
+#: ../../getting_started/getting_started.md:50 f732a7f73a504bd1b56b42dab1114d04
#, fuzzy
msgid "48 GB"
msgstr "24 GB"
-#: ../../getting_started/getting_started.md:60 4d9bc275dbc548918cda210f5f5d7722
+#: ../../getting_started/getting_started.md:50 04642b0dd4bc4563a45c6d15fa1d8f07
#, fuzzy
msgid "80 GB"
msgstr "24 GB"
-#: ../../getting_started/getting_started.md:60 962d13ed427041a19f20d9a1c8ca26ff
-#: dedfece8bc8c4ffabf9cc7166e4ca4db
+#: ../../getting_started/getting_started.md:50 b9c4d8b71b1e4185bab24a857433f884
+#: fc1b1927cc344e2e91bb7047c79ad227
msgid "baichuan-7b"
msgstr ""
-#: ../../getting_started/getting_started.md:60 43f629be73914d4cbb1d75c7a06f88e8
-#: 5cba8d23c6e847579986b7019e073eaf
+#: ../../getting_started/getting_started.md:50 c92417b527a04d82ac9caa837884113c
+#: dc17ae982c154b988433a0c623301bcb
msgid "baichuan-13b"
msgstr "baichuan-13b"
#~ msgid "4.2. Run with docker compose"
#~ msgstr "4.2. Run with docker compose"
+#~ msgid ""
+#~ "1.This project relies on a local "
+#~ "MySQL database service, which you need"
+#~ " to install locally. We recommend "
+#~ "using Docker for installation."
+#~ msgstr "本项目依赖一个本地的 MySQL 数据库服务,你需要本地安装,推荐直接使用 Docker 安装。"
+
+#~ msgid "prepare server sql script"
+#~ msgstr "准备db-gpt server sql脚本"
+
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/install.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/install.po
new file mode 100644
index 000000000..9e31b74e5
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/install.po
@@ -0,0 +1,52 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-16 18:31+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../getting_started/install.rst:2 ../../getting_started/install.rst:14
+#: 2861085e63144eaca1bb825e5f05d089
+msgid "Install"
+msgstr "Install"
+
+#: ../../getting_started/install.rst:3 01a6603d91fa4520b0f839379d4eda23
+msgid ""
+"DB-GPT product is a Web application that you can chat database, chat "
+"knowledge, text2dashboard."
+msgstr "DB-GPT 可以生成sql,智能报表, 知识库问答的产品"
+
+#: ../../getting_started/install.rst:8 beca85cddc9b4406aecf83d5dfcce1f7
+msgid "deploy"
+msgstr "部署"
+
+#: ../../getting_started/install.rst:9 601e9b9eb91f445fb07d2f1c807f0370
+msgid "docker"
+msgstr "docker"
+
+#: ../../getting_started/install.rst:10 6d1e094ac9284458a32a3e7fa6241c81
+msgid "docker_compose"
+msgstr "docker_compose"
+
+#: ../../getting_started/install.rst:11 ff1d1c60bbdc4e8ca82b7a9f303dd167
+msgid "environment"
+msgstr "environment"
+
+#: ../../getting_started/install.rst:12 33bfbe8defd74244bfc24e8fbfd640f6
+msgid "deploy_faq"
+msgstr "deploy_faq"
+
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/deploy/deploy.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/deploy/deploy.po
new file mode 100644
index 000000000..44440d3f1
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/deploy/deploy.po
@@ -0,0 +1,443 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-17 21:23+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../getting_started/install/deploy/deploy.md:1
+#: de0b03c3b94a4e2aad7d380f532b85c0
+msgid "Installation From Source"
+msgstr "源码安装"
+
+#: ../../getting_started/install/deploy/deploy.md:3
+#: 65a034e1a90f40bab24899be901cc97f
+msgid ""
+"This tutorial gives you a quick walkthrough about use DB-GPT with you "
+"environment and data."
+msgstr "本教程为您提供了关于如何使用DB-GPT的使用指南。"
+
+#: ../../getting_started/install/deploy/deploy.md:5
+#: 33b15956f7ef446a9aa4cac014163884
+msgid "Installation"
+msgstr "安装"
+
+#: ../../getting_started/install/deploy/deploy.md:7
+#: ad64dc334e8e43bebc8873afb27f7b15
+msgid "To get started, install DB-GPT with the following steps."
+msgstr "请按照以下步骤安装DB-GPT"
+
+#: ../../getting_started/install/deploy/deploy.md:9
+#: 33e12a5bef6c45dbb30fbffae556b664
+msgid "1. Hardware Requirements"
+msgstr "1. 硬件要求"
+
+#: ../../getting_started/install/deploy/deploy.md:10
+#: dfd3b7c074124de78169f34168b7c757
+msgid ""
+"As our project has the ability to achieve ChatGPT performance of over "
+"85%, there are certain hardware requirements. However, overall, the "
+"project can be deployed and used on consumer-grade graphics cards. The "
+"specific hardware requirements for deployment are as follows:"
+msgstr "由于我们的项目有能力达到85%以上的ChatGPT性能,所以对硬件有一定的要求。但总体来说,我们在消费级的显卡上即可完成项目的部署使用,具体部署的硬件说明如下:"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 3d4530d981bf4dbab815a11c74bfd897
+msgid "GPU"
+msgstr "GPU"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 348c8f9b734244258416ea2e11b76caa f2f5e55b8c9b4c7da0ac090e763a9f47
+msgid "VRAM Size"
+msgstr "显存"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 9e099897409f42339bc284c378318a72
+msgid "Performance"
+msgstr "Performance"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 65bd67a198a84a5399f4799b505e062c
+msgid "RTX 4090"
+msgstr "RTX 4090"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 75e7f58f8f5d42f081a3e4d2e51ccc18 d897e949b37344d084f6917b977bcceb
+msgid "24 GB"
+msgstr "24 GB"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 9cba72a2be3c41bda797ff447b63e448
+msgid "Smooth conversation inference"
+msgstr "Smooth conversation inference"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 90ea71d2099c47acac027773e69d2b23
+msgid "RTX 3090"
+msgstr "RTX 3090"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 7dd339251a1a45be9a45e0bb30bd09f7
+msgid "Smooth conversation inference, better than V100"
+msgstr "Smooth conversation inference, better than V100"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 8abe188052464e6aa395392db834c842
+msgid "V100"
+msgstr "V100"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 0b5263806a19446991cbd59c0fec6ba7 d645833d7e854eab81102374bf3fb7d8
+msgid "16 GB"
+msgstr "16 GB"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 83bb5f40fa7f4e9389ac6abdb6bbb285 f9e58862a0cb488194d7ad536f359f0d
+msgid "Conversation inference possible, noticeable stutter"
+msgstr "Conversation inference possible, noticeable stutter"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 281f7b3c9a9f450798e5cc7612ea3890
+msgid "T4"
+msgstr "T4"
+
+#: ../../getting_started/install/deploy/deploy.md:19
+#: 7276d432615040b3a9eea3f2c5764319
+msgid ""
+"if your VRAM Size is not enough, DB-GPT supported 8-bit quantization and "
+"4-bit quantization."
+msgstr "如果你的显存不够,DB-GPT支持8-bit和4-bit量化版本"
+
+#: ../../getting_started/install/deploy/deploy.md:21
+#: f68d085e03d244ed9b5ccec347466889
+msgid ""
+"Here are some of the VRAM size usage of the models we tested in some "
+"common scenarios."
+msgstr "这里是量化版本的相关说明"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 8d397d7ee603448b97153192e6d3e372
+msgid "Model"
+msgstr "Model"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 4c789597627447278e89462690563faa
+msgid "Quantize"
+msgstr "Quantize"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: a8d7c76224544ce69f376ac7cb2f5a3b dbf2beb53d7e491393b00848d63d7ffa
+msgid "vicuna-7b-v1.5"
+msgstr "vicuna-7b-v1.5"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 1bd5afee2af84597b5a423308e92362c 46d030b6ff1849ebb22b590e6978d914
+#: 4cf8f3ebb0b743ffb2f1c123a25b75d0 6d96be424e6043daa2c02649894aa796
+#: 83bb935f520c4c818bfe37e13034b2a7 92ba161085374917b7f82810b1a2bf00
+#: ca5ccc49ba1046d2b4b13aaa7ceb62f5
+msgid "4-bit"
+msgstr "4-bit"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 1173b5ee04cb4686ba34a527bc618bdb 558136997f4d49998f2f4e6a9bb656b0
+#: 8d203a9a70684cbaa9d937af8450847f
+msgid "8 GB"
+msgstr "8 GB"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 033458b772e3493b80041122e067e194 0f3eda083eac4739b2cf7d21337b145e
+#: 6404eaa486cf45a69a27b0f87a7f6302 8e850aa3acf14ab2b231be74ddb34e86
+#: ba258645f41f47a693aacbbc0f38e981 df67cac599234c4ba667a9b40eb6d9bc
+#: fc07aeb321434722a320fed0afe3ffb8
+msgid "8-bit"
+msgstr "8-bit"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 3ed2cb5787c14f268c446b03d5531233 68e7f0b0e8ad44ee86a86189bb3b553d
+#: 8b4ea703d1df45c5be90d83c4723f16f cb606e0a458746fd86307c1e8aea08f1
+#: d5da58dbde3c4bb4ac8b464a0a507c62 e8e140c610ec4971afe1b7ec2690382a
+msgid "12 GB"
+msgstr "12 GB"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 512cd29c308c4d3ab66dbe63e7ea8f48 78f8307ab96c4245a1f09abcd714034c
+msgid "vicuna-13b-v1.5"
+msgstr "vicuna-13b-v1.5"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 48849903b3cb4888b6dd3d5efcbb24fb 83178c05f6cf431f82bb1d6d25b2645e
+#: 979e3ab64df14753b0987bdd49bd5cc6
+msgid "20 GB"
+msgstr "20 GB"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 0496046d1fb644c28361959b395231d7 3871c7c4432b4a15a9888586cdc70eda
+msgid "llama-2-7b"
+msgstr "llama-2-7b"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 0c7c632d7c4d44fabfeed58fcc13db8f 78ee1adc6a7e4fa188706a1d5356059f
+msgid "llama-2-13b"
+msgstr "llama-2-13b"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 15a9341d4a6649908ef88045edd0cb93 d385381b0b4d4eff96a93a9d299cf516
+msgid "llama-2-70b"
+msgstr "llama-2-70b"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: de37a5c2b02a498b9344b24f626db9dc
+msgid "48 GB"
+msgstr "48 GB"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: e897098f81314ce4bf729aee1de7354c
+msgid "80 GB"
+msgstr "80 GB"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 0782883260f840db8e8bf7c10b5ddf62 b03b5c9343454119ae11fcb2dedf9f90
+msgid "baichuan-7b"
+msgstr "baichuan-7b"
+
+#: ../../getting_started/install/deploy/deploy.md
+#: 008a7d56e8dc4242ae3503bbbf4db153 65ea9ba20adb45519d65da7b16069fa8
+msgid "baichuan-13b"
+msgstr "baichuan-13b"
+
+#: ../../getting_started/install/deploy/deploy.md:40
+#: 1e434048c4844cc1906d83dd68af6d8c
+msgid "2. Install"
+msgstr "2. Install"
+
+#: ../../getting_started/install/deploy/deploy.md:45
+#: c6190ea13c024ddcb8e45ea22a235c3b
+msgid ""
+"We use Sqlite as default database, so there is no need for database "
+"installation. If you choose to connect to other databases, you can "
+"follow our tutorial for installation and configuration. For the entire "
+"installation process of DB-GPT, we use the miniconda3 virtual "
+"environment. Create a virtual environment and install the Python "
+"dependencies. [How to install "
+"Miniconda](https://docs.conda.io/en/latest/miniconda.html)"
+msgstr ""
+"目前使用Sqlite作为默认数据库,因此DB-"
+"GPT快速部署不需要部署相关数据库服务。如果你想使用其他数据库,需要先部署相关数据库服务。我们目前使用Miniconda进行python环境和包依赖管理[安装"
+" Miniconda](https://docs.conda.io/en/latest/miniconda.html)"
+
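+# A minimal sketch of the conda setup described above; the environment name
+# and Python version are illustrative assumptions, not fixed by this guide:
+#
+#   conda create -n dbgpt_env python=3.10
+#   conda activate dbgpt_env
+#   pip install -r requirements.txt
+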
+#: ../../getting_started/install/deploy/deploy.md:54
+#: ee1e44044b73460ea8cd2f6c2eb6100d
+msgid "Before use DB-GPT Knowledge"
+msgstr "在使用知识库之前"
+
+#: ../../getting_started/install/deploy/deploy.md:60
+#: 6a9ff4138c69429bb159bf452fa7ee55
+msgid ""
+"Once the environment is installed, we have to create a new folder "
+"\"models\" in the DB-GPT project, and then we can put all the models "
+"downloaded from huggingface in this directory"
+msgstr "如果你已经安装好了环境需要创建models, 然后到huggingface官网下载模型"
+
+#: ../../getting_started/install/deploy/deploy.md:63
+#: 1299b19bd0f24cc896c59e2c8e7e656c
+msgid "Notice make sure you have install git-lfs"
+msgstr ""
+
+#: ../../getting_started/install/deploy/deploy.md:65
+#: 69b3433c8e5c4cbb960e0178bdd6ac97
+msgid "centos:yum install git-lfs"
+msgstr ""
+
+#: ../../getting_started/install/deploy/deploy.md:67
+#: 50e3cfee5fd5484bb063d41693ac75f0
+msgid "ubuntu:app-get install git-lfs"
+msgstr ""
+
+#: ../../getting_started/install/deploy/deploy.md:69
+#: 81c85ca1188b4ef5b94e0431c6309f9b
+msgid "macos:brew install git-lfs"
+msgstr ""
+
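+# A sketch of fetching weights with git-lfs into the `models` folder; the
+# repository below is the Vicuna-v1.5 model recommended later, adjust as needed:
+#
+#   git lfs install
+#   cd models
+#   git clone https://huggingface.co/lmsys/vicuna-13b-v1.5
+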
+#: ../../getting_started/install/deploy/deploy.md:86
+#: 9b503ea553a24d488e1c180bf30055ff
+msgid ""
+"The model files are large and will take a long time to download. During "
+"the download, let's configure the .env file, which needs to be copied and"
+" created from the .env.template"
+msgstr "模型文件很大,需要很长时间才能下载。在下载过程中,让我们配置.env文件,它需要从。env.template中复制和创建。"
+
+#: ../../getting_started/install/deploy/deploy.md:88
+#: 643b6a27bc0f43ee9451d18d52a9a2eb
+msgid ""
+"if you want to use openai llm service, see [LLM Use FAQ](https://db-"
+"gpt.readthedocs.io/en/latest/getting_started/faq/llm/llm_faq.html)"
+msgstr ""
+"如果想使用openai大模型服务, 可以参考[LLM Use FAQ](https://db-"
+"gpt.readthedocs.io/en/latest/getting_started/faq/llm/llm_faq.html)"
+
+#: ../../getting_started/install/deploy/deploy.md:91
+#: cc869640e66949e99faa17b1098b1306
+msgid "cp .env.template .env"
+msgstr "cp .env.template .env"
+
+#: ../../getting_started/install/deploy/deploy.md:94
+#: 1b94ed0e469f413b8e9d0ff3cdabca33
+msgid ""
+"You can configure basic parameters in the .env file, for example setting "
+"LLM_MODEL to the model to be used"
+msgstr "您可以在.env文件中配置基本参数,例如将LLM_MODEL设置为要使用的模型。"
+
+#: ../../getting_started/install/deploy/deploy.md:96
+#: 52cfa3636f2b4f949035d2d54b39a123
+msgid ""
+"([Vicuna-v1.5](https://huggingface.co/lmsys/vicuna-13b-v1.5) based on "
+"llama-2 has been released, we recommend you set `LLM_MODEL=vicuna-"
+"13b-v1.5` to try this model)"
+msgstr ""
+"您可以在.env文件中配置基本参数,例如将LLM_MODEL设置为要使用的模型。([Vicuna-v1.5](https://huggingface.co/lmsys"
+"/vicuna-13b-v1.5), "
+"目前Vicuna-v1.5模型(基于llama2)已经开源了,我们推荐你使用这个模型通过设置LLM_MODEL=vicuna-13b-v1.5"
+
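+# Putting the configuration steps above together, a minimal sketch:
+#
+#   cp .env.template .env
+#   # then edit .env and set, for example:
+#   #   LLM_MODEL=vicuna-13b-v1.5
+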
+#: ../../getting_started/install/deploy/deploy.md:98
+#: 491fd44ede1645a3a2db10097c10dbe8
+msgid "3. Run"
+msgstr "3. Run"
+
+#: ../../getting_started/install/deploy/deploy.md:100
+#: f66b8a2b18b34df5b3e74674b4a9d7a9
+msgid "1.Run db-gpt server"
+msgstr "1.Run db-gpt server"
+
+#: ../../getting_started/install/deploy/deploy.md:105
+#: b72283f0ffdc4ecbb4da5239be5fd126
+msgid "Open http://localhost:5000 with your browser to see the product."
+msgstr "打开浏览器访问http://localhost:5000"
+
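+# A sketch of the start command; `pilot/server/dbgpt_server.py` is the entry
+# point referenced elsewhere in these docs and assumed unchanged here:
+#
+#   python pilot/server/dbgpt_server.py
+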
+#: ../../getting_started/install/deploy/deploy.md:116
+#: 1fae8a8ce4184feba2d74f877a25d8d2
+msgid ""
+"If you want to learn about dbgpt-webui, read https://github./csunny/DB-"
+"GPT/tree/new-page-framework/datacenter"
+msgstr ""
+"如果你想了解web-ui, 请访问https://github./csunny/DB-GPT/tree/new-page-"
+"framework/datacenter"
+
+#: ../../getting_started/install/deploy/deploy.md:123
+#: 573c0349bd2140e9bb356b53f1da6ee3
+msgid "Multiple GPUs"
+msgstr "Multiple GPUs"
+
+#: ../../getting_started/install/deploy/deploy.md:125
+#: af5d6a12ec954da19576decdf434df5d
+msgid ""
+"DB-GPT will use all available gpu by default. And you can modify the "
+"setting `CUDA_VISIBLE_DEVICES=0,1` in `.env` file to use the specific gpu"
+" IDs."
+msgstr "DB-GPT默认加载可利用的gpu,你也可以通过修改 在`.env`文件 `CUDA_VISIBLE_DEVICES=0,1`来指定gpu IDs"
+
+#: ../../getting_started/install/deploy/deploy.md:127
+#: de96662007194418a2877cece51dc5cb
+msgid ""
+"Optionally, you can also specify the gpu ID to use before the starting "
+"command, as shown below:"
+msgstr "你也可以指定gpu ID启动"
+
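+# For example, pinning the server to specific GPUs before the start command
+# (the GPU IDs are illustrative):
+#
+#   CUDA_VISIBLE_DEVICES=0,1 python pilot/server/dbgpt_server.py
+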
+#: ../../getting_started/install/deploy/deploy.md:137
+#: 9cb0ff253fb2428dbaec97570e5c4fa4
+msgid ""
+"You can modify the setting `MAX_GPU_MEMORY=xxGib` in `.env` file to "
+"configure the maximum memory used by each GPU."
+msgstr "同时你可以通过在.env文件设置`MAX_GPU_MEMORY=xxGib`修改每个GPU的最大使用内存"
+
+#: ../../getting_started/install/deploy/deploy.md:139
+#: c708ee0a321444dd91be00cda469976c
+msgid "Not Enough Memory"
+msgstr "Not Enough Memory"
+
+#: ../../getting_started/install/deploy/deploy.md:141
+#: 760347ecf9a44d03a8e17cba153a2cc6
+msgid "DB-GPT supported 8-bit quantization and 4-bit quantization."
+msgstr "DB-GPT 支持 8-bit quantization 和 4-bit quantization."
+
+#: ../../getting_started/install/deploy/deploy.md:143
+#: 32e3dc941bfe4d6587e8be262f8fb4d3
+msgid ""
+"You can modify the setting `QUANTIZE_8bit=True` or `QUANTIZE_4bit=True` "
+"in `.env` file to use quantization(8-bit quantization is enabled by "
+"default)."
+msgstr "你可以通过在.env文件设置`QUANTIZE_8bit=True` or `QUANTIZE_4bit=True`"
+
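+# A sketch of the corresponding .env lines; enable at most one of the two:
+#
+#   QUANTIZE_8bit=True
+#   # QUANTIZE_4bit=True
+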
+#: ../../getting_started/install/deploy/deploy.md:145
+#: bdc9a3788149427bac9f3cf35578e206
+msgid ""
+"Llama-2-70b with 8-bit quantization can run with 80 GB of VRAM, and 4-bit"
+" quantization can run with 48 GB of VRAM."
+msgstr ""
+"Llama-2-70b with 8-bit quantization 可以运行在 80 GB VRAM机器, 4-bit "
+"quantization 可以运行在 48 GB VRAM"
+
+#: ../../getting_started/install/deploy/deploy.md:147
+#: 9b6085c41b5c4b96ac3e917dc5002fc2
+msgid ""
+"Note: you need to install the latest dependencies according to "
+"[requirements.txt](https://github.com/eosphoros-ai/DB-"
+"GPT/blob/main/requirements.txt)."
+msgstr ""
+"注意,需要安装[requirements.txt](https://github.com/eosphoros-ai/DB-"
+"GPT/blob/main/requirements.txt)涉及的所有的依赖"
+
+#~ msgid ""
+#~ "Notice make sure you have install "
+#~ "git-lfs centos:yum install git-lfs "
+#~ "ubuntu:app-get install git-lfs "
+#~ "macos:brew install git-lfs"
+#~ msgstr ""
+#~ "注意下载模型之前确保git-lfs已经安ubuntu:app-get install "
+#~ "git-lfs macos:brew install git-lfs"
+
+#~ msgid ""
+#~ "You can refer to this document to"
+#~ " obtain the Vicuna weights: "
+#~ "[Vicuna](https://github.com/lm-sys/FastChat/blob/main/README.md"
+#~ "#model-weights) ."
+#~ msgstr ""
+#~ "你可以参考如何获取Vicuna weights文档[Vicuna](https://github.com/lm-"
+#~ "sys/FastChat/blob/main/README.md#model-weights) ."
+
+#~ msgid ""
+#~ "If you have difficulty with this "
+#~ "step, you can also directly use "
+#~ "the model from [this "
+#~ "link](https://huggingface.co/Tribbiani/vicuna-7b) as "
+#~ "a replacement."
+#~ msgstr ""
+#~ "如果觉得模型太大你也可以下载vicuna-7b [this "
+#~ "link](https://huggingface.co/Tribbiani/vicuna-7b) "
+
+#~ msgid ""
+#~ "If you want to access an external"
+#~ " LLM service, you need to 1.set "
+#~ "the variables LLM_MODEL=YOUR_MODEL_NAME "
+#~ "MODEL_SERVER=YOUR_MODEL_SERVER(eg:http://localhost:5000) in "
+#~ "the .env file. 2.execute dbgpt_server.py "
+#~ "in light mode"
+#~ msgstr ""
+#~ "如果你想访问外部的大模型服务(是通过DB-"
+#~ "GPT/pilot/server/llmserver.py启动的模型服务),1.需要在.env文件设置模型名和外部模型服务地址。2.使用light模式启动服务"
+
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/docker/docker.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/docker/docker.po
new file mode 100644
index 000000000..7a82c1f67
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/docker/docker.po
@@ -0,0 +1,118 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-16 18:31+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../getting_started/install/docker/docker.md:1
+#: ea5d6b95dea844b89d2f5d0e8f6ebfd3
+msgid "Docker Install"
+msgstr "Docker 安装"
+
+#: ../../getting_started/install/docker/docker.md:4
+#: c1125facb7334346a8e5b66bab892b1c
+msgid "Docker (Experimental)"
+msgstr "Docker (Experimental)"
+
+#: ../../getting_started/install/docker/docker.md:6
+#: 33a64984d0694ec9aee272f8a7ecd4cf
+msgid "1. Building Docker image"
+msgstr "1.构建Docker镜像"
+
+#: ../../getting_started/install/docker/docker.md:12
+#: 656beee32e6f4a49ad48a219910ba36c
+msgid "Review images by listing them:"
+msgstr "Review images by listing them:"
+
+#: ../../getting_started/install/docker/docker.md:18
+#: a8fd727500de480299e5bdfc86151473
+msgid "Output should look something like the following:"
+msgstr "输出日志应该长这样:"
+
+#: ../../getting_started/install/docker/docker.md:25
+#: 965edb9fe5184571b8afc5232cfd2773
+msgid "You can pass some parameters to docker/build_all_images.sh."
+msgstr "你也可以docker/build_all_images.sh构建的时候指定参数"
+
+#: ../../getting_started/install/docker/docker.md:33
+#: a1743c21a4db468db108a60540dd4754
+msgid ""
+"You can execute the command `bash docker/build_all_images.sh --help` to "
+"see more usage."
+msgstr "可以指定命令`bash docker/build_all_images.sh --help`查看如何使用"
+
+#: ../../getting_started/install/docker/docker.md:35
+#: cb7f05675c674fcf931a6afa6fb7d24c
+msgid "2. Run all in one docker container"
+msgstr "2. Run all in one docker container"
+
+#: ../../getting_started/install/docker/docker.md:37
+#: 2b2d95e668ed428e97eac851c604a74c
+msgid "**Run with local model**"
+msgstr "**Run with local model**"
+
+#: ../../getting_started/install/docker/docker.md:52
+#: ../../getting_started/install/docker/docker.md:87
+#: 731540ca95004dd2bb2c3a9871ddb404 e3ab41d312ea40d492b48cf8629553fe
+msgid "Open http://localhost:5000 with your browser to see the product."
+msgstr "打开浏览器访问http://localhost:5000"
+
+#: ../../getting_started/install/docker/docker.md:55
+#: 2cbab25caaa749f5b753c58947332cb2
+msgid ""
+"`-e LLM_MODEL=vicuna-13b`, means we use vicuna-13b as llm model, see "
+"/pilot/configs/model_config.LLM_MODEL_CONFIG"
+msgstr "`-e LLM_MODEL=vicuna-13b` 指定llm model is vicuna-13b "
+
+#: ../../getting_started/install/docker/docker.md:56
+#: ed7e79d43ee940dca788f15d520850a3
+msgid ""
+"`-v /data/models:/app/models`, means we mount the local model file "
+"directory `/data/models` to the docker container directory `/app/models`,"
+" please replace it with your model file directory."
+msgstr "`-v /data/models:/app/models`, 指定挂载的模型文件 "
+"directory `/data/models` to the docker container directory `/app/models`,"
+" 你也可以替换成你自己的模型."
+
+#: ../../getting_started/install/docker/docker.md:58
+#: 98e7bff5dab04979a0bb15abdf2ac1e0
+msgid "You can see log with command:"
+msgstr "你也可以通过命令查看日志"
+
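+# A sketch combining the flags explained above into one run command; the image
+# name `db-gpt:latest` and the container name are assumptions, use your built image:
+#
+#   docker run --gpus all -d -p 5000:5000 \
+#     -e LLM_MODEL=vicuna-13b \
+#     -v /data/models:/app/models \
+#     --name db-gpt db-gpt:latest
+#   docker logs db-gpt -f
+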
+#: ../../getting_started/install/docker/docker.md:64
+#: 7ce23cecd6f24a6b8d3e4708d5f6265d
+msgid "**Run with openai interface**"
+msgstr "**Run with openai interface**"
+
+#: ../../getting_started/install/docker/docker.md:83
+#: bdf315780f454aaf9ead6414723f34c7
+msgid ""
+"`-e LLM_MODEL=proxyllm`, means we use proxy llm(openai interface, "
+"fastchat interface...)"
+msgstr "`-e LLM_MODEL=proxyllm`, 通过设置模型为第三方模型服务API, 可以是openai, 也可以是fastchat interface..."
+
+#: ../../getting_started/install/docker/docker.md:84
+#: 12b3e78d3ebd47288a8d081eea278b45
+msgid ""
+"`-v /data/models/text2vec-large-chinese:/app/models/text2vec-large-"
+"chinese`, means we mount the local text2vec model to the docker "
+"container."
+msgstr "`-v /data/models/text2vec-large-chinese:/app/models/text2vec-large-"
+"chinese`, 设置知识库embedding模型为text2vec. "
+"container.""
+
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/docker_compose/docker_compose.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/docker_compose/docker_compose.po
new file mode 100644
index 000000000..e0e17c38d
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/docker_compose/docker_compose.po
@@ -0,0 +1,53 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-16 18:31+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../getting_started/install/docker_compose/docker_compose.md:1
+#: f1092bd1601c48789cb6f35ff9141c69
+msgid "Docker Compose"
+msgstr "Docker Compose"
+
+#: ../../getting_started/install/docker_compose/docker_compose.md:4
+#: 63460a1c4e0f46378c6a48643684fa17
+msgid "Run with docker compose"
+msgstr "Run with docker compose"
+
+#: ../../getting_started/install/docker_compose/docker_compose.md:10
+#: fa53c93a8c8b451788987461e6b296b6
+msgid "Output should look something like the following:"
+msgstr "输出应该这样:"
+
+#: ../../getting_started/install/docker_compose/docker_compose.md:18
+#: 6c56a6ef5367442ab924f9a49d955d11
+msgid "You can see log with command:"
+msgstr "你可以通过命令查看日志"
+
+#: ../../getting_started/install/docker_compose/docker_compose.md:24
+#: 1bd9fac3803641fa9ec1730c7d15d3f1
+msgid "Open http://localhost:5000 with your browser to see the product."
+msgstr "打开浏览器访问Open http://localhost:5000"
+
+#: ../../getting_started/install/docker_compose/docker_compose.md:26
+#: 21214a8bf62746078ccf7ab83ccca4f7
+msgid ""
+"You can open docker-compose.yml in the project root directory to see more"
+" details."
+msgstr "可以打开docker-compose.yml查看更多内容"
+
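+# A sketch of the compose workflow described above, run from the project root
+# where docker-compose.yml lives:
+#
+#   docker compose up -d
+#   docker compose logs -f
+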
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/environment/environment.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/environment/environment.po
new file mode 100644
index 000000000..09b3c8fa2
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/environment/environment.po
@@ -0,0 +1,361 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-17 13:07+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../getting_started/install/environment/environment.md:1
+#: be341d16f7b24bf4ad123ab78a6d855a
+#, fuzzy
+msgid "Environment Parameter"
+msgstr "环境变量说明"
+
+#: ../../getting_started/install/environment/environment.md:4
+#: 46eddb27c90f41548ea9a724bbcebd37
+msgid "LLM MODEL Config"
+msgstr "模型配置"
+
+#: ../../getting_started/install/environment/environment.md:5
+#: 7deaa85df4a04fb098f5994547a8724f
+msgid "LLM Model Name, see /pilot/configs/model_config.LLM_MODEL_CONFIG"
+msgstr "LLM Model Name, see /pilot/configs/model_config.LLM_MODEL_CONFIG"
+
+#: ../../getting_started/install/environment/environment.md:6
+#: 3902801c546547b3a4009df681ef7d52
+msgid "LLM_MODEL=vicuna-13b"
+msgstr "LLM_MODEL=vicuna-13b"
+
+#: ../../getting_started/install/environment/environment.md:8
+#: 84b0fdbfa1544ec28751e9b69b00cc02
+msgid "MODEL_SERVER_ADDRESS"
+msgstr "MODEL_SERVER_ADDRESS"
+
+#: ../../getting_started/install/environment/environment.md:9
+#: 0b430bfab77d405989470d00ca3f6fe0
+msgid "MODEL_SERVER=http://127.0.0.1:8000 LIMIT_MODEL_CONCURRENCY"
+msgstr "MODEL_SERVER=http://127.0.0.1:8000 LIMIT_MODEL_CONCURRENCY"
+
+#: ../../getting_started/install/environment/environment.md:12
+#: b477a25586c546729a93fb6785b7b2ec
+msgid "LIMIT_MODEL_CONCURRENCY=5"
+msgstr "LIMIT_MODEL_CONCURRENCY=5"
+
+#: ../../getting_started/install/environment/environment.md:14
+#: 1d6ea800af384fff9c265610f71cc94e
+msgid "MAX_POSITION_EMBEDDINGS"
+msgstr "MAX_POSITION_EMBEDDINGS"
+
+#: ../../getting_started/install/environment/environment.md:16
+#: 388e758ce4ea4692a4c34294cebce7f2
+msgid "MAX_POSITION_EMBEDDINGS=4096"
+msgstr "MAX_POSITION_EMBEDDINGS=4096"
+
+#: ../../getting_started/install/environment/environment.md:18
+#: 16a307dce1294ceba892ff93ae4e81c0
+msgid "QUANTIZE_QLORA"
+msgstr "QUANTIZE_QLORA"
+
+#: ../../getting_started/install/environment/environment.md:20
+#: 93ceb2b2fcd5454b82eefb0ae8c7ae77
+msgid "QUANTIZE_QLORA=True"
+msgstr "QUANTIZE_QLORA=True"
+
+#: ../../getting_started/install/environment/environment.md:22
+#: 15ffa35d023a4530b02a85ee6168dd4b
+msgid "QUANTIZE_8bit"
+msgstr "QUANTIZE_8bit"
+
+#: ../../getting_started/install/environment/environment.md:24
+#: 81df248ac5cb4ab0b13a711505f6a177
+msgid "QUANTIZE_8bit=True"
+msgstr "QUANTIZE_8bit=True"
+
+#: ../../getting_started/install/environment/environment.md:27
+#: 15cc7b7d41ad44f0891c1189709f00f1
+msgid "LLM PROXY Settings"
+msgstr "LLM PROXY Settings"
+
+#: ../../getting_started/install/environment/environment.md:28
+#: e6c1115a39404f11b193a1593bc51a22
+msgid "OPENAI Key"
+msgstr "OPENAI Key"
+
+#: ../../getting_started/install/environment/environment.md:30
+#: 8157e0a831fe4506a426822b7565e4f6
+msgid "PROXY_API_KEY={your-openai-sk}"
+msgstr "PROXY_API_KEY={your-openai-sk}"
+
+#: ../../getting_started/install/environment/environment.md:31
+#: 89b34d00bdb64e738bd9bc8c086b1f02
+msgid "PROXY_SERVER_URL=https://api.openai.com/v1/chat/completions"
+msgstr "PROXY_SERVER_URL=https://api.openai.com/v1/chat/completions"
+
+#: ../../getting_started/install/environment/environment.md:33
+#: 7a97df730aeb484daf19c8172e61a290
+msgid "from https://bard.google.com/ f12-> application-> __Secure-1PSID"
+msgstr "from https://bard.google.com/ f12-> application-> __Secure-1PSID"
+
+#: ../../getting_started/install/environment/environment.md:35
+#: d430ddf726a049c0a9e0a9bfd5a6fe0e
+msgid "BARD_PROXY_API_KEY={your-bard-token}"
+msgstr "BARD_PROXY_API_KEY={your-bard-token}"
+
+#: ../../getting_started/install/environment/environment.md:38
+#: 23d6b0da3e7042abb55f6181c4a382d2
+msgid "DATABASE SETTINGS"
+msgstr "DATABASE SETTINGS"
+
+#: ../../getting_started/install/environment/environment.md:39
+#: dbae0a2d847f41f5be9396a160ef88d0
+msgid "SQLite database (Current default database)"
+msgstr "SQLite database (Current default database)"
+
+#: ../../getting_started/install/environment/environment.md:40
+#: bdb55b7280c341a981e9d338cce53345
+msgid "LOCAL_DB_PATH=data/default_sqlite.db"
+msgstr "LOCAL_DB_PATH=data/default_sqlite.db"
+
+#: ../../getting_started/install/environment/environment.md:41
+#: 739d67927a9d46b28500deba1917916b
+msgid "LOCAL_DB_TYPE=sqlite # Database Type default:sqlite"
+msgstr "LOCAL_DB_TYPE=sqlite # Database Type default:sqlite"
+
+#: ../../getting_started/install/environment/environment.md:43
+#: eb4717bce6a6483b86d9780d924c5ff1
+msgid "MYSQL database"
+msgstr "MYSQL database"
+
+#: ../../getting_started/install/environment/environment.md:44
+#: 0f4cdf0ff5dd4ff0b397dfa88541a2e1
+msgid "LOCAL_DB_TYPE=mysql"
+msgstr "LOCAL_DB_TYPE=mysql"
+
+#: ../../getting_started/install/environment/environment.md:45
+#: c971ead492c34487bd766300730a9cba
+msgid "LOCAL_DB_USER=root"
+msgstr "LOCAL_DB_USER=root"
+
+#: ../../getting_started/install/environment/environment.md:46
+#: 02828b29ad044eeab890a2f8af0e5907
+msgid "LOCAL_DB_PASSWORD=aa12345678"
+msgstr "LOCAL_DB_PASSWORD=aa12345678"
+
+#: ../../getting_started/install/environment/environment.md:47
+#: 53dc7f15b3934987b1f4c2e2d0b11299
+msgid "LOCAL_DB_HOST=127.0.0.1"
+msgstr "LOCAL_DB_HOST=127.0.0.1"
+
+#: ../../getting_started/install/environment/environment.md:48
+#: 1ac95fc482934247a118bab8dcebeb57
+msgid "LOCAL_DB_PORT=3306"
+msgstr "LOCAL_DB_PORT=3306"
+
+#: ../../getting_started/install/environment/environment.md:51
+#: 34e46aa926844be19c7196759b03af63
+msgid "EMBEDDING SETTINGS"
+msgstr "EMBEDDING SETTINGS"
+
+#: ../../getting_started/install/environment/environment.md:52
+#: 2b5aa08cc995495e85a1f7dc4f97b5d7
+msgid "EMBEDDING MODEL Name, see /pilot/configs/model_config.LLM_MODEL_CONFIG"
+msgstr "EMBEDDING模型, 参考see /pilot/configs/model_config.LLM_MODEL_CONFIG"
+
+#: ../../getting_started/install/environment/environment.md:53
+#: 0de0ca551ed040248406f848feca541d
+msgid "EMBEDDING_MODEL=text2vec"
+msgstr "EMBEDDING_MODEL=text2vec"
+
+#: ../../getting_started/install/environment/environment.md:55
+#: 43019fb570904c9981eb68f33e64569c
+msgid "Embedding Chunk size, default 500"
+msgstr "Embedding 切片大小, 默认500"
+
+#: ../../getting_started/install/environment/environment.md:57
+#: 7e3f93854873461286e96887e04167aa
+msgid "KNOWLEDGE_CHUNK_SIZE=500"
+msgstr "KNOWLEDGE_CHUNK_SIZE=500"
+
+#: ../../getting_started/install/environment/environment.md:59
+#: 9504f4a59ae74352a524b7741113e2d6
+msgid "Embedding Chunk Overlap, default 100"
+msgstr "Embedding chunk Overlap, 文本块之间的最大重叠量。保留一些重叠可以保持文本块之间的连续性(例如使用滑动窗口),默认100"
+
+#: ../../getting_started/install/environment/environment.md:60
+#: 24e6119c2051479bbd9dba71a9c23dbe
+msgid "KNOWLEDGE_CHUNK_OVERLAP=100"
+msgstr "KNOWLEDGE_CHUNK_OVERLAP=100"
+
+#: ../../getting_started/install/environment/environment.md:62
+#: 0d180d7f2230442abee901c19526e442
+msgid "embeding recall top k,5"
+msgstr "embedding 召回topk, 默认5"
+
+#: ../../getting_started/install/environment/environment.md:64
+#: a5bb9ab2ba50411cbbe87f7836bfbb6d
+msgid "KNOWLEDGE_SEARCH_TOP_SIZE=5"
+msgstr "KNOWLEDGE_SEARCH_TOP_SIZE=5"
+
+#: ../../getting_started/install/environment/environment.md:66
+#: 183b8dd78cba4ae19bd2e08d69d21e0b
+msgid "embeding recall max token ,2000"
+msgstr "embedding向量召回最大token, 默认2000"
+
+#: ../../getting_started/install/environment/environment.md:68
+#: ce0c711febcb44c18ae0fc858c3718d1
+msgid "KNOWLEDGE_SEARCH_MAX_TOKEN=5"
+msgstr "KNOWLEDGE_SEARCH_MAX_TOKEN=5"
+
+#: ../../getting_started/install/environment/environment.md:71
+#: ../../getting_started/install/environment/environment.md:87
+#: 4cab1f399cc245b4a1a1976d2c4fc926 ec9cec667a1c4473bf9a796a26e1ce20
+msgid "Vector Store SETTINGS"
+msgstr "Vector Store SETTINGS"
+
+#: ../../getting_started/install/environment/environment.md:72
+#: ../../getting_started/install/environment/environment.md:88
+#: 4dd04aadd46948a5b1dcf01fdb0ef074 bab7d512f33e40cf9e10f0da67e699c8
+msgid "Chroma"
+msgstr "Chroma"
+
+#: ../../getting_started/install/environment/environment.md:73
+#: ../../getting_started/install/environment/environment.md:89
+#: 13eec36741b14e028e2d3859a320826e ab3ffbcf9358401993af636ba9ab2e2d
+msgid "VECTOR_STORE_TYPE=Chroma"
+msgstr "VECTOR_STORE_TYPE=Chroma"
+
+#: ../../getting_started/install/environment/environment.md:74
+#: ../../getting_started/install/environment/environment.md:90
+#: d15b91e2a2884f23a1dd2d54783b0638 d1f856d571b547098bb0c2a18f9f1979
+msgid "MILVUS"
+msgstr "MILVUS"
+
+#: ../../getting_started/install/environment/environment.md:75
+#: ../../getting_started/install/environment/environment.md:91
+#: 1e165f6c934343c7808459cc7a65bc70 985dd60c2b7d4baaa6601a810a6522d7
+msgid "VECTOR_STORE_TYPE=Milvus"
+msgstr "VECTOR_STORE_TYPE=Milvus"
+
+#: ../../getting_started/install/environment/environment.md:76
+#: ../../getting_started/install/environment/environment.md:92
+#: a1a53f051cee40ed886346a94babd75a d263e8eaee684935a58f0a4fe61c6f0e
+msgid "MILVUS_URL=127.0.0.1"
+msgstr "MILVUS_URL=127.0.0.1"
+
+#: ../../getting_started/install/environment/environment.md:77
+#: ../../getting_started/install/environment/environment.md:93
+#: 2741a312db1a4c6a8a1c1d62415c5fba d03bbf921ddd4f4bb715fe5610c3d0aa
+msgid "MILVUS_PORT=19530"
+msgstr "MILVUS_PORT=19530"
+
+#: ../../getting_started/install/environment/environment.md:78
+#: ../../getting_started/install/environment/environment.md:94
+#: d0786490d38c4e4f971cc14f62fe1fc8 e9e0854873dc4c209861ee4eb77d25cd
+msgid "MILVUS_USERNAME"
+msgstr "MILVUS_USERNAME"
+
+#: ../../getting_started/install/environment/environment.md:79
+#: ../../getting_started/install/environment/environment.md:95
+#: 9a82d07153cc432ebe754b5bc02fde0d a6485c1cfa7d4069a6894c43674c8c2b
+msgid "MILVUS_PASSWORD"
+msgstr "MILVUS_PASSWORD"
+
+#: ../../getting_started/install/environment/environment.md:80
+#: ../../getting_started/install/environment/environment.md:96
+#: 2f233f32b8ba408a9fbadb21fabb99ec 809b3219dd824485bc2cfc898530d708
+msgid "MILVUS_SECURE="
+msgstr "MILVUS_SECURE="
+
+#: ../../getting_started/install/environment/environment.md:82
+#: ../../getting_started/install/environment/environment.md:98
+#: f00603661f2b42e1bd2bca74ad1e3c31 f378e16fdec44c559e34c6929de812e8
+msgid "WEAVIATE"
+msgstr "WEAVIATE"
+
+#: ../../getting_started/install/environment/environment.md:83
+#: da2049ebc6874cf0a6b562e0e2fd9ec7
+msgid "VECTOR_STORE_TYPE=Weaviate"
+msgstr "VECTOR_STORE_TYPE=Weaviate"
+
+#: ../../getting_started/install/environment/environment.md:84
+#: ../../getting_started/install/environment/environment.md:99
+#: 25f1246629934289aad7ef01c7304097 c9fe0e413d9a4fc8abf86b3ed99e0581
+msgid "WEAVIATE_URL=https://kt-region-m8hcy0wc.weaviate.network"
+msgstr "WEAVIATE_URL=https://kt-region-m8hcy0wc.weaviate.network"
+
+#: ../../getting_started/install/environment/environment.md:102
+#: ba7c9e707f6a4cd6b99e52b58da3ab2d
+msgid "Multi-GPU Setting"
+msgstr "Multi-GPU Setting"
+
+#: ../../getting_started/install/environment/environment.md:103
+#: 5ca75fdf2c264b2c844d77f659b4f0b3
+msgid ""
+"See https://developer.nvidia.com/blog/cuda-pro-tip-control-gpu-"
+"visibility-cuda_visible_devices/ If CUDA_VISIBLE_DEVICES is not "
+"configured, all available gpus will be used"
+msgstr ""
+"参考 https://developer.nvidia.com/blog/cuda-pro-tip-control-gpu-visibility-"
+"cuda_visible_devices/ 如果 CUDA_VISIBLE_DEVICES没有设置, 会使用所有可用的gpu"
+
+#: ../../getting_started/install/environment/environment.md:106
+#: de92eb310aff43fbbbf3c5a116c3b2c6
+msgid "CUDA_VISIBLE_DEVICES=0"
+msgstr "CUDA_VISIBLE_DEVICES=0"
+
+#: ../../getting_started/install/environment/environment.md:108
+#: d2641df6123a442b8e4444ad5f01a9aa
+msgid ""
+"Optionally, you can also specify the gpu ID to use before the starting "
+"command"
+msgstr "你也可以通过启动命令设置gpu ID"
+
+#: ../../getting_started/install/environment/environment.md:110
+#: 76c66179d11a4e5fa369421378609aae
+msgid "CUDA_VISIBLE_DEVICES=3,4,5,6"
+msgstr "CUDA_VISIBLE_DEVICES=3,4,5,6"
+
+#: ../../getting_started/install/environment/environment.md:112
+#: 29bd0f01fdf540ad98385ea8473f7647
+msgid "You can configure the maximum memory used by each GPU."
+msgstr "可以设置GPU的最大内存"
+
+#: ../../getting_started/install/environment/environment.md:114
+#: 31e5e23838734ba7a2810e2387e6d6a0
+msgid "MAX_GPU_MEMORY=16Gib"
+msgstr "MAX_GPU_MEMORY=16Gib"
+
+#: ../../getting_started/install/environment/environment.md:117
+#: 99aa63ab1ae049d9b94536d6a96f3443
+msgid "Other Setting"
+msgstr "Other Setting"
+
+#: ../../getting_started/install/environment/environment.md:118
+#: 3168732183874bffb59a3575d3473d62
+msgid "Language Settings(influence prompt language)"
+msgstr "Language Settings(涉及prompt语言以及知识切片方式)"
+
+#: ../../getting_started/install/environment/environment.md:119
+#: 73eb0a96f29b4739bd456faa9cb5033d
+msgid "LANGUAGE=en"
+msgstr "LANGUAGE=en"
+
+#: ../../getting_started/install/environment/environment.md:120
+#: c6646b78c6cf4d25a13108232f5b2046
+msgid "LANGUAGE=zh"
+msgstr "LANGUAGE=zh"
+
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/llm/llama/llama_cpp.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/llm/llama/llama_cpp.po
new file mode 100644
index 000000000..d3c61cc54
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/llm/llama/llama_cpp.po
@@ -0,0 +1,322 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-17 21:23+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:1
+#: 911085eb102a47c1832411ada8b8b906
+msgid "llama.cpp"
+msgstr "llama.cpp"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:3
+#: 8099fe03f2204c7f90968ae5c0cae117
+msgid ""
+"DB-GPT is now supported by [llama-cpp-python](https://github.com/abetlen"
+"/llama-cpp-python) through "
+"[llama.cpp](https://github.com/ggerganov/llama.cpp)."
+msgstr "DB-GPT is now supported by [llama-cpp-python](https://github.com/abetlen"
+"/llama-cpp-python) through "
+"[llama.cpp](https://github.com/ggerganov/llama.cpp)."
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:5
+#: 79c33615ad6d44859b33ed0d05fdb1a5
+msgid "Running llama.cpp"
+msgstr "运行 llama.cpp"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:7
+#: 3111bf827639484fb5e5f72a42b1b4e7
+msgid "Preparing Model Files"
+msgstr "准备模型文件"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:9
+#: 823f04ec193946d080b203a19c4ed96f
+msgid ""
+"To use llama.cpp, you need to prepare a ggml format model file, and there"
+" are two common ways to obtain it, you can choose either:"
+msgstr "使用llama.cpp, 你需要准备ggml格式的文件,你可以通过以下两种方法获取"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:11
+#: c631f3c1a1db429c801c24fa7799b2e1
+msgid "Download a pre-converted model file."
+msgstr "Download a pre-converted model file."
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:13
+#: 1ac7d5845ca241519ec15236e9802af6
+msgid ""
+"Suppose you want to use [Vicuna 7B v1.5](https://huggingface.co/lmsys"
+"/vicuna-7b-v1.5), you can download the file already converted from "
+"[TheBloke/vicuna-7B-v1.5-GGML](https://huggingface.co/TheBloke/vicuna-"
+"7B-v1.5-GGML), only one file is needed. Download it to the `models` "
+"directory and rename it to `ggml-model-q4_0.bin`."
+msgstr "假设您想使用[Vicuna 7B v1.5](https://huggingface.co/lmsys"
+"/vicuna-7b-v1.5)您可以从[TheBloke/vicuna-7B-v1.5-GGML](https://huggingface.co/TheBloke/vicuna-"
+"7B-v1.5-GGML)下载已转换的文件,只需要一个文件。将其下载到models目录并将其重命名为ggml-model-q4_0.bin。"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:19
+#: 65344fafdaa1469797592e454ebee7b5
+msgid "Convert It Yourself"
+msgstr "Convert It Yourself"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:21
+#: 8da2bda172884c9fb2d64901d8b9178c
+msgid ""
+"You can convert the model file yourself according to the instructions in "
+"[llama.cpp#prepare-data--run](https://github.com/ggerganov/llama.cpp"
+"#prepare-data--run), and put the converted file in the models directory "
+"and rename it to `ggml-model-q4_0.bin`."
+msgstr "您可以根据[llama.cpp#prepare-data--run](https://github.com/ggerganov/llama.cpp"
+"#prepare-data--run)中的说明自己转换模型文件,然后将转换后的文件放入models目录中,并将其重命名为ggml-model-q4_0.bin。"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:23
+#: d30986c0a84448ff89bc4bb84e3d0deb
+msgid "Installing Dependencies"
+msgstr "安装依赖"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:25
+#: b91ca009587b45679c54f4dce07c2eb3
+msgid ""
+"llama.cpp is an optional dependency in DB-GPT, and you can manually "
+"install it using the following command:"
+msgstr "llama.cpp在DB-GPT中是可选安装项, 你可以通过一下命令进行安装"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:31
+#: 2c89087ba2214d97bc01a286826042bc
+msgid "Modifying the Configuration File"
+msgstr "修改配置文件"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:33
+#: e4ebd4dac0cd4fb4a8e3c1f6edde7ea8
+msgid "Next, you can directly modify your `.env` file to enable llama.cpp."
+msgstr "修改`.env`文件使用llama.cpp"
+
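+# A sketch of the .env change; the model name `llama-cpp` is an assumption,
+# check /pilot/configs/model_config.LLM_MODEL_CONFIG for the exact key:
+#
+#   LLM_MODEL=llama-cpp
+#   llama_cpp_prompt_template=vicuna_v1.1
+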
+#: ../../getting_started/install/llm/llama/llama_cpp.md:40
+#: 2fce7ec613784c8e96f19e9f4c4fb818
+msgid ""
+"Then you can run it according to [Run](https://db-"
+"gpt.readthedocs.io/en/latest/getting_started/install/deploy/deploy.html#run)."
+msgstr "然后你可以通过[Run](https://db-"
+"gpt.readthedocs.io/en/latest/getting_started/install/deploy/deploy.html#run).来运行"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:43
+#: 9bbaa16512d2420aa368ba34825cc024
+msgid "More Configurations"
+msgstr "更多配置文件"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:45
+#: 5ce014aa175d4a119150cf184098a0c3
+msgid ""
+"In DB-GPT, the model configuration can be done through `{model "
+"name}_{config key}`."
+msgstr "In DB-GPT, the model configuration can be done through `{model "
+"name}_{config key}`."
+
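+# For example, combining the parameters documented in the table below
+# (the values are illustrative):
+#
+#   llama_cpp_n_gpu_layers=10
+#   llama_cpp_n_threads=8
+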
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: 09d1f9eaf6cc4267b1eb94e4a8e78ba9
+msgid "Environment Variable Key"
+msgstr "Environment Variable Key"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: 6650b3f2495e41588e23d8a2647e7ce3
+msgid "default"
+msgstr "default"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: 8f33e692a7fc41e1a42663535b95a08c
+msgid "Prompt Template Name"
+msgstr "Prompt Template Name"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: 2f8ca1f267694949a2b251d0ef576fd8
+msgid "llama_cpp_prompt_template"
+msgstr "llama_cpp_prompt_template"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: 9b6d310d3f3c454488f76062c8bcda67 c80abec11c3240cf9d0122543e9401c3
+#: ed439c9374d74543a8d2a4f88f4db958 f49b3e4281b14f1b8909cd13159d406a
+#: ffa824cc22a946ab851124b58cf7441a
+msgid "None"
+msgstr "None"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: f41bd292bcb9491096c39b36bceb3816
+msgid ""
+"Prompt template name, now support: `zero_shot, vicuna_v1.1, llama-2"
+",baichuan-chat`, If None, the prompt template is automatically determined"
+" from model path。"
+msgstr "Prompt template 现在可以支持`zero_shot, vicuna_v1.1, llama-2"
+",baichuan-chat`, 如果是None, the prompt template可以自动选择模型路径"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: 8125df74f1a7429eb0c5ce350edc9315
+msgid "llama_cpp_model_path"
+msgstr "llama_cpp_model_path"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: 28ad71d410fb4757821bf8cc1c232357
+msgid "Model path"
+msgstr "Model path"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: b7133f115d79477ab416dc63c64d8aa7
+msgid "llama_cpp_n_gpu_layers"
+msgstr "llama_cpp_n_gpu_layers"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: 7cdd27333a464ebeab6bf41bca709816
+msgid "1000000000"
+msgstr "1000000000"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: 667ec37bc6824eba999f39f4e6072999
+msgid "Number of layers to offload to the GPU, Set this to 1000000000 to offload"
+" all layers to the GPU. If your GPU VRAM is not enough, you can set a low"
+" number, eg: `10`"
+msgstr "要将层数转移到GPU上,将其设置为1000000000以将所有层转移到GPU上。如果您的GPU VRAM不足,可以设置较低的数字,例如:10。"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: edae6c77475d4958a96d59b4bd165916
+msgid "llama_cpp_n_threads"
+msgstr "llama_cpp_n_threads"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: 8fef1cdd4b0b42faadc717a58b9434a4
+msgid ""
+"Number of threads to use. If None, the number of threads is automatically"
+" determined"
+msgstr "要使用的线程数量。如果为None,则线程数量将自动确定。"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: ef5f27f600aa4eaf8080d6dca46ad434
+msgid "llama_cpp_n_batch"
+msgstr "llama_cpp_n_batch"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: be999101e5f84b83bde0d8e801083c52
+msgid "512"
+msgstr "512"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: cdc7a51c720a4fe38323eb7a1cfa6bd1
+msgid "Maximum number of prompt tokens to batch together when calling llama_eval"
+msgstr "在调用llama_eval时,批处理在一起的prompt tokens的最大数量。
+
+"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: 26d08360c2df4d018b2416cf8e4b3f48
+msgid "llama_cpp_n_gqa"
+msgstr "llama_cpp_n_gqa"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: 4d22c4f8e285445f9c26d181cf350cb7
+msgid "Grouped-query attention. Must be 8 for llama-2 70b."
+msgstr "对于llama-2 70b模型,Grouped-query attention必须为8。"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: 18e5e5ba6ac64e818546742458fb4c84
+msgid "llama_cpp_rms_norm_eps"
+msgstr "llama_cpp_rms_norm_eps"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: fc85743d71e1428fa2aa0423ff9d9170
+msgid "5e-06"
+msgstr "5e-06"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: bfc53bec0df94d9e8e9cc7d3333a69c1
+msgid "5e-6 is a good value for llama-2 models."
+msgstr "对于llama-2模型来说,5e-6是一个不错的值。"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: 81fc8f37787d48b0b1da826a2f887886
+msgid "llama_cpp_cache_capacity"
+msgstr "llama_cpp_cache_capacity"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: 125e3285249449cb90bbff063868d4d4
+msgid "Maximum cache capacity. Examples: 2000MiB, 2GiB"
+msgstr "cache capacity最大值. Examples: 2000MiB, 2GiB"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: 31ed49d49f574d4983738d91bed95fc9
+msgid "llama_cpp_prefer_cpu"
+msgstr "llama_cpp_prefer_cpu"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: b6ed0d5394874bb38c7468216b8bca88
+msgid "False"
+msgstr "False"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md
+#: d148d6b130454666a601b65b444c7e51
+msgid ""
+"If a GPU is available, it will be preferred by default, unless "
+"prefer_cpu=False is configured."
+msgstr "如果有可用的GPU,默认情况下会优先使用GPU,除非配置了prefer_cpu=False。"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:59
+#: 75bd31d245b148569bcf9eca6c8bec9c
+msgid "GPU Acceleration"
+msgstr "GPU 加速"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:61
+#: 1cf5538f9d39457e901f4120f76d54c1
+msgid ""
+"GPU acceleration is supported by default. If you encounter any issues, "
+"you can uninstall the dependent packages with the following command:"
+msgstr "默认情况下支持GPU加速。如果遇到任何问题,您可以使用以下命令卸载相关的依赖包"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:66
+#: 08f8eea38e5e44fa80b528b75a379acf
+msgid ""
+"Then install `llama-cpp-python` according to the instructions in [llama-"
+"cpp-python](https://github.com/abetlen/llama-cpp-"
+"python/blob/main/README.md)."
+msgstr "然后通过指令[llama-"
+"cpp-python](https://github.com/abetlen/llama-cpp-"
+"python/blob/main/README.md).安装`llama-cpp-python`"
+
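+# A sketch of the uninstall/reinstall cycle; the CMAKE_ARGS flag follows the
+# upstream llama-cpp-python README at the time of writing and may change:
+#
+#   pip uninstall -y llama-cpp-python
+#   CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python
+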
+#: ../../getting_started/install/llm/llama/llama_cpp.md:69
+#: abea5d4418c54657981640d6227b7be2
+msgid "Mac Usage"
+msgstr "Mac Usage"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:71
+#: be9fb5ecbdd5495b98007decccbd0372
+msgid ""
+"Special attention, if you are using Apple Silicon (M1) Mac, it is highly "
+"recommended to install arm64 architecture python support, for example:"
+msgstr "特别注意:如果您正在使用苹果芯片(M1)的Mac电脑,强烈建议安装arm64架构的Python支持,例如:"
+
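+# One common way to get an arm64 Python under conda (a sketch; the environment
+# name is illustrative):
+#
+#   CONDA_SUBDIR=osx-arm64 conda create -n dbgpt_env python=3.10
+#   conda activate dbgpt_env
+#   conda config --env --set subdir osx-arm64
+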
+#: ../../getting_started/install/llm/llama/llama_cpp.md:78
+#: efd6dfd4e9e24bf884803143e2b123f2
+msgid "Windows Usage"
+msgstr "Windows使用"
+
+#: ../../getting_started/install/llm/llama/llama_cpp.md:80
+#: 27ecf054aa294a2eaed701c46edf27a7
+msgid ""
+"The use under the Windows platform has not been rigorously tested and "
+"verified, and you are welcome to use it. If you have any problems, you "
+"can create an [issue](https://github.com/eosphoros-ai/DB-GPT/issues) or "
+"[contact us](https://github.com/eosphoros-ai/DB-GPT/tree/main#contact-"
+"information) directly."
+msgstr "在Windows平台上的使用尚未经过严格的测试和验证,欢迎您使用。如果您有任何问题,可以创建一个[issue](https://github.com/eosphoros-ai/DB-GPT/issues)或者[contact us](https://github.com/eosphoros-ai/DB-GPT/tree/main#contact-"
+"information) directly."
+
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/llm/llm.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/llm/llm.po
new file mode 100644
index 000000000..01c1607ca
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/llm/llm.po
@@ -0,0 +1,99 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-17 23:29+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../getting_started/install/llm/llm.rst:2
+#: ../../getting_started/install/llm/llm.rst:23
+#: 9e4baaff732b49f1baded5fd1bde4bd5
+msgid "LLM Usage"
+msgstr "LLM使用"
+
+#: ../../getting_started/install/llm/llm.rst:3 d80e7360f17c4be889f220e2a846a81e
+msgid ""
+"DB-GPT provides a management and deployment solution for multiple models."
+" This chapter mainly discusses how to deploy different models."
+msgstr "DB-GPT提供了多模型的管理和部署方案,本章主要讲解针对不同的模型该怎么部署"
+
+#: ../../getting_started/install/llm/llm.rst:18
+#: 1cebd6c84b924eeb877ba31bd520328f
+msgid ""
+"Multi LLMs Support, Supports multiple large language models, currently "
+"supporting"
+msgstr "目前DB-GPT已适配如下模型"
+
+#: ../../getting_started/install/llm/llm.rst:9 1163d5d3a8834b24bb6a5716d4fcc680
+msgid "🔥 Vicuna-v1.5(7b,13b)"
+msgstr "🔥 Vicuna-v1.5(7b,13b)"
+
+#: ../../getting_started/install/llm/llm.rst:10
+#: 318dbc8e8de64bfb9cbc65343d050500
+msgid "🔥 llama-2(7b,13b,70b)"
+msgstr "🔥 llama-2(7b,13b,70b)"
+
+#: ../../getting_started/install/llm/llm.rst:11
+#: f2b1ef2bfbde46889131b6852d1211e8
+msgid "WizardLM-v1.2(13b)"
+msgstr "WizardLM-v1.2(13b)"
+
+#: ../../getting_started/install/llm/llm.rst:12
+#: 2462160e5928434aaa6f58074b585c14
+msgid "Vicuna (7b,13b)"
+msgstr "Vicuna (7b,13b)"
+
+#: ../../getting_started/install/llm/llm.rst:13
+#: 61cc1b9ed1914e379dee87034e9fbaea
+msgid "ChatGLM-6b (int4,int8)"
+msgstr "ChatGLM-6b (int4,int8)"
+
+#: ../../getting_started/install/llm/llm.rst:14
+#: 653f5fd3b6ab4c508a2ce449231b5a17
+msgid "ChatGLM2-6b (int4,int8)"
+msgstr "ChatGLM2-6b (int4,int8)"
+
+#: ../../getting_started/install/llm/llm.rst:15
+#: 6a1cf7a271f6496ba48d593db1f756f1
+msgid "guanaco(7b,13b,33b)"
+msgstr "guanaco(7b,13b,33b)"
+
+#: ../../getting_started/install/llm/llm.rst:16
+#: f15083886362437ba96e1cb9ade738aa
+msgid "Gorilla(7b,13b)"
+msgstr "Gorilla(7b,13b)"
+
+#: ../../getting_started/install/llm/llm.rst:17
+#: 5e1ded19d1b24ba485e5d2bf22ce2db7
+msgid "Baichuan(7b,13b)"
+msgstr "Baichuan(7b,13b)"
+
+#: ../../getting_started/install/llm/llm.rst:18
+#: 30f6f71dd73b48d29a3647d58a9cdcaf
+msgid "OpenAI"
+msgstr "OpenAI"
+
+#: ../../getting_started/install/llm/llm.rst:20
+#: 9ad21acdaccb41c1bbfa30b5ce114732
+msgid "llama_cpp"
+msgstr "llama_cpp"
+
+#: ../../getting_started/install/llm/llm.rst:21
+#: b5064649f7c9443c9b9f15ec7fc02434
+msgid "quantization"
+msgstr "quantization"
+
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/llm/quantization/quantization.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/llm/quantization/quantization.po
new file mode 100644
index 000000000..3631c9b53
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/install/llm/quantization/quantization.po
@@ -0,0 +1,26 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.5\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-08-17 13:07+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../getting_started/install/llm/quantization/quantization.md:1
+#: 2b540d0fbece43c5a9d90e0935eb574c
+msgid "quantization"
+msgstr "quantization"
+
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/installation.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/installation.po
index 29afa1148..72147f848 100644
--- a/docs/locales/zh_CN/LC_MESSAGES/getting_started/installation.po
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/installation.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: DB-GPT 👏👏 0.3.0\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-07-13 15:39+0800\n"
+"POT-Creation-Date: 2023-08-16 18:31+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language: zh_CN\n"
@@ -19,67 +19,76 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.12.1\n"
-#: ../../getting_started/installation.md:1 bc5bfc8ebfc847c5a22f2346357cf747
-msgid "Installation"
-msgstr "安装dbgpt包指南"
+#: ../../getting_started/installation.md:1 f9e65f84d13249098aa2768bb7cc69ed
+msgid "Python SDK"
+msgstr ""
-#: ../../getting_started/installation.md:2 1aaef0db5ee9426aa337021d782666af
+#: ../../getting_started/installation.md:2 1902cffb20fe4994aecb577c1a3fa8bf
msgid ""
"DB-GPT provides a third-party Python API package that you can integrate "
"into your own code."
msgstr "DB-GPT提供了python第三方包,你可以在你的代码中引入"
-#: ../../getting_started/installation.md:4 de542f259e20441991a0e5a7d52769b8
+#: ../../getting_started/installation.md:4 908a7c1971d04e37bd5e88171bd93d3b
msgid "Installation from Pip"
msgstr "使用pip安装"
-#: ../../getting_started/installation.md:6 3357f019aa8249b292162de92757eec4
+#: ../../getting_started/installation.md:6 0eff7cb3c9c643e0884209daddf5d709
msgid "You can simply pip install:"
msgstr "你可以使用pip install"
-#: ../../getting_started/installation.md:12 9c610d593608452f9d7d8d7e462251e3
+#: ../../getting_started/installation.md:12 1b0f308412cd44bda55efff5830b99f3
msgid "Notice:make sure python>=3.10"
msgstr "注意:确保你的python版本>=3.10"
-#: ../../getting_started/installation.md:15 b2ed238c29bb40cba990068e8d7ceae7
+#: ../../getting_started/installation.md:15 bec8afbfa9af4594bb09307f92c617a4
msgid "Environment Setup"
msgstr "环境设置"
-#: ../../getting_started/installation.md:17 4804ad4d8edf44f49b1d35b271635fad
+#: ../../getting_started/installation.md:17 61f60acc7a6449ac974593bd5f164e3e
msgid "By default, if you use the EmbeddingEngine api"
msgstr "如果你想使用EmbeddingEngine api"
-#: ../../getting_started/installation.md:19 2205f69ec60d4f73bb3a93a583928455
+#: ../../getting_started/installation.md:19 f31d9989e0514881baed8c690bc8035a
msgid "you will prepare embedding models from huggingface"
msgstr "你需要从huggingface下载embedding models"
-#: ../../getting_started/installation.md:22 693c18a83f034dcc8c263674418bcde2
+#: ../../getting_started/installation.md:22 02eb7897412c4223a013e7921eaed253
msgid "Notice make sure you have install git-lfs"
msgstr "确保你已经安装了git-lfs"
-#: ../../getting_started/installation.md:30 dd8d0880b55e4c48bfc414f8cbdda268
+#: ../../getting_started/installation.md:30 ced612323d5b47d3b93febbc637515bd
msgid "version:"
msgstr "版本:"
-#: ../../getting_started/installation.md:31 731e634b96164efbbc1ce9fa88361b12
+#: ../../getting_started/installation.md:31 4c168bdddb664c76ad4c2be177ef01a0
msgid "db-gpt0.3.0"
msgstr "db-gpt0.3.0"
-#: ../../getting_started/installation.md:32 38fb635be4554d94b527c6762253d46d
+#: ../../getting_started/installation.md:32 9bc5bb66e7214ca0b945b6b46a989307
msgid ""
"[embedding_engine api](https://db-"
"gpt.readthedocs.io/en/latest/modules/knowledge.html)"
-msgstr "[embedding_engine api](https://db-gpt.readthedocs.io/en/latest/modules/knowledge.html)"
+msgstr ""
+"[embedding_engine api](https://db-"
+"gpt.readthedocs.io/en/latest/modules/knowledge.html)"
-#: ../../getting_started/installation.md:33 a60b0ffe21a74ebca05529dc1dd1ba99
+#: ../../getting_started/installation.md:33 52e656e2f0ae46688a6456a441f9c1e5
msgid ""
"[multi source embedding](https://db-"
"gpt.readthedocs.io/en/latest/modules/knowledge/pdf/pdf_embedding.html)"
-msgstr "[multi source embedding](https://db-gpt.readthedocs.io/en/latest/modules/knowledge/pdf/pdf_embedding.html)"
+msgstr ""
+"[multi source embedding](https://db-"
+"gpt.readthedocs.io/en/latest/modules/knowledge/pdf/pdf_embedding.html)"
-#: ../../getting_started/installation.md:34 3c752c9305414719bc3f561cf18a75af
+#: ../../getting_started/installation.md:34 726c87e53d8e4cd799d422aaf562fa29
msgid ""
"[vector connector](https://db-"
"gpt.readthedocs.io/en/latest/modules/vector.html)"
-msgstr "[vector connector](https://db-gpt.readthedocs.io/en/latest/modules/vector.html)"
+msgstr ""
+"[vector connector](https://db-"
+"gpt.readthedocs.io/en/latest/modules/vector.html)"
+
+#~ msgid "Installation"
+#~ msgstr "安装dbgpt包指南"
diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/tutorials.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/tutorials.po
index eef058f66..c685143fa 100644
--- a/docs/locales/zh_CN/LC_MESSAGES/getting_started/tutorials.po
+++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/tutorials.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: DB-GPT 0.3.0\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-07-12 16:23+0800\n"
+"POT-Creation-Date: 2023-08-16 18:31+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language: zh_CN\n"
@@ -19,25 +19,29 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.12.1\n"
-#: ../../getting_started/tutorials.md:1 cb100b89a2a747cd90759e415c737070
+#: ../../getting_started/tutorials.md:1 3e4b863de01942d5823d5dd3975bcb05
msgid "Tutorials"
msgstr "教程"
-#: ../../getting_started/tutorials.md:4 dbc2a2346b384cc3930086f97181b14b
+#: ../../getting_started/tutorials.md:4 bf8a3dc2c8d045d0b4668ecbd25c954d
msgid "This is a collection of DB-GPT tutorials on Medium."
msgstr "这是知乎上DB-GPT教程的集合。."
-#: ../../getting_started/tutorials.md:6 67e5b6dbac654d428e6a8be9d1ec6473
+#: ../../getting_started/tutorials.md:6 e72430ed63ca4c13bab165d61293585a
msgid ""
"DB-GPT is divided into several functions, including chat with knowledge "
"base, execute SQL, chat with database, and execute plugins."
msgstr "DB-GPT包含以下功能,和知识库聊天,执行SQL,和数据库聊天以及执行插件。"
-#: ../../getting_started/tutorials.md:8 744aaec68aa3413c9b17b09714476d32
+#: ../../getting_started/tutorials.md:8 29cb77fcc97e494ba26ecd61fd21d84f
msgid "Introduction"
msgstr "介绍"
-#: ../../getting_started/tutorials.md:9 305bcf5e847a4322a2834b84fa3c694a
+#: ../../getting_started/tutorials.md:10 0158e41b603a47d8a8d0debcddc1d99c
+msgid "youtube"
+msgstr ""
+
+#: ../../getting_started/tutorials.md:11 bd8078ee97094559b3e9b4a90a89651a
#, fuzzy
msgid "[What is DB-GPT](https://www.youtube.com/watch?v=QszhVJerc0I)"
msgstr ""
@@ -45,69 +49,75 @@ msgstr ""
"GPT](https://www.bilibili.com/video/BV1SM4y1a7Nj/?buvid=551b023900b290f9497610b2155a2668&is_story_h5=false&mid=%2BVyE%2Fwau5woPcUKieCWS0A%3D%3D&p=1&plat_id=116&share_from=ugc&share_medium=iphone&share_plat=ios&share_session_id=5D08B533-82A4-4D40-9615-7826065B4574&share_source=GENERIC&share_tag=s_i×tamp=1686307943&unique_k=bhO3lgQ&up_id=31375446)"
" by csunny (https://github.com/csunny/DB-GPT)"
-#: ../../getting_started/tutorials.md:11 22fdc6937b2248ae8f5a7ef385aa55d9
-#, fuzzy
-msgid "Knowledge"
-msgstr "知识库"
-
-#: ../../getting_started/tutorials.md:13 9bbf0f5aece64389b93b16235abda58e
+#: ../../getting_started/tutorials.md:13 0330432860384964804cc6ea9054d06b
#, fuzzy
msgid ""
-"[How to Create your own knowledge repository](https://db-"
-"gpt.readthedocs.io/en/latest/modules/knownledge.html)"
+"[How to deploy DB-GPT step by "
+"step](https://www.youtube.com/watch?v=OJGU4fQCqPs)"
msgstr ""
-"[怎么创建自己的知识库](https://db-"
-"gpt.readthedocs.io/en/latest/modules/knowledge.html)"
+"###Introduction [什么是DB-"
+"GPT](https://www.bilibili.com/video/BV1SM4y1a7Nj/?buvid=551b023900b290f9497610b2155a2668&is_story_h5=false&mid=%2BVyE%2Fwau5woPcUKieCWS0A%3D%3D&p=1&plat_id=116&share_from=ugc&share_medium=iphone&share_plat=ios&share_session_id=5D08B533-82A4-4D40-9615-7826065B4574&share_source=GENERIC&share_tag=s_i×tamp=1686307943&unique_k=bhO3lgQ&up_id=31375446)"
+" by csunny (https://github.com/csunny/DB-GPT)"
-#: ../../getting_started/tutorials.md:15 ae201d75a3aa485e99b258103245db1c
-#, fuzzy
-msgid ""
-msgstr "[新增知识库演示](../../assets/new_knownledge_en.gif)"
+#: ../../getting_started/tutorials.md:16 a5b7b6e46df040ddaea184487b6a6ba3
+msgid "bilibili"
+msgstr "bilibili"
-#: ../../getting_started/tutorials.md:15 e7bfb3396f7b42f1a1be9f29df1773a2
-#, fuzzy
-msgid "Add new Knowledge demonstration"
-msgstr "[新增知识库演示](../../assets/new_knownledge_en.gif)"
+#: ../../getting_started/tutorials.md:17 9da89100917742fca40ab71bc1283504
+msgid ""
+"[What is DB-"
+"GPT](https://www.bilibili.com/video/BV1SM4y1a7Nj/?spm_id_from=333.788&vd_source=7792e22c03b7da3c556a450eb42c8a0f)"
+msgstr "[DB-"
+"GPT介绍](https://www.bilibili.com/video/BV1SM4y1a7Nj/?spm_id_from=333.788&vd_source=7792e22c03b7da3c556a450eb42c8a0f)"
-#: ../../getting_started/tutorials.md:17 d37acc0486ec40309e7e944bb0458b0a
-msgid "SQL Generation"
-msgstr "SQL生成"
+#: ../../getting_started/tutorials.md:19 d0b58d505c774d0e867dbd87e703d4ed
+msgid ""
+"[How to deploy DB-GPT step by "
+"step](https://www.bilibili.com/video/BV1mu411Y7ve/?spm_id_from=pageDriver&vd_source=7792e22c03b7da3c556a450eb42c8a0f)"
+msgstr "[How to deploy DB-GPT step by "
+"step](https://www.bilibili.com/video/BV1mu411Y7ve/?spm_id_from=pageDriver&vd_source=7792e22c03b7da3c556a450eb42c8a0f)"
-#: ../../getting_started/tutorials.md:18 86a328c9e15f46679a2611f7162f9fbe
-#, fuzzy
-msgid ""
-msgstr "[sql生成演示](../../assets/demo_en.gif)"
+#~ msgid "Knowledge"
+#~ msgstr "知识库"
-#: ../../getting_started/tutorials.md:18 03bc8d7320be44f0879a553a324ec26f
-#, fuzzy
-msgid "sql generation demonstration"
-msgstr "[sql生成演示](../../assets/demo_en.gif)"
+#~ msgid ""
+#~ "[How to Create your own knowledge "
+#~ "repository](https://db-"
+#~ "gpt.readthedocs.io/en/latest/modules/knownledge.html)"
+#~ msgstr ""
+#~ "[怎么创建自己的知识库](https://db-"
+#~ "gpt.readthedocs.io/en/latest/modules/knowledge.html)"
-#: ../../getting_started/tutorials.md:20 5f3b241f24634c09880d5de014f64f1b
-msgid "SQL Execute"
-msgstr "SQL执行"
+#~ msgid ""
+#~ msgstr "[新增知识库演示](../../assets/new_knownledge_en.gif)"
-#: ../../getting_started/tutorials.md:21 13a16debf2624f44bfb2e0453c11572d
-#, fuzzy
-msgid ""
-msgstr "[sql execute 演示](../../assets/auto_sql_en.gif)"
+#~ msgid "Add new Knowledge demonstration"
+#~ msgstr "[新增知识库演示](../../assets/new_knownledge_en.gif)"
-#: ../../getting_started/tutorials.md:21 2d9673cfd48b49a5b1942fdc9de292bf
-#, fuzzy
-msgid "sql execute demonstration"
-msgstr "SQL执行演示"
+#~ msgid "SQL Generation"
+#~ msgstr "SQL生成"
-#: ../../getting_started/tutorials.md:23 8cc0c647ad804969b470b133708de37f
-#, fuzzy
-msgid "Plugins"
-msgstr "DB插件"
+#~ msgid ""
+#~ msgstr "[sql生成演示](../../assets/demo_en.gif)"
-#: ../../getting_started/tutorials.md:24 cad5cc0cb94b42a1a6619bbd2a8b9f4c
-#, fuzzy
-msgid ""
-msgstr "[db plugins 演示](../../assets/dbgpt_bytebase_plugin.gif)"
+#~ msgid "sql generation demonstration"
+#~ msgstr "[sql生成演示](../../assets/demo_en.gif)"
-#: ../../getting_started/tutorials.md:24 adeee7ea37b743c9b251976124520725
-msgid "db plugins demonstration"
-msgstr "DB插件演示"
+#~ msgid "SQL Execute"
+#~ msgstr "SQL执行"
+
+#~ msgid ""
+#~ msgstr "[sql execute 演示](../../assets/auto_sql_en.gif)"
+
+#~ msgid "sql execute demonstration"
+#~ msgstr "SQL执行演示"
+
+#~ msgid "Plugins"
+#~ msgstr "DB插件"
+
+#~ msgid ""
+#~ msgstr "[db plugins 演示](../../assets/dbgpt_bytebase_plugin.gif)"
+
+#~ msgid "db plugins demonstration"
+#~ msgstr "DB插件演示"
diff --git a/docs/locales/zh_CN/LC_MESSAGES/index.po b/docs/locales/zh_CN/LC_MESSAGES/index.po
index 5f2f746e7..f626ea6ef 100644
--- a/docs/locales/zh_CN/LC_MESSAGES/index.po
+++ b/docs/locales/zh_CN/LC_MESSAGES/index.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: DB-GPT 0.3.0\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-07-20 10:53+0800\n"
+"POT-Creation-Date: 2023-08-17 21:58+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language: zh_CN\n"
@@ -19,35 +19,31 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.12.1\n"
-#: ../../index.rst:34 ../../index.rst:45 5bab1511780c442e9dd9e18519ad7ef3
+#: ../../index.rst:34 ../../index.rst:45 3566ae9872dd4f97844f9e0680de5f5d
msgid "Getting Started"
msgstr "开始"
-#: ../../index.rst:57 ../../index.rst:78 dbdeae6c9a344e7889d399a5dbbca872
+#: ../../index.rst:59 ../../index.rst:80 6845a928056940b28be8c13d766009f8
msgid "Modules"
msgstr "模块"
-#: ../../index.rst:92 ../../index.rst:108 43594aec45bf49609d7c7435c6472a9b
+#: ../../index.rst:94 ../../index.rst:110 ef3bcf6f837345fca8539d51434e3c2c
msgid "Use Cases"
msgstr "示例"
-#: ../../index.rst:122 ../../index.rst:125 013a0e5d909e4332ac49f6200924043e
+#: ../../index.rst:124 ../../index.rst:127 b10b8b49e4f5459f8bea881ffc9259d5
msgid "Reference"
msgstr "参考"
-#: ../../index.rst:134 ../../index.rst:137 42907cf1d1bb491bae2fcd7ede06e421
-msgid "FAQ"
-msgstr ""
-
-#: ../../index.rst:161 ../../index.rst:167 64f4a587bc1f4ca9a456b97fac5d6def
+#: ../../index.rst:137 ../../index.rst:143 12e75881253a4c4383ac7364c1103348
msgid "Resources"
msgstr "资源"
-#: ../../index.rst:7 df1ad6da308f49debd4053e5899a4c8d
+#: ../../index.rst:7 f1a30af655744c0b8fb7197a5fc3a45b
msgid "Welcome to DB-GPT!"
msgstr "欢迎来到DB-GPT中文文档"
-#: ../../index.rst:8 cc5b388ea2924968b2769e4b4a159761
+#: ../../index.rst:8 426686c829a342798eaae9f789260621
msgid ""
"As large models are released and iterated upon, they are becoming "
"increasingly intelligent. However, in the process of using large models, "
@@ -65,7 +61,7 @@ msgstr ""
",我们启动了DB-"
"GPT项目,为所有基于数据库的场景构建一个完整的私有大模型解决方案。该方案“”支持本地部署,既可应用于“独立私有环境”,又可根据业务模块进行“独立部署”和“隔离”,确保“大模型”的能力绝对私有、安全、可控。"
-#: ../../index.rst:10 b831cd62a1ba46108dcf343792e8d67a
+#: ../../index.rst:10 0a7fbd17ecfd48cb8e0593e35a225e1b
msgid ""
"**DB-GPT** is an experimental open-source project that uses localized GPT"
" large models to interact with your data and environment. With this "
@@ -75,102 +71,102 @@ msgstr ""
"DB-GPT 是一个开源的以数据库为基础的GPT实验项目,使用本地化的GPT大模型与您的数据和环境进行交互,无数据泄露风险100% 私密,100%"
" 安全。"
-#: ../../index.rst:12 3baac7e36a824395a57c246e05560c67
+#: ../../index.rst:12 50e02964b1e24c4fa598f820796aec61
msgid "**Features**"
msgstr "特性"
-#: ../../index.rst:13 6638568ff94e47f6a0b04a6775ca45d1
+#: ../../index.rst:13 62b74478b9b046dfa7606785939ca70e
msgid ""
"Currently, we have released multiple key features, which are listed below"
" to demonstrate our current capabilities:"
msgstr "目前我们已经发布了多种关键的特性,这里一一列举展示一下当前发布的能力。"
-#: ../../index.rst:15 b49710ac36ed4a188280aeb3589f11fe
+#: ../../index.rst:15 f593159ba8dd4388bd2ba189f9efd5ea
msgid "SQL language capabilities - SQL generation - SQL diagnosis"
msgstr "SQL语言能力 - SQL生成 - SQL诊断"
-#: ../../index.rst:19 6bd495632b39477e8948677aeb4ab1ee
+#: ../../index.rst:19 b829655a3ef146528beb9c50538be84e
msgid ""
"Private domain Q&A and data processing - Database knowledge Q&A - Data "
"processing"
msgstr "私有领域问答与数据处理 - 数据库知识问答 - 数据处理"
-#: ../../index.rst:23 d1d3665a412246b9814139a938a29d1b
+#: ../../index.rst:23 43e988a100a740358f1a0be1710d7960
msgid ""
"Plugins - Support custom plugin execution tasks and natively support the "
"Auto-GPT plugin, such as:"
msgstr "插件模型 - 支持自定义插件执行任务,并原生支持Auto-GPT插件,例如:* SQL自动执行,获取查询结果 * 自动爬取学习知识"
-#: ../../index.rst:26 41e37b63a8f44fc09a4da3574b25fcd3
+#: ../../index.rst:26 888186524aef4fe5a0ba643d55783fd9
msgid ""
"Unified vector storage/indexing of knowledge base - Support for "
"unstructured data such as PDF, Markdown, CSV, and WebURL"
msgstr "知识库统一向量存储/索引 - 非结构化数据支持包括PDF、MarkDown、CSV、WebURL"
-#: ../../index.rst:29 1acac8a6cbe54f7b8c31135189d726d6
+#: ../../index.rst:29 fab4f961fe1746dca3d5e369de714108
msgid ""
"Milti LLMs Support - Supports multiple large language models, currently "
"supporting Vicuna (7b, 13b), ChatGLM-6b (int4, int8) - TODO: codegen2, "
"codet5p"
msgstr "多模型支持 - 支持多种大语言模型, 当前已支持Vicuna(7b,13b), ChatGLM-6b(int4, int8)"
-#: ../../index.rst:35 481edf63bcd348b4b3e14353dc5be952
+#: ../../index.rst:35 7bc21f280364448da0edc046378be622
msgid ""
"How to get started using DB-GPT to interact with your data and "
"environment."
msgstr "开始使用DB-GPT与您的数据环境进行交互。"
-#: ../../index.rst:36 6b26a04661f7445198b3797442d3f178
+#: ../../index.rst:36 5362221ecdf5427faa51df83d4a939ee
#, fuzzy
msgid "`Quickstart Guide <./getting_started/getting_started.html>`_"
msgstr "`使用指南 <./getting_started/getting_started.html>`_"
-#: ../../index.rst:38 ff435ef9964d44508ad840de2950c1c8
+#: ../../index.rst:38 e028ed5afce842fbb76a6ce825d5a8e2
msgid "Concepts and terminology"
msgstr "相关概念"
-#: ../../index.rst:40 db9dfacbe93e4f858843c4d2a0e2dd25
+#: ../../index.rst:40 f773ac5e50054c308920c0b95a44b0cb
#, fuzzy
msgid "`Concepts and Terminology <./getting_started/concepts.html>`_"
msgstr "`相关概念 <./getting_started/concepts.html>`_"
-#: ../../index.rst:42 50faf8302ba140ce84897daef2c90ca2
+#: ../../index.rst:42 afeee818ec45454da12b80161b5f1de0
msgid "Coming soon..."
msgstr ""
-#: ../../index.rst:44 547bb007f90e43c096f25647cebefd7b
+#: ../../index.rst:44 cb4f911316234b86aad88b83d2784ad3
msgid "`Tutorials <.getting_started/tutorials.html>`_"
msgstr "`教程 <.getting_started/tutorials.html>`_"
-#: ../../index.rst:59 89bc50c752b84e3fb789ce5da1b654dc
+#: ../../index.rst:61 de365722d61442b99f44fcde8a8c9efb
msgid ""
"These modules are the core abstractions with which we can interact with "
"data and environment smoothly."
msgstr "这些模块是我们可以与数据和环境顺利地进行交互的核心组成。"
-#: ../../index.rst:60 81f3860920964720958c62b3a6769f12
+#: ../../index.rst:62 8fbb6303d3cd4fcc956815f44ef1fa8d
msgid ""
"It's very important for DB-GPT, DB-GPT also provide standard, extendable "
"interfaces."
msgstr "DB-GPT还提供了标准的、可扩展的接口。"
-#: ../../index.rst:62 15faf914d05544df86359e50fdc70483
+#: ../../index.rst:64 797ac43f459b4662a5097c8cb783c4ba
msgid ""
"The docs for each module contain quickstart examples, how to guides, "
"reference docs, and conceptual guides."
msgstr "每个模块的文档都包含快速入门的例子、操作指南、参考文档和相关概念等内容。"
-#: ../../index.rst:64 18a673690afa47df8e6dd3d065b2580f
+#: ../../index.rst:66 70d7b89c1c154c65aabedbb3c94c8771
msgid "The modules are as follows"
msgstr "组成模块如下:"
-#: ../../index.rst:66 d2c20ed1b6ba4aa697090453b0f775a1
+#: ../../index.rst:68 794d8a939e274894910fcdbb3ee52429
msgid ""
"`LLMs <./modules/llms.html>`_: Supported multi models management and "
"integrations."
msgstr "`LLMs <./modules/llms.html>`_:基于FastChat提供大模型的运行环境。支持多模型管理和集成。 "
-#: ../../index.rst:68 d6c72e6fee3348e89422f9ff45804e3a
+#: ../../index.rst:70 58b0d2fcac2c471494fe2b6b5b3f1b49
msgid ""
"`Prompts <./modules/prompts.html>`_: Prompt management, optimization, and"
" serialization for multi database."
@@ -178,59 +174,59 @@ msgstr ""
"`Prompt自动生成与优化 <./modules/prompts.html>`_: 自动化生成高质量的Prompt "
",并进行优化,提高系统的响应效率"
-#: ../../index.rst:70 1756de47335d4eb7916a6c8b8b5ab70b
+#: ../../index.rst:72 d433b62e11f64e18995bd334f93992a6
msgid "`Plugins <./modules/plugins.html>`_: Plugins management, scheduler."
msgstr "`Agent与插件: <./modules/plugins.html>`_:提供Agent和插件机制,使得用户可以自定义并增强系统的行为。"
-#: ../../index.rst:72 542efb88411f4cc192a08e7d28c84863
+#: ../../index.rst:74 3e7eb10f64274c07ace90b84ffc904b4
#, fuzzy
msgid ""
"`Knowledge <./modules/knowledge.html>`_: Knowledge management, embedding,"
" and search."
msgstr "`知识库能力: <./modules/knowledge.html>`_: 支持私域知识库问答能力, "
-#: ../../index.rst:74 0204630cd5f14c68ada3bf11e0b0fbf5
+#: ../../index.rst:76 3af796f287f54de0869a086cfa24b568
msgid ""
"`Connections <./modules/connections.html>`_: Supported multi databases "
"connection. management connections and interact with this."
msgstr "`连接模块 <./modules/connections.html>`_: 用于连接不同的模块和数据源,实现数据的流转和交互 "
-#: ../../index.rst:76 f9363a963be44d0ea01bb5d65b69d0f8
+#: ../../index.rst:78 27273a4020a540f4b28e7c54ea9c9232
#, fuzzy
msgid "`Vector <./modules/vector.html>`_: Supported multi vector database."
msgstr "`LLMs <./modules/llms.html>`_:基于FastChat提供大模型的运行环境。支持多模型管理和集成。 "
-#: ../../index.rst:94 7223c3b95e9446bcae40a355e6b02324
+#: ../../index.rst:96 d887747669d1429f950c56131cd35a62
msgid "Best Practices and built-in implementations for common DB-GPT use cases:"
msgstr "DB-GPT用例的最佳实践和内置方法:"
-#: ../../index.rst:96 c088f2ab9f2247ac9a8c9af31d0da7a6
+#: ../../index.rst:98 63ea6a449012432baeeef975db5c3ac1
msgid ""
"`Sql generation and diagnosis "
"<./use_cases/sql_generation_and_diagnosis.html>`_: SQL generation and "
"diagnosis."
msgstr "`Sql生成和诊断 <./use_cases/sql_generation_and_diagnosis.html>`_: Sql生成和诊断。"
-#: ../../index.rst:98 23973b07c6ba42088a714b048d4b43c4
+#: ../../index.rst:100 c7388a2147af48d1a6619492a3b926db
msgid ""
"`knownledge Based QA <./use_cases/knownledge_based_qa.html>`_: A "
"important scene for user to chat with database documents, codes, bugs and"
" schemas."
msgstr "`知识库问答 <./use_cases/knownledge_based_qa.html>`_: 用户与数据库文档、代码和bug聊天的重要场景\""
-#: ../../index.rst:100 6a1d77bc56a14803b66ebc9f2b6c4b7b
+#: ../../index.rst:102 f5a459f5a84241648a6a05f7ba3026c0
msgid ""
"`Chatbots <./use_cases/chatbots.html>`_: Language model love to chat, use"
" multi models to chat."
msgstr "`聊天机器人 <./use_cases/chatbots.html>`_: 使用多模型进行对话"
-#: ../../index.rst:102 621774a68cba46daa7112f865e6e3af9
+#: ../../index.rst:104 d566174db6eb4834854c00ce7295c297
msgid ""
"`Querying Database Data <./use_cases/query_database_data.html>`_: Query "
"and Analysis data from databases and give charts."
msgstr "`查询数据库数据 <./use_cases/query_database_data.html>`_:从数据库中查询和分析数据并给出图表。"
-#: ../../index.rst:104 9f379fbe8aac47f4a1a53d84ab2a2f51
+#: ../../index.rst:106 89aac5a738ae4aeb84bc324803ada354
msgid ""
"`Interacting with apis <./use_cases/interacting_with_api.html>`_: "
"Interact with apis, such as create a table, deploy a database cluster, "
@@ -239,39 +235,39 @@ msgstr ""
"`API交互 <./use_cases/interacting_with_api.html>`_: "
"与API交互,例如创建表、部署数据库集群、创建数据库等。"
-#: ../../index.rst:106 3dbb717e1b024a20bb6049facb616b1b
+#: ../../index.rst:108 f100fb0cdd264cf186bf554771488aa1
msgid ""
"`Tool use with plugins <./use_cases/tool_use_with_plugin>`_: According to"
" Plugin use tools to manage databases autonomoly."
msgstr "`插件工具 <./use_cases/tool_use_with_plugin>`_: 根据插件使用工具自主管理数据库。"
-#: ../../index.rst:123 4acedc39ccf34e79b805189a11285a3a
+#: ../../index.rst:125 93809b95cc4a41249d7ab6b264981167
msgid ""
"Full documentation on all methods, classes, installation methods, and "
"integration setups for DB-GPT."
msgstr "关于DB-GPT的所有方法、类、安装方法和集成设置的完整文档。"
-#: ../../index.rst:135 622112f3cce34461ba7e0d52fa81d438
-msgid "DB-GPT FAQ."
-msgstr ""
-
-#: ../../index.rst:146 a989256fd69f4bbfae73191b505c59fa
-msgid "Ecosystem"
-msgstr "环境系统"
-
-#: ../../index.rst:148 1b5c410dd94842f2801f08540dd57647
-msgid "Guides for how other companies/products can be used with DB-GPT"
-msgstr "其他公司/产品如何与DB-GPT一起使用的方法指南"
-
-#: ../../index.rst:163 8368b4c62ebe41ad91551e241dbcc4df
+#: ../../index.rst:139 4a95e65e128e4fc0907a3f51f1f2611b
msgid ""
"Additional resources we think may be useful as you develop your "
"application!"
msgstr "“我们认为在您开发应用程序时可能有用的其他资源!”"
-#: ../../index.rst:165 2a4cd897b20c4683979b44fb9d7470e6
+#: ../../index.rst:141 4934fbae909644769dd83c7f99c0fcd0
msgid ""
-"`Discord `_: if your have some "
-"problem or ideas, you can talk from discord."
+"`Discord `_: if your have some problem or "
+"ideas, you can talk from discord."
msgstr "`Discord `_:如果您有任何问题,可以到discord中进行交流。"
+#~ msgid "FAQ"
+#~ msgstr ""
+
+#~ msgid "DB-GPT FAQ."
+#~ msgstr ""
+
+#~ msgid "Ecosystem"
+#~ msgstr "环境系统"
+
+#~ msgid "Guides for how other companies/products can be used with DB-GPT"
+#~ msgstr "其他公司/产品如何与DB-GPT一起使用的方法指南"
+
diff --git a/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/string/string_embedding.po b/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/string/string_embedding.po
new file mode 100644
index 000000000..d88fe1071
--- /dev/null
+++ b/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/string/string_embedding.po
@@ -0,0 +1,52 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2023, csunny
+# This file is distributed under the same license as the DB-GPT package.
+# FIRST AUTHOR , 2023.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: DB-GPT 👏👏 0.3.0\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-07-13 15:39+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME \n"
+"Language: zh_CN\n"
+"Language-Team: zh_CN \n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.12.1\n"
+
+#: ../../modules/knowledge/string/string_embedding.md:1
+#: 0dbae4a197aa4e45a16cfd90c137c8f2
+msgid "String"
+msgstr ""
+
+#: ../../modules/knowledge/string/string_embedding.md:3
+#: bf9f445b1d2045848ad3c663e4551c1f
+msgid ""
+"string embedding can import a long raw text into a vector knowledge base."
+" The entire embedding process includes the read (loading data), "
+"data_process (data processing), and index_to_store (embedding to the "
+"vector database) methods."
+msgstr ""
+
+#: ../../modules/knowledge/string/string_embedding.md:5
+#: 4418539511024c2fa8cf1e73ade226a8
+msgid "inheriting the SourceEmbedding"
+msgstr ""
+
+#: ../../modules/knowledge/string/string_embedding.md:23
+#: 178050a4cdd6429aa73406043d89869b
+msgid ""
+"implement read() and data_process() read() method allows you to read data"
+" and split data into chunk"
+msgstr ""
+
+#: ../../modules/knowledge/string/string_embedding.md:32
+#: fdaf95f201624242bbfcff36480e1819
+msgid "data_process() method allows you to pre processing your ways"
+msgstr ""
+
diff --git a/pilot/configs/model_config.py b/pilot/configs/model_config.py
index 9ca56824a..0623cbe84 100644
--- a/pilot/configs/model_config.py
+++ b/pilot/configs/model_config.py
@@ -37,6 +37,10 @@ LLM_MODEL_CONFIG = {
"vicuna-13b-v1.5": os.path.join(MODEL_PATH, "vicuna-13b-v1.5"),
"vicuna-7b-v1.5": os.path.join(MODEL_PATH, "vicuna-7b-v1.5"),
"text2vec": os.path.join(MODEL_PATH, "text2vec-large-chinese"),
+ # https://huggingface.co/moka-ai/m3e-large
+ "m3e-base": os.path.join(MODEL_PATH, "m3e-base"),
+ # https://huggingface.co/moka-ai/m3e-base
+ "m3e-large": os.path.join(MODEL_PATH, "m3e-large"),
"sentence-transforms": os.path.join(MODEL_PATH, "all-MiniLM-L6-v2"),
"codegen2-1b": os.path.join(MODEL_PATH, "codegen2-1B"),
"codet5p-2b": os.path.join(MODEL_PATH, "codet5p-2b"),
@@ -64,6 +68,7 @@ LLM_MODEL_CONFIG = {
"baichuan-7b": os.path.join(MODEL_PATH, "baichuan-7b"),
# (Llama2 based) We only support WizardLM-13B-V1.2 for now, which is trained from Llama-2 13b, see https://huggingface.co/WizardLM/WizardLM-13B-V1.2
"wizardlm-13b": os.path.join(MODEL_PATH, "WizardLM-13B-V1.2"),
+ "llama-cpp": os.path.join(MODEL_PATH, "ggml-model-q4_0.bin"),
}
# Load model config
diff --git a/pilot/connections/rdbms/conn_sqlite.py b/pilot/connections/rdbms/conn_sqlite.py
index 339af025a..1740537cf 100644
--- a/pilot/connections/rdbms/conn_sqlite.py
+++ b/pilot/connections/rdbms/conn_sqlite.py
@@ -70,10 +70,10 @@ class SQLiteConnect(RDBMSDatabase):
def _sync_tables_from_db(self) -> Iterable[str]:
table_results = self.session.execute(
- "SELECT name FROM sqlite_master WHERE type='table'"
+ text("SELECT name FROM sqlite_master WHERE type='table'")
)
view_results = self.session.execute(
- "SELECT name FROM sqlite_master WHERE type='view'"
+ text("SELECT name FROM sqlite_master WHERE type='view'")
)
table_results = set(row[0] for row in table_results)
view_results = set(row[0] for row in view_results)
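For reference, SQLAlchemy 1.4 deprecates and 2.0 rejects plain SQL strings passed to `Session.execute`, which is what the `text()` wrapper above addresses. A minimal sketch of the pattern, with an illustrative database path:

```python
from sqlalchemy import create_engine, text
from sqlalchemy.orm import Session

engine = create_engine("sqlite:///sqlite.db")
with Session(engine) as session:
    # Plain strings raise an error under SQLAlchemy 2.0; wrap textual SQL in text()
    rows = session.execute(text("SELECT name FROM sqlite_master WHERE type='table'"))
    table_names = {row[0] for row in rows}
```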
diff --git a/pilot/connections/rdbms/tests/test_conn_sqlite.py b/pilot/connections/rdbms/tests/test_conn_sqlite.py
index efe4ddf76..01ef51878 100644
--- a/pilot/connections/rdbms/tests/test_conn_sqlite.py
+++ b/pilot/connections/rdbms/tests/test_conn_sqlite.py
@@ -121,3 +121,17 @@ def test_get_database_list(db):
def test_get_database_names(db):
db.get_database_names() == []
+
+
+def test_db_dir_exist_dir():
+    # The connection should create a missing parent directory for the db file
+    with tempfile.TemporaryDirectory() as temp_dir:
+        new_dir = os.path.join(temp_dir, "new_dir")
+        file_path = os.path.join(new_dir, "sqlite.db")
+        db = SQLiteConnect.from_file_path(file_path)
+        assert os.path.exists(new_dir)
+        assert list(db.get_table_names()) == []
+    # An existing parent directory should work unchanged
+    with tempfile.TemporaryDirectory() as existing_dir:
+        file_path = os.path.join(existing_dir, "sqlite.db")
+        db = SQLiteConnect.from_file_path(file_path)
+        assert os.path.exists(existing_dir)
+        assert list(db.get_table_names()) == []
diff --git a/pilot/model/adapter.py b/pilot/model/adapter.py
index 1c8562e78..d97d2cb2b 100644
--- a/pilot/model/adapter.py
+++ b/pilot/model/adapter.py
@@ -3,7 +3,9 @@
import torch
import os
-from typing import List
+import re
+from pathlib import Path
+from typing import List, Tuple
from functools import cache
from transformers import (
AutoModel,
@@ -12,8 +14,10 @@ from transformers import (
LlamaTokenizer,
BitsAndBytesConfig,
)
+from pilot.model.parameter import ModelParameters, LlamaCppModelParameters
from pilot.configs.model_config import DEVICE
from pilot.configs.config import Config
+from pilot.logs import logger
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
@@ -24,6 +28,14 @@ bnb_config = BitsAndBytesConfig(
CFG = Config()
+class ModelType:
+ """ "Type of model"""
+
+ HF = "huggingface"
+ LLAMA_CPP = "llama.cpp"
+ # TODO, support more model type
+
+
class BaseLLMAdaper:
"""The Base class for multi model, in our project.
We will support those model, which performance resemble ChatGPT"""
@@ -31,8 +43,17 @@ class BaseLLMAdaper:
def use_fast_tokenizer(self) -> bool:
return False
+ def model_type(self) -> str:
+ return ModelType.HF
+
+ def model_param_class(self, model_type: str = None) -> ModelParameters:
+ model_type = model_type if model_type else self.model_type()
+ if model_type == ModelType.LLAMA_CPP:
+ return LlamaCppModelParameters
+ return ModelParameters
+
def match(self, model_path: str):
- return True
+ return False
def loader(self, model_path: str, from_pretrained_kwargs: dict):
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
@@ -52,12 +73,25 @@ def register_llm_model_adapters(cls):
@cache
-def get_llm_model_adapter(model_path: str) -> BaseLLMAdaper:
+def get_llm_model_adapter(model_name: str, model_path: str) -> BaseLLMAdaper:
+ # Prefer using model name matching
for adapter in llm_model_adapters:
- if adapter.match(model_path):
+ if adapter.match(model_name):
+ logger.info(
+ f"Found llm model adapter with model name: {model_name}, {adapter}"
+ )
return adapter
- raise ValueError(f"Invalid model adapter for {model_path}")
+ for adapter in llm_model_adapters:
+ if adapter.match(model_path):
+ logger.info(
+ f"Found llm model adapter with model path: {model_path}, {adapter}"
+ )
+ return adapter
+
+ raise ValueError(
+ f"Invalid model adapter for model name {model_name} and model path {model_path}"
+ )
# TODO support cpu? for practise we support gpt4all or chatglm-6b-int4?
@@ -296,6 +330,52 @@ class WizardLMAdapter(BaseLLMAdaper):
return "wizardlm" in model_path.lower()
+class LlamaCppAdapter(BaseLLMAdaper):
+    @staticmethod
+    def _parse_model_path(model_path: str) -> Tuple[bool, str]:
+        path = Path(model_path)
+        if not path.exists():
+            # Only local model files are supported
+            return False, None
+        if not path.is_file():
+            model_paths = list(path.glob("*ggml*.bin"))
+            if not model_paths:
+                return False, None
+            model_path = str(model_paths[0])
+            logger.warn(
+                f"Model path is a directory, using the first *ggml*.bin model file: {model_path}"
+            )
+        if not re.fullmatch(r".*ggml.*\.bin", model_path):
+            return False, None
+        return True, model_path
+
+ def model_type(self) -> ModelType:
+ return ModelType.LLAMA_CPP
+
+ def match(self, model_path: str):
+ """
+ https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML
+ """
+ if "llama-cpp" == model_path:
+ return True
+ is_match, _ = LlamaCppAdapater._parse_model_path(model_path)
+ return is_match
+
+ def loader(self, model_path: str, from_pretrained_kwargs: dict):
+        # TODO: not fully supported yet
+        _, model_path = LlamaCppAdapter._parse_model_path(model_path)
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_path, trust_remote_code=True, use_fast=False
+ )
+ model = AutoModelForCausalLM.from_pretrained(
+ model_path,
+ trust_remote_code=True,
+ low_cpu_mem_usage=True,
+ **from_pretrained_kwargs,
+ )
+ return model, tokenizer
+
+
register_llm_model_adapters(VicunaLLMAdapater)
register_llm_model_adapters(ChatGLMAdapater)
register_llm_model_adapters(GuanacoAdapter)
@@ -305,6 +385,7 @@ register_llm_model_adapters(GPT4AllAdapter)
register_llm_model_adapters(Llama2Adapter)
register_llm_model_adapters(BaichuanAdapter)
register_llm_model_adapters(WizardLMAdapter)
+register_llm_model_adapters(LlamaCppAdapter)
# TODO Default support vicuna, other model need to tests and Evaluate
# just for test_py, remove this later
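For illustration, the new two-pass lookup in `get_llm_model_adapter` behaves as sketched below, assuming the adapters registered above and an existing ggml file at the (illustrative) path:

```python
# Name-based matching is tried first: the literal name "llama-cpp" is matched
# directly by LlamaCppAdapter.match()
adapter = get_llm_model_adapter("llama-cpp", "/data/models/ggml-model-q4_0.bin")

# If no adapter matches the name, the model path is tried as a fallback;
# a *ggml*.bin path that exists on disk is picked up by LlamaCppAdapter too
adapter = get_llm_model_adapter("my-local-model", "/data/models/ggml-model-q4_0.bin")
```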
diff --git a/pilot/model/llm/llama_cpp/__init__.py b/pilot/model/llm/llama_cpp/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/pilot/model/llm/llama_cpp/llama_cpp.py b/pilot/model/llm/llama_cpp/llama_cpp.py
new file mode 100644
index 000000000..5029d9192
--- /dev/null
+++ b/pilot/model/llm/llama_cpp/llama_cpp.py
@@ -0,0 +1,145 @@
+"""
+Fork from text-generation-webui https://github.com/oobabooga/text-generation-webui/blob/main/modules/llamacpp_model.py
+"""
+import re
+from typing import Dict, Any
+import torch
+import llama_cpp
+
+from pilot.model.parameter import LlamaCppModelParameters
+from pilot.logs import logger
+
+if torch.cuda.is_available() and not torch.version.hip:
+ try:
+ import llama_cpp_cuda
+    except ImportError:
+ llama_cpp_cuda = None
+else:
+ llama_cpp_cuda = None
+
+
+def llama_cpp_lib(prefer_cpu: bool = False):
+ if prefer_cpu or llama_cpp_cuda is None:
+ logger.info(f"Llama.cpp use cpu")
+ return llama_cpp
+ else:
+ return llama_cpp_cuda
+
+
+def ban_eos_logits_processor(eos_token, input_ids, logits):
+ logits[eos_token] = -float("inf")
+ return logits
+
+
+def get_params(model_path: str, model_params: LlamaCppModelParameters) -> Dict:
+ return {
+ "model_path": model_path,
+ "n_ctx": model_params.max_context_size,
+ "seed": model_params.seed,
+ "n_threads": model_params.n_threads,
+ "n_batch": model_params.n_batch,
+ "use_mmap": True,
+ "use_mlock": False,
+ "low_vram": False,
+ "n_gpu_layers": 0 if model_params.prefer_cpu else model_params.n_gpu_layers,
+ "n_gqa": model_params.n_gqa,
+ "logits_all": True,
+ "rms_norm_eps": model_params.rms_norm_eps,
+ }
+
+
+class LlamaCppModel:
+ def __init__(self):
+ self.initialized = False
+ self.model = None
+ self.verbose = True
+
+ def __del__(self):
+ if self.model:
+ self.model.__del__()
+
+ @classmethod
+    def from_pretrained(cls, model_path, model_params: LlamaCppModelParameters):
+ Llama = llama_cpp_lib(prefer_cpu=model_params.prefer_cpu).Llama
+ LlamaCache = llama_cpp_lib(prefer_cpu=model_params.prefer_cpu).LlamaCache
+
+        result = cls()
+ cache_capacity = 0
+ cache_capacity_str = model_params.cache_capacity
+ if cache_capacity_str is not None:
+ if "GiB" in cache_capacity_str:
+ cache_capacity = (
+ int(re.sub("[a-zA-Z]", "", cache_capacity_str)) * 1000 * 1000 * 1000
+ )
+ elif "MiB" in cache_capacity_str:
+ cache_capacity = (
+ int(re.sub("[a-zA-Z]", "", cache_capacity_str)) * 1000 * 1000
+ )
+ else:
+ cache_capacity = int(cache_capacity_str)
+
+ params = get_params(model_path, model_params)
+ logger.info("Cache capacity is " + str(cache_capacity) + " bytes")
+ logger.info(f"Load LLama model with params: {params}")
+
+ result.model = Llama(**params)
+ result.verbose = model_params.verbose
+ if cache_capacity > 0:
+ result.model.set_cache(LlamaCache(capacity_bytes=cache_capacity))
+
+ # This is ugly, but the model and the tokenizer are the same object in this library.
+ return result, result
+
+ def encode(self, string):
+        if isinstance(string, str):
+ string = string.encode()
+
+ return self.model.tokenize(string)
+
+ def decode(self, tokens):
+ return self.model.detokenize(tokens)
+
+ def generate_streaming(self, params, context_len: int):
+ # LogitsProcessorList = llama_cpp_lib().LogitsProcessorList
+
+ # Read parameters
+ prompt = params["prompt"]
+ if self.verbose:
+ print(f"Prompt of model: \n{prompt}")
+
+ temperature = float(params.get("temperature", 1.0))
+ repetition_penalty = float(params.get("repetition_penalty", 1.1))
+ top_p = float(params.get("top_p", 1.0))
+ top_k = int(params.get("top_k", -1)) # -1 means disable
+ max_new_tokens = int(params.get("max_new_tokens", 2048))
+ echo = bool(params.get("echo", True))
+
+ max_src_len = context_len - max_new_tokens
+ # Handle truncation
+ prompt = self.encode(prompt)
+ prompt = prompt[-max_src_len:]
+ prompt = self.decode(prompt).decode("utf-8")
+
+        # TODO: Compared with the original llama model, llama.cpp's Chinese output quality is noticeably worse and still needs debugging
+ completion_chunks = self.model.create_completion(
+ prompt=prompt,
+ max_tokens=max_new_tokens,
+ temperature=temperature,
+ top_p=top_p,
+ top_k=top_k,
+ repeat_penalty=repetition_penalty,
+ # tfs_z=params['tfs'],
+ # mirostat_mode=int(params['mirostat_mode']),
+ # mirostat_tau=params['mirostat_tau'],
+ # mirostat_eta=params['mirostat_eta'],
+ stream=True,
+ echo=echo,
+ logits_processor=None,
+ )
+
+ output = ""
+ for completion_chunk in completion_chunks:
+ text = completion_chunk["choices"][0]["text"]
+ output += text
+ # print(output)
+ yield output
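A minimal usage sketch of the wrapper above; the model path is illustrative and must point to an existing ggml file:

```python
from pilot.model.llm.llama_cpp.llama_cpp import LlamaCppModel
from pilot.model.parameter import LlamaCppModelParameters

params = LlamaCppModelParameters(
    device="cpu",
    model_name="llama-cpp",
    model_path="/data/models/ggml-model-q4_0.bin",
    prefer_cpu=True,
)
# The model and the tokenizer are the same object for llama.cpp
model, tokenizer = LlamaCppModel.from_pretrained(params.model_path, params)

gen_params = {"prompt": "Hello", "temperature": 0.7, "max_new_tokens": 64}
for partial_output in model.generate_streaming(gen_params, context_len=4096):
    pass  # each item is the accumulated output so far
print(partial_output)
```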
diff --git a/pilot/model/llm_out/llama_cpp_llm.py b/pilot/model/llm_out/llama_cpp_llm.py
new file mode 100644
index 000000000..921670065
--- /dev/null
+++ b/pilot/model/llm_out/llama_cpp_llm.py
@@ -0,0 +1,8 @@
+from typing import Dict
+import torch
+
+
+@torch.inference_mode()
+def generate_stream(model, tokenizer, params: Dict, device: str, context_len: int):
+    # Only LlamaCppModel is supported
+ return model.generate_streaming(params=params, context_len=context_len)
diff --git a/pilot/model/loader.py b/pilot/model/loader.py
index c1af72e6e..170f7c460 100644
--- a/pilot/model/loader.py
+++ b/pilot/model/loader.py
@@ -2,48 +2,24 @@
# -*- coding: utf-8 -*-
from typing import Optional, Dict
-import dataclasses
import torch
from pilot.configs.model_config import DEVICE
-from pilot.model.adapter import get_llm_model_adapter, BaseLLMAdaper
+from pilot.model.adapter import get_llm_model_adapter, BaseLLMAdaper, ModelType
from pilot.model.compression import compress_module
+from pilot.model.parameter import (
+ EnvArgumentParser,
+ ModelParameters,
+ LlamaCppModelParameters,
+ _genenv_ignoring_key_case,
+)
from pilot.model.llm.monkey_patch import replace_llama_attn_with_non_inplace_operations
from pilot.singleton import Singleton
from pilot.utils import get_gpu_memory
from pilot.logs import logger
-class ModelType:
- """ "Type of model"""
-
- HF = "huggingface"
- LLAMA_CPP = "llama.cpp"
- # TODO, support more model type
-
-
-@dataclasses.dataclass
-class ModelParams:
- device: str
- model_name: str
- model_path: str
- model_type: Optional[str] = ModelType.HF
- num_gpus: Optional[int] = None
- max_gpu_memory: Optional[str] = None
- cpu_offloading: Optional[bool] = False
- load_8bit: Optional[bool] = True
- load_4bit: Optional[bool] = False
- # quantization datatypes, `fp4` (four bit float) and `nf4` (normal four bit float)
- quant_type: Optional[str] = "nf4"
- # Nested quantization is activated through `use_double_quant``
- use_double_quant: Optional[bool] = True
- # "bfloat16", "float16", "float32"
- compute_dtype: Optional[str] = None
- debug: Optional[bool] = False
- trust_remote_code: Optional[bool] = True
-
-
-def _check_multi_gpu_or_4bit_quantization(model_params: ModelParams):
+def _check_multi_gpu_or_4bit_quantization(model_params: ModelParameters):
# TODO: vicuna-v1.5 8-bit quantization info is slow
# TODO: support wizardlm quantization, see: https://huggingface.co/WizardLM/WizardLM-13B-V1.2/discussions/5
model_name = model_params.model_name.lower()
@@ -51,7 +27,7 @@ def _check_multi_gpu_or_4bit_quantization(model_params: ModelParams):
return any(m in model_name for m in supported_models)
-def _check_quantization(model_params: ModelParams):
+def _check_quantization(model_params: ModelParameters):
model_name = model_params.model_name.lower()
has_quantization = any([model_params.load_8bit or model_params.load_4bit])
if has_quantization:
@@ -69,6 +45,21 @@ def _check_quantization(model_params: ModelParams):
return has_quantization
+def _get_model_real_path(model_name, default_model_path) -> str:
+ """Get model real path by model name
+ priority from high to low:
+ 1. environment variable with key: {model_name}_model_path
+ 2. environment variable with key: model_path
+ 3. default_model_path
+ """
+ env_prefix = model_name + "_"
+ env_prefix = env_prefix.replace("-", "_")
+ env_model_path = _genenv_ignoring_key_case("model_path", env_prefix=env_prefix)
+ if env_model_path:
+ return env_model_path
+ return _genenv_ignoring_key_case("model_path", default_value=default_model_path)
+
+
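For example (hypothetical environment), the priority order above resolves as follows:

```python
import os

# Highest priority: the model-specific key; "-" in the model name becomes "_",
# and the lookup is case-insensitive
os.environ["LLAMA_CPP_MODEL_PATH"] = "/data/models/vicuna-7b-v1.5.ggmlv3.q4_0.bin"

path = _get_model_real_path("llama-cpp", "/data/models/ggml-model-q4_0.bin")
# -> "/data/models/vicuna-7b-v1.5.ggmlv3.q4_0.bin"
```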
class ModelLoader(metaclass=Singleton):
"""Model loader is a class for model load
@@ -83,6 +74,7 @@ class ModelLoader(metaclass=Singleton):
self.device = DEVICE
self.model_path = model_path
self.model_name = model_name
+ self.prompt_template: str = None
self.kwargs = {
"torch_dtype": torch.float16,
"device_map": "auto",
@@ -97,7 +89,18 @@ class ModelLoader(metaclass=Singleton):
cpu_offloading=False,
max_gpu_memory: Optional[str] = None,
):
- model_params = ModelParams(
+ llm_adapter = get_llm_model_adapter(self.model_name, self.model_path)
+ model_type = llm_adapter.model_type()
+ param_cls = llm_adapter.model_param_class(model_type)
+
+ args_parser = EnvArgumentParser()
+ # Read the parameters of the model from the environment variable according to the model name prefix, which currently has the highest priority
+ # vicuna_13b_max_gpu_memory=13Gib or VICUNA_13B_MAX_GPU_MEMORY=13Gib
+ env_prefix = self.model_name + "_"
+ env_prefix = env_prefix.replace("-", "_")
+ model_params = args_parser.parse_args_into_dataclass(
+ param_cls,
+ env_prefix=env_prefix,
device=self.device,
model_path=self.model_path,
model_name=self.model_name,
@@ -105,14 +108,21 @@ class ModelLoader(metaclass=Singleton):
cpu_offloading=cpu_offloading,
load_8bit=load_8bit,
load_4bit=load_4bit,
- debug=debug,
+ verbose=debug,
)
+ self.prompt_template = model_params.prompt_template
+
logger.info(f"model_params:\n{model_params}")
- llm_adapter = get_llm_model_adapter(model_params.model_path)
- return huggingface_loader(llm_adapter, model_params)
+
+ if model_type == ModelType.HF:
+ return huggingface_loader(llm_adapter, model_params)
+ elif model_type == ModelType.LLAMA_CPP:
+ return llamacpp_loader(llm_adapter, model_params)
+ else:
+ raise Exception(f"Unkown model type {model_type}")
-def huggingface_loader(llm_adapter: BaseLLMAdaper, model_params: ModelParams):
+def huggingface_loader(llm_adapter: BaseLLMAdaper, model_params: ModelParameters):
device = model_params.device
max_memory = None
@@ -175,14 +185,14 @@ def huggingface_loader(llm_adapter: BaseLLMAdaper, model_params: ModelParams):
pass
except AttributeError:
pass
- if model_params.debug:
+ if model_params.verbose:
print(model)
return model, tokenizer
def load_huggingface_quantization_model(
llm_adapter: BaseLLMAdaper,
- model_params: ModelParams,
+ model_params: ModelParameters,
kwargs: Dict,
max_memory: Dict[int, str],
):
@@ -312,3 +322,17 @@ def load_huggingface_quantization_model(
)
return model, tokenizer
+
+
+def llamacpp_loader(llm_adapter: BaseLLMAdaper, model_params: LlamaCppModelParameters):
+ try:
+ from pilot.model.llm.llama_cpp.llama_cpp import LlamaCppModel
+ except ImportError as exc:
+ raise ValueError(
+ "Could not import python package: llama-cpp-python "
+ "Please install db-gpt llama support with `cd $DB-GPT-DIR && pip install .[llama_cpp]` "
+ "or install llama-cpp-python with `pip install llama-cpp-python`"
+ ) from exc
+ model_path = model_params.model_path
+ model, tokenizer = LlamaCppModel.from_pretrained(model_path, model_params)
+ return model, tokenizer
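A sketch of the resulting load flow, assuming llama-cpp-python is installed and the (illustrative) model file exists:

```python
from pilot.model.loader import ModelLoader

loader = ModelLoader(
    model_path="/data/models/ggml-model-q4_0.bin", model_name="llama-cpp"
)
# The matched adapter reports ModelType.LLAMA_CPP, so loader() dispatches to
# llamacpp_loader() above instead of huggingface_loader()
model, tokenizer = loader.loader(load_8bit=False, load_4bit=False, debug=False)
```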
diff --git a/pilot/model/parameter.py b/pilot/model/parameter.py
new file mode 100644
index 000000000..6ede45d9b
--- /dev/null
+++ b/pilot/model/parameter.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+import os
+
+from typing import Any, Optional, Type
+from dataclasses import dataclass, field, fields
+
+from pilot.model.conversation import conv_templates
+
+supported_prompt_templates = ",".join(conv_templates.keys())
+
+
+def _genenv_ignoring_key_case(env_key: str, env_prefix: str = None, default_value=None):
+ """Get the value from the environment variable, ignoring the case of the key"""
+ if env_prefix:
+ env_key = env_prefix + env_key
+ return os.getenv(
+ env_key, os.getenv(env_key.upper(), os.getenv(env_key.lower(), default_value))
+ )
+
+
+class EnvArgumentParser:
+ def parse_args_into_dataclass(
+ self, dataclass_type: Type, env_prefix: str = None, **kwargs
+ ) -> Any:
+        # "fld" avoids shadowing dataclasses.field imported above
+        for fld in fields(dataclass_type):
+            env_var_value = _genenv_ignoring_key_case(fld.name, env_prefix)
+            if env_var_value:
+                env_var_value = env_var_value.strip()
+                if fld.type is int or fld.type == Optional[int]:
+                    env_var_value = int(env_var_value)
+                elif fld.type is float or fld.type == Optional[float]:
+                    env_var_value = float(env_var_value)
+                elif fld.type is bool or fld.type == Optional[bool]:
+                    env_var_value = env_var_value.lower() == "true"
+                elif fld.type is str or fld.type == Optional[str]:
+                    pass
+                else:
+                    raise ValueError(f"Unsupported parameter type {fld.type}")
+                kwargs[fld.name] = env_var_value
+ return dataclass_type(**kwargs)
+
+
+@dataclass
+class ModelParameters:
+ device: str = field(metadata={"help": "Device to run model"})
+ model_name: str = field(metadata={"help": "Model name"})
+ model_path: str = field(metadata={"help": "Model path"})
+ model_type: Optional[str] = field(
+ default="huggingface", metadata={"help": "Model type, huggingface or llama.cpp"}
+ )
+ prompt_template: Optional[str] = field(
+ default=None,
+ metadata={
+ "help": f"Prompt template. If None, the prompt template is automatically determined from model path, supported template: {suported_prompt_templates}"
+ },
+ )
+ max_context_size: Optional[int] = field(
+ default=4096, metadata={"help": "Maximum context size"}
+ )
+
+ num_gpus: Optional[int] = field(
+ default=None,
+ metadata={
+ "help": "The number of gpus you expect to use, if it is empty, use all of them as much as possible"
+ },
+ )
+ max_gpu_memory: Optional[str] = field(
+ default=None,
+ metadata={
+ "help": "The maximum memory limit of each GPU, only valid in multi-GPU configuration"
+ },
+ )
+ cpu_offloading: Optional[bool] = field(
+ default=False, metadata={"help": "CPU offloading"}
+ )
+ load_8bit: Optional[bool] = field(
+ default=False, metadata={"help": "8-bit quantization"}
+ )
+ load_4bit: Optional[bool] = field(
+ default=False, metadata={"help": "4-bit quantization"}
+ )
+ quant_type: Optional[str] = field(
+ default="nf4",
+ metadata={
+ "valid_values": ["nf4", "fp4"],
+ "help": "Quantization datatypes, `fp4` (four bit float) and `nf4` (normal four bit float), only valid when load_4bit=True",
+ },
+ )
+ use_double_quant: Optional[bool] = field(
+ default=True,
+ metadata={"help": "Nested quantization, only valid when load_4bit=True"},
+ )
+ # "bfloat16", "float16", "float32"
+ compute_dtype: Optional[str] = field(
+ default=None,
+ metadata={
+ "valid_values": ["bfloat16", "float16", "float32"],
+ "help": "Model compute type",
+ },
+ )
+ trust_remote_code: Optional[bool] = field(
+ default=True, metadata={"help": "Trust remote code"}
+ )
+ verbose: Optional[bool] = field(
+ default=False, metadata={"help": "Show verbose output."}
+ )
+
+
+@dataclass
+class LlamaCppModelParameters(ModelParameters):
+ seed: Optional[int] = field(
+ default=-1, metadata={"help": "Random seed for llama-cpp models. -1 for random"}
+ )
+ n_threads: Optional[int] = field(
+ default=None,
+ metadata={
+ "help": "Number of threads to use. If None, the number of threads is automatically determined"
+ },
+ )
+ n_batch: Optional[int] = field(
+ default=512,
+ metadata={
+ "help": "Maximum number of prompt tokens to batch together when calling llama_eval"
+ },
+ )
+ n_gpu_layers: Optional[int] = field(
+ default=1000000000,
+ metadata={
+ "help": "Number of layers to offload to the GPU, Set this to 1000000000 to offload all layers to the GPU."
+ },
+ )
+ n_gqa: Optional[int] = field(
+ default=None,
+ metadata={"help": "Grouped-query attention. Must be 8 for llama-2 70b."},
+ )
+ rms_norm_eps: Optional[float] = field(
+ default=5e-06, metadata={"help": "5e-6 is a good value for llama-2 models."}
+ )
+ cache_capacity: Optional[str] = field(
+ default=None,
+ metadata={
+ "help": "Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed. "
+ },
+ )
+ prefer_cpu: Optional[bool] = field(
+ default=False,
+ metadata={
+ "help": "If a GPU is available, it will be preferred by default, unless prefer_cpu=False is configured."
+ },
+ )
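A minimal sketch of how these dataclasses are populated from the environment, mirroring the `{model name}_{config key}` convention used in pilot/model/loader.py (values are illustrative):

```python
import os

# Either case works, see _genenv_ignoring_key_case
os.environ["llama_cpp_n_gqa"] = "8"          # llama-2 70b needs n_gqa=8
os.environ["LLAMA_CPP_PREFER_CPU"] = "true"

parser = EnvArgumentParser()
params = parser.parse_args_into_dataclass(
    LlamaCppModelParameters,
    env_prefix="llama_cpp_",
    device="cpu",
    model_name="llama-cpp",
    model_path="/data/models/ggml-model-q4_0.bin",
)
assert params.n_gqa == 8 and params.prefer_cpu is True
```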
diff --git a/pilot/openapi/api_v1/__init__.py b/pilot/openapi/api_v1/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/pilot/scene/chat_dashboard/out_parser.py b/pilot/scene/chat_dashboard/out_parser.py
index 00bdc4179..7f799f585 100644
--- a/pilot/scene/chat_dashboard/out_parser.py
+++ b/pilot/scene/chat_dashboard/out_parser.py
@@ -29,6 +29,8 @@ class ChatDashboardOutputParser(BaseOutputParser):
print("clean prompt response:", clean_str)
response = json.loads(clean_str)
chart_items: List[ChartItem] = []
+ if not isinstance(response, list):
+ response = [response]
for item in response:
chart_items.append(
ChartItem(
diff --git a/pilot/scene/chat_dashboard/prompt.py b/pilot/scene/chat_dashboard/prompt.py
index 6676ca678..5b93ebb1f 100644
--- a/pilot/scene/chat_dashboard/prompt.py
+++ b/pilot/scene/chat_dashboard/prompt.py
@@ -20,7 +20,7 @@ The output data of the analysis cannot exceed 4 columns, and do not use columns
According to the characteristics of the analyzed data, choose the most suitable one from the charts provided below for data display, chart type:
{supported_chat_type}
-Pay attention to the length of the output content of the analysis result, do not exceed 4000tokens
+Pay attention to the length of the output content of the analysis result, do not exceed 4000 tokens
Give the correct {dialect} analysis SQL (don't use unprovided values such as 'paid'), analysis title(don't exist the same), display method and summary of brief analysis thinking, and respond in the following json format:
{response}
diff --git a/pilot/server/chat_adapter.py b/pilot/server/chat_adapter.py
index ab3aec94e..07b44b28c 100644
--- a/pilot/server/chat_adapter.py
+++ b/pilot/server/chat_adapter.py
@@ -13,7 +13,7 @@ class BaseChatAdpter:
and fetch output from model"""
def match(self, model_path: str):
- return True
+ return False
def get_generate_stream_func(self, model_path: str):
"""Return the generate stream handler func"""
@@ -24,7 +24,9 @@ class BaseChatAdpter:
def get_conv_template(self, model_path: str) -> Conversation:
return None
- def model_adaptation(self, params: Dict, model_path: str) -> Tuple[Dict, Dict]:
+ def model_adaptation(
+ self, params: Dict, model_path: str, prompt_template: str = None
+ ) -> Tuple[Dict, Dict]:
"""Params adaptation"""
conv = self.get_conv_template(model_path)
messages = params.get("messages")
@@ -39,6 +41,10 @@ class BaseChatAdpter:
]
params["messages"] = messages
+ if prompt_template:
+ print(f"Use prompt template {prompt_template} from config")
+ conv = get_conv_template(prompt_template)
+
if not conv or not messages:
# Nothing to do
print(
@@ -94,14 +100,19 @@ def register_llm_model_chat_adapter(cls):
@cache
-def get_llm_chat_adapter(model_path: str) -> BaseChatAdpter:
+def get_llm_chat_adapter(model_name: str, model_path: str) -> BaseChatAdpter:
"""Get a chat generate func for a model"""
for adapter in llm_model_chat_adapters:
- if adapter.match(model_path):
- print(f"Get model path: {model_path} adapter {adapter}")
+ if adapter.match(model_name):
+ print(f"Get model chat adapter with model name {model_name}, {adapter}")
return adapter
-
- raise ValueError(f"Invalid model for chat adapter {model_path}")
+ for adapter in llm_model_chat_adapters:
+ if adapter.match(model_path):
+ print(f"Get model chat adapter with model path {model_path}, {adapter}")
+ return adapter
+ raise ValueError(
+ f"Invalid model for chat adapter with model name {model_name} and model path {model_path}"
+ )
class VicunaChatAdapter(BaseChatAdpter):
@@ -239,6 +250,24 @@ class WizardLMChatAdapter(BaseChatAdpter):
return get_conv_template("vicuna_v1.1")
+class LlamaCppChatAdapter(BaseChatAdpter):
+ def match(self, model_path: str):
+        from pilot.model.adapter import LlamaCppAdapter
+
+ if "llama-cpp" == model_path:
+ return True
+ is_match, _ = LlamaCppAdapater._parse_model_path(model_path)
+ return is_match
+
+ def get_conv_template(self, model_path: str) -> Conversation:
+ return get_conv_template("llama-2")
+
+ def get_generate_stream_func(self, model_path: str):
+ from pilot.model.llm_out.llama_cpp_llm import generate_stream
+
+ return generate_stream
+
+
register_llm_model_chat_adapter(VicunaChatAdapter)
register_llm_model_chat_adapter(ChatGLMChatAdapter)
register_llm_model_chat_adapter(GuanacoChatAdapter)
@@ -248,6 +277,7 @@ register_llm_model_chat_adapter(GPT4AllChatAdapter)
register_llm_model_chat_adapter(Llama2ChatAdapter)
register_llm_model_chat_adapter(BaichuanChatAdapter)
register_llm_model_chat_adapter(WizardLMChatAdapter)
+register_llm_model_chat_adapter(LlamaCppChatAdapter)
# Proxy model for test and develop, it's cheap for us now.
register_llm_model_chat_adapter(ProxyllmChatAdapter)
diff --git a/pilot/server/llmserver.py b/pilot/server/llmserver.py
index 21789b9c8..10cdb0299 100644
--- a/pilot/server/llmserver.py
+++ b/pilot/server/llmserver.py
@@ -23,7 +23,7 @@ sys.path.append(ROOT_PATH)
from pilot.configs.config import Config
from pilot.configs.model_config import *
from pilot.model.llm_out.vicuna_base_llm import get_embeddings
-from pilot.model.loader import ModelLoader
+from pilot.model.loader import ModelLoader, _get_model_real_path
from pilot.server.chat_adapter import get_llm_chat_adapter
from pilot.scene.base_message import ModelMessage
@@ -34,12 +34,13 @@ class ModelWorker:
def __init__(self, model_path, model_name, device):
if model_path.endswith("/"):
model_path = model_path[:-1]
- self.model_name = model_name or model_path.split("/")[-1]
+ model_path = _get_model_real_path(model_name, model_path)
+ # self.model_name = model_name or model_path.split("/")[-1]
self.device = device
- print(f"Loading {model_name} LLM ModelServer in {device}! Please Wait......")
- self.ml: ModelLoader = ModelLoader(
- model_path=model_path, model_name=self.model_name
+ print(
+ f"Loading {model_name} LLM ModelServer in {device} from model path {model_path}! Please Wait......"
)
+ self.ml: ModelLoader = ModelLoader(model_path=model_path, model_name=model_name)
self.model, self.tokenizer = self.ml.loader(
load_8bit=CFG.IS_LOAD_8BIT,
load_4bit=CFG.IS_LOAD_4BIT,
@@ -60,7 +61,7 @@ class ModelWorker:
else:
self.context_len = 2048
- self.llm_chat_adapter = get_llm_chat_adapter(model_path)
+ self.llm_chat_adapter = get_llm_chat_adapter(model_name, model_path)
self.generate_stream_func = self.llm_chat_adapter.get_generate_stream_func(
model_path
)
@@ -86,7 +87,7 @@ class ModelWorker:
try:
# params adaptation
params, model_context = self.llm_chat_adapter.model_adaptation(
- params, self.ml.model_path
+ params, self.ml.model_path, prompt_template=self.ml.prompt_template
)
for output in self.generate_stream_func(
self.model, self.tokenizer, params, DEVICE, CFG.MAX_POSITION_EMBEDDINGS
diff --git a/pilot/vector_store/connector.py b/pilot/vector_store/connector.py
index ca56986c8..eaa202e72 100644
--- a/pilot/vector_store/connector.py
+++ b/pilot/vector_store/connector.py
@@ -1,9 +1,15 @@
from pilot.vector_store.chroma_store import ChromaStore
-from pilot.vector_store.milvus_store import MilvusStore
from pilot.vector_store.weaviate_store import WeaviateStore
-connector = {"Chroma": ChromaStore, "Milvus": MilvusStore, "Weaviate": WeaviateStore}
+connector = {"Chroma": ChromaStore, "Weaviate": WeaviateStore}
+
+try:
+ from pilot.vector_store.milvus_store import MilvusStore
+
+ connector["Milvus"] = MilvusStore
+except ImportError:
+    # pymilvus is an optional dependency; skip Milvus support when it is missing
+    pass
class VectorStoreConnector:
diff --git a/requirements.txt b/requirements.txt
index 78bcfadc1..55fdbadfb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,8 +5,8 @@ async-timeout==4.0.2
attrs==22.2.0
cchardet==2.1.7
chardet==5.1.0
-contourpy==1.0.7
-cycler==0.11.0
+# contourpy==1.0.7
+# cycler==0.11.0
filelock==3.9.0
fonttools==4.38.0
frozenlist==1.3.3
@@ -14,20 +14,20 @@ huggingface-hub==0.14.1
importlib-resources==5.12.0
sqlparse==0.4.4
-kiwisolver==1.4.4
-matplotlib==3.7.1
+# kiwisolver==1.4.4
+# matplotlib==3.7.1
multidict==6.0.4
packaging==23.0
psutil==5.9.4
-pycocotools==2.0.6
-pyparsing==3.0.9
+# pycocotools==2.0.6
+# pyparsing==3.0.9
python-dateutil==2.8.2
pyyaml==6.0
tokenizers==0.13.2
tqdm==4.64.1
transformers>=4.31.0
transformers_stream_generator
-timm==0.6.13
+# timm==0.6.13
spacy==3.5.3
webdataset==0.2.48
yarl==1.8.2
@@ -40,15 +40,17 @@ peft
pycocoevalcap
cpm_kernels
umap-learn
-notebook
-wandb
-llama-index==0.5.27
+# notebook
+gradio==3.23
+gradio-client==0.0.8
+# wandb
+# llama-index==0.5.27
+# TODO move bitsandbytes to optional
bitsandbytes
accelerate>=0.20.3
unstructured==0.6.3
-grpcio==1.47.5
gpt4all==0.3.0
diskcache==5.6.1
@@ -58,7 +60,7 @@ gTTS==2.3.1
langchain
nltk
python-dotenv==1.0.0
-pymilvus==2.2.1
+
vcrpy
chromadb==0.3.22
markdown2
@@ -71,18 +73,7 @@ bardapi==0.1.29
# database
+# TODO: move to optional dependencies
pymysql
duckdb
-duckdb-engine
-pymssql
-
-# Testing dependencies
-pytest
-asynctest
-pytest-asyncio
-pytest-benchmark
-pytest-cov
-pytest-integration
-pytest-mock
-pytest-recording
-pytesseract==0.3.10
+duckdb-engine
\ No newline at end of file
diff --git a/requirements/test-requirements.txt b/requirements/test-requirements.txt
new file mode 100644
index 000000000..c2fb321a5
--- /dev/null
+++ b/requirements/test-requirements.txt
@@ -0,0 +1,10 @@
+# Testing dependencies
+pytest
+asynctest
+pytest-asyncio
+pytest-benchmark
+pytest-cov
+pytest-integration
+pytest-mock
+pytest-recording
+pytesseract==0.3.10
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 5c4a4d7f4..5136f4fb8 100644
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,11 @@
-from typing import List
+from typing import List, Tuple
import setuptools
+import platform
+import subprocess
+import os
+from enum import Enum
+
from setuptools import find_packages
with open("README.md", "r") as fh:
@@ -16,9 +21,144 @@ def parse_requirements(file_name: str) -> List[str]:
]
+class SetupSpec:
+ def __init__(self) -> None:
+ self.extras: dict = {}
+
+
+setup_spec = SetupSpec()
+
+
+class AVXType(Enum):
+ BASIC = "basic"
+ AVX = "AVX"
+ AVX2 = "AVX2"
+ AVX512 = "AVX512"
+
+ @staticmethod
+ def of_type(avx: str):
+ for item in AVXType:
+            if item.value == avx:
+ return item
+ return None
+
+
+class OSType(Enum):
+ WINDOWS = "win"
+ LINUX = "linux"
+ DARWIN = "darwin"
+ OTHER = "other"
+
+
+def get_cpu_avx_support() -> Tuple[OSType, AVXType]:
+ system = platform.system()
+ os_type = OSType.OTHER
+ cpu_avx = AVXType.BASIC
+ env_cpu_avx = AVXType.of_type(os.getenv("DBGPT_LLAMA_CPP_AVX"))
+
+ cmds = ["lscpu"]
+ if system == "Windows":
+ cmds = ["coreinfo"]
+ os_type = OSType.WINDOWS
+ elif system == "Linux":
+ cmds = ["lscpu"]
+ os_type = OSType.LINUX
+ elif system == "Darwin":
+ cmds = ["sysctl", "-a"]
+ os_type = OSType.DARWIN
+ else:
+ os_type = OSType.OTHER
+ print("Unsupported OS to get cpu avx, use default")
+ return os_type, env_cpu_avx if env_cpu_avx else cpu_avx
+    try:
+        result = subprocess.run(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        output = result.stdout.decode()
+    except FileNotFoundError:
+        # The probe tool (lscpu/sysctl/coreinfo) may be missing; fall back to defaults
+        return os_type, env_cpu_avx if env_cpu_avx else cpu_avx
+ if "avx512" in output.lower():
+ cpu_avx = AVXType.AVX512
+ elif "avx2" in output.lower():
+ cpu_avx = AVXType.AVX2
+ elif "avx " in output.lower():
+        # Plain AVX is detected but not used for wheel selection; keep the basic build
+        # cpu_avx = AVXType.AVX
+ pass
+ return os_type, env_cpu_avx if env_cpu_avx else cpu_avx
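The probe can be overridden through the environment, which is useful when the build machine differs from the target (hypothetical session):

```python
import os

# Force a specific AVX variant instead of trusting lscpu/sysctl/coreinfo
os.environ["DBGPT_LLAMA_CPP_AVX"] = "AVX2"
os_type, cpu_avx = get_cpu_avx_support()
# cpu_avx is now AVXType.AVX2 regardless of what the probe reported
```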
+
+
+def get_cuda_version() -> str:
+ try:
+ import torch
+
+ return torch.version.cuda
+ except Exception:
+ return None
+
+
+def llama_cpp_python_cuda_requires():
+ cuda_version = get_cuda_version()
+ device = "cpu"
+ if not cuda_version:
+ print("CUDA not support, use cpu version")
+ return
+ device = "cu" + cuda_version.replace(".", "")
+ os_type, cpu_avx = get_cpu_avx_support()
+ supported_os = [OSType.WINDOWS, OSType.LINUX]
+ if os_type not in supported_os:
+ print(
+ f"llama_cpp_python_cuda just support in os: {[r._value_ for r in supported_os]}"
+ )
+ return
+    cpu_avx = cpu_avx.value
+ base_url = "https://github.com/jllllll/llama-cpp-python-cuBLAS-wheels/releases/download/textgen-webui"
+ llama_cpp_version = "0.1.77"
+ py_version = "cp310"
+ os_pkg_name = "linux_x86_64" if os_type == OSType.LINUX else "win_amd64"
+ extra_index_url = f"{base_url}/llama_cpp_python_cuda-{llama_cpp_version}+{device}{cpu_avx}-{py_version}-{py_version}-{os_pkg_name}.whl"
+ print(f"Install llama_cpp_python_cuda from {extra_index_url}")
+
+ setup_spec.extras["llama_cpp"].append(f"llama_cpp_python_cuda @ {extra_index_url}")
+
+
+def llama_cpp_requires():
+ """
+ pip install "db-gpt[llama_cpp]"
+ """
+ setup_spec.extras["llama_cpp"] = ["llama-cpp-python"]
+ llama_cpp_python_cuda_requires()
+
+
+def all_vector_store_requires():
+ """
+ pip install "db-gpt[vstore]"
+ """
+ setup_spec.extras["vstore"] = [
+ "grpcio==1.47.5", # maybe delete it
+ "pymilvus==2.2.1",
+ ]
+
+
+def all_datasource_requires():
+ """
+ pip install "db-gpt[datasource]"
+ """
+ setup_spec.extras["datasource"] = ["pymssql", "pymysql"]
+
+
+def all_requires():
+ requires = set()
+    for pkgs in setup_spec.extras.values():
+ for pkg in pkgs:
+ requires.add(pkg)
+ setup_spec.extras["all"] = list(requires)
+
+
+llama_cpp_requires()
+all_vector_store_requires()
+all_datasource_requires()
+
+# must be last
+all_requires()
+
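After these calls, `setup_spec.extras` looks roughly like the sketch below (the CUDA wheel URL is appended only when a CUDA-enabled torch is detected), which is what `extras_require` receives:

```python
{
    "llama_cpp": ["llama-cpp-python"],  # plus a llama_cpp_python_cuda wheel URL when applicable
    "vstore": ["grpcio==1.47.5", "pymilvus==2.2.1"],
    "datasource": ["pymssql", "pymysql"],
    "all": [...],  # the union of every extra above
}
```

Users can then opt in with `pip install "db-gpt[llama_cpp]"`, `pip install "db-gpt[vstore]"`, or `pip install "db-gpt[all]"`.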
setuptools.setup(
name="db-gpt",
- packages=find_packages(),
+ packages=find_packages(exclude=("tests", "*.tests", "*.tests.*", "examples")),
version="0.3.5",
author="csunny",
author_email="cfqcsunny@gmail.com",
@@ -27,9 +167,10 @@ setuptools.setup(
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=parse_requirements("requirements.txt"),
- url="https://github.com/csunny/DB-GPT",
+ url="https://github.com/eosphoros-ai/DB-GPT",
license="https://opensource.org/license/mit/",
python_requires=">=3.10",
+ extras_require=setup_spec.extras,
entry_points={
"console_scripts": [
"dbgpt_server=pilot.server:webserver",