doc:add vector db use doc

This commit is contained in:
aries_ckt 2023-07-31 17:30:34 +08:00
parent 208f01fda7
commit ef335aa860
7 changed files with 270 additions and 112 deletions

View File

@ -51,14 +51,14 @@ Normal:
##### Q4:When I use openai(MODEL_SERVER=proxyllm) to chat
<p align="left">
<img src="../assets/faq/proxyerror.png" width="800px" />
<img src="../../assets/faq/proxyerror.png" width="800px" />
</p>
##### A4: make sure your openapi API_KEY is available
##### Q5:When I Chat Data and Chat Meta Data, I found the error
<p align="left">
<img src="../assets/faq/chatdataerror.png" width="800px" />
<img src="../../assets/faq/chatdataerror.png" width="800px" />
</p>
##### A5: you have not create your database and table
@ -103,7 +103,7 @@ VECTOR_STORE_TYPE=Chroma
```
##### Q7:When I use vicuna-13b, found some illegal character like this.
<p align="left">
<img src="../assets/faq/illegal_character.png" width="800px" />
<img src="../../assets/faq/illegal_character.png" width="800px" />
</p>
##### A7: set KNOWLEDGE_SEARCH_TOP_SIZE smaller or set KNOWLEDGE_CHUNK_SIZE smaller, and reboot server.

View File

@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: DB-GPT 👏👏 0.3.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-07-20 10:53+0800\n"
"POT-Creation-Date: 2023-07-31 17:15+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language: zh_CN\n"
@ -19,82 +19,108 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.12.1\n"
#: ../../faq.md:1 81cedd396b274db9b2a69448df98a28d
#: ../../faq.md:1 45fbeef1eff84435aced21f0a273e44f
msgid "FAQ"
msgstr "FAQ"
#: ../../faq.md:2 1a3f8c7661e34721a4465a34281416b1
#: ../../faq.md:2 fa6cad40d7384c9ba61dd0d4e69f23a0
msgid "Q1: text2vec-large-chinese not found"
msgstr "Q1: text2vec-large-chinese not found"
#: ../../faq.md:4 5d2844bcbcc843fc97de41a491f914fe
#: ../../faq.md:4 98f2cb936e6d4589bb27d918b6e96100
msgid ""
"A1: make sure you have download text2vec-large-chinese embedding model in"
" right way"
msgstr "按照正确的姿势下载text2vec-large-chinese模型"
#: ../../faq.md:16 82ade01884534030b81be7c3e06f1504
#: ../../faq.md:16 d9c0c02c44c74c8d9ab4c499548ca3b6
msgid ""
"Q2: execute `pip install -r requirements.txt` error, found some package "
"cannot find correct version."
msgstr "执行`pip install -r requirements.txt`报错"
#: ../../faq.md:19 451ede833da642788e5224811a71ba0f
#: ../../faq.md:19 08cb0691d56e495686c180cadef17380
msgid "A2: change the pip source."
msgstr "修改pip源"
#: ../../faq.md:26 ../../faq.md:33 0142abf1050d4fee9caf056322b52247
#: 0adb87dd67fa4122997871f7ab064637
#: ../../faq.md:26 ../../faq.md:33 7dd0a07a8e6544d5898c79d3e04e2664
#: f4ef94c1e50c467f9e575308755338ed
msgid "or"
msgstr "或"
#: ../../faq.md:41 d6981160ecc6491284fd32e1098fc10e
#: ../../faq.md:41 435cf523354443b89ffa243e67fe8ed3
msgid "Q3:Access denied for user 'root@localhost'(using password :NO)"
msgstr "Q3:用户'root@localhost'访问被拒绝(using password :NO)"
#: ../../faq.md:43 e50b195057804f28b84d0d10859e4f1b
#: ../../faq.md:43 03fd39015cd845f6a79ae03ef2ee5f7e
msgid "A3: make sure you have installed mysql instance in right way"
msgstr "按照正确姿势安装mysql"
#: ../../faq.md:45 03ba25aa7bd241d3b32cc1916f858a3e
#: ../../faq.md:45 1b6ea088a9bb45d0a92f0b77b6513b6b
msgid "Docker:"
msgstr "Docker:"
#: ../../faq.md:49 dd5336b44673459c93a1408097cb76f9
#: ../../faq.md:49 5bf5348377524ced9074207834bdd200
msgid "Normal: [download mysql instance](https://dev.mysql.com/downloads/mysql/)"
msgstr "[download mysql instance](https://dev.mysql.com/downloads/mysql/)"
#: ../../faq.md:52 2b290c4653a2410c8d330ed5b0e9a821
#: ../../faq.md:52 81194eff961c473c939d32d8fdba3c72
msgid "Q4:When I use openai(MODEL_SERVER=proxyllm) to chat"
msgstr "使用openai-chatgpt模型时(MODEL_SERVER=proxyllm)"
#: ../../faq.md:57 f4d0e8e8113f4ca4bc55f167b661fd6a
#: ../../faq.md:57 e0fd44c2be10410ea59e660e3efad53e
msgid "A4: make sure your openapi API_KEY is available"
msgstr "确认openapi API_KEY是否可用"
#: ../../faq.md:59 092ca3dea0c5466ab6e22ab0049f166e
#: ../../faq.md:59 9982f6a98d014189a838c725ef223b46
msgid "Q5:When I Chat Data and Chat Meta Data, I found the error"
msgstr "Chat Data and Chat Meta Data报如下错"
#: ../../faq.md:64 dbf61e6ea2c64ecebfdbbde83cb74e3e
#: ../../faq.md:64 b262611f9f4e4e1eaa49b69c8b06fe34
msgid "A5: you have not create your database and table"
msgstr "需要创建自己的数据库"
#: ../../faq.md:65 0505bb716e6445c2a7960436d93cb407
#: ../../faq.md:65 75c40019c7954eeb9db8da479b67b651
msgid "1.create your database."
msgstr "1.先创建数据库"
#: ../../faq.md:71 fd689b541ee549bd85385647c219b4cb
#: ../../faq.md:71 c50304bb737549feaebd0109541a00a1
msgid "2.create table {$your_table} and insert your data. eg:"
msgstr "然后创建数据表,模拟数据"
#: ../../faq.md:85 de2d78db5fb6450cb08b0f15385ed525
msgid "Q6:When I use vicuna-13b, found some illegal character like this."
#: ../../faq.md:85 e311370b69f24c90a0cffbb940b261e4
msgid "Q6:How to change Vector DB Type in DB-GPT."
msgstr ""
#: ../../faq.md:87 cd3b34421f044cb0922037fb909ac26a
msgid "A6: Update .env file and set VECTOR_STORE_TYPE."
msgstr ""
#: ../../faq.md:88 d20c3f4583d04cd285dcc2a052851e29
#, fuzzy
msgid ""
"DB-GPT currently support Chroma(Default), Milvus(>2.1), Weaviate vector "
"database. If you want to change vector db, Update your .env, set your "
"vector store type, VECTOR_STORE_TYPE=Chroma (now only support Chroma and "
"Milvus(>2.1), if you set Milvus, please set MILVUS_URL and MILVUS_PORT) "
"If you want to support more vector db, you can integrate yourself.[how to"
" integrate](https://db-gpt.readthedocs.io/en/latest/modules/vector.html)"
msgstr ""
"DB-GPT当前支持Chroma(默认),如果你想替换向量数据库,需要更新.env文件VECTOR_STORE_TYPE=Chroma (now"
" only support Chroma, Milvus Weaviate, if you set Milvus(>2.1), please "
"set MILVUS_URL and "
"MILVUS_PORT)。如果当前支持向量数据库无法满足你的需求,可以集成使用自己的向量数据库。[怎样集成](https://db-"
"gpt.readthedocs.io/en/latest/modules/vector.html)"
#: ../../faq.md:104 955e38d635df47039bfaf48bd8eabc66
#, fuzzy
msgid "Q7:When I use vicuna-13b, found some illegal character like this."
msgstr "使用vicuna-13b知识库问答出现乱码"
#: ../../faq.md:90 0cb1d0c2ec434763ae80e6f87d4a1665
#: ../../faq.md:109 6d11192c1f8c4ab5b20fb54a9d29790c
#, fuzzy
msgid ""
"A6: set KNOWLEDGE_SEARCH_TOP_SIZE smaller or set KNOWLEDGE_CHUNK_SIZE "
"A7: set KNOWLEDGE_SEARCH_TOP_SIZE smaller or set KNOWLEDGE_CHUNK_SIZE "
"smaller, and reboot server."
msgstr "将KNOWLEDGE_SEARCH_TOP_SIZE和KNOWLEDGE_CHUNK_SIZE设置小点然后重启"

View File

@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: DB-GPT 0.3.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-07-20 10:53+0800\n"
"POT-Creation-Date: 2023-07-31 17:04+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language: zh_CN\n"
@ -19,29 +19,29 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.12.1\n"
#: ../../getting_started/getting_started.md:1 7c12b6d7d5be4528be005cdadec568db
#: ../../getting_started/getting_started.md:1 7cfda7573f014955a3d1860a8944c3cd
msgid "Quickstart Guide"
msgstr "使用指南"
#: ../../getting_started/getting_started.md:3 ba312f5a132541be89dcc09012076784
#: ../../getting_started/getting_started.md:3 65d35e746cdc4fcdb8e712cd675081c6
msgid ""
"This tutorial gives you a quick walkthrough about use DB-GPT with you "
"environment and data."
msgstr "本教程为您提供了关于如何使用DB-GPT的使用指南。"
#: ../../getting_started/getting_started.md:5 8e56b3f0726740abaaafa57415b10bea
#: ../../getting_started/getting_started.md:5 dd221eaa459044bd805725f4d89fdde2
msgid "Installation"
msgstr "安装"
#: ../../getting_started/getting_started.md:7 3af8fe74db1043349e8f784c109b0417
#: ../../getting_started/getting_started.md:7 4736b4728e4f43409162371955e42987
msgid "To get started, install DB-GPT with the following steps."
msgstr "请按照以下步骤安装DB-GPT"
#: ../../getting_started/getting_started.md:9 0b33cf4604f846e781a63d857dde72b2
#: ../../getting_started/getting_started.md:9 550d49c0d50a4a93867840a15e624912
msgid "1. Hardware Requirements"
msgstr "1. 硬件要求"
#: ../../getting_started/getting_started.md:10 f1d4abb176494bcb85cead7f3f8b719d
#: ../../getting_started/getting_started.md:10 695be3b79d4d4f56b5c8de62646a8c41
msgid ""
"As our project has the ability to achieve ChatGPT performance of over "
"85%, there are certain hardware requirements. However, overall, the "
@ -49,67 +49,67 @@ msgid ""
"specific hardware requirements for deployment are as follows:"
msgstr "由于我们的项目有能力达到85%以上的ChatGPT性能所以对硬件有一定的要求。但总体来说我们在消费级的显卡上即可完成项目的部署使用具体部署的硬件说明如下:"
#: ../../getting_started/getting_started.md e8516902f29d4ca2bb46f19b5e3deb81
#: ../../getting_started/getting_started.md 1bfd2621414b4ea780167151df2f80ff
msgid "GPU"
msgstr "GPU"
#: ../../getting_started/getting_started.md a951ccca67364cf7ad5f0af2ec0ece8d
#: ../../getting_started/getting_started.md 1a0fc8a2721d46dbb59eb7629c02c480
msgid "VRAM Size"
msgstr "显存大小"
#: ../../getting_started/getting_started.md 01b7e055ee4543bdb619fbc14fea4d86
#: ../../getting_started/getting_started.md 0cc7f0bf931340a1851aa376df8a4a99
msgid "Performance"
msgstr "性能"
#: ../../getting_started/getting_started.md 0b20b224ff8a4e2c890a8b4ff43b6045
#: ../../getting_started/getting_started.md dfb52838915146bd906b04383c9a65b7
msgid "RTX 4090"
msgstr "RTX 4090"
#: ../../getting_started/getting_started.md 17a343c4359d45c987f29de1c73760b4
#: c9daaf0578434a7e812a8d3f3edde3f0
#: ../../getting_started/getting_started.md 9477f857f6df45f0843139ba5c23f263
#: d14dfe72732c462d8d8073a7804af548
msgid "24 GB"
msgstr "24 GB"
#: ../../getting_started/getting_started.md 6a35be039a0a43eaaf7d1aa40aece6f7
#: ../../getting_started/getting_started.md c76a371ed992440a8b7b16ad405c273d
msgid "Smooth conversation inference"
msgstr "可以流畅的进行对话推理,无卡顿"
#: ../../getting_started/getting_started.md a414f5ae7bdd450f8d171d1f075d6b66
#: ../../getting_started/getting_started.md 27fea237ff004247a6f5aa4593a5aabc
msgid "RTX 3090"
msgstr "RTX 3090"
#: ../../getting_started/getting_started.md 109ed9bda6e541b88eb3400a44e15df7
#: ../../getting_started/getting_started.md 843f16d5af974b82beabba7d0afceae5
msgid "Smooth conversation inference, better than V100"
msgstr "可以流畅进行对话推理有卡顿感但好于V100"
#: ../../getting_started/getting_started.md 3f2a05dc610f461faa989f3c12750d00
#: ../../getting_started/getting_started.md c1ddf9dc9e384a44b988a83bb209814c
msgid "V100"
msgstr "V100"
#: ../../getting_started/getting_started.md 90e24795876546ecbb4796ca5d313514
#: ../../getting_started/getting_started.md 94cc69a3ce8a4ea58a0b5c28fb972a41
msgid "16 GB"
msgstr "16 GB"
#: ../../getting_started/getting_started.md 34061757e69e4691b7a5ff3c2953f1e3
#: ../../getting_started/getting_started.md 9a8a3304aab84a2687db2185361e2fe0
msgid "Conversation inference possible, noticeable stutter"
msgstr "可以进行对话推理,有明显卡顿"
#: ../../getting_started/getting_started.md:18 61d59cb27daf43eb9aa2775fa6dac820
#: ../../getting_started/getting_started.md:18 8c83ffd018b242dbb626312ac0c69b79
msgid "2. Install"
msgstr "2. 安装"
#: ../../getting_started/getting_started.md:20 28a6b66c92464929bed8f04d5b841e8c
#: ../../getting_started/getting_started.md:20 bab57fbe66814f51959c77a55a24005e
#, fuzzy
msgid ""
"1.This project relies on a local MySQL database service, which you need "
"to install locally. We recommend using Docker for installation."
msgstr "本项目依赖一个本地的 MySQL 数据库服务,你需要本地安装,推荐直接使用 Docker 安装。"
#: ../../getting_started/getting_started.md:24 7793b19e96b44ba481b527a877c81170
#: ../../getting_started/getting_started.md:24 f0033070a3d5412a94d69deaca81a8a8
msgid "prepare server sql script"
msgstr "准备db-gpt server sql脚本"
#: ../../getting_started/getting_started.md:29 e3743699e95e45eab3e140df266bb3b5
#: ../../getting_started/getting_started.md:29 2d4c2ea402c1448e8372168ee81c7fb7
msgid ""
"We use [Chroma embedding database](https://github.com/chroma-core/chroma)"
" as the default for our vector database, so there is no need for special "
@ -122,11 +122,11 @@ msgstr ""
"向量数据库我们默认使用的是Chroma内存数据库所以无需特殊安装如果有需要连接其他的同学可以按照我们的教程进行安装配置。整个DB-"
"GPT的安装过程我们使用的是miniconda3的虚拟环境。创建虚拟环境并安装python依赖包"
#: ../../getting_started/getting_started.md:38 be9f388e255c4bc7837029ce8237ef0b
#: ../../getting_started/getting_started.md:38 125c03f282f3449b9f17f025857b10e2
msgid "Before use DB-GPT Knowledge Management"
msgstr "使用知识库管理功能之前"
#: ../../getting_started/getting_started.md:44 9175b34ed78c4310b048f53ac07b13cb
#: ../../getting_started/getting_started.md:44 4d1d8f2d4f044fdeb1b622fabb87a0ea
msgid ""
"Once the environment is installed, we have to create a new folder "
"\"models\" in the DB-GPT project, and then we can put all the models "
@ -135,33 +135,33 @@ msgstr ""
"环境安装完成后我们必须在DB-"
"GPT项目中创建一个新文件夹\"models\"然后我们可以把从huggingface下载的所有模型放到这个目录下。"
#: ../../getting_started/getting_started.md:47 40715051bd844b0187265d425debfbee
#: ../../getting_started/getting_started.md:47 00b27ca566f242cfbc3d0fac77654063
#, fuzzy
msgid "Notice make sure you have install git-lfs"
msgstr "确保你已经安装了git-lfs"
#: ../../getting_started/getting_started.md:57 7d7e7b1fe72b4e0c96eba721a8aa2113
#: ../../getting_started/getting_started.md:57 b8c136cbdbc34f90b09a52362b785563
msgid ""
"The model files are large and will take a long time to download. During "
"the download, let's configure the .env file, which needs to be copied and"
" created from the .env.template"
msgstr "模型文件很大,需要很长时间才能下载。在下载过程中,让我们配置.env文件它需要从.env.template中复制和创建。"
#: ../../getting_started/getting_started.md:60 6a25e5307bdb49a0afc69b9d17395a5a
#: ../../getting_started/getting_started.md:60 a15420a447c443dabb9f60e460a9ab97
msgid "cp .env.template .env"
msgstr "cp .env.template .env"
#: ../../getting_started/getting_started.md:63 14567647544f4036beaae158b59833f6
#: ../../getting_started/getting_started.md:63 134bf228f94c4aec89c34e69e006af35
msgid ""
"You can configure basic parameters in the .env file, for example setting "
"LLM_MODEL to the model to be used"
msgstr "您可以在.env文件中配置基本参数例如将LLM_MODEL设置为要使用的模型。"
#: ../../getting_started/getting_started.md:65 1b459d413a4d4b7e883d1ec17384ca30
#: ../../getting_started/getting_started.md:65 c25744934b7142449e09dd57e0c98cc6
msgid "3. Run"
msgstr "3. 运行"
#: ../../getting_started/getting_started.md:66 ed15ee15450e4a028bf5aa05a9309697
#: ../../getting_started/getting_started.md:66 e2bd77426e054a99801faa76ffa1046a
msgid ""
"You can refer to this document to obtain the Vicuna weights: "
"[Vicuna](https://github.com/lm-sys/FastChat/blob/main/README.md#model-"
@ -170,7 +170,7 @@ msgstr ""
"关于基础模型, 可以根据[Vicuna](https://github.com/lm-"
"sys/FastChat/blob/main/README.md#model-weights) 合成教程进行合成。"
#: ../../getting_started/getting_started.md:68 de15a70920a94192a1f2017cbe3cdb55
#: ../../getting_started/getting_started.md:68 794d5f1a8afc4667bbc848cc7192e9bd
msgid ""
"If you have difficulty with this step, you can also directly use the "
"model from [this link](https://huggingface.co/Tribbiani/vicuna-7b) as a "
@ -179,7 +179,7 @@ msgstr ""
"如果此步有困难的同学,也可以直接使用[此链接](https://huggingface.co/Tribbiani/vicuna-"
"7b)上的模型进行替代。"
#: ../../getting_started/getting_started.md:70 763aaed45fd948fab761552a7e06061a
#: ../../getting_started/getting_started.md:70 72be6fe338304f9e8639e16b75859343
msgid ""
"set .env configuration set your vector store type, "
"eg:VECTOR_STORE_TYPE=Chroma, now we support Chroma and Milvus(version > "
@ -188,17 +188,21 @@ msgstr ""
"在.env文件设置向量数据库环境变量eg:VECTOR_STORE_TYPE=Chroma, 目前我们支持了 Chroma and "
"Milvus(version >2.1) "
#: ../../getting_started/getting_started.md:73 a8f0dc3546c54a1098ff10157f980cef
#: ../../getting_started/getting_started.md:73 e36089f05a5a41a2affcea15ecd1ac3d
#, fuzzy
msgid "1.Run db-gpt server"
msgstr "运行模型服务"
#: ../../getting_started/getting_started.md:78 1715948545154c10af585de8960bf853
#: ../../getting_started/getting_started.md:78
#: ../../getting_started/getting_started.md:127
#: ../../getting_started/getting_started.md:181
#: 4136b297d88e4dedb1616f28885a8cd6 eec0acd695454df59c67358889f971fb
#: f4f47ac8b1c549e0b2a68f16774fac64
#, fuzzy
msgid "Open http://localhost:5000 with your browser to see the product."
msgstr "打开浏览器访问http://localhost:5000"
#: ../../getting_started/getting_started.md:80 8ea9964df477473e866fe844dcf4be54
#: ../../getting_started/getting_started.md:80 ef391125ac0d43358e4701bafe780b4a
msgid ""
"If you want to access an external LLM service, you need to 1.set the "
"variables LLM_MODEL=YOUR_MODEL_NAME "
@ -206,7 +210,7 @@ msgid ""
"file. 2.execute dbgpt_server.py in light mode"
msgstr "如果你想访问外部的大模型服务1.需要在.env文件设置模型名和外部模型服务地址。2.使用light模式启动服务"
#: ../../getting_started/getting_started.md:87 4c409a0e2a994f428712ab94b475e9bd
#: ../../getting_started/getting_started.md:87 eb22acf6142b4a81bbab0bf1192f577c
msgid ""
"If you want to learn about dbgpt-webui, read https://github.com/csunny"
"/DB-GPT/tree/new-page-framework/datacenter"
@ -214,3 +218,84 @@ msgstr ""
"如果你想了解DB-GPT前端服务访问https://github.com/csunny/DB-GPT/tree/new-page-"
"framework/datacenter"
#: ../../getting_started/getting_started.md:89 bc58c998b37242f8b4a37588bccc612b
msgid "4. Docker (Experimental)"
msgstr ""
#: ../../getting_started/getting_started.md:91 3c95ec21279b43ba83e3a4e845511f26
msgid "4.1 Building Docker image"
msgstr ""
#: ../../getting_started/getting_started.md:97 01be8cbb915f4d02a84b25be3005a200
msgid "Review images by listing them:"
msgstr ""
#: ../../getting_started/getting_started.md:103
#: ../../getting_started/getting_started.md:167
#: 8664141a7b2f4a6a8f770ae72bc5a815 f1be761680804bb9b6a4928b46dfe598
msgid "Output should look something like the following:"
msgstr ""
#: ../../getting_started/getting_started.md:110
#: 5fff19374f1d4b3ba3bb642f987e8b66
msgid "4.2. Run all in one docker container"
msgstr ""
#: ../../getting_started/getting_started.md:112
#: 5b0137ca2d0345bea0a005fdf4037702
msgid "**Run with local model**"
msgstr ""
#: ../../getting_started/getting_started.md:130
#: 51495a6801e94901935481d57399ed20
msgid ""
"`-e LLM_MODEL=vicuna-13b`, means we use vicuna-13b as llm model, see "
"/pilot/configs/model_config.LLM_MODEL_CONFIG"
msgstr ""
#: ../../getting_started/getting_started.md:131
#: 753a8cf0ebbd4c62be9d5fbb7e5cf691
msgid ""
"`-v /data/models:/app/models`, means we mount the local model file "
"directory `/data/models` to the docker container directory `/app/models`,"
" please replace it with your model file directory."
msgstr ""
#: ../../getting_started/getting_started.md:133
#: ../../getting_started/getting_started.md:175
#: 043b3966baf045aca5615214d9be632a 328b58e3a9ba46a39f0511970f43743c
msgid "You can see log with command:"
msgstr ""
#: ../../getting_started/getting_started.md:139
#: f6a0a66509f343d4a3306cd884a49dbc
msgid "**Run with openai interface**"
msgstr ""
#: ../../getting_started/getting_started.md:158
#: 65fabfc317ac41c6aaeb80e6f2e1d379
msgid ""
"`-e LLM_MODEL=proxyllm`, means we use proxy llm(openai interface, "
"fastchat interface...)"
msgstr ""
#: ../../getting_started/getting_started.md:159
#: fb2230ca032b4a4d9cdab8d37b3ad02c
msgid ""
"`-v /data/models/text2vec-large-chinese:/app/models/text2vec-large-"
"chinese`, means we mount the local text2vec model to the docker "
"container."
msgstr ""
#: ../../getting_started/getting_started.md:161
#: 12c7a94038274324a2996873df60db93
msgid "4.2. Run with docker compose"
msgstr ""
#: ../../getting_started/getting_started.md:183
#: 6ec270a68a094d4894abd2ef02739229
msgid ""
"You can open docker-compose.yml in the project root directory to see more"
" details."
msgstr ""

View File

@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: DB-GPT 0.3.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-06-13 18:04+0800\n"
"POT-Creation-Date: 2023-07-31 17:04+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language: zh_CN\n"
@ -19,11 +19,11 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.12.1\n"
#: ../../modules/knownledge.md:1 8c5aad32a2cc4c97bc988a1f4143097b
#: ../../modules/knownledge.md:1 b18cf12f806941f3b9d1c13b52d0dfe5
msgid "Knownledge"
msgstr "知识"
#: ../../modules/knownledge.md:3 d739696a9e6240c78db3906d55329636
#: ../../modules/knownledge.md:3 3fe78b30d3994e4484df41a677614eb2
msgid ""
"As the knowledge base is currently the most significant user demand "
"scenario, we natively support the construction and processing of "
@ -31,15 +31,15 @@ msgid ""
"base management strategies in this project, such as:"
msgstr "由于知识库是当前用户需求最显著的场景,我们原生支持知识库的构建和处理。同时,我们还在本项目中提供了多种知识库管理策略,如:"
#: ../../modules/knownledge.md:4 16e03ee1cd454786a736b6960c668c3a
#: ../../modules/knownledge.md:4 17b2485a12744b5587655201be50e023
msgid "Default built-in knowledge base"
msgstr "默认内置知识库"
#: ../../modules/knownledge.md:5 b5c57a8c773b40d18e344862adf7790e
#: ../../modules/knownledge.md:5 e137d9916a0a4a0681dbfed5d5a5065f
msgid "Custom addition of knowledge bases"
msgstr "自定义新增知识库"
#: ../../modules/knownledge.md:6 7c05adacafe34780a73fa2bc6748f92f
#: ../../modules/knownledge.md:6 0bca133996d4435b84245f2b53f43d72
msgid ""
"Various usage scenarios such as constructing knowledge bases through "
"plugin capabilities and web crawling. Users only need to organize the "
@ -47,45 +47,51 @@ msgid ""
"the knowledge base required for the large model."
msgstr "各种使用场景,例如通过插件功能和爬虫构建知识库。用户只需要组织知识文档,并且他们可以使用我们现有的功能来构建大型模型所需的知识库。"
#: ../../modules/knownledge.md:9 8b196a2a9efb435baf648a99d89e1220
#: ../../modules/knownledge.md:9 7355eed198514efc8e3bc178039b0251
msgid "Create your own knowledge repository"
msgstr "创建你自己的知识库"
#: ../../modules/knownledge.md:11 370071fde98c4c59bb18735364602adf
#: ../../modules/knownledge.md:11 96e0276a5d3047fea5410e9b33c33308
msgid ""
"1.Place personal knowledge files or folders in the pilot/datasets "
"directory."
msgstr "1.将个人知识文件或文件夹放在pilot/datasets目录中。"
#: ../../modules/knownledge.md:13 5ac32a1253c4433e87d64dccb2c8b600
#: ../../modules/knownledge.md:13 8762c0a463094c19924cdd4b7b1b1ede
msgid ""
"We currently support many document formats: txt, pdf, md, html, doc, ppt,"
" and url."
msgstr "当前支持txt, pdf, md, doc, ppt, html文档格式"
#: ../../modules/knownledge.md:15 1782a135e84f4e9f8cb090f8af935428
#: ../../modules/knownledge.md:15 752a5e7c623a49439ecf1ce8e6ccca7d
msgid "before execution:"
msgstr "在执行之前"
#: ../../modules/knownledge.md:22 43791873b7e043239e160790bbfc10e1
#: ../../modules/knownledge.md:22 fbef967557a94b938f8e47497bb43c20
msgid ""
"2.Update your .env, set your vector store type, VECTOR_STORE_TYPE=Chroma "
"(now only support Chroma and Milvus, if you set Milvus, please set "
"MILVUS_URL and MILVUS_PORT)"
msgstr "2.更新你的.env设置你的向量存储类型VECTOR_STORE_TYPE=Chroma(现在只支持Chroma和Milvus如果你设置了Milvus请设置MILVUS_URL和MILVUS_PORT)"
#: ../../modules/knownledge.md:25 197e043db45e444e9c930f29be808f31
#: ../../modules/knownledge.md:25 f1745e2f17864711a636ecbdd6cb9833
msgid "2.Run the knowledge repository script in the tools directory."
msgstr "2.在tools目录执行知识入库脚本"
#: ../../modules/knownledge.md:34 abeb77ed400c4838b2ca8e14dcd89b29
#: ../../modules/knownledge.md:34 5ae832d038b245a29a9e089f9e169cb0
msgid ""
"Optionally, you can run `python tools/knowledge_init.py -h` command to "
"see more usage."
msgstr ""
#: ../../modules/knownledge.md:36 4aab02276dfd41819dbd218ecc608326
msgid ""
"3.Add the knowledge repository in the interface by entering the name of "
"your knowledge repository (if not specified, enter \"default\") so you "
"can use it for Q&A based on your knowledge base."
msgstr "如果选择新增知识库,在界面上新增知识库输入你的知识库名"
#: ../../modules/knownledge.md:36 dcff9efafd9d441b91c1389af2a49780
#: ../../modules/knownledge.md:38 f990c8495c994aa1beb040ede6b2329a
msgid ""
"Note that the default vector model used is text2vec-large-chinese (which "
"is a large model, so if your personal computer configuration is not "

View File

@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: DB-GPT 0.3.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-06-30 17:16+0800\n"
"POT-Creation-Date: 2023-07-31 17:04+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language: zh_CN\n"
@ -19,11 +19,11 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.12.1\n"
#: ../../modules/llms.md:1 7fa87bcbc5de40b2902c41545027d8b8
#: ../../modules/llms.md:1 8322a2795eb74f1c99007fa0536ee7e5
msgid "LLMs"
msgstr "大语言模型"
#: ../../modules/llms.md:3 48a7c98128114cf58c57c41575ae53a6
#: ../../modules/llms.md:3 b8458e9c89b5403099b1eb04c4b7e755
#, python-format
msgid ""
"In the underlying large model integration, we have designed an open "
@ -36,47 +36,49 @@ msgid ""
"of use."
msgstr "在底层大模型接入中我们设计了开放的接口支持对接多种大模型。同时对于接入模型的效果我们有非常严格的把控与评审机制。对大模型能力上与ChatGPT对比在准确率上需要满足85%以上的能力对齐。我们用更高的标准筛选模型,是期望在用户使用过程中,可以省去前面繁琐的测试评估环节。"
#: ../../modules/llms.md:5 8d162757e5934dca9395605a5809193a
#: ../../modules/llms.md:5 3b4af62d13b24ec28b8b0c8128cabd03
msgid "Multi LLMs Usage"
msgstr "多模型使用"
#: ../../modules/llms.md:6 b1a5b940bee64aaa8945c078f9788459
#: ../../modules/llms.md:6 e51a4f2e4651459cab8bdc53114ae624
msgid ""
"To use multiple models, modify the LLM_MODEL parameter in the .env "
"configuration file to switch between the models."
msgstr "如果要使用不同的模型请修改.env配置文件中的LLM_MODEL参数以在模型之间切换。"
#: ../../modules/llms.md:8 77c89d2a77d64b8aa6796c59aad07ed2
#: ../../modules/llms.md:8 4ed94e391ae04868b6d30a71c73d1f31
msgid ""
"Notice: you can create .env file from .env.template, just use command "
"like this:"
msgstr "注意:你可以从 .env.template 创建 .env 文件。只需使用如下命令:"
#: ../../modules/llms.md:14 4fc870de8fc14861a97a5417a9e886f4
#: ../../modules/llms.md:14 3d7e9263b09d487ab23ada4f1ba01e02
#, fuzzy
msgid ""
"now we support models vicuna-13b, vicuna-7b, chatglm-6b, flan-t5-base, "
"guanaco-33b-merged, falcon-40b, gorilla-7b."
"guanaco-33b-merged, falcon-40b, gorilla-7b, llama-2-7b, llama-2-13b, "
"baichuan-7b, baichuan-13b"
msgstr ""
"现在我们支持的模型有vicuna-13b, vicuna-7b, chatglm-6b, flan-t5-base, guanaco-33b-"
"merged, falcon-40b, gorilla-7b."
#: ../../modules/llms.md:16 76cd62716d864bc48c362a5f095dbe94
#: ../../modules/llms.md:16 4cf68821b6704730bbe6a98fb1355f77
msgid ""
"if you want use other model, such as chatglm-6b, you just need update "
".env config file."
msgstr "如果你想使用其他模型比如chatglm-6b, 仅仅需要修改.env 配置文件"
#: ../../modules/llms.md:20 be990b58d30c469486c21e4a11739274
#: ../../modules/llms.md:20 c37b0e5ca58f420f91fdb93bf25215e0
msgid ""
"or chatglm2-6b, which is the second-generation version of the open-"
"source bilingual (Chinese-English) chat model ChatGLM-6B."
msgstr ""
#: ../../modules/llms.md:27 52b37848bd8345aea6a5a331019a01cd
#: ../../modules/llms.md:27 044b42a90acc4390b7714a27f38ee362
msgid "Run Model with cpu."
msgstr "用CPU运行模型"
#: ../../modules/llms.md:28 e3090ffbb05646e6af1b0eebedabc9e6
#: ../../modules/llms.md:28 da4226be516b4cb88d6c1dc94bb918e4
msgid ""
"we alse support smaller models, like gpt4all. you can use it with "
"cpu/mps(M1/M2), Download from [gpt4all model](https://gpt4all.io/models"
@ -85,11 +87,11 @@ msgstr ""
"我们也支持一些小模型你可以通过CPU/MPS(M1、M2)运行, 模型下载[gpt4all](https://gpt4all.io/models"
"/ggml-gpt4all-j-v1.3-groovy.bin)"
#: ../../modules/llms.md:30 67b0f633acee493d86033806a5f0db9a
#: ../../modules/llms.md:30 f869669ebb104270bb408c6a2df4f68c
msgid "put it in the models path, then change .env config."
msgstr "将模型放在models路径, 修改.env 配置文件"
#: ../../modules/llms.md:35 f1103e6c60b44657ba47129f97156251
#: ../../modules/llms.md:35 88dff0a183ca4052b28d6c06fe46a3ac
msgid ""
"DB-GPT provides a model load adapter and chat adapter. load adapter which"
" allows you to easily adapt load different LLM models by inheriting the "
@ -98,15 +100,15 @@ msgstr ""
"DB-GPT提供了多模型适配器load adapter和chat adapter.load adapter通过继承BaseLLMAdapter类,"
" 实现match和loader方法允许你适配不同的LLM."
#: ../../modules/llms.md:37 768ae456f55b4b1894d7dff43c7a7f01
#: ../../modules/llms.md:37 c013c4dc67a645ea8b0ae87d0c253e79
msgid "vicuna llm load adapter"
msgstr "vicuna llm load adapter"
#: ../../modules/llms.md:54 5364ea2d1b2e4759a98045b4ff7be4fe
#: ../../modules/llms.md:54 b239c68b5d1a44dfbbaa5ff1354b02d2
msgid "chatglm load adapter"
msgstr "chatglm load adapter"
#: ../../modules/llms.md:81 eb7e5dc538a14a67b030b7a8b89a8e8a
#: ../../modules/llms.md:81 ceccd36025064a2dbd8578032db00a01
msgid ""
"chat adapter which allows you to easily adapt chat different LLM models "
"by inheriting the BaseChatAdpter.you just implement match() and "
@ -115,48 +117,59 @@ msgstr ""
"chat "
"adapter通过继承BaseChatAdpter允许你通过实现match和get_generate_stream_func方法允许你适配不同的LLM."
#: ../../modules/llms.md:83 f8e2cbbbf1e64b07af1a18bc089d268f
#: ../../modules/llms.md:83 668aea86f47f4082b7bf168074d6514c
msgid "vicuna llm chat adapter"
msgstr "vicuna llm chat adapter"
#: ../../modules/llms.md:95 dba52e8a07354536bbfa38319129b98f
#: ../../modules/llms.md:95 dba103e2e54a447b9543b51cb9876f6b
msgid "chatglm llm chat adapter"
msgstr "chatglm llm chat adapter"
#: ../../modules/llms.md:108 3b9b667a154f4123ac0c2618ce9a8626
#: ../../modules/llms.md:108 3edb5d9efb0a4f90af21f37bb9243e34
msgid ""
"if you want to integrate your own model, just need to inheriting "
"BaseLLMAdaper and BaseChatAdpter and implement the methods"
msgstr "如果你想集成自己的模型只需要继承BaseLLMAdaper和BaseChatAdpter类然后实现里面的方法即可"
#: ../../modules/llms.md:110 713bb7cbd7644cc3b3734901b46d3ee0
#: ../../modules/llms.md:110 a39d9f39b42c4757b9df597dcbcc9603
#, fuzzy
msgid "Multi Proxy LLMs"
msgstr "多模型使用"
#: ../../modules/llms.md:111 125f310663e54a5b9a78f036df2c7062
#: ../../modules/llms.md:111 d205dedc7e93407da3624aed610a6093
msgid "1. Openai proxy"
msgstr ""
#: ../../modules/llms.md:112 56a578a5e1234fe38a61eacd74d52b93
#: ../../modules/llms.md:112 dc49675aa40f484ba1de16cffbf15e28
msgid ""
"If you haven't deployed a private infrastructure for a large model, or if"
" you want to use DB-GPT in a low-cost and high-efficiency way, you can "
"also use OpenAI's large model as your underlying model."
msgstr ""
#: ../../modules/llms.md:114 ce9d8db45c0441a59dc67babedfa1829
#: ../../modules/llms.md:114 30bfaa0fcf754c6cb51e1b400246dc9b
msgid ""
"If your environment deploying DB-GPT has access to OpenAI, then modify "
"the .env configuration file as below will work."
msgstr ""
#: ../../modules/llms.md:122 cc81057c5bb6453e90bd5ab6c5d29c19
#: ../../modules/llms.md:122 33ef5db4a49241a8ba1240cafece6d48
msgid ""
"If you can't access OpenAI locally but have an OpenAI proxy service, you "
"can configure as follows."
msgstr ""
#: ../../modules/llms.md:130 2dd7c019ee3f4c4f83948f0b006077f9
msgid "2. Bard Proxy"
msgstr ""
#: ../../modules/llms.md:131 7d7837484e474b0d936c838f6303723a
msgid ""
"If your environment deploying DB-GPT has access to "
"https://bard.google.com/ (F12-> application-> __Secure-1PSID), then "
"modify the .env configuration file as below will work."
msgstr ""
#~ msgid "Multi Proxy LLMs"
#~ msgstr "多模型使用"

View File

@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: DB-GPT 👏👏 0.2.2\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-06-19 19:10+0800\n"
"POT-Creation-Date: 2023-07-31 17:15+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language: zh_CN\n"
@ -19,16 +19,16 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.12.1\n"
#: ../../modules/vector.rst:2 ../../modules/vector.rst:17
#: 22f05e3df5a14919ba30198e3800fdb9
#: ../../modules/vector.rst:2 ../../modules/vector.rst:19
#: 3f1436c219c34b138a4db11ff55e070c
msgid "VectorConnector"
msgstr "VectorConnector"
#: ../../modules/vector.rst:4 caa4353c84a04157809b694a6a7b46ca
#: ../../modules/vector.rst:4 55445cdb9379484db2b2127620401adf
msgid "**VectorConnector Introduce**"
msgstr "VectorConnector介绍"
#: ../../modules/vector.rst:6 e2f584d6cf374a10ae4471d3b9a72cef
#: ../../modules/vector.rst:6 b3b361332bf0447c9ed28976e80f61d5
msgid ""
"vector knowledge base is a method of mapping words in language to a high-"
"dimensional vector space. In the vector space, each word is represented "
@ -43,7 +43,7 @@ msgid ""
" requires a large corpus and computing resources to complete."
msgstr "向量知识库是一种将语言中的单词映射到一个高维向量空间中的方法。在向量空间中每个单词都表示为一个向量这个向量包含了许多数值特征这些特征代表了该单词与其他单词之间的关系。这种映射是一种聚类技术通过将语言中的单词映射到向量空间中单词之间的语义关系可以通过计算它们在向量空间中的差异来计算。向量知识库可以用于自然语言处理任务如情感分析、文本分类、机器翻译等。常见的向量知识库有Word2Vec、GloVe、FastText等。这些向量知识库的训练通常需要大量的语料库和计算资源来完成。"
#: ../../modules/vector.rst:8 02a16590b6ec49c09a8247f89714eada
#: ../../modules/vector.rst:8 a3abe6fd2c82463993e5deac78e30700
msgid ""
"VectorConnector is a vector database connection adapter that allows you "
"to connect different vector databases and abstracts away implementation "
@ -54,21 +54,35 @@ msgstr ""
"vectorconnector是一个向量数据库连接适配器你可以通过它来连接不同的向量数据库并且屏蔽掉了不同的向量数据的是实现差异和底层细节。例如Milvus,"
" Chroma, Elasticsearch, Weaviate..."
#: ../../modules/vector.rst:10 c38e3540e8844ea2a550620e2afac343
msgid "DB-GPT VectorConnector currently support milvus and chroma vector database"
#: ../../modules/vector.rst:10 60bf9f9feab34835ada5b1954f4edefd
#, fuzzy
msgid ""
"DB-GPT VectorConnector currently support Chroma(Default), Milvus(>2.1), "
"Weaviate vector database."
msgstr "DB-GPT VectorConnector当前支持Milvus和Chroma未来会越来越多."
#: ../../modules/vector.rst:12 f7e25f67a8764cab9664f8b7208af1aa
#: ../../modules/vector.rst:12 cd057c92691247d486d0acc4b5827731
#, fuzzy
msgid ""
"If you want to change vector db, Update your .env, set your vector store "
"type, VECTOR_STORE_TYPE=Chroma (now only support Chroma, Milvus(>2.1) and"
" Weaviate, if you set Milvus, please set MILVUS_URL and MILVUS_PORT)"
msgstr ""
"如果你想替换向量数据库,需要更新.env文件VECTOR_STORE_TYPE=Chroma (now only support Chroma,"
" Milvus Weaviate, if you set Milvus(>2.1), please set MILVUS_URL and "
"MILVUS_PORT)"
#: ../../modules/vector.rst:14 bce04f3cde7b427895034aa5c2899857
#, fuzzy
msgid "`chroma <./vector/chroma.html>`_: supported chroma vector database."
msgstr "`Chroma <./vector/chroma.html>`_: 支持Chroma向量数据库"
#: ../../modules/vector.rst:13 d08ef7653e964af590d9bb99d36204a8
#: ../../modules/vector.rst:15 f6947639cf0a4e1d8a83d3521c3b2781
#, fuzzy
msgid "`milvus <./vector/milvus.html>`_: supported milvus vector database."
msgstr "`Milvus <./vector/milvus.html>`_: 支持Milvus向量数据库"
#: ../../modules/vector.rst:14 2f95f18ad59b471cbfbc3f4dda4f5982
#: ../../modules/vector.rst:16 e5880b2c53fe42ad87b2ee8882abe90e
#, fuzzy
msgid "`weaviate <./vector/weaviate.html>`_: supported weaviate vector database."
msgstr "`Weaviate <./vector/weaviate.html>`_: 支持Weaviate向量数据库"

View File

@ -11,6 +11,20 @@ DB-GPT VectorConnector currently support Chroma(Default), Milvus(>2.1), Weaviate
If you want to change vector db, Update your .env, set your vector store type, VECTOR_STORE_TYPE=Chroma (now only support Chroma, Milvus(>2.1) and Weaviate, if you set Milvus, please set MILVUS_URL and MILVUS_PORT)
::
#*******************************************************************#
#** VECTOR STORE SETTINGS **#
#*******************************************************************#
VECTOR_STORE_TYPE=Chroma
#MILVUS_URL=127.0.0.1
#MILVUS_PORT=19530
#MILVUS_USERNAME
#MILVUS_PASSWORD
#MILVUS_SECURE=
#WEAVIATE_URL=https://kt-region-m8hcy0wc.weaviate.network
- `chroma <./vector/chroma.html>`_: supported chroma vector database.
- `milvus <./vector/milvus.html>`_: supported milvus vector database.
- `weaviate <./vector/weaviate.html>`_: supported weaviate vector database.