doc:docs for knowledge space arguments

aries_ckt 2023-08-10 16:50:17 +08:00
parent 33397f9583
commit 8171980e4c
5 changed files with 397 additions and 140 deletions

View File

@ -6,6 +6,7 @@ CREATE TABLE `knowledge_space` (
`vector_type` varchar(50) NOT NULL COMMENT 'vector type',
`desc` varchar(500) NOT NULL COMMENT 'description',
`owner` varchar(100) DEFAULT NULL COMMENT 'owner',
`context` TEXT DEFAULT NULL COMMENT 'context argument',
`gmt_created` TIMESTAMP DEFAULT CURRENT_TIMESTAMP COMMENT 'created time',
`gmt_modified` TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
PRIMARY KEY (`id`),
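The new `context` column carries the per-space argument settings referenced in the commit subject. A minimal sketch of populating it, assuming the arguments are serialized as a JSON document (the actual value schema is not shown in this diff):

```commandline
mysql> -- Hypothetical value; the real argument schema is not part of this commit
mysql> UPDATE knowledge_space SET context = '{"embedding": {"topk": 5}}' WHERE id = 1;
```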

View File

@ -108,6 +108,25 @@ VECTOR_STORE_TYPE=Chroma
##### A7: set KNOWLEDGE_SEARCH_TOP_SIZE smaller or set KNOWLEDGE_CHUNK_SIZE smaller, and reboot server.
##### Q8:space add error (pymysql.err.OperationalError) (1054, "Unknown column 'knowledge_space.context' in 'field list'")
##### A8:
1. Shut down dbgpt_server (Ctrl+C).
2. Connect to MySQL:
```commandline
mysql -h127.0.0.1 -uroot -paa12345678
```
3. Execute the DDL to add the `context` column to the `knowledge_space` table (an optional check follows step 4):
```commandline
mysql> use knowledge_management;
mysql> ALTER TABLE knowledge_space ADD COLUMN context TEXT COMMENT 'context argument';
```
4. Restart the dbgpt server.
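As the optional check mentioned in step 3, you can confirm the column now exists before using the knowledge space again (`SHOW COLUMNS` is standard MySQL):

```commandline
mysql> SHOW COLUMNS FROM knowledge_space LIKE 'context';
```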

View File

@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: DB-GPT 👏👏 0.3.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-07-31 17:15+0800\n"
"POT-Creation-Date: 2023-08-10 16:40+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language: zh_CN\n"
@ -19,84 +19,84 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.12.1\n"
#: ../../faq.md:1 45fbeef1eff84435aced21f0a273e44f
#: ../../faq.md:1 c4b5b298c447462ba7aaffd954549def
msgid "FAQ"
msgstr "FAQ"
#: ../../faq.md:2 fa6cad40d7384c9ba61dd0d4e69f23a0
#: ../../faq.md:2 533e388b78594244aa0acbf2b0263f60
msgid "Q1: text2vec-large-chinese not found"
msgstr "Q1: text2vec-large-chinese not found"
#: ../../faq.md:4 98f2cb936e6d4589bb27d918b6e96100
#: ../../faq.md:4 e1c7e28c60f24f7a983f30ee43bad32e
msgid ""
"A1: make sure you have download text2vec-large-chinese embedding model in"
" right way"
msgstr "按照正确的姿势下载text2vec-large-chinese模型"
#: ../../faq.md:16 d9c0c02c44c74c8d9ab4c499548ca3b6
#: ../../faq.md:16 7cbfd6629267423a879735dd0dbba24e
msgid ""
"Q2: execute `pip install -r requirements.txt` error, found some package "
"cannot find correct version."
msgstr "执行`pip install -r requirements.txt`报错"
#: ../../faq.md:19 08cb0691d56e495686c180cadef17380
#: ../../faq.md:19 a2bedf7cf2984d35bc9e3edffd9cb991
msgid "A2: change the pip source."
msgstr "修改pip源"
#: ../../faq.md:26 ../../faq.md:33 7dd0a07a8e6544d5898c79d3e04e2664
#: f4ef94c1e50c467f9e575308755338ed
#: ../../faq.md:26 ../../faq.md:33 307385dc581841148bad0dfa95541722
#: 55a9cd0665e74b47bac71e652a80c8bd
msgid "or"
msgstr "或"
#: ../../faq.md:41 435cf523354443b89ffa243e67fe8ed3
#: ../../faq.md:41 8ec34a9f22744313825bd53e90e86695
msgid "Q3:Access denied for user 'root@localhost'(using password :NO)"
msgstr "或"
#: ../../faq.md:43 03fd39015cd845f6a79ae03ef2ee5f7e
#: ../../faq.md:43 85d67326827145859f5af707c96c852f
msgid "A3: make sure you have installed mysql instance in right way"
msgstr "按照正确姿势安装mysql"
#: ../../faq.md:45 1b6ea088a9bb45d0a92f0b77b6513b6b
#: ../../faq.md:45 c51cffb2777c49c6b15485e6151f8b70
msgid "Docker:"
msgstr "Docker:"
#: ../../faq.md:49 5bf5348377524ced9074207834bdd200
#: ../../faq.md:49 09f16d57dbeb4f34a1274b2ac52aacf5
msgid "Normal: [download mysql instance](https://dev.mysql.com/downloads/mysql/)"
msgstr "[download mysql instance](https://dev.mysql.com/downloads/mysql/)"
#: ../../faq.md:52 81194eff961c473c939d32d8fdba3c72
#: ../../faq.md:52 ab730de8f02f4cb895510a52ca90c02f
msgid "Q4:When I use openai(MODEL_SERVER=proxyllm) to chat"
msgstr "使用openai-chatgpt模型时(MODEL_SERVER=proxyllm)"
#: ../../faq.md:57 e0fd44c2be10410ea59e660e3efad53e
#: ../../faq.md:57 8308cf21e0654e5798c9a2638b8565ec
msgid "A4: make sure your openapi API_KEY is available"
msgstr "确认openapi API_KEY是否可用"
#: ../../faq.md:59 9982f6a98d014189a838c725ef223b46
#: ../../faq.md:59 ff6361a5438943c2b9e613aa9e2322bf
msgid "Q5:When I Chat Data and Chat Meta Data, I found the error"
msgstr "Chat Data and Chat Meta Data报如下错"
#: ../../faq.md:64 b262611f9f4e4e1eaa49b69c8b06fe34
#: ../../faq.md:64 1a89acc3bf074b77b9e44d20f9b9c3cb
msgid "A5: you have not create your database and table"
msgstr "需要创建自己的数据库"
#: ../../faq.md:65 75c40019c7954eeb9db8da479b67b651
#: ../../faq.md:65 e7d63e2aa0d24c5d90dd1865753c7d52
msgid "1.create your database."
msgstr "1.先创建数据库"
#: ../../faq.md:71 c50304bb737549feaebd0109541a00a1
#: ../../faq.md:71 8b0de08106a144b98538b12bc671cbb2
msgid "2.create table {$your_table} and insert your data. eg:"
msgstr "然后创建数据表,模拟数据"
#: ../../faq.md:85 e311370b69f24c90a0cffbb940b261e4
#: ../../faq.md:85 abb226843354448a925de4deb52db555
msgid "Q6:How to change Vector DB Type in DB-GPT."
msgstr ""
#: ../../faq.md:87 cd3b34421f044cb0922037fb909ac26a
#: ../../faq.md:87 cc80165e2f6b46a28e53359ea7b3e0af
msgid "A6: Update .env file and set VECTOR_STORE_TYPE."
msgstr ""
#: ../../faq.md:88 d20c3f4583d04cd285dcc2a052851e29
#: ../../faq.md:88 d30fa1e2fdd94e00a72ef247af41fb43
#, fuzzy
msgid ""
"DB-GPT currently support Chroma(Default), Milvus(>2.1), Weaviate vector "
@ -112,15 +112,42 @@ msgstr ""
"MILVUS_PORT)。如果当前支持向量数据库无法满足你的需求,可以集成使用自己的向量数据库。[怎样集成](https://db-"
"gpt.readthedocs.io/en/latest/modules/vector.html)"
#: ../../faq.md:104 955e38d635df47039bfaf48bd8eabc66
#: ../../faq.md:104 bed01b4f96e04f61aa536cf615254c87
#, fuzzy
msgid "Q7:When I use vicuna-13b, found some illegal character like this."
msgstr "使用vicuna-13b知识库问答出现乱码"
#: ../../faq.md:109 6d11192c1f8c4ab5b20fb54a9d29790c
#: ../../faq.md:109 1ea68292da6d46a89944212948d1719d
#, fuzzy
msgid ""
"A7: set KNOWLEDGE_SEARCH_TOP_SIZE smaller or set KNOWLEDGE_CHUNK_SIZE "
"smaller, and reboot server."
msgstr "将KNOWLEDGE_SEARCH_TOP_SIZE和KNOWLEDGE_CHUNK_SIZE设置小点然后重启"
#: ../../faq.md:111 256b58e3749a4a17acbc456c71581597
msgid ""
"Q8:space add error (pymysql.err.OperationalError) (1054, \"Unknown column"
" 'knowledge_space.context' in 'field list'\")"
msgstr "Q8:space add error (pymysql.err.OperationalError) (1054, \"Unknown column"
" 'knowledge_space.context' in 'field list'\")"
#: ../../faq.md:114 febe9ec0c51b4e10b3a2bb0aece94e7c
msgid "A8:"
msgstr "A8:"
#: ../../faq.md:115 d713a368fe8147feb6db8f89938f3dcd
msgid "1.shutdown dbgpt_server(ctrl c)"
msgstr ""
#: ../../faq.md:117 4c6ad307caff4b57bb6c7085cc42fb64
msgid "2.add column context for table knowledge_space"
msgstr "2.add column context for table knowledge_space"
#: ../../faq.md:121 a3f955c090be4163bdb934eb32c25fd5
msgid "3.execute sql ddl"
msgstr "3.执行 sql ddl"
#: ../../faq.md:126 1861332d1c0342e2b968732fba55fa54
msgid "4.restart dbgpt server"
msgstr "4.重启 dbgpt server"

View File

@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: DB-GPT 0.3.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-07-31 17:04+0800\n"
"POT-Creation-Date: 2023-08-10 16:38+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language: zh_CN\n"
@ -19,29 +19,29 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.12.1\n"
#: ../../getting_started/getting_started.md:1 7cfda7573f014955a3d1860a8944c3cd
#: ../../getting_started/getting_started.md:1 b5ce6e0075114e669c78cda63f22dfe6
msgid "Quickstart Guide"
msgstr "使用指南"
#: ../../getting_started/getting_started.md:3 65d35e746cdc4fcdb8e712cd675081c6
#: ../../getting_started/getting_started.md:3 caf6a35c0ddc43199750b2faae2bf95d
msgid ""
"This tutorial gives you a quick walkthrough about use DB-GPT with you "
"environment and data."
msgstr "本教程为您提供了关于如何使用DB-GPT的使用指南。"
#: ../../getting_started/getting_started.md:5 dd221eaa459044bd805725f4d89fdde2
#: ../../getting_started/getting_started.md:5 338aae168e63461f84c8233c4cbf4bcc
msgid "Installation"
msgstr "安装"
#: ../../getting_started/getting_started.md:7 4736b4728e4f43409162371955e42987
#: ../../getting_started/getting_started.md:7 62519550906f4fdb99aad61502e7a5e6
msgid "To get started, install DB-GPT with the following steps."
msgstr "请按照以下步骤安装DB-GPT"
#: ../../getting_started/getting_started.md:9 550d49c0d50a4a93867840a15e624912
#: ../../getting_started/getting_started.md:9 f9072092ebf24ebc893604ca86116cd3
msgid "1. Hardware Requirements"
msgstr "1. 硬件要求"
#: ../../getting_started/getting_started.md:10 695be3b79d4d4f56b5c8de62646a8c41
#: ../../getting_started/getting_started.md:10 071c54f9a38e4ef5bb5dcfbf7d4a6004
msgid ""
"As our project has the ability to achieve ChatGPT performance of over "
"85%, there are certain hardware requirements. However, overall, the "
@ -49,67 +49,69 @@ msgid ""
"specific hardware requirements for deployment are as follows:"
msgstr "由于我们的项目有能力达到85%以上的ChatGPT性能所以对硬件有一定的要求。但总体来说我们在消费级的显卡上即可完成项目的部署使用具体部署的硬件说明如下:"
#: ../../getting_started/getting_started.md 1bfd2621414b4ea780167151df2f80ff
#: ../../getting_started/getting_started.md 4757df7812c64eb4b511aa3b8950c899
msgid "GPU"
msgstr "GPU"
#: ../../getting_started/getting_started.md 1a0fc8a2721d46dbb59eb7629c02c480
#: ../../getting_started/getting_started.md
#: ../../getting_started/getting_started.md:60 8f316fbd032f474b88d823978d134328
#: cb3d34d1ed074e1187b60c6f74cf1468
msgid "VRAM Size"
msgstr "显存大小"
#: ../../getting_started/getting_started.md 0cc7f0bf931340a1851aa376df8a4a99
#: ../../getting_started/getting_started.md ac41e6ccf820424b9c497156a317287d
msgid "Performance"
msgstr "显存大小"
#: ../../getting_started/getting_started.md dfb52838915146bd906b04383c9a65b7
#: ../../getting_started/getting_started.md 3ca06ce116754dde958ba21beaedac6f
msgid "RTX 4090"
msgstr "RTX 4090"
#: ../../getting_started/getting_started.md 9477f857f6df45f0843139ba5c23f263
#: d14dfe72732c462d8d8073a7804af548
#: ../../getting_started/getting_started.md 54e3410cc9a241098ae4412356a89f02
#: e3f31b30c4c84b45bc63b0ea193d0ed1
msgid "24 GB"
msgstr "24 GB"
#: ../../getting_started/getting_started.md c76a371ed992440a8b7b16ad405c273d
#: ../../getting_started/getting_started.md ff0c37e041a848899b1c3a63bab43404
msgid "Smooth conversation inference"
msgstr "可以流畅的进行对话推理,无卡顿"
#: ../../getting_started/getting_started.md 27fea237ff004247a6f5aa4593a5aabc
#: ../../getting_started/getting_started.md 09120e60cfc640e182e8069a372038cc
msgid "RTX 3090"
msgstr "RTX 3090"
#: ../../getting_started/getting_started.md 843f16d5af974b82beabba7d0afceae5
#: ../../getting_started/getting_started.md 0545dfec3b6d47e1bffc87301a0f05b3
msgid "Smooth conversation inference, better than V100"
msgstr "可以流畅进行对话推理有卡顿感但好于V100"
#: ../../getting_started/getting_started.md c1ddf9dc9e384a44b988a83bb209814c
#: ../../getting_started/getting_started.md d41078c0561f47a981a488eb4b9c7a54
msgid "V100"
msgstr "V100"
#: ../../getting_started/getting_started.md 94cc69a3ce8a4ea58a0b5c28fb972a41
#: ../../getting_started/getting_started.md 6aefb19420f4452999544a0da646f067
msgid "16 GB"
msgstr "16 GB"
#: ../../getting_started/getting_started.md 9a8a3304aab84a2687db2185361e2fe0
#: ../../getting_started/getting_started.md 8e06966b88234f4898729750d724e272
msgid "Conversation inference possible, noticeable stutter"
msgstr "可以进行对话推理,有明显卡顿"
#: ../../getting_started/getting_started.md:18 8c83ffd018b242dbb626312ac0c69b79
#: ../../getting_started/getting_started.md:18 113957ae638e4799a47e58a2ca259ce8
msgid "2. Install"
msgstr "2. 安装"
#: ../../getting_started/getting_started.md:20 bab57fbe66814f51959c77a55a24005e
#: ../../getting_started/getting_started.md:20 e068b950171f4c48a9e125ece31f75b9
#, fuzzy
msgid ""
"1.This project relies on a local MySQL database service, which you need "
"to install locally. We recommend using Docker for installation."
msgstr "本项目依赖一个本地的 MySQL 数据库服务,你需要本地安装,推荐直接使用 Docker 安装。"
#: ../../getting_started/getting_started.md:24 f0033070a3d5412a94d69deaca81a8a8
#: ../../getting_started/getting_started.md:24 f115b61438ad403aaaae0fc4970a982b
msgid "prepare server sql script"
msgstr "准备db-gpt server sql脚本"
#: ../../getting_started/getting_started.md:29 2d4c2ea402c1448e8372168ee81c7fb7
#: ../../getting_started/getting_started.md:29 c5f707269582464e92e5fd57365f5a51
msgid ""
"We use [Chroma embedding database](https://github.com/chroma-core/chroma)"
" as the default for our vector database, so there is no need for special "
@ -122,11 +124,11 @@ msgstr ""
"向量数据库我们默认使用的是Chroma内存数据库所以无需特殊安装如果有需要连接其他的同学可以按照我们的教程进行安装配置。整个DB-"
"GPT的安装过程我们使用的是miniconda3的虚拟环境。创建虚拟环境并安装python依赖包"
#: ../../getting_started/getting_started.md:38 125c03f282f3449b9f17f025857b10e2
#: ../../getting_started/getting_started.md:38 377381c76f32409298f4184f23653205
msgid "Before use DB-GPT Knowledge Management"
msgstr "使用知识库管理功能之前"
#: ../../getting_started/getting_started.md:44 4d1d8f2d4f044fdeb1b622fabb87a0ea
#: ../../getting_started/getting_started.md:44 62e7e96a18ca495595b3dfab97ca387e
msgid ""
"Once the environment is installed, we have to create a new folder "
"\"models\" in the DB-GPT project, and then we can put all the models "
@ -135,33 +137,40 @@ msgstr ""
"环境安装完成后我们必须在DB-"
"GPT项目中创建一个新文件夹\"models\"然后我们可以把从huggingface下载的所有模型放到这个目录下。"
#: ../../getting_started/getting_started.md:47 00b27ca566f242cfbc3d0fac77654063
#: ../../getting_started/getting_started.md:47 e34d946ab06e47dba81cf50348987594
#, fuzzy
msgid "Notice make sure you have install git-lfs"
msgstr "确保你已经安装了git-lfs"
#: ../../getting_started/getting_started.md:57 b8c136cbdbc34f90b09a52362b785563
#: ../../getting_started/getting_started.md:58 07149cfac5314daabbd37dc1cef82905
msgid ""
"The model files are large and will take a long time to download. During "
"the download, let's configure the .env file, which needs to be copied and"
" created from the .env.template"
msgstr "模型文件很大,需要很长时间才能下载。在下载过程中,让我们配置.env文件它需要从。env.template中复制和创建。"
#: ../../getting_started/getting_started.md:60 a15420a447c443dabb9f60e460a9ab97
#: ../../getting_started/getting_started.md:61 b8da240286e04573a2d731d228fbdb1e
msgid "cp .env.template .env"
msgstr "cp .env.template .env"
#: ../../getting_started/getting_started.md:63 134bf228f94c4aec89c34e69e006af35
#: ../../getting_started/getting_started.md:64 c2b05910bba14e5b9b49f8ad8c436412
msgid ""
"You can configure basic parameters in the .env file, for example setting "
"LLM_MODEL to the model to be used"
msgstr "您可以在.env文件中配置基本参数例如将LLM_MODEL设置为要使用的模型。"
#: ../../getting_started/getting_started.md:65 c25744934b7142449e09dd57e0c98cc6
#: ../../getting_started/getting_started.md:66 4af57c4cbf4744a8af83083d051098b3
msgid ""
"([Vicuna-v1.5](https://huggingface.co/lmsys/vicuna-13b-v1.5) based on "
"llama-2 has been released, we recommend you set `LLM_MODEL=vicuna-"
"13b-v1.5` to try this model)"
msgstr ""
#: ../../getting_started/getting_started.md:68 62287c3828ba48c4871dd831af7d819a
msgid "3. Run"
msgstr "3. 运行"
#: ../../getting_started/getting_started.md:66 e2bd77426e054a99801faa76ffa1046a
#: ../../getting_started/getting_started.md:69 7a04f6c514c34d87a03b105de8a03f0b
msgid ""
"You can refer to this document to obtain the Vicuna weights: "
"[Vicuna](https://github.com/lm-sys/FastChat/blob/main/README.md#model-"
@ -170,7 +179,7 @@ msgstr ""
"关于基础模型, 可以根据[Vicuna](https://github.com/lm-"
"sys/FastChat/blob/main/README.md#model-weights) 合成教程进行合成。"
#: ../../getting_started/getting_started.md:68 794d5f1a8afc4667bbc848cc7192e9bd
#: ../../getting_started/getting_started.md:71 72c9ad9d95c34b42807ed0a1e46adb73
msgid ""
"If you have difficulty with this step, you can also directly use the "
"model from [this link](https://huggingface.co/Tribbiani/vicuna-7b) as a "
@ -179,7 +188,7 @@ msgstr ""
"如果此步有困难的同学,也可以直接使用[此链接](https://huggingface.co/Tribbiani/vicuna-"
"7b)上的模型进行替代。"
#: ../../getting_started/getting_started.md:70 72be6fe338304f9e8639e16b75859343
#: ../../getting_started/getting_started.md:73 889cf1a4dbd2422580fe59d9f3eab6bf
msgid ""
"set .env configuration set your vector store type, "
"eg:VECTOR_STORE_TYPE=Chroma, now we support Chroma and Milvus(version > "
@ -188,21 +197,21 @@ msgstr ""
"在.env文件设置向量数据库环境变量eg:VECTOR_STORE_TYPE=Chroma, 目前我们支持了 Chroma and "
"Milvus(version >2.1) "
#: ../../getting_started/getting_started.md:73 e36089f05a5a41a2affcea15ecd1ac3d
#: ../../getting_started/getting_started.md:76 28a2ec60d35c43edab71bac103ccb0e0
#, fuzzy
msgid "1.Run db-gpt server"
msgstr "运行模型服务"
#: ../../getting_started/getting_started.md:78
#: ../../getting_started/getting_started.md:127
#: ../../getting_started/getting_started.md:181
#: 4136b297d88e4dedb1616f28885a8cd6 eec0acd695454df59c67358889f971fb
#: f4f47ac8b1c549e0b2a68f16774fac64
#: ../../getting_started/getting_started.md:81
#: ../../getting_started/getting_started.md:140
#: ../../getting_started/getting_started.md:194
#: 04ed6e8729604ee59edd16995301f0d5 225139430ee947cb8a191f1641f581b3
#: b33473644c124fdf9fe9912307cb6a9d
#, fuzzy
msgid "Open http://localhost:5000 with your browser to see the product."
msgstr "打开浏览器访问http://localhost:5000"
#: ../../getting_started/getting_started.md:80 ef391125ac0d43358e4701bafe780b4a
#: ../../getting_started/getting_started.md:83 8068aef8a84e46b59caf59de32ad6e2b
msgid ""
"If you want to access an external LLM service, you need to 1.set the "
"variables LLM_MODEL=YOUR_MODEL_NAME "
@ -210,92 +219,277 @@ msgid ""
"file. 2.execute dbgpt_server.py in light mode"
msgstr "如果你想访问外部的大模型服务1.需要在.env文件设置模型名和外部模型服务地址。2.使用light模式启动服务"
#: ../../getting_started/getting_started.md:87 eb22acf6142b4a81bbab0bf1192f577c
#: ../../getting_started/getting_started.md:86 4a11e53950ac489a891ca8dc67c26ca0
#, fuzzy
msgid ""
"If you want to learn about dbgpt-webui, read https://github.com/csunny"
"/DB-GPT/tree/new-page-framework/datacenter"
"If you want to learn about dbgpt-webui, read https://github./csunny/DB-"
"GPT/tree/new-page-framework/datacenter"
msgstr ""
"如果你想了解DB-GPT前端服务访问https://github.com/csunny/DB-GPT/tree/new-page-"
"framework/datacenter"
#: ../../getting_started/getting_started.md:89 bc58c998b37242f8b4a37588bccc612b
#: ../../getting_started/getting_started.md:92 6a2b09ea9ba64c879d3e7ca154d95095
msgid "4. Docker (Experimental)"
msgstr ""
msgstr "4. Docker (Experimental)"
#: ../../getting_started/getting_started.md:91 3c95ec21279b43ba83e3a4e845511f26
#: ../../getting_started/getting_started.md:94 5cef89f829ce4da299f9b430fd52c446
msgid "4.1 Building Docker image"
msgstr ""
msgstr "4.1 Building Docker image"
#: ../../getting_started/getting_started.md:97 01be8cbb915f4d02a84b25be3005a200
#: ../../getting_started/getting_started.md:100
#: b3999a9169694b209bd4848c6458ff41
msgid "Review images by listing them:"
msgstr ""
msgstr "Review images by listing them:"
#: ../../getting_started/getting_started.md:103
#: ../../getting_started/getting_started.md:167
#: 8664141a7b2f4a6a8f770ae72bc5a815 f1be761680804bb9b6a4928b46dfe598
#: ../../getting_started/getting_started.md:106
#: ../../getting_started/getting_started.md:180
#: 10486e7f872f4c13a384b75e564e37fe 5b142d0a103848fa82fafd40a557f998
msgid "Output should look something like the following:"
msgstr ""
msgstr "Output should look something like the following:"
#: ../../getting_started/getting_started.md:110
#: 5fff19374f1d4b3ba3bb642f987e8b66
#: ../../getting_started/getting_started.md:113
#: 83dd5525d0f7481fac57e5898605e7f3
msgid "You can pass some parameters to docker/build_all_images.sh."
msgstr "You can pass some parameters to docker/build_all_images.sh."
#: ../../getting_started/getting_started.md:121
#: d5b916efd7b84972b68a4aa50ebc047c
msgid ""
"You can execute the command `bash docker/build_all_images.sh --help` to "
"see more usage."
msgstr "You can execute the command `bash docker/build_all_images.sh --help` to "
"see more usage."
#: ../../getting_started/getting_started.md:123
#: 0d01cafcfde04c92b4115d804dd9e912
msgid "4.2. Run all in one docker container"
msgstr ""
msgstr "4.2. Run all in one docker container"
#: ../../getting_started/getting_started.md:112
#: 5b0137ca2d0345bea0a005fdf4037702
#: ../../getting_started/getting_started.md:125
#: 90d49baea1d34f01a0db696fd567bfba
msgid "**Run with local model**"
msgstr ""
msgstr "**Run with local model**"
#: ../../getting_started/getting_started.md:130
#: 51495a6801e94901935481d57399ed20
#: ../../getting_started/getting_started.md:143
#: 422a237241014cac9917832ac053aed5
msgid ""
"`-e LLM_MODEL=vicuna-13b`, means we use vicuna-13b as llm model, see "
"/pilot/configs/model_config.LLM_MODEL_CONFIG"
msgstr ""
msgstr "`-e LLM_MODEL=vicuna-13b`, means we use vicuna-13b as llm model, see /pilot/configs/model_config.LLM_MODEL_CONFIG"
#: ../../getting_started/getting_started.md:131
#: 753a8cf0ebbd4c62be9d5fbb7e5cf691
#: ../../getting_started/getting_started.md:144
#: af5fb303a0c64567b9c82a3ce3b00aaa
msgid ""
"`-v /data/models:/app/models`, means we mount the local model file "
"directory `/data/models` to the docker container directory `/app/models`,"
" please replace it with your model file directory."
msgstr ""
msgstr "`-v /data/models:/app/models`, means we mount the local model file "
"directory `/data/models` to the docker container directory `/app/models`,"
" please replace it with your model file directory."
#: ../../getting_started/getting_started.md:133
#: ../../getting_started/getting_started.md:175
#: 043b3966baf045aca5615214d9be632a 328b58e3a9ba46a39f0511970f43743c
#: ../../getting_started/getting_started.md:146
#: ../../getting_started/getting_started.md:188
#: 05652e117c3f4d3aaea0269f980cf975 3844ae7eeec54804b05bc8aea01c3e36
msgid "You can see log with command:"
msgstr ""
msgstr "You can see log with command:"
#: ../../getting_started/getting_started.md:139
#: f6a0a66509f343d4a3306cd884a49dbc
#: ../../getting_started/getting_started.md:152
#: 665aa47936aa40458fe33dd73c76513d
msgid "**Run with openai interface**"
msgstr ""
msgstr "**Run with openai interface**"
#: ../../getting_started/getting_started.md:158
#: 65fabfc317ac41c6aaeb80e6f2e1d379
#: ../../getting_started/getting_started.md:171
#: 98d627b0f7f149dfb9c7365e3ba082a1
msgid ""
"`-e LLM_MODEL=proxyllm`, means we use proxy llm(openai interface, "
"fastchat interface...)"
msgstr ""
msgstr "`-e LLM_MODEL=proxyllm`, means we use proxy llm(openai interface, "
"fastchat interface...)"
#: ../../getting_started/getting_started.md:159
#: fb2230ca032b4a4d9cdab8d37b3ad02c
#: ../../getting_started/getting_started.md:172
#: b293dedb88034aaab9a3b740e82d7adc
msgid ""
"`-v /data/models/text2vec-large-chinese:/app/models/text2vec-large-"
"chinese`, means we mount the local text2vec model to the docker "
"container."
msgstr "`-v /data/models/text2vec-large-chinese:/app/models/text2vec-large-"
"chinese`, means we mount the local text2vec model to the docker "
"container."
#: ../../getting_started/getting_started.md:174
#: e48085324a324749af8a21bad8e7ca0e
msgid "4.3. Run with docker compose"
msgstr ""
#: ../../getting_started/getting_started.md:161
#: 12c7a94038274324a2996873df60db93
msgid "4.2. Run with docker compose"
msgstr ""
#: ../../getting_started/getting_started.md:183
#: 6ec270a68a094d4894abd2ef02739229
#: ../../getting_started/getting_started.md:196
#: a356d9ae292f4dc3913c5999e561d533
msgid ""
"You can open docker-compose.yml in the project root directory to see more"
" details."
msgstr "You can open docker-compose.yml in the project root directory to see more"
" details."
#: ../../getting_started/getting_started.md:199
#: 0c644409bce34bf4b898c26e24f7a1ac
msgid "5. Multiple GPUs"
msgstr "5. Multiple GPUs"
#: ../../getting_started/getting_started.md:201
#: af435b17984341e5abfd2d9b64fbde20
msgid ""
"DB-GPT will use all available gpu by default. And you can modify the "
"setting `CUDA_VISIBLE_DEVICES=0,1` in `.env` file to use the specific gpu"
" IDs."
msgstr "DB-GPT will use all available gpu by default. And you can modify the "
"setting `CUDA_VISIBLE_DEVICES=0,1` in `.env` file to use the specific gpu"
" IDs."
#: ../../getting_started/getting_started.md:203
#: aac3e9cec74344fd963e2136a26384af
msgid ""
"Optionally, you can also specify the gpu ID to use before the starting "
"command, as shown below:"
msgstr "Optionally, you can also specify the gpu ID to use before the starting "
"command, as shown below:"
#: ../../getting_started/getting_started.md:213
#: 3351daf4f5a04b76bae2e9cd8740549e
msgid ""
"You can modify the setting `MAX_GPU_MEMORY=xxGib` in `.env` file to "
"configure the maximum memory used by each GPU."
msgstr ""
#: ../../getting_started/getting_started.md:215
#: fabf9090b7c649b59b91742ba761339c
msgid "6. Not Enough Memory"
msgstr ""
#: ../../getting_started/getting_started.md:217
#: d23322783fd34364acebe1679d7ca554
msgid "DB-GPT supported 8-bit quantization and 4-bit quantization."
msgstr ""
#: ../../getting_started/getting_started.md:219
#: 8699aa0b5fe049099fb1e8038f9ad1ee
msgid ""
"You can modify the setting `QUANTIZE_8bit=True` or `QUANTIZE_4bit=True` "
"in `.env` file to use quantization(8-bit quantization is enabled by "
"default)."
msgstr ""
#: ../../getting_started/getting_started.md:221
#: b115800c207b49b39de5e1a548feff58
msgid ""
"Llama-2-70b with 8-bit quantization can run with 80 GB of VRAM, and 4-bit"
" quantization can run with 48 GB of VRAM."
msgstr "Llama-2-70b with 8-bit quantization can run with 80 GB of VRAM, and 4-bit"
" quantization can run with 48 GB of VRAM."
#: ../../getting_started/getting_started.md:223
#: ae7146f0995d4eca9f775c39898d5313
msgid ""
"Note: you need to install the latest dependencies according to "
"[requirements.txt](https://github.com/eosphoros-ai/DB-"
"GPT/blob/main/requirements.txt)."
msgstr "Note: you need to install the latest dependencies according to "
"[requirements.txt](https://github.com/eosphoros-ai/DB-"
"GPT/blob/main/requirements.txt)."
#: ../../getting_started/getting_started.md:226
#: 58987c486d2641cda3180a77af26c56d
msgid ""
"Here are some of the VRAM size usage of the models we tested in some "
"common scenarios."
msgstr "Here are some of the VRAM size usage of the models we tested in some "
"common scenarios."
#: ../../getting_started/getting_started.md:60 4ca1a7e65f7e46c08ef12a1357f68817
msgid "Model"
msgstr "Model"
#: ../../getting_started/getting_started.md:60 21ab5b38f81a49aeacadbcde55082adf
msgid "Quantize"
msgstr "Quantize"
#: ../../getting_started/getting_started.md:60 a2ad84e7807c4d3ebc9d2f9aec323962
#: b4e2d7a107074e58b91b263d815b2936
msgid "vicuna-7b-v1.5"
msgstr "vicuna-7b-v1.5"
#: ../../getting_started/getting_started.md:60 0134b7564c3f4cd9a605c3e7dabf5c78
#: 12a931b938ee4e938a4ed166532946e9 9cc80bc860c7430cbc2b4aff3c4c67d2
#: b34fb93b644a419d8073203a1a0091fc bb61c197b1c74eb2bf4e7e42ad05850d
#: e3ab5cabd6f54faa9de0ec6c334c0d44 f942635d6f5e483293833ac5f92b6ab9
msgid "4-bit"
msgstr "4-bit"
#: ../../getting_started/getting_started.md:60 a8556b0ec7b3488585efcdc2ba5e8565
#: afb4365dab1e4be18f486fafbd5c6256 b50b8bc0e8084e6ebe4f9678d1f2af9d
#, fuzzy
msgid "8 GB"
msgstr "24 GB"
#: ../../getting_started/getting_started.md:60 259b43952ea5424a970dfa29c84cc83d
#: 64c6a8282eaf4e229d4196ccdbc2527f 7a426b8af2a6406fa4144142450e392e
#: 8e5742b0c4b44bf6bd0536aade037671 a96071a0c713493fae2b381408ceaad3
#: dcd6861abd2049a38f32957dd16408ab f334903a72934a2a86c3b24920f8cfdb
msgid "8-bit"
msgstr "8-bit"
#: ../../getting_started/getting_started.md:60 09c7435ba68248bd885a41366a08f557
#: 328ad98c8cb94f9083e8b5f158d7c14e 633e1f6d17404732a0c2f1cdf04f6591
#: c351117229304ae88561cdd284a77f55 c77a5a73cde34e3195a6389fdd81deda
#: f28704154fad48a8a08e16e136877ca7
#, fuzzy
msgid "12 GB"
msgstr "24 GB"
#: ../../getting_started/getting_started.md:60 63564bdcfa4249438d0245922068924f
#: 9ad1bc9af45b4f52a1a0a9e7c8d14cce
msgid "vicuna-13b-v1.5"
msgstr "vicuna-13b-v1.5"
#: ../../getting_started/getting_started.md:60 0e05753ff4ec4c1a94fc68b63254115b
#: 68a9e76f2089461692c8f396774e7634 a3136e5685cf49c4adbf79224cb0eb7d
#, fuzzy
msgid "20 GB"
msgstr "24 GB"
#: ../../getting_started/getting_started.md:60 173b58152fde49f4b3e03103323c4dd7
#: 7ab8309661024d1cb656911acb7b0d58
msgid "llama-2-7b"
msgstr "llama-2-7b"
#: ../../getting_started/getting_started.md:60 069459a955274376a3d9a4021a032424
#: 619257e4671141488feb6e71bd002880
msgid "llama-2-13b"
msgstr "llama-2-13b"
#: ../../getting_started/getting_started.md:60 ce39526cfbcc4c8c910928dc69293720
#: f5e3fb53bd964e328aac908ae6fc06a4
msgid "llama-2-70b"
msgstr "llama-2-70b"
#: ../../getting_started/getting_started.md:60 c09fb995952048d290dde484a0e09478
#, fuzzy
msgid "48 GB"
msgstr "24 GB"
#: ../../getting_started/getting_started.md:60 4d9bc275dbc548918cda210f5f5d7722
#, fuzzy
msgid "80 GB"
msgstr "24 GB"
#: ../../getting_started/getting_started.md:60 962d13ed427041a19f20d9a1c8ca26ff
#: dedfece8bc8c4ffabf9cc7166e4ca4db
msgid "baichuan-7b"
msgstr ""
#: ../../getting_started/getting_started.md:60 43f629be73914d4cbb1d75c7a06f88e8
#: 5cba8d23c6e847579986b7019e073eaf
msgid "baichuan-13b"
msgstr "baichuan-13b"
#~ msgid "4.2. Run with docker compose"
#~ msgstr "4.2. Run with docker compose"

View File

@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: DB-GPT 0.3.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2023-07-31 17:04+0800\n"
"POT-Creation-Date: 2023-08-10 16:38+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language: zh_CN\n"
@ -19,11 +19,11 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.12.1\n"
#: ../../modules/llms.md:1 8322a2795eb74f1c99007fa0536ee7e5
#: ../../modules/llms.md:1 50913b0297da4485ae3fbdb6a90ba6ba
msgid "LLMs"
msgstr "大语言模型"
#: ../../modules/llms.md:3 b8458e9c89b5403099b1eb04c4b7e755
#: ../../modules/llms.md:3 608344bb1c6b49dc94487e400a92e1f9
#, python-format
msgid ""
"In the underlying large model integration, we have designed an open "
@ -36,23 +36,23 @@ msgid ""
"of use."
msgstr "在底层大模型接入中我们设计了开放的接口支持对接多种大模型。同时对于接入模型的效果我们有非常严格的把控与评审机制。对大模型能力上与ChatGPT对比在准确率上需要满足85%以上的能力对齐。我们用更高的标准筛选模型,是期望在用户使用过程中,可以省去前面繁琐的测试评估环节。"
#: ../../modules/llms.md:5 3b4af62d13b24ec28b8b0c8128cabd03
#: ../../modules/llms.md:5 138db7b30c624665930a7d532b557ea8
msgid "Multi LLMs Usage"
msgstr "多模型使用"
#: ../../modules/llms.md:6 e51a4f2e4651459cab8bdc53114ae624
#: ../../modules/llms.md:6 1c82e93d4b334b209d7f3e63c82efb0a
msgid ""
"To use multiple models, modify the LLM_MODEL parameter in the .env "
"configuration file to switch between the models."
msgstr "如果要使用不同的模型,请修改.env配置文件中的LLM MODEL参数以在模型之间切换。"
#: ../../modules/llms.md:8 4ed94e391ae04868b6d30a71c73d1f31
#: ../../modules/llms.md:8 5b1846c29dbe42c4b95a2e26dc85a358
msgid ""
"Notice: you can create .env file from .env.template, just use command "
"like this:"
msgstr "注意:你可以从 .env.template 创建 .env 文件。只需使用如下命令:"
#: ../../modules/llms.md:14 3d7e9263b09d487ab23ada4f1ba01e02
#: ../../modules/llms.md:14 95ecc02369ab4075bd245662809e4cc9
#, fuzzy
msgid ""
"now we support models vicuna-13b, vicuna-7b, chatglm-6b, flan-t5-base, "
@ -62,23 +62,23 @@ msgstr ""
"现在我们支持的模型有vicuna-13b, vicuna-7b, chatglm-6b, flan-t5-base, guanaco-33b-"
"merged, falcon-40b, gorilla-7b."
#: ../../modules/llms.md:16 4cf68821b6704730bbe6a98fb1355f77
#: ../../modules/llms.md:16 9397c2959c244f5595c74b417c2b7edb
msgid ""
"if you want use other model, such as chatglm-6b, you just need update "
".env config file."
msgstr "如果你想使用其他模型比如chatglm-6b, 仅仅需要修改.env 配置文件"
#: ../../modules/llms.md:20 c37b0e5ca58f420f91fdb93bf25215e0
#: ../../modules/llms.md:20 fe143c38ed9f48adb9042b648529e64c
msgid ""
"or chatglm2-6b, which is the second-generation version of the open-"
"source bilingual (Chinese-English) chat model ChatGLM-6B."
msgstr ""
#: ../../modules/llms.md:27 044b42a90acc4390b7714a27f38ee362
#: ../../modules/llms.md:27 d43eb7fe0a4949a1a4772cf8e91b028c
msgid "Run Model with cpu."
msgstr "用CPU运行模型"
#: ../../modules/llms.md:28 da4226be516b4cb88d6c1dc94bb918e4
#: ../../modules/llms.md:28 e6caaf6efa774131b8bef036c440db19
msgid ""
"we alse support smaller models, like gpt4all. you can use it with "
"cpu/mps(M1/M2), Download from [gpt4all model](https://gpt4all.io/models"
@ -87,11 +87,11 @@ msgstr ""
"我们也支持一些小模型你可以通过CPU/MPS(M1、M2)运行, 模型下载[gpt4all](https://gpt4all.io/models"
"/ggml-gpt4all-j-v1.3-groovy.bin)"
#: ../../modules/llms.md:30 f869669ebb104270bb408c6a2df4f68c
#: ../../modules/llms.md:30 8fc07b5d2d79496083ee3a279a131eb8
msgid "put it in the models path, then change .env config."
msgstr "将模型放在models路径, 修改.env 配置文件"
#: ../../modules/llms.md:35 88dff0a183ca4052b28d6c06fe46a3ac
#: ../../modules/llms.md:35 ddd90d901eaf4867934ea58fdb2398ab
msgid ""
"DB-GPT provides a model load adapter and chat adapter. load adapter which"
" allows you to easily adapt load different LLM models by inheriting the "
@ -100,15 +100,15 @@ msgstr ""
"DB-GPT提供了多模型适配器load adapter和chat adapter.load adapter通过继承BaseLLMAdapter类,"
" 实现match和loader方法允许你适配不同的LLM."
#: ../../modules/llms.md:37 c013c4dc67a645ea8b0ae87d0c253e79
#: ../../modules/llms.md:37 a1c446ab4dbe43d9b2558c5fad542069
msgid "vicuna llm load adapter"
msgstr "vicuna llm load adapter"
#: ../../modules/llms.md:54 b239c68b5d1a44dfbbaa5ff1354b02d2
#: ../../modules/llms.md:54 fb8655afefc4411ea1ef97230098e23c
msgid "chatglm load adapter"
msgstr "chatglm load adapter"
#: ../../modules/llms.md:81 ceccd36025064a2dbd8578032db00a01
#: ../../modules/llms.md:81 5c8c2b73419049e2b88e5e9d40149187
msgid ""
"chat adapter which allows you to easily adapt chat different LLM models "
"by inheriting the BaseChatAdpter.you just implement match() and "
@ -117,58 +117,66 @@ msgstr ""
"chat "
"adapter通过继承BaseChatAdpter允许你通过实现match和get_generate_stream_func方法允许你适配不同的LLM."
#: ../../modules/llms.md:83 668aea86f47f4082b7bf168074d6514c
#: ../../modules/llms.md:83 2d6be71d447846e4921f2e7fde678ae5
msgid "vicuna llm chat adapter"
msgstr "vicuna llm chat adapter"
#: ../../modules/llms.md:95 dba103e2e54a447b9543b51cb9876f6b
#: ../../modules/llms.md:95 e50e6ef380e74402a131c253f5ef1552
msgid "chatglm llm chat adapter"
msgstr "chatglm llm chat adapter"
#: ../../modules/llms.md:108 3edb5d9efb0a4f90af21f37bb9243e34
#: ../../modules/llms.md:108 e1825c1aa2384d4fbd40934ec32a3cf4
msgid ""
"if you want to integrate your own model, just need to inheriting "
"BaseLLMAdaper and BaseChatAdpter and implement the methods"
msgstr "如果你想集成自己的模型只需要继承BaseLLMAdaper和BaseChatAdpter类然后实现里面的方法即可"
#: ../../modules/llms.md:110 a39d9f39b42c4757b9df597dcbcc9603
#: ../../modules/llms.md:110 cfc357047ec14638934cc0514514718d
#, fuzzy
msgid "Multi Proxy LLMs"
msgstr "多模型使用"
#: ../../modules/llms.md:111 d205dedc7e93407da3624aed610a6093
#: ../../modules/llms.md:111 5a08dd4f88d4482dbac19403954f60bf
msgid "1. Openai proxy"
msgstr ""
msgstr "1. Openai proxy"
#: ../../modules/llms.md:112 dc49675aa40f484ba1de16cffbf15e28
#: ../../modules/llms.md:112 4984400803484dcc8de78252e3f74f9b
msgid ""
"If you haven't deployed a private infrastructure for a large model, or if"
" you want to use DB-GPT in a low-cost and high-efficiency way, you can "
"also use OpenAI's large model as your underlying model."
msgstr ""
#: ../../modules/llms.md:114 30bfaa0fcf754c6cb51e1b400246dc9b
#: ../../modules/llms.md:114 1696bedec29d4385b547a2d977720b76
msgid ""
"If your environment deploying DB-GPT has access to OpenAI, then modify "
"the .env configuration file as below will work."
msgstr ""
msgstr "如果本地能够访问OpenAI修改.env文件即可"
#: ../../modules/llms.md:122 33ef5db4a49241a8ba1240cafece6d48
#: ../../modules/llms.md:122 908d72c6bdfa40eab24c8966e20f55df
msgid ""
"If you can't access OpenAI locally but have an OpenAI proxy service, you "
"can configure as follows."
msgstr ""
msgstr "如果不能访问openapi服务,你可以如下配置"
#: ../../modules/llms.md:130 2dd7c019ee3f4c4f83948f0b006077f9
#: ../../modules/llms.md:130 141d43ea2f4d49778d5d35ec644ee7f6
msgid "2. Bard Proxy"
msgstr "2. Bard Proxy"
#: ../../modules/llms.md:131 47471243adc94f95807e9c0c2a112c20
msgid ""
"If your environment deploying DB-GPT has access to <a "
"href=\"https://bard.google.com/\">Bard</a> (F12-> application-> __Secure-"
"1PSID), then modify the .env configuration file as below will work."
msgstr ""
#: ../../modules/llms.md:131 7d7837484e474b0d936c838f6303723a
msgid ""
"If your environment deploying DB-GPT has access to "
"https://bard.google.com/ (F12-> application-> __Secure-1PSID), then "
"modify the .env configuration file as below will work."
msgstr ""
#: ../../modules/llms.md:139 c6a139d829514d299d92abaa0695b681
msgid ""
"If you want to use your own bard proxy server like <a "
"href=\"https://github.com/eosphoros-ai/Bard-Proxy\">Bard-Proxy</a>, so "
"that you can deploy DB-GPT on your PC easily."
msgstr "如果你想使用 bard proxy server <a "
"href=\"https://github.com/eosphoros-ai/Bard-Proxy\">Bard-Proxy</a>，你可以轻松部署 bard proxy server"
#~ msgid "Multi Proxy LLMs"
#~ msgstr "多模型使用"
@ -198,3 +206,11 @@ msgstr ""
#~ "you can configure as follows."
#~ msgstr "如果你本地无法访问openai但是你有一个openai的代理服务你可以参考如下配置"
#~ msgid ""
#~ "If your environment deploying DB-GPT "
#~ "has access to https://bard.google.com/ (F12->"
#~ " application-> __Secure-1PSID), then modify"
#~ " the .env configuration file as below"
#~ " will work."
#~ msgstr ""