diff --git a/assets/chart_db_city_users.png b/assets/chart_db_city_users.png new file mode 100644 index 000000000..13ccf1753 Binary files /dev/null and b/assets/chart_db_city_users.png differ diff --git a/assets/wechat.jpg b/assets/wechat.jpg index 626a51058..94f9bec2a 100644 Binary files a/assets/wechat.jpg and b/assets/wechat.jpg differ diff --git a/docs/conf.py b/docs/conf.py index 618e1f62a..e00f95f06 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -14,7 +14,7 @@ project = "DB-GPT" copyright = "2023, csunny" author = "csunny" -version = "0.1.0" +version = "👏👏 0.2.2" html_title = project + " " + version # -- General configuration --------------------------------------------------- diff --git a/docs/getting_started/getting_started.md b/docs/getting_started/getting_started.md index b89c01fac..d082a0c75 100644 --- a/docs/getting_started/getting_started.md +++ b/docs/getting_started/getting_started.md @@ -37,6 +37,7 @@ Once the environment is installed, we have to create a new folder "models" in th ``` git clone https://huggingface.co/Tribbiani/vicuna-13b git clone https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2 +git clone https://huggingface.co/GanymedeNil/text2vec-large-chinese ``` The model files are large and will take a long time to download. During the download, let's configure the .env file, which needs to be copied and created from the .env.template @@ -57,6 +58,11 @@ If you have difficulty with this step, you can also directly use the model from $ python pilot/server/llmserver.py ``` +Starting `llmserver.py` with the following command will result in a relatively stable Python service with multiple processes. +```bash +$ gunicorn llmserver:app -w 4 -k uvicorn.workers.UvicornWorker -b 0.0.0.0:8000 & +``` + Run gradio webui ```bash diff --git a/docs/getting_started/tutorials.md b/docs/getting_started/tutorials.md index 4c2245996..6bcad56db 100644 --- a/docs/getting_started/tutorials.md +++ b/docs/getting_started/tutorials.md @@ -5,21 +5,20 @@ This is a collection of DB-GPT tutorials on Medium. DB-GPT is divided into several functions, including chat with knowledge base, execute SQL, chat with database, and execute plugins. -###Introduce -[What is DB-GPT](https://www.youtube.com/watch?v=QszhVJerc0I) by csunny (https://github.com/csunny/DB-GPT): +### Introduce +[What is DB-GPT](https://www.youtube.com/watch?v=QszhVJerc0I) ### Knowledge [How to Create your own knowledge repository](https://db-gpt.readthedocs.io/en/latest/modules/knownledge.html) -[Add new Knowledge demonstration](../../assets/new_knownledge_en.gif) +![Add new Knowledge demonstration](../../assets/new_knownledge.gif) ### SQL Generation -[sql generation demonstration](../../assets/demo_en.gif) +![sql generation demonstration](../../assets/demo_en.gif) ### SQL Execute -[sql execute demonstration](../../assets/auto_sql_en.gif) - +![sql execute demonstration](../../assets/auto_sql_en.gif) ### Plugins -[db plugins demonstration](../../assets/dbgpt_bytebase_plugin.gif) \ No newline at end of file +![db plugins demonstration](../../assets/dbgpt_bytebase_plugin.gif) \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index 2b33ed0be..941aa4ff4 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -68,7 +68,7 @@ It's very important for DB-GPT, DB-GPT also provide standard, extendable interfa - `Plugins <./modules/plugins.html>`_: Plugins management, scheduler. -- `Knownledge <./modules/knownledge.html>`_: Knownledge management, embedding, and search. 
+- `Knowledge <./modules/knowledge.html>`_: Knowledge management, embedding, and search. - `Connections <./modules/connections.html>`_: Supported multi databases connection. management connections and interact with this. @@ -81,8 +81,8 @@ It's very important for DB-GPT, DB-GPT also provide standard, extendable interfa ./modules/llms.md ./modules/prompts.md ./modules/plugins.md - ./modules/connections.md - ./modules/knownledge.md + ./modules/connections.rst + ./modules/knowledge.rst Use Cases --------- diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/getting_started.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/getting_started.po index bc3e499a5..683eb6938 100644 --- a/docs/locales/zh_CN/LC_MESSAGES/getting_started/getting_started.po +++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/getting_started.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: DB-GPT 0.1.0\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2023-06-11 14:10+0800\n" +"POT-Creation-Date: 2023-06-14 17:26+0800\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language: zh_CN\n" @@ -17,95 +17,94 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.11.0\n" +"Generated-By: Babel 2.12.1\n" -#: ../../getting_started/getting_started.md:1 cf1947dea9a843dd8b6fff68642f29b1 +#: ../../getting_started/getting_started.md:1 a0477412435c4c569cf71d243d2884c7 msgid "Quickstart Guide" msgstr "使用指南" -#: ../../getting_started/getting_started.md:3 4184879bf5b34521a95e497f4747241a +#: ../../getting_started/getting_started.md:3 331f8c3fbbac44c1b75d2ff595c0235f msgid "" "This tutorial gives you a quick walkthrough about use DB-GPT with you " "environment and data." msgstr "本教程为您提供了关于如何使用DB-GPT的使用指南。" -#: ../../getting_started/getting_started.md:5 7431b72cc1504b8bbcafb7512a6b6c92 +#: ../../getting_started/getting_started.md:5 df41fe97067d4ba680e3231f05a843de msgid "Installation" msgstr "安装" -#: ../../getting_started/getting_started.md:7 b8faf2ec4e034855a2674ffcade8cee2 +#: ../../getting_started/getting_started.md:7 73f72c06a89341f38f4bfbe70ed0d2ae msgid "To get started, install DB-GPT with the following steps." msgstr "请按照以下步骤安装DB-GPT" -#: ../../getting_started/getting_started.md:9 ae0f536a064647cda04ea3d253991d80 +#: ../../getting_started/getting_started.md:9 59c159fd30104ba081e2ebbf1605fe11 msgid "1. Hardware Requirements" msgstr "1. 硬件要求" -#: ../../getting_started/getting_started.md:10 8fa637100e644b478e0d6858f0a5b63d +#: ../../getting_started/getting_started.md:10 126558467caa4b05bea0bb051c864831 msgid "" "As our project has the ability to achieve ChatGPT performance of over " "85%, there are certain hardware requirements. However, overall, the " "project can be deployed and used on consumer-grade graphics cards. 
The " "specific hardware requirements for deployment are as follows:" -msgstr "由于我们的项目有能力达到85%以上的ChatGPT性能,所以对硬件有一定的要求。" -"但总体来说,我们在消费级的显卡上即可完成项目的部署使用,具体部署的硬件说明如下:" +msgstr "由于我们的项目有能力达到85%以上的ChatGPT性能,所以对硬件有一定的要求。但总体来说,我们在消费级的显卡上即可完成项目的部署使用,具体部署的硬件说明如下:" -#: ../../getting_started/getting_started.md c68539579083407882fb0d28943d40db +#: ../../getting_started/getting_started.md 967e5cde5c6241edae9e0e0e0b217221 msgid "GPU" msgstr "GPU" -#: ../../getting_started/getting_started.md 613fbe77d41a4a20a30c3c9a0b6ec20c +#: ../../getting_started/getting_started.md 05be5479b15b403c8aa3374ea53feff8 msgid "VRAM Size" msgstr "显存大小" -#: ../../getting_started/getting_started.md c0b7f8249d3d4c629ba5deb8188a49b4 +#: ../../getting_started/getting_started.md ea646d956e834a75912780789946bb47 msgid "Performance" msgstr "显存大小" -#: ../../getting_started/getting_started.md 5d103f7e4d1b4b6cb7358c0c717c9f73 +#: ../../getting_started/getting_started.md aba96df6b91441b982128426eb3a2ebb msgid "RTX 4090" msgstr "RTX 4090" -#: ../../getting_started/getting_started.md 48338f6b18dc41efb3613d47b1a762a7 -#: f14d278e083440b58fc7faeed30e2879 +#: ../../getting_started/getting_started.md d2797ffe3d534460ae77a3397fc07c1c +#: e1b5f1a7502e475b872eebd75972a87f msgid "24 GB" msgstr "24 GB" -#: ../../getting_started/getting_started.md dc238037ff3449cdb95cbd882d8de170 +#: ../../getting_started/getting_started.md 4ed99c06dc4643c290b5914142ea8371 msgid "Smooth conversation inference" msgstr "可以流畅的进行对话推理,无卡顿" -#: ../../getting_started/getting_started.md d7f84ac79bf84cb6a453d3bfd26eb935 +#: ../../getting_started/getting_started.md b3dc189389cd41f2883acfe3fad3d6a4 msgid "RTX 3090" msgstr "RTX 3090" -#: ../../getting_started/getting_started.md 511ee322b777476b87a3aa5624609944 +#: ../../getting_started/getting_started.md 0bea7ccef4154e2994144f81af877919 msgid "Smooth conversation inference, better than V100" msgstr "可以流畅进行对话推理,有卡顿感,但好于V100" -#: ../../getting_started/getting_started.md 974b704e8cf84f6483774153df8a8c6c +#: ../../getting_started/getting_started.md 7d1c9e5c16184f2bacf8b12d6f38f629 msgid "V100" msgstr "V100" -#: ../../getting_started/getting_started.md 72008961ce004a0fa24b74db55fcf96e +#: ../../getting_started/getting_started.md e9ddddf92e15420f85852cea1a1bbd8d msgid "16 GB" msgstr "16 GB" -#: ../../getting_started/getting_started.md 2a3b936fe04c4b7789680c26be7f4869 +#: ../../getting_started/getting_started.md f5532da60b99495c8329d674749ae79f msgid "Conversation inference possible, noticeable stutter" msgstr "可以进行对话推理,有明显卡顿" -#: ../../getting_started/getting_started.md:18 fb1dbccb8f804384ade8e171aa40f99c +#: ../../getting_started/getting_started.md:18 3357d10704b94249b8ccdf7fd3645624 msgid "2. Install" msgstr "2. 安装" -#: ../../getting_started/getting_started.md:20 695fdb8858c6488e9a0872d68fb387e5 +#: ../../getting_started/getting_started.md:20 08e5054dd0fa4e07aa92236dca03a1d3 msgid "" "This project relies on a local MySQL database service, which you need to " "install locally. We recommend using Docker for installation." msgstr "本项目依赖一个本地的 MySQL 数据库服务,你需要本地安装,推荐直接使用 Docker 安装。" -#: ../../getting_started/getting_started.md:25 954f3a282ec54b11a55ebfe1f680d1df +#: ../../getting_started/getting_started.md:25 f99a1af073e24b339390400251a50c9b msgid "" "We use [Chroma embedding database](https://github.com/chroma-core/chroma)" " as the default for our vector database, so there is no need for special " @@ -114,66 +113,74 @@ msgid "" "installation process of DB-GPT, we use the miniconda3 virtual " "environment. 
Create a virtual environment and install the Python " "dependencies." -msgstr "向量数据库我们默认使用的是Chroma内存数据库,所以无需特殊安装,如果有" -"需要连接其他的同学,可以按照我们的教程进行安装配置。整个DB-GPT的" -"安装过程,我们使用的是miniconda3的虚拟环境。创建虚拟环境,并安装python依赖包" +msgstr "" +"向量数据库我们默认使用的是Chroma内存数据库,所以无需特殊安装,如果有需要连接其他的同学,可以按照我们的教程进行安装配置。整个DB-" +"GPT的安装过程,我们使用的是miniconda3的虚拟环境。创建虚拟环境,并安装python依赖包" - -#: ../../getting_started/getting_started.md:35 0314bad0928940fc8e382d289d356c66 +#: ../../getting_started/getting_started.md:35 d8ebdf7c4ac54113be1c94ed879dc93f msgid "" "Once the environment is installed, we have to create a new folder " "\"models\" in the DB-GPT project, and then we can put all the models " "downloaded from huggingface in this directory" -msgstr "环境安装完成后,我们必须在DB-GPT项目中创建一个新文件夹\"models\"," -"然后我们可以把从huggingface下载的所有模型放到这个目录下。" +msgstr "" +"环境安装完成后,我们必须在DB-" +"GPT项目中创建一个新文件夹\"models\",然后我们可以把从huggingface下载的所有模型放到这个目录下。" -#: ../../getting_started/getting_started.md:42 afdf176f72224fd6b8b6e9e23c80c1ef +#: ../../getting_started/getting_started.md:42 cd3ce56a4d644574a0c30dd86148a58c msgid "" "The model files are large and will take a long time to download. During " "the download, let's configure the .env file, which needs to be copied and" " created from the .env.template" -msgstr "模型文件很大,需要很长时间才能下载。在下载过程中,让我们配置.env文件," -"它需要从。env.template中复制和创建。" +msgstr "模型文件很大,需要很长时间才能下载。在下载过程中,让我们配置.env文件,它需要从。env.template中复制和创建。" -#: ../../getting_started/getting_started.md:48 76c87610993f41059c3c0aade5117171 +#: ../../getting_started/getting_started.md:48 96d634498ae04c84b4eae8502d5f65e8 msgid "" "You can configure basic parameters in the .env file, for example setting " "LLM_MODEL to the model to be used" msgstr "您可以在.env文件中配置基本参数,例如将LLM_MODEL设置为要使用的模型。" -#: ../../getting_started/getting_started.md:35 443f5f92e4cd4ce4887bae2556b605b0 +#: ../../getting_started/getting_started.md:50 a78627885d2a41a481131308d2d061a6 msgid "3. Run" msgstr "3. 运行" -#: ../../getting_started/getting_started.md:36 3dab200eceda460b81a096d44de43d21 +#: ../../getting_started/getting_started.md:51 69c1d06867cb4a0ea36ad0f3686109db msgid "" "You can refer to this document to obtain the Vicuna weights: " "[Vicuna](https://github.com/lm-sys/FastChat/blob/main/README.md#model-" "weights) ." -msgstr "关于基础模型, 可以根据[Vicuna](https://github.com/lm-sys/FastChat/b" -"lob/main/README.md#model-weights) 合成教程进行合成。" +msgstr "" +"关于基础模型, 可以根据[Vicuna](https://github.com/lm-" +"sys/FastChat/blob/main/README.md#model-weights) 合成教程进行合成。" - -#: ../../getting_started/getting_started.md:38 b036ca6294f04bceb686187d2d8b6646 +#: ../../getting_started/getting_started.md:53 1dcccc1784e04fa3a3340aa446f1484f msgid "" "If you have difficulty with this step, you can also directly use the " "model from [this link](https://huggingface.co/Tribbiani/vicuna-7b) as a " "replacement." -msgstr "如果此步有困难的同学,也可以直接使用[此链接](https://huggingface.co/Tribbiani/vicuna-7b)上的模型进行替代。" +msgstr "" +"如果此步有困难的同学,也可以直接使用[此链接](https://huggingface.co/Tribbiani/vicuna-" +"7b)上的模型进行替代。" -#: ../../getting_started/getting_started.md:40 35537c13ff6f4bd69951c486274ca1f9 +#: ../../getting_started/getting_started.md:55 45a4c9f50ee04533bd57652505ab4f62 msgid "Run server" msgstr "运行模型服务" -#: ../../getting_started/getting_started.md:45 f7aa3668a6c94fb3a1b8346392d921f3 +#: ../../getting_started/getting_started.md:60 f3482509ad2f4137becd4775768180bd +msgid "" +"Starting `llmserver.py` with the following command will result in a " +"relatively stable Python service with multiple processes." 
+msgstr "使用以下命令启动llmserver.py将会得到一个相对稳定的Python服务,并且具有多个进程。" + +#: ../../getting_started/getting_started.md:65 ddf718f6863f4e58b8de1232dc8189dd msgid "Run gradio webui" msgstr "运行模型服务" -#: ../../getting_started/getting_started.md:51 d80c908f01144e2c8a15b7f6e8e7f88d +#: ../../getting_started/getting_started.md:71 d3df863c46ae49b1a63e5778d439d336 msgid "" "Notice: the webserver need to connect llmserver, so you need change the" " .env file. change the MODEL_SERVER = \"http://127.0.0.1:8000\" to your " "address. It's very important." -msgstr "注意: 在启动Webserver之前, 需要修改.env 文件中的MODEL_SERVER" -" = "http://127.0.0.1:8000", 将地址设置为你的服务器地址。" +msgstr "" +"注意: 在启动Webserver之前, 需要修改.env 文件中的MODEL_SERVER = " +"\"http://127.0.0.1:8000\", 将地址设置为你的服务器地址。" diff --git a/docs/locales/zh_CN/LC_MESSAGES/getting_started/tutorials.po b/docs/locales/zh_CN/LC_MESSAGES/getting_started/tutorials.po index b00cd631a..45163e6ee 100644 --- a/docs/locales/zh_CN/LC_MESSAGES/getting_started/tutorials.po +++ b/docs/locales/zh_CN/LC_MESSAGES/getting_started/tutorials.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: DB-GPT 0.1.0\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2023-06-13 18:04+0800\n" +"POT-Creation-Date: 2023-06-14 17:19+0800\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language: zh_CN\n" @@ -19,73 +19,94 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.12.1\n" -#: ../../getting_started/tutorials.md:1 7011a2ab0e7f45ddb1fa85b6479cc442 +#: ../../getting_started/tutorials.md:1 f80ad98cf2d444a3a159655b37ae4d4f msgid "Tutorials" msgstr "教程" -#: ../../getting_started/tutorials.md:4 960f88b9c1b64940bfa0576bab5b0314 +#: ../../getting_started/tutorials.md:4 ff8771c31b024e538dbbaaeaccec8aa7 msgid "This is a collection of DB-GPT tutorials on Medium." msgstr "这是知乎上DB-GPT教程的集合。." -#: ../../getting_started/tutorials.md:6 1c8db33581ea4928905e029a98b9a155 +#: ../../getting_started/tutorials.md:6 0c1c544a5d184375aabd7fe6918042e2 msgid "" "DB-GPT is divided into several functions, including chat with knowledge " "base, execute SQL, chat with database, and execute plugins." 
msgstr "" -#: ../../getting_started/tutorials.md:8 3915395cc45742519bf0c607eeafc489 +#: ../../getting_started/tutorials.md:8 b5e106a97bbd4467b071bb05d837d7d9 +msgid "Introduce" +msgstr "" + +#: ../../getting_started/tutorials.md:9 9df43436183d4b41a75a09fec15743d7 #, fuzzy -msgid "" -"###Introduce [What is DB-" -"GPT](https://www.youtube.com/watch?v=QszhVJerc0I) by csunny " -"(https://github.com/csunny/DB-GPT):" +msgid "[What is DB-GPT](https://www.youtube.com/watch?v=QszhVJerc0I)" msgstr "" "###Introduce [什么是DB-" "GPT](https://www.bilibili.com/video/BV1SM4y1a7Nj/?buvid=551b023900b290f9497610b2155a2668&is_story_h5=false&mid=%2BVyE%2Fwau5woPcUKieCWS0A%3D%3D&p=1&plat_id=116&share_from=ugc&share_medium=iphone&share_plat=ios&share_session_id=5D08B533-82A4-4D40-9615-7826065B4574&share_source=GENERIC&share_tag=s_i×tamp=1686307943&unique_k=bhO3lgQ&up_id=31375446)" " by csunny (https://github.com/csunny/DB-GPT)" -#: ../../getting_started/tutorials.md:11 e213736923574b2cb039a457d789c27c +#: ../../getting_started/tutorials.md:11 3096811f862649bf84ff3cd29cdf14db msgid "Knowledge" msgstr "知识库" -#: ../../getting_started/tutorials.md:13 90b5472735a644168d51c054ed882748 +#: ../../getting_started/tutorials.md:13 ea00f3de8c754bf2950e735a2f14043a +#, fuzzy msgid "" "[How to Create your own knowledge repository](https://db-" "gpt.readthedocs.io/en/latest/modules/knownledge.html)" msgstr "" "[怎么创建自己的知识库](https://db-" -"gpt.readthedocs.io/en/latest/modules/knownledge.html)" +"gpt.readthedocs.io/en/latest/modules/knowledge.html)" -#: ../../getting_started/tutorials.md:15 6a851e1e88ea4bcbaf7ee742a12224ef -msgid "[Add new Knowledge demonstration](../../assets/new_knownledge_en.gif)" +#: ../../getting_started/tutorials.md:15 07195f11314945989eeeb9400c8a9b43 +#, fuzzy +msgid "![Add new Knowledge demonstration](../../assets/new_knownledge.gif)" msgstr "[新增知识库演示](../../assets/new_knownledge_en.gif)" -#: ../../getting_started/tutorials.md:17 59887be89d8046e28956f909fcbbc9dc +#: ../../getting_started/tutorials.md:15 333cdda401df4509a11d14535391b8a8 +#, fuzzy +msgid "Add new Knowledge demonstration" +msgstr "[新增知识库演示](../../assets/new_knownledge_en.gif)" + +#: ../../getting_started/tutorials.md:17 5245cd247a184f63a10f735f414f303f msgid "SQL Generation" msgstr "" -#: ../../getting_started/tutorials.md:18 ee5decd8441d40ae8a240a19c1a5a74a +#: ../../getting_started/tutorials.md:18 9a980e7625d34b98bf318851c43fb13d #, fuzzy -msgid "[sql generation demonstration](../../assets/demo_en.gif)" +msgid "![sql generation demonstration](../../assets/demo_en.gif)" msgstr "[sql生成演示](../../assets/demo_en.gif)" -#: ../../getting_started/tutorials.md:20 5d25c5d307c24c9198f2b52e70f2421c +#: ../../getting_started/tutorials.md:18 952c680cf62140978b4e94d36c49134a +#, fuzzy +msgid "sql generation demonstration" +msgstr "[sql生成演示](../../assets/demo_en.gif)" + +#: ../../getting_started/tutorials.md:20 c0a6f9fefbb9404695fe3bffb6ecc577 msgid "SQL Execute" msgstr "SQL执行" -#: ../../getting_started/tutorials.md:21 ee5decd8441d40ae8a240a19c1a5a74a +#: ../../getting_started/tutorials.md:21 e959cc6ca356407d854ee5541233c19a #, fuzzy -msgid "[sql execute demonstration](../../assets/auto_sql_en.gif)" +msgid "![sql execute demonstration](../../assets/auto_sql_en.gif)" msgstr "[sql execute 演示](../../assets/auto_sql_en.gif)" +#: ../../getting_started/tutorials.md:21 69247d51ccd349b082ea452f6d74d2b3 +#, fuzzy +msgid "sql execute demonstration" +msgstr "SQL执行" -#: ../../getting_started/tutorials.md:26 4487ef393e004e7c936f5104727212a4 +#: 
../../getting_started/tutorials.md:23 0fd9770dbf3c49b0b644599dc70187a7 #, fuzzy msgid "Plugins" msgstr "DB Plugins" -#: ../../getting_started/tutorials.md:27 ee5decd8441d40ae8a240a19c1a5a74a +#: ../../getting_started/tutorials.md:24 cf58eb1ee13f49f69e501c0e221b4bed #, fuzzy -msgid "[db plugins demonstration](../../assets/dbgpt_bytebase_plugin.gif)" +msgid "![db plugins demonstration](../../assets/dbgpt_bytebase_plugin.gif)" msgstr "[db plugins 演示](../../assets/dbgpt_bytebase_plugin.gif)" +#: ../../getting_started/tutorials.md:24 9e474caadb87481ba51f8595067f7edd +msgid "db plugins demonstration" +msgstr "" + diff --git a/docs/locales/zh_CN/LC_MESSAGES/index.po b/docs/locales/zh_CN/LC_MESSAGES/index.po index 5d1b9c0d0..16319d31f 100644 --- a/docs/locales/zh_CN/LC_MESSAGES/index.po +++ b/docs/locales/zh_CN/LC_MESSAGES/index.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: DB-GPT 0.1.0\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2023-06-11 14:10+0800\n" +"POT-Creation-Date: 2023-06-14 14:51+0800\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language: zh_CN\n" @@ -17,33 +17,33 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.11.0\n" +"Generated-By: Babel 2.12.1\n" -#: ../../index.rst:34 ../../index.rst:45 e3275f133efd471582d952301a6e243e +#: ../../index.rst:34 ../../index.rst:45 558ad9c9fc2240589fabce35463ca24c msgid "Getting Started" msgstr "开始" -#: ../../index.rst:56 ../../index.rst:75 86e2ce002e604304a4032aa1555b36cb +#: ../../index.rst:56 ../../index.rst:75 abd248b06ac142a68a89e0fb7e477536 msgid "Modules" msgstr "模块" -#: ../../index.rst:88 ../../index.rst:104 b15c23cfcc084df9a8f8f9990e6903ac +#: ../../index.rst:88 ../../index.rst:104 e046db82aa5845598b373ca1b9a73ec2 msgid "Use Cases" msgstr "示例" -#: ../../index.rst:118 ../../index.rst:121 70605b76fe5348299dd5d48d8ab6a77c +#: ../../index.rst:118 ../../index.rst:121 e6fbf08bf59b48afb4ac3f479d88256b msgid "Reference" msgstr "参考" -#: ../../index.rst:145 ../../index.rst:151 f62cf565fab64977b0efbd50e83540cc +#: ../../index.rst:145 ../../index.rst:151 03496f75357d4b4d9fd721516faa5e54 msgid "Resources" msgstr "资源" -#: ../../index.rst:7 c8b3a0ca759f432095161f7baccde1c4 +#: ../../index.rst:7 e0764d3d816244b4bf047032c1a28760 msgid "Welcome to DB-GPT!" msgstr "欢迎来到DB-GPT中文文档" -#: ../../index.rst:8 0167fea2c4df4181bc10d6e71527d005 +#: ../../index.rst:8 a3500e3f721348ce859c7c774b59b41a msgid "" "As large models are released and iterated upon, they are becoming " "increasingly intelligent. However, in the process of using large models, " @@ -56,217 +56,207 @@ msgid "" "independent private environments but also to be independently deployed " "and isolated according to business modules, ensuring that the ability of " "large models is absolutely private, secure, and controllable." 
-msgstr "随着大型模型的发布和迭代,它们变得越来越智能。然而,在使用大型模型的过程中," -"我们在数据安全和隐私方面面临着重大挑战。我们需要确保我们的敏感数据和环境得到完全控制," -"避免任何数据隐私泄露或安全风险。基于此,我们启动了DB-GPT项目,为所有基于数据库的" -"场景构建一个完整的私有大模型解决方案。该方案“”支持本地部署,既可应用于“独立私" -"有环境”,又可根据业务模块进行“独立部署”和“隔离”,确保“大模型”的能力绝对" -"私有、安全、可控。" +msgstr "" +"随着大型模型的发布和迭代,它们变得越来越智能。然而,在使用大型模型的过程中,我们在数据安全和隐私方面面临着重大挑战。我们需要确保我们的敏感数据和环境得到完全控制,避免任何数据隐私泄露或安全风险。基于此" +",我们启动了DB-" +"GPT项目,为所有基于数据库的场景构建一个完整的私有大模型解决方案。该方案“”支持本地部署,既可应用于“独立私有环境”,又可根据业务模块进行“独立部署”和“隔离”,确保“大模型”的能力绝对私有、安全、可控。" -#: ../../index.rst:10 36b847a04d624286a4942cd77821da8c +#: ../../index.rst:10 a8d66309ae5244d88b3c599a5ff97137 msgid "" "**DB-GPT** is an experimental open-source project that uses localized GPT" " large models to interact with your data and environment. With this " "solution, you can be assured that there is no risk of data leakage, and " "your data is 100% private and secure." -msgstr "DB-GPT 是一个开源的以数据库为基础的GPT实验项目,使用本地化的" -"GPT大模型与您的数据和环境进行交互,无数据泄露风险" -"100% 私密,100% 安全。" +msgstr "" +"DB-GPT 是一个开源的以数据库为基础的GPT实验项目,使用本地化的GPT大模型与您的数据和环境进行交互,无数据泄露风险100% 私密,100%" +" 安全。" -#: ../../index.rst:12 d20166d203934385b811740f4d5eda33 +#: ../../index.rst:12 806ae939ad9349ccb4375a236cbaf418 msgid "**Features**" msgstr "特性" -#: ../../index.rst:13 03f9de47513b4bc9a26f31e1d2d8ad60 +#: ../../index.rst:13 abd6265d37a64b00939e9c0a78f11a5e msgid "" "Currently, we have released multiple key features, which are listed below" " to demonstrate our current capabilities:" msgstr "目前我们已经发布了多种关键的特性,这里一一列举展示一下当前发布的能力。" -#: ../../index.rst:15 abc51c99bc6e49d5b0105c7d95e391da +#: ../../index.rst:15 6484ed0d92654283a7cbee6cb6b54821 msgid "SQL language capabilities - SQL generation - SQL diagnosis" msgstr "SQL语言能力 - SQL生成 - SQL诊断" -#: ../../index.rst:19 e9ba27f21fd84ecf973640fa021b06b6 +#: ../../index.rst:19 0d7ef0cf6ec649e5a9c7e076bc30ca1a msgid "" "Private domain Q&A and data processing - Database knowledge Q&A - Data " "processing" msgstr "私有领域问答与数据处理 - 数据库知识问答 - 数据处理" -#: ../../index.rst:23 a4584012b6634553abef5a4ee6ddf509 +#: ../../index.rst:23 83d1fc0f88cc42bca5206c4b9915ce65 msgid "" "Plugins - Support custom plugin execution tasks and natively support the " "Auto-GPT plugin, such as:" -msgstr "插件模型 - 支持自定义插件执行任务,并原生支持Auto-GPT插件,例如:" -"* SQL自动执行,获取查询结果 * 自动爬取学习知识" +msgstr "插件模型 - 支持自定义插件执行任务,并原生支持Auto-GPT插件,例如:* SQL自动执行,获取查询结果 * 自动爬取学习知识" -#: ../../index.rst:26 b08674d7a7da4405b9388e296bc2cd57 +#: ../../index.rst:26 7790cb466b1d455298d5eaa7582fc5ed msgid "" "Unified vector storage/indexing of knowledge base - Support for " "unstructured data such as PDF, Markdown, CSV, and WebURL" msgstr "知识库统一向量存储/索引 - 非结构化数据支持包括PDF、MarkDown、CSV、WebURL" -#: ../../index.rst:29 cf4bc81d46b4418b81a78242cbc7f984 +#: ../../index.rst:29 7354784dc4ba494e82f5d3acaac7730b msgid "" "Milti LLMs Support - Supports multiple large language models, currently " "supporting Vicuna (7b, 13b), ChatGLM-6b (int4, int8) - TODO: codegen2, " "codet5p" msgstr "多模型支持 - 支持多种大语言模型, 当前已支持Vicuna(7b,13b), ChatGLM-6b(int4, int8)" -Guanaco, Goriila, Falcon等系列模型" -#: ../../index.rst:35 681ae172eea64b718e0f6fc734d041b1 +#: ../../index.rst:35 8598e71986834a24aa390603b84288d1 msgid "" "How to get started using DB-GPT to interact with your data and " "environment." 
msgstr "开始使用DB-GPT与您的数据环境进行交互。" -#: ../../index.rst:36 87f507e0c27a4a38ba2a5c19e804549f +#: ../../index.rst:36 8685d294df2040d294523c068969a966 msgid "`Quickstart Guid <./getting_started/getting_started.html>`_" msgstr "`使用指南 <./getting_started/getting_started.html>`_" -#: ../../index.rst:38 ab35a5cd96c548ecb0c285fd822f652a +#: ../../index.rst:38 bd33c376f4d54a24957cdbd86ac969f1 msgid "Concepts and terminology" msgstr "相关概念" -#: ../../index.rst:40 3fbd5c96df084ef889442a0b89ad6c05 +#: ../../index.rst:40 2e9ac1f015bf4d37b0b76de94a831f5b msgid "`Concepts and terminology <./getting_started/concepts.html>`_" msgstr "`相关概念 <./getting_started/concepts.html>`_" -#: ../../index.rst:42 6d9a0d727ce14edfbdcf678c6fbba76b +#: ../../index.rst:42 d641ad48ed334db983d8906948b4b430 msgid "Coming soon..." -msgstr "未完待续。。。" +msgstr "" -#: ../../index.rst:44 58cdc41dce264a3e83de565501298010 +#: ../../index.rst:44 7b94cd856f154fbe9580d8595e9afe6a msgid "`Tutorials <.getting_started/tutorials.html>`_" msgstr "`教程 <.getting_started/tutorials.html>`_" -#: ../../index.rst:58 20d67b324c23468e8f2cac6d9100b9f5 +#: ../../index.rst:58 eaf73bfa0d484e79b90d4fa1d82d4cf6 msgid "" "These modules are the core abstractions with which we can interact with " "data and environment smoothly." msgstr "这些模块是我们可以与数据和环境顺利地进行交互的核心组成。" - -#: ../../index.rst:59 45a14052370f4860a72d8e831269d184 +#: ../../index.rst:59 4ccf7a955ed241b4ad7b80bef0a3ad59 msgid "" "It's very important for DB-GPT, DB-GPT also provide standard, extendable " "interfaces." msgstr "DB-GPT还提供了标准的、可扩展的接口。" -#: ../../index.rst:61 7c78c2ddc4104a8b9688472072c3225c +#: ../../index.rst:61 93a217c7147f47e6a3917784313c2eb3 msgid "" "The docs for each module contain quickstart examples, how to guides, " "reference docs, and conceptual guides." msgstr "每个模块的文档都包含快速入门的例子、操作指南、参考文档和相关概念等内容。" -#: ../../index.rst:63 4bcc203282434ca9b77d20c4115a646a +#: ../../index.rst:63 1876f94cc11e44d29b4391011c175fd6 msgid "The modules are as follows" msgstr "组成模块如下:" -#: ../../index.rst:65 c87f13e106b5443a824df5ca85331df4 +#: ../../index.rst:65 c5c0bc53b69448389bf94c5b3b0230a1 msgid "" "`LLMs <./modules/llms.html>`_: Supported multi models management and " "integrations." msgstr "`LLMs <./modules/llms.html>`_:基于FastChat提供大模型的运行环境。支持多模型管理和集成。 " -#: ../../index.rst:67 3447e10b61804b48a786ee12beaaedfd +#: ../../index.rst:67 b382b320fdd746e1940b67c0b6ff3d7d msgid "" "`Prompts <./modules/prompts.html>`_: Prompt management, optimization, and" " serialization for multi database." -msgstr "`Prompt自动生成与优化 <./modules/prompts.html>`_: 自动化生成高质量的Prompt" -" ,并进行优化,提高系统的响应效率" +msgstr "" +"`Prompt自动生成与优化 <./modules/prompts.html>`_: 自动化生成高质量的Prompt " +",并进行优化,提高系统的响应效率" -#: ../../index.rst:69 a3182673127141888fdc13560e7dcfb3 +#: ../../index.rst:69 a2d90f213158420cad5a8d65e8484bbd msgid "`Plugins <./modules/plugins.html>`_: Plugins management, scheduler." msgstr "`Agent与插件: <./modules/plugins.html>`_:提供Agent和插件机制,使得用户可以自定义并增强系统的行为。" -#: ../../index.rst:71 66abfffcb9c0466f9a3988ecfb19fc9e +#: ../../index.rst:71 a39a7a70dda94414b0625a897119795e +#, fuzzy msgid "" -"`Knownledge <./modules/knownledge.html>`_: Knownledge management, " -"embedding, and search." -msgstr "`知识库能力: <./modules/knownledge.html>`_: 支持私域知识库问答能力, " +"`Knowledge <./modules/knowledge.html>`_: Knowledge management, embedding," +" and search." 
+msgstr "`知识库能力: <./modules/knowledge.html>`_: 支持私域知识库问答能力, " -#: ../../index.rst:73 1027a33646614790a4d88f29285ab0fd +#: ../../index.rst:73 8c93459d2deb4085addb33628e5a6fde msgid "" "`Connections <./modules/connections.html>`_: Supported multi databases " "connection. management connections and interact with this." msgstr "`连接模块 <./modules/connections.html>`_: 用于连接不同的模块和数据源,实现数据的流转和交互 " - -#: ../../index.rst:90 53b58e6e531841878fbc8616841d5e9e +#: ../../index.rst:90 707311d87e294078a093c092dd9e61c8 msgid "Best Practices and built-in implementations for common DB-GPT use cases:" msgstr "DB-GPT用例的最佳实践和内置方法:" -#: ../../index.rst:92 a5c664233fe04417ba9bb0415fd686d7 +#: ../../index.rst:92 71c2be99432b413d928e5b18cbf3abd1 msgid "" "`Sql generation and diagnosis " "<./use_cases/sql_generation_and_diagnosis.html>`_: SQL generation and " "diagnosis." -msgstr "`Sql生成和诊断 " -"<./use_cases/sql_generation_and_diagnosis.html>`_: Sql生成和诊断。" +msgstr "`Sql生成和诊断 <./use_cases/sql_generation_and_diagnosis.html>`_: Sql生成和诊断。" - -#: ../../index.rst:94 04c63b56e77b45e5b4e7bd1db45ea10f +#: ../../index.rst:94 4101e1e6f9354c53addca1e3d07b0234 msgid "" "`knownledge Based QA <./use_cases/knownledge_based_qa.html>`_: A " "important scene for user to chat with database documents, codes, bugs and" " schemas." -msgstr "`知识库问答 <./use_cases/knownledge_based_qa.html>`_: " -"用户与数据库文档、代码和bug聊天的重要场景"。 +msgstr "`知识库问答 <./use_cases/knownledge_based_qa.html>`_: 用户与数据库文档、代码和bug聊天的重要场景\"" -#: ../../index.rst:96 415e2b9f640341a084f893781e2b3ec0 +#: ../../index.rst:96 87ae536fb701462991897494606d4b15 msgid "" "`Chatbots <./use_cases/chatbots.html>`_: Language model love to chat, use" " multi models to chat." msgstr "`聊天机器人 <./use_cases/chatbots.html>`_: 使用多模型进行对话" -#: ../../index.rst:98 59a7ec39d2034fb794a9272d55607122 +#: ../../index.rst:98 bf65b6a3036f4984b2641e3c621d764c msgid "" "`Querying Database Data <./use_cases/query_database_data.html>`_: Query " "and Analysis data from databases and give charts." -msgstr "`查询数据库数据 <./use_cases/query_database_data.html>`_:" -"从数据库中查询和分析数据并给出图表。" +msgstr "`查询数据库数据 <./use_cases/query_database_data.html>`_:从数据库中查询和分析数据并给出图表。" -#: ../../index.rst:100 3bd098eda9044bd39e4bba28a82f4195 +#: ../../index.rst:100 c854edf150624005bd5f62f2e3e0789b msgid "" "`Interacting with apis <./use_cases/interacting_with_api.html>`_: " "Interact with apis, such as create a table, deploy a database cluster, " "create a database and so on." -msgstr "`API交互 <./use_cases/interacting_with_api.html>`_: " +msgstr "" +"`API交互 <./use_cases/interacting_with_api.html>`_: " "与API交互,例如创建表、部署数据库集群、创建数据库等。" - -#: ../../index.rst:102 66daab899d7b4e528eda70779ab79676 +#: ../../index.rst:102 55ed3ed4ec7a45458f6686c437d65e41 msgid "" "`Tool use with plugins <./use_cases/tool_use_with_plugin>`_: According to" " Plugin use tools to manage databases autonomoly." -msgstr "`插件工具 <./use_cases/tool_use_with_plugin>`_:" -" 根据插件使用工具自主管理数据库。" +msgstr "`插件工具 <./use_cases/tool_use_with_plugin>`_: 根据插件使用工具自主管理数据库。" -#: ../../index.rst:119 e5a84e2dc87d4a06aa77ef4d77fb7bcb +#: ../../index.rst:119 e3b12d97d32c4c198a85e052b2a365c7 msgid "" "Full documentation on all methods, classes, installation methods, and " "integration setups for DB-GPT." 
msgstr "关于DB-GPT的所有方法、类、安装方法和集成设置的完整文档。" -#: ../../index.rst:130 7c51e39ad3824c5f8575390adbcba738 +#: ../../index.rst:130 1b82c35990b84b6bba05fd565ba6c32f msgid "Ecosystem" msgstr "环境系统" -#: ../../index.rst:132 b59e9ddba86945c1bebe395b2863174c +#: ../../index.rst:132 ef97b07c83e7469bba5a8bc36e1ed83f msgid "Guides for how other companies/products can be used with DB-GPT" msgstr "其他公司/产品如何与DB-GPT一起使用的方法指南" -#: ../../index.rst:147 992bf68cc48a425696c02429d39f86e3 +#: ../../index.rst:147 e597de3a7df7460d985c1a30589ce3bb msgid "" "Additional resources we think may be useful as you develop your " "application!" msgstr "“我们认为在您开发应用程序时可能有用的其他资源!”" -#: ../../index.rst:149 d99277006b05438c8d2e8088242f239c +#: ../../index.rst:149 1438b93c57e6459799c5656893c15aa7 msgid "" "`Discord `_: if your have some " "problem or ideas, you can talk from discord." -msgstr "`Discord `_:" -"如果您有任何问题,可以到discord中进行交流。" +msgstr "`Discord `_:如果您有任何问题,可以到discord中进行交流。" diff --git a/docs/locales/zh_CN/LC_MESSAGES/modules/connections.po b/docs/locales/zh_CN/LC_MESSAGES/modules/connections.po index 19b52157d..b534e5ba3 100644 --- a/docs/locales/zh_CN/LC_MESSAGES/modules/connections.po +++ b/docs/locales/zh_CN/LC_MESSAGES/modules/connections.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: DB-GPT 0.1.0\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2023-06-11 14:10+0800\n" +"POT-Creation-Date: 2023-06-14 14:51+0800\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language: zh_CN\n" @@ -17,18 +17,33 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.11.0\n" +"Generated-By: Babel 2.12.1\n" -#: ../../modules/connections.md:1 21de23e95a6c4405a242fb9a0f4e5f2b +#: ../../modules/connections.rst:2 ../../modules/connections.rst:10 +#: c9783fd80b0b476abfbf25d4aa23b6ae msgid "Connections" msgstr "连接模块" -#: ../../modules/connections.md:3 0f09b3be20cd409f92c2ba819dbf45eb +#: ../../modules/connections.rst:3 95fecfbcae0e4170affd6a5e41a4fe56 +#, fuzzy msgid "" -"In order to interact more conveniently with users' private environments, " -"the project has designed a connection module, which can support " -"connection to databases, Excel, knowledge bases, and other environments " -"to achieve information and data exchange." -msgstr "为了更方便地与用户的私有环境进行交互,项目设计了一个连接模块,可以支持" -"与数据库、Excel、知识库等环境的连接,实现信息和数据的交换。" +"**In order to interact more conveniently with users' private " +"environments, the project has designed a connection module, which can " +"support connection to databases, Excel, knowledge bases, and other " +"environments to achieve information and data exchange.**" +msgstr "为了更方便地与用户的私有环境进行交互,项目设计了一个连接模块,可以支持与数据库、Excel、知识库等环境的连接,实现信息和数据的交换。" + +#: ../../modules/connections.rst:5 3a5a6124903c4d1fba8a8eb8426616b7 +msgid "" +"DB-GPT provides base class BaseConnect, you can inheriting and implement " +"get_session(), get_table_names(), get_index_info(), get_database_list() " +"and run()." +msgstr "DB-GPT提供了基础连接模块,你可以继承这个类,然后实现get_session(), get_table_names(), get_index_info(), get_database_list() " +"and run()这些方法即可,如果你的数据库是关系型数据库,可以直接继承RDBMSDatabase即可" + +#: ../../modules/connections.rst:7 78ef1226d576458fbd1a9f1043936fc8 +msgid "" +"`mysql_connection <./connections/mysql_connection.html>`_: supported " +"mysql_connection." 
+msgstr "mysql connection使用方法" diff --git a/docs/locales/zh_CN/LC_MESSAGES/modules/connections/mysql/mysql_connection.po b/docs/locales/zh_CN/LC_MESSAGES/modules/connections/mysql/mysql_connection.po new file mode 100644 index 000000000..ba26d7da5 --- /dev/null +++ b/docs/locales/zh_CN/LC_MESSAGES/modules/connections/mysql/mysql_connection.po @@ -0,0 +1,36 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) 2023, csunny +# This file is distributed under the same license as the DB-GPT package. +# FIRST AUTHOR , 2023. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: DB-GPT 0.1.0\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2023-06-14 14:51+0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language: zh_CN\n" +"Language-Team: zh_CN \n" +"Plural-Forms: nplurals=1; plural=0;\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.12.1\n" + +#: ../../modules/connections/mysql/mysql_connection.md:1 +#: 69e13b9e53ff466c9ec85715741dd938 +msgid "MYSQL Connection" +msgstr "MYSQL连接" + +#: ../../modules/connections/mysql/mysql_connection.md:3 +#: 4b8446fe31204688a87a5cff1180bec1 +msgid "MYSQL can connect mysql server." +msgstr "MYSQL可以连接mysql server" + +#: ../../modules/connections/mysql/mysql_connection.md:5 +#: 5a2141fba4bd41269d4ad73885067e0d +msgid "inheriting the RDBMSDatabase" +msgstr "继承RDBMSDatabase" + diff --git a/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge.po b/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge.po new file mode 100644 index 000000000..c9ee6467b --- /dev/null +++ b/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge.po @@ -0,0 +1,98 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) 2023, csunny +# This file is distributed under the same license as the DB-GPT package. +# FIRST AUTHOR , 2023. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: DB-GPT 0.1.0\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2023-06-14 15:12+0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language: zh_CN\n" +"Language-Team: zh_CN \n" +"Plural-Forms: nplurals=1; plural=0;\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.12.1\n" + +#: ../../modules/knowledge.rst:2 ../../modules/knowledge.rst:30 +#: e98ef6095fc54f8f8dc045cfa1733dc2 +msgid "Knowledge" +msgstr "知识" + +#: ../../modules/knowledge.rst:4 51340dd2758e42ee8e96c3935de53438 +#, fuzzy +msgid "" +"As the knowledge base is currently the most significant user demand " +"scenario, we natively support the construction and processing of " +"knowledge bases. At the same time, we also provide multiple knowledge " +"base management strategies in this project, such as pdf knowledge,md " +"knowledge, txt knowledge, word knowledge, ppt knowledge:" +msgstr "由于知识库是当前用户需求最显著的场景,我们原生支持知识库的构建和处理。同时,我们还在本项目中提供了多种知识库管理策略,如:pdf,md " +", txt, word, ppt" + +#: ../../modules/knowledge.rst:7 25eeb187843a4d9baa4d0c0a404eec65 +msgid "**Create your own knowledge repository**" +msgstr "创建你自己的知识库" + +#: ../../modules/knowledge.rst:9 bed8a8f08c194ff59a31dc53f67561c1 +msgid "" +"1.Place personal knowledge files or folders in the pilot/datasets " +"directory." +msgstr "1.将个人知识文件或文件夹放在pilot/datasets目录中。" + +#: ../../modules/knowledge.rst:11 6e03e1a2799a432f8319c3aaf33e2867 +msgid "" +"We currently support many document formats: txt, pdf, md, html, doc, ppt," +" and url." 
+msgstr "当前支持txt, pdf, md, html, doc, ppt, url文档格式" + +#: ../../modules/knowledge.rst:13 883ebf16fe7f4e1fbc73ef7430104e79 +msgid "before execution: python -m spacy download zh_core_web_sm" +msgstr "在执行之前请先执行python -m spacy download zh_core_web_sm" + +#: ../../modules/knowledge.rst:15 59f4bfa8c1064391919ce2af69f2d4c9 +msgid "" +"2.Update your .env, set your vector store type, VECTOR_STORE_TYPE=Chroma " +"(now only support Chroma and Milvus, if you set Milvus, please set " +"MILVUS_URL and MILVUS_PORT)" +msgstr "2.更新你的.env,设置你的向量存储类型,VECTOR_STORE_TYPE=Chroma(现在只支持Chroma和Milvus,如果你设置了Milvus,请设置MILVUS_URL和MILVUS_PORT)" + +#: ../../modules/knowledge.rst:18 be600a4d93094045b78a43307dfc8f5f +#, fuzzy +msgid "2.Run the knowledge repository script in the tools directory." +msgstr "3.在tools目录执行知识入库脚本" + +#: ../../modules/knowledge.rst:20 b27eddbbf6c74993a6653575f57fff18 +msgid "" +"python tools/knowledge_init.py note : --vector_name : your vector store " +"name default_value:default" +msgstr "" + +#: ../../modules/knowledge.rst:23 f32dc12aedc94ffc8fee77a4b6e0ec88 +msgid "" +"3.Add the knowledge repository in the interface by entering the name of " +"your knowledge repository (if not specified, enter \"default\") so you " +"can use it for Q&A based on your knowledge base." +msgstr "如果选择新增知识库,在界面上新增知识库输入你的知识库名" + +#: ../../modules/knowledge.rst:25 5b1412c8beb24784bd2a93fe5c487b7b +msgid "" +"Note that the default vector model used is text2vec-large-chinese (which " +"is a large model, so if your personal computer configuration is not " +"enough, it is recommended to use text2vec-base-chinese). Therefore, " +"ensure that you download the model and place it in the models directory." +msgstr "" +"注意,这里默认向量模型是text2vec-large-chinese(模型比较大,如果个人电脑配置不够建议采用text2vec-base-" +"chinese),因此确保需要将模型download下来放到models目录中。" + +#: ../../modules/knowledge.rst:27 67773e32b01c48628c80b6fab8c90146 +msgid "" +"`pdf_embedding <./knowledge/pdf_embedding.html>`_: supported pdf " +"embedding." +msgstr "pdf_embedding <./knowledge/pdf_embedding.html>`_: supported pdf embedding." + diff --git a/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/markdown/markdown_embedding.po b/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/markdown/markdown_embedding.po new file mode 100644 index 000000000..7f14a7c1d --- /dev/null +++ b/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/markdown/markdown_embedding.po @@ -0,0 +1,55 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) 2023, csunny +# This file is distributed under the same license as the DB-GPT package. +# FIRST AUTHOR , 2023. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: DB-GPT 0.1.0\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2023-06-14 14:51+0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language: zh_CN\n" +"Language-Team: zh_CN \n" +"Plural-Forms: nplurals=1; plural=0;\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.12.1\n" + +#: ../../modules/knowledge/markdown/markdown_embedding.md:1 +#: b5fd3aea05a64590955b958b753bf22a +msgid "MarkdownEmbedding" +msgstr "MarkdownEmbedding" + +#: ../../modules/knowledge/markdown/markdown_embedding.md:3 +#: 0f98ce5b34d44c6f9c828e4b497984de +msgid "" +"markdown embedding can import md text into a vector knowledge base. The " +"entire embedding process includes the read (loading data), data_process " +"(data processing), and index_to_store (embedding to the vector database) " +"methods." 
+msgstr "" +"markdown embedding 可以导入md格式的文档到向量数据库, 整个导入过程分为数据读取read(), " +"数据预处理data_process()和数据进向量数据库index_to_store()" + +#: ../../modules/knowledge/markdown/markdown_embedding.md:5 +#: 7f5ebfa8c7c146d7a340baca85634e16 +msgid "inheriting the SourceEmbedding" +msgstr "继承SourceEmbedding" + +#: ../../modules/knowledge/markdown/markdown_embedding.md:17 +#: 732e946bc9d149a5af802b239304b943 +#, fuzzy +msgid "" +"implement read() and data_process() read() method allows you to read data" +" and split data into chunk" +msgstr "实现read方法可以加载数据" + +#: ../../modules/knowledge/markdown/markdown_embedding.md:33 +#: f7e53658aee7403688b333b24ff08ce2 +msgid "data_process() method allows you to pre processing your ways" +msgstr "实现data_process方法可以进行数据预处理" + diff --git a/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/pdf/pdf_embedding.po b/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/pdf/pdf_embedding.po new file mode 100644 index 000000000..df41482f3 --- /dev/null +++ b/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/pdf/pdf_embedding.po @@ -0,0 +1,56 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) 2023, csunny +# This file is distributed under the same license as the DB-GPT package. +# FIRST AUTHOR , 2023. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: DB-GPT 0.1.0\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2023-06-14 14:51+0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language: zh_CN\n" +"Language-Team: zh_CN \n" +"Plural-Forms: nplurals=1; plural=0;\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.12.1\n" + +#: ../../modules/knowledge/pdf/pdf_embedding.md:1 +#: fe600a1f3f9f492da81652ebd3d6d52d +msgid "PDFEmbedding" +msgstr "" + +#: ../../modules/knowledge/pdf/pdf_embedding.md:3 +#: a26a7d6ff041476b975bab5c0bf9f506 +#, fuzzy +msgid "" +"pdfembedding can import PDF text into a vector knowledge base. The entire" +" embedding process includes the read (loading data), data_process (data " +"processing), and index_to_store (embedding to the vector database) " +"methods." +msgstr "" +"pdf embedding 可以导入pdf格式的文档到向量数据库, 整个导入过程分为数据读取read(), " +"数据预处理data_process()和数据进向量数据库index_to_store()" + +#: ../../modules/knowledge/pdf/pdf_embedding.md:5 +#: 1895f2a6272c43f0b328caba092102a9 +msgid "inheriting the SourceEmbedding" +msgstr "继承SourceEmbedding" + +#: ../../modules/knowledge/pdf/pdf_embedding.md:17 +#: 2a4a349398354f9cb3e8d9630a4b8696 +#, fuzzy +msgid "" +"implement read() and data_process() read() method allows you to read data" +" and split data into chunk" +msgstr "实现read方法可以加载数据" + +#: ../../modules/knowledge/pdf/pdf_embedding.md:34 +#: 9b5c6d3e9e96443a908a09a8a762ea7a +msgid "data_process() method allows you to pre processing your ways" +msgstr "实现data_process方法可以进行数据预处理" + diff --git a/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/ppt/ppt_embedding.po b/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/ppt/ppt_embedding.po new file mode 100644 index 000000000..880499e84 --- /dev/null +++ b/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/ppt/ppt_embedding.po @@ -0,0 +1,55 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) 2023, csunny +# This file is distributed under the same license as the DB-GPT package. +# FIRST AUTHOR , 2023. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: DB-GPT 0.1.0\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2023-06-14 14:51+0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language: zh_CN\n" +"Language-Team: zh_CN \n" +"Plural-Forms: nplurals=1; plural=0;\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.12.1\n" + +#: ../../modules/knowledge/ppt/ppt_embedding.md:1 +#: 2cdb249b2b284064a0c9117d051e35d4 +msgid "PPTEmbedding" +msgstr "" + +#: ../../modules/knowledge/ppt/ppt_embedding.md:3 +#: 71676e9b35434a849a206788da8f1394 +msgid "" +"ppt embedding can import ppt text into a vector knowledge base. The " +"entire embedding process includes the read (loading data), data_process " +"(data processing), and index_to_store (embedding to the vector database) " +"methods." +msgstr "" +"ppt embedding 可以导入ppt格式的文档到向量数据库, 整个导入过程分为数据读取read(), " +"数据预处理data_process()和数据进向量数据库index_to_store()" + +#: ../../modules/knowledge/ppt/ppt_embedding.md:5 +#: 016aeae4786e4d5bad815670bd109481 +msgid "inheriting the SourceEmbedding" +msgstr "继承SourceEmbedding" + +#: ../../modules/knowledge/ppt/ppt_embedding.md:17 +#: 2fb5b9dc912342df8c275cfd0e993fe0 +#, fuzzy +msgid "" +"implement read() and data_process() read() method allows you to read data" +" and split data into chunk" +msgstr "实现read方法可以加载数据" + +#: ../../modules/knowledge/ppt/ppt_embedding.md:31 +#: 9a00f72c7ec84bde9971579c720d2628 +msgid "data_process() method allows you to pre processing your ways" +msgstr "实现data_process方法可以进行数据预处理" + diff --git a/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/url/url_embedding.po b/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/url/url_embedding.po new file mode 100644 index 000000000..d7931d3f2 --- /dev/null +++ b/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/url/url_embedding.po @@ -0,0 +1,55 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) 2023, csunny +# This file is distributed under the same license as the DB-GPT package. +# FIRST AUTHOR , 2023. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: DB-GPT 0.1.0\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2023-06-14 14:51+0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language: zh_CN\n" +"Language-Team: zh_CN \n" +"Plural-Forms: nplurals=1; plural=0;\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.12.1\n" + +#: ../../modules/knowledge/url/url_embedding.md:1 +#: e6d335e613ec4c3a80b89de67ba93098 +msgid "URL Embedding" +msgstr "" + +#: ../../modules/knowledge/url/url_embedding.md:3 +#: 25e7643335264bdaaa9386ded243d51d +msgid "" +"url embedding can import PDF text into a vector knowledge base. The " +"entire embedding process includes the read (loading data), data_process " +"(data processing), and index_to_store (embedding to the vector database) " +"methods." 
+msgstr "" +"url embedding 可以导入url格式的文档到向量数据库, 整个导入过程分为数据读取read(), " +"数据预处理data_process()和数据进向量数据库index_to_store()" + +#: ../../modules/knowledge/url/url_embedding.md:5 +#: 4b8ca6d93ed0412ab1e640bd42b400ac +msgid "inheriting the SourceEmbedding" +msgstr "继承SourceEmbedding" + +#: ../../modules/knowledge/url/url_embedding.md:17 +#: 5d69d27adc70406db97c398a339f6453 +#, fuzzy +msgid "" +"implement read() and data_process() read() method allows you to read data" +" and split data into chunk" +msgstr "实现read方法可以加载数据" + +#: ../../modules/knowledge/url/url_embedding.md:34 +#: 7d055e181d9b4d47965ab249b18bd704 +msgid "data_process() method allows you to pre processing your ways" +msgstr "实现data_process方法可以进行数据预处理" + diff --git a/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/word/word_embedding.po b/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/word/word_embedding.po new file mode 100644 index 000000000..72ea187d4 --- /dev/null +++ b/docs/locales/zh_CN/LC_MESSAGES/modules/knowledge/word/word_embedding.po @@ -0,0 +1,55 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) 2023, csunny +# This file is distributed under the same license as the DB-GPT package. +# FIRST AUTHOR , 2023. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: DB-GPT 0.1.0\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2023-06-14 14:51+0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language: zh_CN\n" +"Language-Team: zh_CN \n" +"Plural-Forms: nplurals=1; plural=0;\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.12.1\n" + +#: ../../modules/knowledge/word/word_embedding.md:1 +#: 1b3272def692480bb101060a33d076c6 +msgid "WordEmbedding" +msgstr "" + +#: ../../modules/knowledge/word/word_embedding.md:3 +#: a7ea0e94e5c74dab9aa7fb80ed42ed39 +msgid "" +"word embedding can import word doc/docx text into a vector knowledge " +"base. The entire embedding process includes the read (loading data), " +"data_process (data processing), and index_to_store (embedding to the " +"vector database) methods." 
+msgstr "" +"word embedding 可以导入word格式的doc文档到向量数据库, 整个导入过程分为数据读取read(), " +"数据预处理data_process()和数据进向量数据库index_to_store()" + +#: ../../modules/knowledge/word/word_embedding.md:5 +#: 12ba9527ef0745538dffb6b1dcf96933 +msgid "inheriting the SourceEmbedding" +msgstr "继承SourceEmbedding" + +#: ../../modules/knowledge/word/word_embedding.md:17 +#: a4e5e7553f4a43b0b79ba0de83268ef0 +#, fuzzy +msgid "" +"implement read() and data_process() read() method allows you to read data" +" and split data into chunk" +msgstr "实现read方法可以加载数据" + +#: ../../modules/knowledge/word/word_embedding.md:29 +#: 188a434dee7543f89cf5f1584f29ca62 +msgid "data_process() method allows you to pre processing your ways" +msgstr "实现data_process方法可以进行数据预处理" + diff --git a/docs/locales/zh_CN/LC_MESSAGES/modules/llms.po b/docs/locales/zh_CN/LC_MESSAGES/modules/llms.po index bbb05b046..22138dec3 100644 --- a/docs/locales/zh_CN/LC_MESSAGES/modules/llms.po +++ b/docs/locales/zh_CN/LC_MESSAGES/modules/llms.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: DB-GPT 0.1.0\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2023-06-13 11:38+0800\n" +"POT-Creation-Date: 2023-06-14 17:26+0800\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language: zh_CN\n" @@ -19,11 +19,11 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.12.1\n" -#: ../../modules/llms.md:1 34386f3fecba48fbbd86718283ba593c +#: ../../modules/llms.md:1 8eef439964b5442d91ad04ff72b3b45b msgid "LLMs" msgstr "大语言模型" -#: ../../modules/llms.md:3 241b39ad980f4cfd90a7f0fdae05a1d2 +#: ../../modules/llms.md:3 a29256f1f39b4bcda29a6811ad1b10f6 #, python-format msgid "" "In the underlying large model integration, we have designed an open " @@ -36,62 +36,95 @@ msgid "" "of use." msgstr "在底层大模型接入中,我们设计了开放的接口,支持对接多种大模型。同时对于接入模型的效果,我们有非常严格的把控与评审机制。对大模型能力上与ChatGPT对比,在准确率上需要满足85%以上的能力对齐。我们用更高的标准筛选模型,是期望在用户使用过程中,可以省去前面繁琐的测试评估环节。" -#: ../../modules/llms.md:5 25175e87a62e41bca86798eb783cefd6 +#: ../../modules/llms.md:5 e899f11399bb45d9990c73a273ed7697 msgid "Multi LLMs Usage" msgstr "多模型使用" -#: ../../modules/llms.md:6 8c35341e9ca94202ba779567813f9973 +#: ../../modules/llms.md:6 a21d6a875b3949b9be512f8ea396f6b3 msgid "" "To use multiple models, modify the LLM_MODEL parameter in the .env " "configuration file to switch between the models." msgstr "如果要使用不同的模型,请修改.env配置文件中的LLM MODEL参数以在模型之间切换。" -#: ../../modules/llms.md:8 2edf3309a6554f39ad74e19faff09cee +#: ../../modules/llms.md:8 dd061d45bb4044dcbc7e5d4b0014ded8 msgid "" "Notice: you can create .env file from .env.template, just use command " "like this:" msgstr "注意:你可以从 .env.template 创建 .env 文件。只需使用如下命令:" -#: ../../modules/llms.md:14 5fa7639ef294425e89e13b7c6617fb4b +#: ../../modules/llms.md:14 c1a789e2de2c4958987370521d46c7cc msgid "" "now we support models vicuna-13b, vicuna-7b, chatglm-6b, flan-t5-base, " "guanaco-33b-merged, falcon-40b, gorilla-7b." -msgstr "现在我们支持的模型有vicuna-13b, vicuna-7b, chatglm-6b, flan-t5-base, " -"guanaco-33b-merged, falcon-40b, gorilla-7b." +msgstr "" +"现在我们支持的模型有vicuna-13b, vicuna-7b, chatglm-6b, flan-t5-base, guanaco-33b-" +"merged, falcon-40b, gorilla-7b." -#: ../../modules/llms.md:16 96c9a5ad00264bd2a07bdbdec87e471e +#: ../../modules/llms.md:16 ddb6f2f638e642a595365a91ffdba8f9 msgid "" "DB-GPT provides a model load adapter and chat adapter. load adapter which" " allows you to easily adapt load different LLM models by inheriting the " "BaseLLMAdapter. You just implement match() and loader() method." 
-msgstr "DB-GPT提供了多模型适配器load adapter和chat adapter.load adapter通过继承BaseLLMAdapter类, 实现match和loader方法允许你适配不同的LLM." +msgstr "" +"DB-GPT提供了多模型适配器load adapter和chat adapter.load adapter通过继承BaseLLMAdapter类," +" 实现match和loader方法允许你适配不同的LLM." -#: ../../modules/llms.md:18 1033714691464f50900c04c9e1bb5643 +#: ../../modules/llms.md:18 5e32be54895243caa6d44d0b3421e4a0 msgid "vicuna llm load adapter" msgstr "vicuna llm load adapter" -#: ../../modules/llms.md:35 faa6432575be45bcae5deb1cc7fee3fb +#: ../../modules/llms.md:35 0b1f2e7c65164c9584e0c544394e7d57 msgid "chatglm load adapter" msgstr "chatglm load adapter" -#: ../../modules/llms.md:62 61c4189cabf04e628132c2bf5f02bb50 +#: ../../modules/llms.md:62 885b35375c764e29a983b54514e378d2 msgid "" "chat adapter which allows you to easily adapt chat different LLM models " "by inheriting the BaseChatAdpter.you just implement match() and " "get_generate_stream_func() method" -msgstr "chat adapter通过继承BaseChatAdpter允许你通过实现match和get_generate_stream_func方法允许你适配不同的LLM." +msgstr "" +"chat " +"adapter通过继承BaseChatAdpter允许你通过实现match和get_generate_stream_func方法允许你适配不同的LLM." -#: ../../modules/llms.md:64 407a67e4e2c6414b9cde346961d850c0 +#: ../../modules/llms.md:64 e0538e7e0526440085b32add07f5ec7f msgid "vicuna llm chat adapter" msgstr "vicuna llm chat adapter" -#: ../../modules/llms.md:76 53a55238cd90406db58c50dc64465195 +#: ../../modules/llms.md:76 2c97712441874e0d8deedc1d9a1ce5ed msgid "chatglm llm chat adapter" msgstr "chatglm llm chat adapter" -#: ../../modules/llms.md:89 b0c5ff72c05e40b3b301d6b81205fe63 +#: ../../modules/llms.md:89 485e5aa261714146a03a30dbcd612653 msgid "" "if you want to integrate your own model, just need to inheriting " "BaseLLMAdaper and BaseChatAdpter and implement the methods" msgstr "如果你想集成自己的模型,只需要继承BaseLLMAdaper和BaseChatAdpter类,然后实现里面的方法即可" +#: ../../modules/llms.md:92 a63b63022db74d76b743044be178e227 +#, fuzzy +msgid "Multi Proxy LLMs" +msgstr "多模型使用" + +#: ../../modules/llms.md:93 dab3041e90384049872a7f77933b1a1f +msgid "1. Openai proxy" +msgstr "Openai代理" + +#: ../../modules/llms.md:94 e50eae200bf04e4788bbc394e0b3d6b9 +msgid "" +"If you haven't deployed a private infrastructure for a large model, or if" +" you want to use DB-GPT in a low-cost and high-efficiency way, you can " +"also use OpenAI's large model as your underlying model." +msgstr "如果你没有部署私有大模型的资源,或者你想使用低成本启动DB-GPT,你可以使用openai的大模型作为你的底层模型" + +#: ../../modules/llms.md:96 53dd581608d74355ba0ce486a01ef261 +msgid "" +"If your environment deploying DB-GPT has access to OpenAI, then modify " +"the .env configuration file as below will work." +msgstr "如果你的环境能够访问openai,你只需要参考如下修改.env配置文件即可" + +#: ../../modules/llms.md:104 8df2d75af41b4953a73b6b7eae9f0373 +msgid "" +"If you can't access OpenAI locally but have an OpenAI proxy service, you " +"can configure as follows." 
+msgstr "如果你本地无法访问openai,但是你有一个openai的代理服务,你可以参考如下配置" + diff --git a/docs/locales/zh_CN/LC_MESSAGES/modules/plugins.po b/docs/locales/zh_CN/LC_MESSAGES/modules/plugins.po index 483761bac..dce190384 100644 --- a/docs/locales/zh_CN/LC_MESSAGES/modules/plugins.po +++ b/docs/locales/zh_CN/LC_MESSAGES/modules/plugins.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: DB-GPT 0.1.0\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2023-06-11 14:10+0800\n" +"POT-Creation-Date: 2023-06-14 17:26+0800\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language: zh_CN\n" @@ -17,13 +17,13 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.11.0\n" +"Generated-By: Babel 2.12.1\n" -#: ../../modules/plugins.md:1 48f1b7ff4099485ba3853c373e64273f +#: ../../modules/plugins.md:1 be587eb6ad384844b83ac740a2f3309e msgid "Plugins" msgstr "插件" -#: ../../modules/plugins.md:3 3d94b3250511468d80aa29359f01128d +#: ../../modules/plugins.md:3 291a6adc34684b28867b7d1adcceadbb msgid "" "The ability of Agent and Plugin is the core of whether large models can " "be automated. In this project, we natively support the plugin mode, and " @@ -31,7 +31,73 @@ msgid "" "order to give full play to the advantages of the community, the plugins " "used in this project natively support the Auto-GPT plugin ecology, that " "is, Auto-GPT plugins can directly run in our project." -msgstr "Agent与插件能力是大模型能否自动化的核心,在本的项目中,原生支持插件模式," -"大模型可以自动化完成目标。 同时为了充分发挥社区的优势,本项目中所用的插件原生支持" -"Auto-GPT插件生态,即Auto-GPT的插件可以直接在我们的项目中运行。" +msgstr "" +"Agent与插件能力是大模型能否自动化的核心,在本的项目中,原生支持插件模式,大模型可以自动化完成目标。 同时为了充分发挥社区的优势" +",本项目中所用的插件原生支持Auto-GPT插件生态,即Auto-GPT的插件可以直接在我们的项目中运行。" + +#: ../../modules/plugins.md:5 72010cbac395488bb72c5ffa79806f61 +#, fuzzy +msgid "Local Plugins" +msgstr "插件" + +#: ../../modules/plugins.md:7 842e405d6748425dab6684c3377e580a +msgid "1.1 How to write local plugins." +msgstr "如何编写一个本地插件" + +#: ../../modules/plugins.md:9 23fa64fa76954000a3f33f5b3205975e +msgid "" +"Local plugins use the Auto-GPT plugin template. A simple example is as " +"follows: first write a plugin file called \"sql_executor.py\"." +msgstr "本地插件使用Auto-GPT插件模板,一个简单的示例如下:首先编写一个插件文件`sql_executor.py`" + +#: ../../modules/plugins.md:39 5044401271fb4cfba53299875886b5b9 +msgid "" +"Then set the \"can_handle_post_prompt\" method of the plugin template to " +"True. In the \"post_prompt\" method, write the prompt information and the" +" mapped plugin function." +msgstr "然后设置can_handle_post_prompt函数为True, 在post_prompt函数中编写prompt信息和插件映射函数" + +#: ../../modules/plugins.md:81 8f2001c2934d4655a1993a98d5e7dd63 +msgid "1.2 How to use local plugins" +msgstr "1.2 如何使用本地插件" + +#: ../../modules/plugins.md:83 aa6234c1531a470db97e076408c70ebc +msgid "" +"Pack your plugin project into `your-plugin.zip` and place it in the " +"`/plugins/` directory of the DB-GPT project. After starting the " +"webserver, you can select and use it in the `Plugin Model` section." 
+msgstr "将您的插件项目打包成your-plugin.zip,并将其放置在DB-GPT项目的/plugins/目录中。启动Web服务器后,您可以在插件模型部分中选择并使用它。" + +#: ../../modules/plugins.md:86 6d1402451cb44ab2bb9ded2a303d8dd0 +#, fuzzy +msgid "Public Plugins" +msgstr "插件" + +#: ../../modules/plugins.md:88 2bb33cadc7604f529c762939a6225f17 +msgid "1.1 How to use public plugins" +msgstr "1.1 如何编写公共插件" + +#: ../../modules/plugins.md:90 d5f458dcba2e435987ffbc62a1d7a989 +msgid "" +"By default, after launching the webserver, plugins from the public plugin" +" library `DB-GPT-Plugins` will be automatically loaded. For more details," +" please refer to [DB-GPT-Plugins](https://github.com/csunny/DB-GPT-" +"Plugins)" +msgstr "默认情况下,在启动Web服务器后,将自动加载来自公共插件库DB-GPT-Plugins的插件。要了解更多详情,请参阅[DB-GPT-Plugins](https://github.com/csunny/DB-GPT-Plugins)" + +#: ../../modules/plugins.md:92 28fa983769ae44a0b790d977c60ce982 +msgid "1.2 Contribute to the DB-GPT-Plugins repository" +msgstr "1.2 贡献到DB-GPT-Plugins仓库" + +#: ../../modules/plugins.md:94 68573e7cc17f479fa676d0631e011baf +msgid "" +"Please refer to the plugin development process in the public plugin " +"library, and put the configuration parameters in `.plugin_env`" +msgstr "请参考公共插件库开发过程,将插件配置参数写入.plugin_env文件" + +#: ../../modules/plugins.md:96 8d6860026b824b46b2899cbf3dc3b4a0 +msgid "" +"We warmly welcome everyone to contribute plugins to the public plugin " +"library!" +msgstr "非常欢迎大家向我们公共插件库贡献插件!" diff --git a/docs/locales/zh_CN/LC_MESSAGES/use_cases/sql_generation_and_diagnosis.po b/docs/locales/zh_CN/LC_MESSAGES/use_cases/sql_generation_and_diagnosis.po index b7ab23dfc..fd8918782 100644 --- a/docs/locales/zh_CN/LC_MESSAGES/use_cases/sql_generation_and_diagnosis.po +++ b/docs/locales/zh_CN/LC_MESSAGES/use_cases/sql_generation_and_diagnosis.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: DB-GPT 0.1.0\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2023-06-11 14:10+0800\n" +"POT-Creation-Date: 2023-06-14 14:51+0800\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language: zh_CN\n" @@ -17,10 +17,26 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.11.0\n" +"Generated-By: Babel 2.12.1\n" #: ../../use_cases/sql_generation_and_diagnosis.md:1 -#: 8900f8d9f3034b20a96df1d5c611eaa1 +#: f19384aa3b1d4fc2bed3aa94abe80a3c msgid "SQL generation and diagnosis" msgstr "SQL生成和诊断" +#: ../../use_cases/sql_generation_and_diagnosis.md:3 +#: 0aa11c79b20544b9a1376d184f58a516 +msgid "" +"DB-GPT provides SQL generation and diagnostic capabilities. With advanced" +" natural language processing algorithms and a deep understanding of SQL " +"syntax and semantics, the model can generate complex SQL queries from " +"natural language input, as well as diagnose and fix issues with existing " +"queries. This allows users to save time and streamline their workflow, " +"while also improving the accuracy and efficiency of their SQL operations." 
+msgstr "" + +#: ../../use_cases/sql_generation_and_diagnosis.md:5 +#: ffcbe5278cdc41a7b656f03bf82a8b26 +msgid "[SQL Generation](../../assets/demo_en.gif)" +msgstr "" + diff --git a/docs/locales/zh_CN/LC_MESSAGES/use_cases/tool_use_with_plugin.po b/docs/locales/zh_CN/LC_MESSAGES/use_cases/tool_use_with_plugin.po index 73293771d..383b62b85 100644 --- a/docs/locales/zh_CN/LC_MESSAGES/use_cases/tool_use_with_plugin.po +++ b/docs/locales/zh_CN/LC_MESSAGES/use_cases/tool_use_with_plugin.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: DB-GPT 0.1.0\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2023-06-11 14:10+0800\n" +"POT-Creation-Date: 2023-06-14 17:26+0800\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language: zh_CN\n" @@ -17,9 +17,88 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.11.0\n" +"Generated-By: Babel 2.12.1\n" -#: ../../use_cases/tool_use_with_plugin.md:1 2bd7d79a16a548c4a3872a12c436aa4f +#: ../../use_cases/tool_use_with_plugin.md:1 defad8b142a2408e89b782803149be50 msgid "Tool use with plugin" msgstr "插件工具" +#: ../../use_cases/tool_use_with_plugin.md:3 30ca5f2ce55a4369b48dd8a8b08c3273 +msgid "" +"DB-GPT supports a variety of plug-ins, such as MySQL, MongoDB, ClickHouse" +" and other database tool plug-ins. In addition, some database management " +"platforms can also package their interfaces and package them into plug-" +"ins, and use the model to realize the ability of \"single-sentence " +"requirements\"" +msgstr "DB-GPT支持各种插件,例如MySQL、MongoDB、ClickHouse等数据库工具插件。此外,一些数据库管理平台也可以将它们的接口打包成插件,使用该模型实现"一句话需求"的能力。" + +#: ../../use_cases/tool_use_with_plugin.md:6 96c20363d25842cbbab21819c2567a52 +msgid "DB-GPT-DASHBOARD-PLUGIN" +msgstr "DB-GPT-DASHBOARD-PLUGIN" + +#: ../../use_cases/tool_use_with_plugin.md:8 1a78f9e15c6949019e213f3f88388e9f +msgid "" +"[Db-GPT Chart Plugin](https://github.com/csunny/DB-GPT-" +"Plugins/blob/main/src/dbgpt_plugins/Readme.md)" +msgstr "" +"[Db-GPT Chart Plugin](https://github.com/csunny/DB-GPT-" +"Plugins/blob/main/src/dbgpt_plugins/Readme.md)" + +#: ../../use_cases/tool_use_with_plugin.md:10 5ac6d662797e4694926cb0b44e58ff40 +msgid "" +"This is a DB-GPT plugin to generate data analysis charts, if you want to " +"use the test sample data, please first pull the code of [DB-GPT-" +"Plugins](https://github.com/csunny/DB-GPT-Plugins), run the command to " +"generate test DuckDB data, and then copy the generated data file to the " +"`/pilot/mock_datas` directory of the DB-GPT project." +msgstr "这是一个DB-GPT插件,用于生成数据分析图表。如果您想使用测试样本数据,请先拉取 DB-GPT-Plugins 的代码,运行命令以生成测试 DuckDB 数据,然后将生成的数据文件复制到 DB-GPT 项目的 /pilot/mock_datas 目录中。" + +#: ../../use_cases/tool_use_with_plugin.md:21 1029537ed0ca44e499a8f2098cc72f1a +msgid "" +"Test Case: Use a histogram to analyze the total order amount of users in " +"different cities." 
+msgstr "测试示例:请使用柱状图分析各个城市的用户数" + +#: ../../use_cases/tool_use_with_plugin.md:26 a8a2103279d641b98948bcb06971c160 +msgid "" +"More detail see: [DB-DASHBOARD](https://github.com/csunny/DB-GPT-" +"Plugins/blob/main/src/dbgpt_plugins/Readme.md)" +msgstr "更多详情请看:[DB-DASHBOARD](https://github.com/csunny/DB-GPT-" +"Plugins/blob/main/src/dbgpt_plugins/Readme.md)" + +#: ../../use_cases/tool_use_with_plugin.md:29 db2906b5f89e4a01a4a47a89ad750804 +msgid "DB-GPT-SQL-Execution-Plugin" +msgstr "DB-GPT-SQL-Execution-Plugin" + +#: ../../use_cases/tool_use_with_plugin.md:32 cd491706a64a4bf3af4459fa0699c82e +msgid "This is an DbGPT plugin to connect Generic Db And Execute SQL." +msgstr "这是一个 DbGPT 插件,用于连接通用数据库并执行 SQL。" + +#: ../../use_cases/tool_use_with_plugin.md:35 73dbe0fb80b44b9d9c2928cbf161f291 +msgid "DB-GPT-Bytebase-Plugin" +msgstr "DB-GPT-Bytebase-Plugin" + +#: ../../use_cases/tool_use_with_plugin.md:37 72f675332ff04297861394e8eb2cf5c4 +msgid "" +"To use a tool or platform plugin, you should first deploy a plugin. " +"Taking the open-source database management platform Bytebase as an " +"example, you can deploy your Bytebase service with one click using Docker" +" and access it at http://127.0.0.1:5678. More details can be found at " +"https://github.com/bytebase/bytebase." +msgstr "要使用一个工具或平台插件,您应该首先部署一个插件。以开源数据库管理平台Bytebase为例,您可以使用Docker一键部署Bytebase服务,并通过http://127.0.0.1:5678进行访问。更多细节可以在 https://github.com/bytebase/bytebase 找到。" + +#: ../../use_cases/tool_use_with_plugin.md:53 0d84f8fabc29479caa0bf630c00283d2 +msgid "" +"Note: If your machine's CPU architecture is `ARM`, please use `--platform" +" linux/arm64` instead." +msgstr "备注:如果你的机器CPU架构是ARM,请使用--platform linux/arm64 代替" + +#: ../../use_cases/tool_use_with_plugin.md:55 d4117072f9504875a765c558f55f2d88 +msgid "" +"Select the plugin on DB-GPT(All built-in plugins are from our repository:" +" https://github.com/csunny/DB-GPT-Plugins),choose DB-GPT-Bytebase-Plugin." +" Supporting functions include creating projects, creating environments, " +"creating database instances, creating databases, database DDL/DML " +"operations, and ticket approval process, etc." +msgstr "在DB-GPT上选择插件(所有内置插件均来自我们的仓库:https://github.com/csunny/DB-GPT-Plugins),选择DB-GPT-Bytebase-Plugin。支持的功能包括创建项目、创建环境、创建数据库实例、创建数据库、数据库DDL/DML操作和审批流程等。" + diff --git a/docs/modules/connections.md b/docs/modules/connections.md deleted file mode 100644 index 041120d26..000000000 --- a/docs/modules/connections.md +++ /dev/null @@ -1,4 +0,0 @@ -# Connections - -In order to interact more conveniently with users' private environments, the project has designed a connection module, which can support connection to databases, Excel, knowledge bases, and other environments to achieve information and data exchange. - diff --git a/docs/modules/connections.rst b/docs/modules/connections.rst new file mode 100644 index 000000000..96cfa1248 --- /dev/null +++ b/docs/modules/connections.rst @@ -0,0 +1,16 @@ +Connections +--------- +**In order to interact more conveniently with users' private environments, the project has designed a connection module, which can support connection to databases, Excel, knowledge bases, and other environments to achieve information and data exchange.** + +DB-GPT provides base class BaseConnect, you can inheriting and implement get_session(), get_table_names(), get_index_info(), get_database_list() and run(). + +- `mysql_connection <./connections/mysql_connection.html>`_: supported mysql_connection. + + +.. 
+.. toctree::
+   :maxdepth: 2
+   :caption: Connections
+   :name: mysql_connection
+   :hidden:
+
+   ./connections/mysql/mysql_connection.md
\ No newline at end of file
diff --git a/docs/modules/connections/mysql/mysql_connection.md b/docs/modules/connections/mysql/mysql_connection.md
new file mode 100644
index 000000000..3204aa5db
--- /dev/null
+++ b/docs/modules/connections/mysql/mysql_connection.md
@@ -0,0 +1,18 @@
+MYSQL Connection
+==================================
+MySQLConnect connects to a MySQL server and fetches its metadata.
+
+Inherit from RDBMSDatabase:
+```
+class MySQLConnect(RDBMSDatabase):
+    """Connect MySQL Database fetch MetaData
+    Args:
+    Usage:
+    """
+
+    type: str = "MySQL"
+    dialect: str = "mysql"
+    driver: str = "pymysql"
+
+    default_db = ["information_schema", "performance_schema", "sys", "mysql"]
+```
\ No newline at end of file
diff --git a/docs/modules/knowledge.rst b/docs/modules/knowledge.rst
new file mode 100644
index 000000000..72b97af98
--- /dev/null
+++ b/docs/modules/knowledge.rst
@@ -0,0 +1,40 @@
+Knowledge
+---------
+
+| As the knowledge base is currently the most significant user demand scenario, we natively support the construction and processing of knowledge bases. We also provide multiple knowledge base management strategies in this project, such as pdf knowledge, md knowledge, txt knowledge, word knowledge, and ppt knowledge:
+
+
+**Create your own knowledge repository**
+
+1. Place personal knowledge files or folders in the pilot/datasets directory.
+
+We currently support many document formats: txt, pdf, md, html, doc, ppt, and url.
+
+Before execution, run: python -m spacy download zh_core_web_sm
+
+2. Update your .env and set your vector store type, VECTOR_STORE_TYPE=Chroma
+(currently only Chroma and Milvus are supported; if you set Milvus, please also set MILVUS_URL and MILVUS_PORT).
+
+3. Run the knowledge repository script in the tools directory:
+
+python tools/knowledge_init.py
+note: --vector_name sets your vector store name (default value: default)
+
+4. Add the knowledge repository in the interface by entering the name of your knowledge repository (if not specified, enter "default") so you can use it for Q&A based on your knowledge base.
+
+Note that the default vector model used is text2vec-large-chinese (which is a large model, so if your personal computer configuration is not enough, it is recommended to use text2vec-base-chinese). Therefore, ensure that you download the model and place it in the models directory.
+
+- `pdf_embedding <./knowledge/pdf_embedding.html>`_: pdf embedding support.
+
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Knowledge
+   :name: pdf_embedding
+   :hidden:
+
+   ./knowledge/pdf/pdf_embedding.md
+   ./knowledge/markdown/markdown_embedding.md
+   ./knowledge/word/word_embedding.md
+   ./knowledge/url/url_embedding.md
+   ./knowledge/ppt/ppt_embedding.md
\ No newline at end of file
diff --git a/docs/modules/knowledge/markdown/markdown_embedding.md b/docs/modules/knowledge/markdown/markdown_embedding.md
new file mode 100644
index 000000000..dfb42c7ff
--- /dev/null
+++ b/docs/modules/knowledge/markdown/markdown_embedding.md
@@ -0,0 +1,42 @@
+MarkdownEmbedding
+==================================
+Markdown embedding can import md text into a vector knowledge base. The entire embedding process consists of the read() (loading data), data_process() (data processing), and index_to_store() (embedding into the vector database) methods.
+
+Inherit from SourceEmbedding:
+
+```
+class MarkdownEmbedding(SourceEmbedding):
+    """Markdown embedding for reading markdown documents."""
+
+    def __init__(self, file_path, vector_store_config):
+        """Initialize with markdown file path."""
+        super().__init__(file_path, vector_store_config)
+        self.file_path = file_path
+        self.vector_store_config = vector_store_config
+```
+Implement read() and data_process().
+The read() method loads the data and splits it into chunks:
+
+```
+    @register
+    def read(self):
+        """Load from markdown path."""
+        loader = EncodeTextLoader(self.file_path)
+        textsplitter = SpacyTextSplitter(
+            pipeline="zh_core_web_sm",
+            chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+            chunk_overlap=100,
+        )
+        return loader.load_and_split(textsplitter)
+```
+
+The data_process() method lets you pre-process the documents in your own way:
+```
+    @register
+    def data_process(self, documents: List[Document]):
+        i = 0
+        for d in documents:
+            documents[i].page_content = d.page_content.replace("\n", "")
+            i += 1
+        return documents
+```
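+
+A hypothetical end-to-end run is sketched below; the source_embedding() entry point
+(assumed to chain read(), data_process() and index_to_store()), the sample file path,
+and the vector_store_config keys are illustrative assumptions, not the verified API:
+```
+embedding = MarkdownEmbedding(
+    file_path="pilot/datasets/example.md",  # made-up sample document
+    vector_store_config={"vector_store_name": "default"},  # assumed config key
+)
+embedding.source_embedding()  # read -> data_process -> index_to_store
+```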
diff --git a/docs/modules/knowledge/pdf/pdf_embedding.md b/docs/modules/knowledge/pdf/pdf_embedding.md
new file mode 100644
index 000000000..198a5297c
--- /dev/null
+++ b/docs/modules/knowledge/pdf/pdf_embedding.md
@@ -0,0 +1,43 @@
+PDFEmbedding
+==================================
+PDF embedding can import PDF text into a vector knowledge base. The entire embedding process consists of the read() (loading data), data_process() (data processing), and index_to_store() (embedding into the vector database) methods.
+
+Inherit from SourceEmbedding:
+```
+class PDFEmbedding(SourceEmbedding):
+    """PDF embedding for reading PDF documents."""
+
+    def __init__(self, file_path, vector_store_config):
+        """Initialize with pdf path."""
+        super().__init__(file_path, vector_store_config)
+        self.file_path = file_path
+        self.vector_store_config = vector_store_config
+```
+
+Implement read() and data_process().
+The read() method loads the data and splits it into chunks:
+```
+    @register
+    def read(self):
+        """Load from pdf path."""
+        loader = PyPDFLoader(self.file_path)
+        # textsplitter = CHNDocumentSplitter(
+        #     pdf=True, sentence_size=CFG.KNOWLEDGE_CHUNK_SIZE
+        # )
+        textsplitter = SpacyTextSplitter(
+            pipeline="zh_core_web_sm",
+            chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+            chunk_overlap=100,
+        )
+        return loader.load_and_split(textsplitter)
+```
+The data_process() method lets you pre-process the documents in your own way:
+```
+    @register
+    def data_process(self, documents: List[Document]):
+        i = 0
+        for d in documents:
+            documents[i].page_content = d.page_content.replace("\n", "")
+            i += 1
+        return documents
+```
diff --git a/docs/modules/knowledge/ppt/ppt_embedding.md b/docs/modules/knowledge/ppt/ppt_embedding.md
new file mode 100644
index 000000000..e1d7754b9
--- /dev/null
+++ b/docs/modules/knowledge/ppt/ppt_embedding.md
@@ -0,0 +1,40 @@
+PPTEmbedding
+==================================
+PPT embedding can import ppt text into a vector knowledge base. The entire embedding process consists of the read() (loading data), data_process() (data processing), and index_to_store() (embedding into the vector database) methods.
+
+Inherit from SourceEmbedding:
+```
+class PPTEmbedding(SourceEmbedding):
+    """PPT embedding for reading ppt documents."""
+
+    def __init__(self, file_path, vector_store_config):
+        """Initialize with ppt path."""
+        super().__init__(file_path, vector_store_config)
+        self.file_path = file_path
+        self.vector_store_config = vector_store_config
+```
+
+Implement read() and data_process().
+The read() method loads the data and splits it into chunks:
+```
+    @register
+    def read(self):
+        """Load from ppt path."""
+        loader = UnstructuredPowerPointLoader(self.file_path)
+        textsplitter = SpacyTextSplitter(
+            pipeline="zh_core_web_sm",
+            chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+            chunk_overlap=200,
+        )
+        return loader.load_and_split(textsplitter)
+```
+The data_process() method lets you pre-process the documents in your own way:
+```
+    @register
+    def data_process(self, documents: List[Document]):
+        i = 0
+        for d in documents:
+            documents[i].page_content = d.page_content.replace("\n", "")
+            i += 1
+        return documents
+```
diff --git a/docs/modules/knowledge/url/url_embedding.md b/docs/modules/knowledge/url/url_embedding.md
new file mode 100644
index 000000000..637fa88c0
--- /dev/null
+++ b/docs/modules/knowledge/url/url_embedding.md
@@ -0,0 +1,47 @@
+URL Embedding
+==================================
+URL embedding can import web page text into a vector knowledge base. The entire embedding process consists of the read() (loading data), data_process() (data processing), and index_to_store() (embedding into the vector database) methods.
+
+Inherit from SourceEmbedding:
+```
+class URLEmbedding(SourceEmbedding):
+    """URL embedding for reading web documents."""
+
+    def __init__(self, file_path, vector_store_config):
+        """Initialize with url path."""
+        super().__init__(file_path, vector_store_config)
+        self.file_path = file_path
+        self.vector_store_config = vector_store_config
+```
+
+Implement read() and data_process().
+The read() method loads the data and splits it into chunks:
+```
+    @register
+    def read(self):
+        """Load from url path."""
+        loader = WebBaseLoader(web_path=self.file_path)
+        if CFG.LANGUAGE == "en":
+            text_splitter = CharacterTextSplitter(
+                chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+                chunk_overlap=20,
+                length_function=len,
+            )
+        else:
+            text_splitter = CHNDocumentSplitter(pdf=True, sentence_size=1000)
+        return loader.load_and_split(text_splitter)
+```
+The data_process() method lets you pre-process the documents in your own way:
+```
+    @register
+    def data_process(self, documents: List[Document]):
+        i = 0
+        for d in documents:
+            content = d.page_content.replace("\n", "")
+            soup = BeautifulSoup(content, "html.parser")
+            for tag in soup(["!doctype", "meta"]):
+                tag.extract()
+            documents[i].page_content = soup.get_text()
+            i += 1
+        return documents
+```
diff --git a/docs/modules/knowledge/word/word_embedding.md b/docs/modules/knowledge/word/word_embedding.md
new file mode 100644
index 000000000..d978bf860
--- /dev/null
+++ b/docs/modules/knowledge/word/word_embedding.md
@@ -0,0 +1,38 @@
+WordEmbedding
+==================================
+Word embedding can import word doc/docx text into a vector knowledge base. The entire embedding process consists of the read() (loading data), data_process() (data processing), and index_to_store() (embedding into the vector database) methods.
+
+Inherit from SourceEmbedding:
+```
+class WordEmbedding(SourceEmbedding):
+    """Word embedding for reading word documents."""
+
+    def __init__(self, file_path, vector_store_config):
+        """Initialize with word path."""
+        super().__init__(file_path, vector_store_config)
+        self.file_path = file_path
+        self.vector_store_config = vector_store_config
+```
+
+Implement read() and data_process().
+The read() method loads the data and splits it into chunks:
+```
+    @register
+    def read(self):
+        """Load from word path."""
+        loader = UnstructuredWordDocumentLoader(self.file_path)
+        textsplitter = CHNDocumentSplitter(
+            pdf=True, sentence_size=CFG.KNOWLEDGE_CHUNK_SIZE
+        )
+        return loader.load_and_split(textsplitter)
+```
+The data_process() method lets you pre-process the documents in your own way:
+```
+    @register
+    def data_process(self, documents: List[Document]):
+        i = 0
+        for d in documents:
+            documents[i].page_content = d.page_content.replace("\n", "")
+            i += 1
+        return documents
+```
diff --git a/docs/modules/llms.md b/docs/modules/llms.md
index c83b73af8..0d3d07d5b 100644
--- a/docs/modules/llms.md
+++ b/docs/modules/llms.md
@@ -86,4 +86,25 @@ class ChatGLMChatAdapter(BaseChatAdpter):
         return chatglm_generate_stream
 ```
-    if you want to integrate your own model, just need to inheriting BaseLLMAdaper and BaseChatAdpter and implement the methods
\ No newline at end of file
+If you want to integrate your own model, you just need to inherit BaseLLMAdaper and BaseChatAdpter and implement their methods; a sketch follows below.
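+
+A minimal sketch of a custom adapter pair (the model name, the tokenizer loading, and
+the my_model_generate_stream import are illustrative assumptions; follow the vicuna
+and chatglm adapters above for working examples):
+```
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+# assumed import paths (see pilot/model/adapter.py and pilot/server/chat_adapter.py)
+from pilot.model.adapter import BaseLLMAdaper, register_llm_model_adapters
+from pilot.server.chat_adapter import BaseChatAdpter, register_llm_model_chat_adapter
+
+class MyModelAdapter(BaseLLMAdaper):
+    def match(self, model_path: str):
+        # pick this adapter when the model path contains your model's name
+        return "my-model" in model_path
+
+    def loader(self, model_path: str, from_pretrained_kwargs: dict):
+        tokenizer = AutoTokenizer.from_pretrained(model_path)
+        model = AutoModelForCausalLM.from_pretrained(model_path, **from_pretrained_kwargs)
+        return model, tokenizer
+
+class MyModelChatAdapter(BaseChatAdpter):
+    def match(self, model_path: str):
+        return "my-model" in model_path
+
+    def get_generate_stream_func(self):
+        # hypothetical module: implement your own streaming generate function
+        from pilot.model.llm_out.my_model_llm import my_model_generate_stream
+        return my_model_generate_stream
+
+register_llm_model_adapters(MyModelAdapter)
+register_llm_model_chat_adapter(MyModelChatAdapter)
+```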
+
+
+## Multi Proxy LLMs
+### 1. OpenAI proxy
+If you haven't deployed private infrastructure for a large model, or if you want to use DB-GPT in a low-cost and high-efficiency way, you can also use OpenAI's models as your underlying model.
+
+- If the environment where you deploy DB-GPT has access to OpenAI, then modifying the .env configuration file as below will work.
+```
+LLM_MODEL=proxy_llm
+MODEL_SERVER=127.0.0.1:8000
+PROXY_API_KEY=sk-xxx
+PROXY_SERVER_URL=https://api.openai.com/v1/chat/completions
+```
+
+- If you can't access OpenAI locally but have an OpenAI proxy service, you can configure as follows.
+```
+LLM_MODEL=proxy_llm
+MODEL_SERVER=127.0.0.1:8000
+PROXY_API_KEY=sk-xxx
+PROXY_SERVER_URL={your-openai-proxy-server/v1/chat/completions}
+```
diff --git a/docs/modules/plugins.md b/docs/modules/plugins.md
index e4a95d3be..6f2617146 100644
--- a/docs/modules/plugins.md
+++ b/docs/modules/plugins.md
@@ -1,3 +1,98 @@
 # Plugins
 
-The ability of Agent and Plugin is the core of whether large models can be automated. In this project, we natively support the plugin mode, and large models can automatically achieve their goals. At the same time, in order to give full play to the advantages of the community, the plugins used in this project natively support the Auto-GPT plugin ecology, that is, Auto-GPT plugins can directly run in our project.
\ No newline at end of file
+Agent and plugin capabilities determine whether large models can work autonomously. This project natively supports the plugin mode, so large models can automatically achieve their goals. To take full advantage of the community, the plugins used in this project also natively support the Auto-GPT plugin ecosystem; that is, Auto-GPT plugins can run directly in our project.
+
+## Local Plugins
+
+### 1.1 How to write local plugins
+
+- Local plugins use the Auto-GPT plugin template. A simple example is as follows: first write a plugin file called "sql_executor.py".
+```python +import pymysql +import pymysql.cursors + +def get_conn(): + return pymysql.connect( + host="127.0.0.1", + port=int("2883"), + user="mock", + password="mock", + database="mock", + charset="utf8mb4", + ssl_ca=None, + ) + +def ob_sql_executor(sql: str): + try: + conn = get_conn() + with conn.cursor() as cursor: + cursor.execute(sql) + result = cursor.fetchall() + field_names = tuple(i[0] for i in cursor.description) + result = list(result) + result.insert(0, field_names) + return result + except pymysql.err.ProgrammingError as e: + return str(e) +``` + +Then set the "can_handle_post_prompt" method of the plugin template to True. In the "post_prompt" method, write the prompt information and the mapped plugin function. + +```python +"""This is a template for DB-GPT plugins.""" +from typing import Any, Dict, List, Optional, Tuple, TypeVar, TypedDict + +from auto_gpt_plugin_template import AutoGPTPluginTemplate + +PromptGenerator = TypeVar("PromptGenerator") + +class Message(TypedDict): + role: str + content: str + +class DBGPTOceanBase(AutoGPTPluginTemplate): + """ + This is an DB-GPT plugin to connect OceanBase. + """ + + def __init__(self): + super().__init__() + self._name = "DB-GPT-OB-Serverless-Plugin" + self._version = "0.1.0" + self._description = "This is an DB-GPT plugin to connect OceanBase." + + def can_handle_post_prompt(self) -> bool: + return True + + def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator: + from .sql_executor import ob_sql_executor + + prompt.add_command( + "ob_sql_executor", + "Execute SQL in OceanBase Database.", + {"sql": ""}, + ob_sql_executor, + ) + return prompt + ... + +``` + +### 1.2 How to use local plugins + +- Pack your plugin project into `your-plugin.zip` and place it in the `/plugins/` directory of the DB-GPT project. After starting the webserver, you can select and use it in the `Plugin Model` section. + + +## Public Plugins + +### 1.1 How to use public plugins + +- By default, after launching the webserver, plugins from the public plugin library `DB-GPT-Plugins` will be automatically loaded. For more details, please refer to [DB-GPT-Plugins](https://github.com/csunny/DB-GPT-Plugins) + +### 1.2 Contribute to the DB-GPT-Plugins repository + +- Please refer to the plugin development process in the public plugin library, and put the configuration parameters in `.plugin_env` + +- We warmly welcome everyone to contribute plugins to the public plugin library! + + diff --git a/docs/use_cases/chatbots.md b/docs/use_cases/chatbots.md deleted file mode 100644 index 547ae67cc..000000000 --- a/docs/use_cases/chatbots.md +++ /dev/null @@ -1 +0,0 @@ -# Chatbot \ No newline at end of file diff --git a/docs/use_cases/interacting_with_api.md b/docs/use_cases/interacting_with_api.md deleted file mode 100644 index 65f69ed2a..000000000 --- a/docs/use_cases/interacting_with_api.md +++ /dev/null @@ -1 +0,0 @@ -# Interacting with api \ No newline at end of file diff --git a/docs/use_cases/sql_generation_and_diagnosis.md b/docs/use_cases/sql_generation_and_diagnosis.md index f0448edd0..d608ce1ec 100644 --- a/docs/use_cases/sql_generation_and_diagnosis.md +++ b/docs/use_cases/sql_generation_and_diagnosis.md @@ -1 +1,5 @@ -# SQL generation and diagnosis \ No newline at end of file +# SQL generation and diagnosis + +DB-GPT provides SQL generation and diagnostic capabilities. 
With advanced natural language processing algorithms and a deep understanding of SQL syntax and semantics, the model can generate complex SQL queries from natural language input, as well as diagnose and fix issues with existing queries. This allows users to save time and streamline their workflow, while also improving the accuracy and efficiency of their SQL operations.
+
+![SQL Generation](../../assets/demo_en.gif)
\ No newline at end of file
diff --git a/docs/use_cases/tool_use_with_plugin.md b/docs/use_cases/tool_use_with_plugin.md
index 8aa053daf..aeb1c637d 100644
--- a/docs/use_cases/tool_use_with_plugin.md
+++ b/docs/use_cases/tool_use_with_plugin.md
@@ -1 +1,57 @@
-# Tool use with plugin
\ No newline at end of file
+# Tool use with plugin
+
+- DB-GPT supports a variety of plug-ins, such as MySQL, MongoDB, ClickHouse and other database tool plug-ins. In addition, some database management platforms can also package their interfaces into plug-ins, using the model to realize "single-sentence requirement" capabilities.
+
+
+## DB-GPT-DASHBOARD-PLUGIN
+
+[Db-GPT Chart Plugin](https://github.com/csunny/DB-GPT-Plugins/blob/main/src/dbgpt_plugins/Readme.md)
+
+- This is a DB-GPT plugin to generate data analysis charts. If you want to use the test sample data, please first pull the code of [DB-GPT-Plugins](https://github.com/csunny/DB-GPT-Plugins), run the command to generate test DuckDB data, and then copy the generated data file to the `/pilot/mock_datas` directory of the DB-GPT project.
+
+```bash
+git clone https://github.com/csunny/DB-GPT-Plugins.git
+pip install -r requirements.txt
+python /DB-GPT-Plugins/src/dbgpt_plugins/db_dashboard/mock_datas.py
+cp /DB-GPT-Plugins/src/dbgpt_plugins/db_dashboard/mock_datas/db-gpt-test.db /DB-GPT/pilot/mock_datas/
+
+python /DB-GPT/pilot/llmserver.py
+python /DB-GPT/pilot/webserver.py
+```
+- Test Case: Use a histogram to analyze the total order amount of users in different cities.
+
+![Chart of user orders by city](../../assets/chart_db_city_users.png)
+ +- More detail see: [DB-DASHBOARD](https://github.com/csunny/DB-GPT-Plugins/blob/main/src/dbgpt_plugins/Readme.md) + + +## DB-GPT-SQL-Execution-Plugin + + +- This is an DbGPT plugin to connect Generic Db And Execute SQL. + + +## DB-GPT-Bytebase-Plugin + +- To use a tool or platform plugin, you should first deploy a plugin. Taking the open-source database management platform Bytebase as an example, you can deploy your Bytebase service with one click using Docker and access it at http://127.0.0.1:5678. More details can be found at https://github.com/bytebase/bytebase. +```bash +docker run --init \ + --name bytebase \ + --platform linux/amd64 \ + --restart always \ + --publish 5678:8080 \ + --health-cmd "curl --fail http://localhost:5678/healthz || exit 1" \ + --health-interval 5m \ + --health-timeout 60s \ + --volume ~/.bytebase/data:/var/opt/bytebase \ + bytebase/bytebase:2.2.0 \ + --data /var/opt/bytebase \ + --port 8080 +``` + +Note: If your machine's CPU architecture is `ARM`, please use `--platform linux/arm64` instead. + +- Select the plugin on DB-GPT(All built-in plugins are from our repository: https://github.com/csunny/DB-GPT-Plugins),choose DB-GPT-Bytebase-Plugin. +Supporting functions include creating projects, creating environments, creating database instances, creating databases, database DDL/DML operations, and ticket approval process, etc. + diff --git a/pilot/common/sql_database.py b/pilot/common/sql_database.py index 5ccfb7902..d59a9d33f 100644 --- a/pilot/common/sql_database.py +++ b/pilot/common/sql_database.py @@ -66,7 +66,6 @@ class Database: self._sample_rows_in_table_info = set() self._indexes_in_table_info = indexes_in_table_info - @classmethod def from_uri( cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any @@ -399,7 +398,6 @@ class Database: ans = cursor.fetchall() return ans[0][1] - def get_fields(self, table_name): """Get column fields about specified table.""" session = self._db_sessions() diff --git a/pilot/configs/__init__.py b/pilot/configs/__init__.py index 44f901bbd..bbbd6a21c 100644 --- a/pilot/configs/__init__.py +++ b/pilot/configs/__init__.py @@ -10,6 +10,7 @@ if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"): # Load the users .env file into environment variables load_dotenv(verbose=True, override=True) -load_dotenv(".plugin_env") +ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +load_dotenv(os.path.join(ROOT_PATH, ".plugin_env")) del load_dotenv diff --git a/pilot/configs/model_config.py b/pilot/configs/model_config.py index eab44ba09..0dc78af06 100644 --- a/pilot/configs/model_config.py +++ b/pilot/configs/model_config.py @@ -14,8 +14,8 @@ LOGDIR = os.path.join(ROOT_PATH, "logs") DATASETS_DIR = os.path.join(PILOT_PATH, "datasets") DATA_DIR = os.path.join(PILOT_PATH, "data") nltk.data.path = [os.path.join(PILOT_PATH, "nltk_data")] + nltk.data.path -PLUGINS_DIR = os.path.join(ROOT_PATH, "plugins") -FONT_DIR = os.path.join(PILOT_PATH, "fonts") +PLUGINS_DIR = os.path.join(ROOT_PATH, "plugins") +FONT_DIR = os.path.join(PILOT_PATH, "fonts") current_directory = os.getcwd() @@ -43,6 +43,7 @@ LLM_MODEL_CONFIG = { "guanaco-33b-merged": os.path.join(MODEL_PATH, "guanaco-33b-merged"), "falcon-40b": os.path.join(MODEL_PATH, "falcon-40b"), "gorilla-7b": os.path.join(MODEL_PATH, "gorilla-7b"), + "gptj-6b": os.path.join(MODEL_PATH, "ggml-gpt4all-j-v1.3-groovy.bin"), "proxyllm": "proxyllm", } diff --git a/pilot/connections/rdbms/clickhouse.py b/pilot/connections/rdbms/clickhouse.py 
index c7421e8e6..3e243759d 100644 --- a/pilot/connections/rdbms/clickhouse.py +++ b/pilot/connections/rdbms/clickhouse.py @@ -6,6 +6,7 @@ from pilot.configs.config import Config CFG = Config() + class ClickHouseConnector(RDBMSDatabase): """ClickHouseConnector""" @@ -17,19 +18,21 @@ class ClickHouseConnector(RDBMSDatabase): default_db = ["information_schema", "performance_schema", "sys", "mysql"] - @classmethod def from_config(cls) -> RDBMSDatabase: """ Todo password encryption Returns: """ - return cls.from_uri_db(cls, - CFG.LOCAL_DB_PATH, - engine_args={"pool_size": 10, "pool_recycle": 3600, "echo": True}) + return cls.from_uri_db( + cls, + CFG.LOCAL_DB_PATH, + engine_args={"pool_size": 10, "pool_recycle": 3600, "echo": True}, + ) @classmethod - def from_uri_db(cls, db_path: str, - engine_args: Optional[dict] = None, **kwargs: Any) -> RDBMSDatabase: + def from_uri_db( + cls, db_path: str, engine_args: Optional[dict] = None, **kwargs: Any + ) -> RDBMSDatabase: db_url: str = cls.connect_driver + "://" + db_path return cls.from_uri(db_url, engine_args, **kwargs) diff --git a/pilot/connections/rdbms/duckdb.py b/pilot/connections/rdbms/duckdb.py index e8b1038cb..947807744 100644 --- a/pilot/connections/rdbms/duckdb.py +++ b/pilot/connections/rdbms/duckdb.py @@ -6,6 +6,7 @@ from pilot.configs.config import Config CFG = Config() + class DuckDbConnect(RDBMSDatabase): """Connect Duckdb Database fetch MetaData Args: @@ -20,19 +21,21 @@ class DuckDbConnect(RDBMSDatabase): default_db = ["information_schema", "performance_schema", "sys", "mysql"] - @classmethod def from_config(cls) -> RDBMSDatabase: """ Todo password encryption Returns: """ - return cls.from_uri_db(cls, - CFG.LOCAL_DB_PATH, - engine_args={"pool_size": 10, "pool_recycle": 3600, "echo": True}) + return cls.from_uri_db( + cls, + CFG.LOCAL_DB_PATH, + engine_args={"pool_size": 10, "pool_recycle": 3600, "echo": True}, + ) @classmethod - def from_uri_db(cls, db_path: str, - engine_args: Optional[dict] = None, **kwargs: Any) -> RDBMSDatabase: + def from_uri_db( + cls, db_path: str, engine_args: Optional[dict] = None, **kwargs: Any + ) -> RDBMSDatabase: db_url: str = cls.connect_driver + "://" + db_path return cls.from_uri(db_url, engine_args, **kwargs) diff --git a/pilot/connections/rdbms/mssql.py b/pilot/connections/rdbms/mssql.py index 89c37e757..ceab845c4 100644 --- a/pilot/connections/rdbms/mssql.py +++ b/pilot/connections/rdbms/mssql.py @@ -5,9 +5,6 @@ from typing import Optional, Any from pilot.connections.rdbms.rdbms_connect import RDBMSDatabase - - - class MSSQLConnect(RDBMSDatabase): """Connect MSSQL Database fetch MetaData Args: @@ -18,6 +15,4 @@ class MSSQLConnect(RDBMSDatabase): dialect: str = "mssql" driver: str = "pyodbc" - default_db = ["master", "model", "msdb", "tempdb","modeldb", "resource"] - - + default_db = ["master", "model", "msdb", "tempdb", "modeldb", "resource"] diff --git a/pilot/connections/rdbms/mysql.py b/pilot/connections/rdbms/mysql.py index c1b57f784..8acf90759 100644 --- a/pilot/connections/rdbms/mysql.py +++ b/pilot/connections/rdbms/mysql.py @@ -5,9 +5,6 @@ from typing import Optional, Any from pilot.connections.rdbms.rdbms_connect import RDBMSDatabase - - - class MySQLConnect(RDBMSDatabase): """Connect MySQL Database fetch MetaData Args: @@ -19,5 +16,3 @@ class MySQLConnect(RDBMSDatabase): driver: str = "pymysql" default_db = ["information_schema", "performance_schema", "sys", "mysql"] - - diff --git a/pilot/connections/rdbms/oracle.py b/pilot/connections/rdbms/oracle.py index 8c5c0d004..8959695b0 100644 
--- a/pilot/connections/rdbms/oracle.py +++ b/pilot/connections/rdbms/oracle.py @@ -2,8 +2,10 @@ # -*- coding:utf-8 -*- from pilot.connections.rdbms.rdbms_connect import RDBMSDatabase + class OracleConnector(RDBMSDatabase): """OracleConnector""" + type: str = "ORACLE" driver: str = "oracle" diff --git a/pilot/connections/rdbms/postgres.py b/pilot/connections/rdbms/postgres.py index 2d366566a..104380a37 100644 --- a/pilot/connections/rdbms/postgres.py +++ b/pilot/connections/rdbms/postgres.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- from pilot.connections.rdbms.rdbms_connect import RDBMSDatabase + class PostgresConnector(RDBMSDatabase): """PostgresConnector is a class which Connector""" diff --git a/pilot/connections/rdbms/py_study/pd_study.py b/pilot/connections/rdbms/py_study/pd_study.py index 68784f9b7..5a2b3edae 100644 --- a/pilot/connections/rdbms/py_study/pd_study.py +++ b/pilot/connections/rdbms/py_study/pd_study.py @@ -57,18 +57,19 @@ CFG = Config() if __name__ == "__main__": - def __extract_json(s): - i = s.index('{') - count = 1 # 当前所在嵌套深度,即还没闭合的'{'个数 - for j, c in enumerate(s[i + 1:], start=i + 1): - if c == '}': - count -= 1 - elif c == '{': - count += 1 - if count == 0: - break - assert (count == 0) # 检查是否找到最后一个'}' - return s[i:j + 1] - ss = """here's a sql statement that can be used to generate a histogram to analyze the distribution of user orders in different cities:select u.city, count(*) as order_countfrom tran_order oleft join user u on o.user_id = u.idgroup by u.city;this will return the number of orders for each city that has at least one order. we can use this data to generate a histogram that shows the distribution of orders across different cities.here's the response in the required format:{ "thoughts": "here's a sql statement that can be used to generate a histogram to analyze the distribution of user orders in different cities:\n\nselect u.city, count(*) as order_count\nfrom tran_order o\nleft join user u on o.user_id = u.id\ngroup by u.city;", "speak": "here's a sql statement that can be used to generate a histogram to analyze the distribution of user orders in different cities.", "command": { "name": "histogram-executor", "args": { "title": "distribution of user orders in different cities", "sql": "select u.city, count(*) as order_count\nfrom tran_order o\nleft join user u on o.user_id = u.id\ngroup by u.city;" } }}""" - print(__extract_json(ss)) \ No newline at end of file + def __extract_json(s): + i = s.index("{") + count = 1 # 当前所在嵌套深度,即还没闭合的'{'个数 + for j, c in enumerate(s[i + 1 :], start=i + 1): + if c == "}": + count -= 1 + elif c == "{": + count += 1 + if count == 0: + break + assert count == 0 # 检查是否找到最后一个'}' + return s[i : j + 1] + + ss = """here's a sql statement that can be used to generate a histogram to analyze the distribution of user orders in different cities:select u.city, count(*) as order_countfrom tran_order oleft join user u on o.user_id = u.idgroup by u.city;this will return the number of orders for each city that has at least one order. 
we can use this data to generate a histogram that shows the distribution of orders across different cities.here's the response in the required format:{ "thoughts": "here's a sql statement that can be used to generate a histogram to analyze the distribution of user orders in different cities:\n\nselect u.city, count(*) as order_count\nfrom tran_order o\nleft join user u on o.user_id = u.id\ngroup by u.city;", "speak": "here's a sql statement that can be used to generate a histogram to analyze the distribution of user orders in different cities.", "command": { "name": "histogram-executor", "args": { "title": "distribution of user orders in different cities", "sql": "select u.city, count(*) as order_count\nfrom tran_order o\nleft join user u on o.user_id = u.id\ngroup by u.city;" } }}""" + print(__extract_json(ss)) diff --git a/pilot/connections/rdbms/rdbms_connect.py b/pilot/connections/rdbms/rdbms_connect.py index 424bfaa7f..7fef1862f 100644 --- a/pilot/connections/rdbms/rdbms_connect.py +++ b/pilot/connections/rdbms/rdbms_connect.py @@ -35,13 +35,12 @@ class RDBMSDatabase(BaseConnect): """SQLAlchemy wrapper around a database.""" def __init__( - self, - engine, - schema: Optional[str] = None, - metadata: Optional[MetaData] = None, - ignore_tables: Optional[List[str]] = None, - include_tables: Optional[List[str]] = None, - + self, + engine, + schema: Optional[str] = None, + metadata: Optional[MetaData] = None, + ignore_tables: Optional[List[str]] = None, + include_tables: Optional[List[str]] = None, ): """Create engine from database URI.""" self._engine = engine @@ -61,18 +60,37 @@ class RDBMSDatabase(BaseConnect): Todo password encryption Returns: """ - return cls.from_uri_db(cls, - CFG.LOCAL_DB_HOST, - CFG.LOCAL_DB_PORT, - CFG.LOCAL_DB_USER, - CFG.LOCAL_DB_PASSWORD, - engine_args={"pool_size": 10, "pool_recycle": 3600, "echo": True}) + return cls.from_uri_db( + cls, + CFG.LOCAL_DB_HOST, + CFG.LOCAL_DB_PORT, + CFG.LOCAL_DB_USER, + CFG.LOCAL_DB_PASSWORD, + engine_args={"pool_size": 10, "pool_recycle": 3600, "echo": True}, + ) @classmethod - def from_uri_db(cls, host: str, port: int, user: str, pwd: str, db_name: str = None, - engine_args: Optional[dict] = None, **kwargs: Any) -> RDBMSDatabase: - db_url: str = cls.connect_driver + "://" + CFG.LOCAL_DB_USER + ":" + CFG.LOCAL_DB_PASSWORD + "@" + CFG.LOCAL_DB_HOST + ":" + str( - CFG.LOCAL_DB_PORT) + def from_uri_db( + cls, + host: str, + port: int, + user: str, + pwd: str, + db_name: str = None, + engine_args: Optional[dict] = None, + **kwargs: Any, + ) -> RDBMSDatabase: + db_url: str = ( + cls.connect_driver + + "://" + + CFG.LOCAL_DB_USER + + ":" + + CFG.LOCAL_DB_PASSWORD + + "@" + + CFG.LOCAL_DB_HOST + + ":" + + str(CFG.LOCAL_DB_PORT) + ) if cls.dialect: db_url = cls.dialect + "+" + db_url if db_name: @@ -81,7 +99,7 @@ class RDBMSDatabase(BaseConnect): @classmethod def from_uri( - cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any + cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any ) -> RDBMSDatabase: """Construct a SQLAlchemy engine from URI.""" _engine_args = engine_args or {} @@ -167,7 +185,7 @@ class RDBMSDatabase(BaseConnect): tbl for tbl in self._metadata.sorted_tables if tbl.name in set(all_table_names) - and not (self.dialect == "sqlite" and tbl.name.startswith("sqlite_")) + and not (self.dialect == "sqlite" and tbl.name.startswith("sqlite_")) ] tables = [] @@ -180,7 +198,7 @@ class RDBMSDatabase(BaseConnect): create_table = str(CreateTable(table).compile(self._engine)) table_info = 
f"{create_table.rstrip()}" has_extra_info = ( - self._indexes_in_table_info or self._sample_rows_in_table_info + self._indexes_in_table_info or self._sample_rows_in_table_info ) if has_extra_info: table_info += "\n\n/*" diff --git a/pilot/language/lang_content_mapping.py b/pilot/language/lang_content_mapping.py index bccc01224..54bd63e35 100644 --- a/pilot/language/lang_content_mapping.py +++ b/pilot/language/lang_content_mapping.py @@ -7,7 +7,7 @@ lang_dicts = { "learn_more_markdown": "该服务是仅供非商业用途的研究预览。受 Vicuna-13B 模型 [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) 的约束", "model_control_param": "模型参数", "sql_generate_mode_direct": "直接执行结果", - "sql_generate_mode_none": "db问答", + "sql_generate_mode_none": "DB问答", "max_input_token_size": "最大输出Token数", "please_choose_database": "请选择数据", "sql_generate_diagnostics": "SQL生成与诊断", @@ -44,7 +44,7 @@ lang_dicts = { "learn_more_markdown": "The service is a research preview intended for non-commercial use only. subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of Vicuna-13B", "model_control_param": "Model Parameters", "sql_generate_mode_direct": "Execute directly", - "sql_generate_mode_none": "db chat", + "sql_generate_mode_none": "DB chat", "max_input_token_size": "Maximum output token size", "please_choose_database": "Please choose database", "sql_generate_diagnostics": "SQL Generation & Diagnostics", diff --git a/pilot/model/adapter.py b/pilot/model/adapter.py index 9074a58be..2c1089cec 100644 --- a/pilot/model/adapter.py +++ b/pilot/model/adapter.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- import torch +import os from typing import List from functools import cache from transformers import ( @@ -107,7 +108,7 @@ class GuanacoAdapter(BaseLLMAdaper): def loader(self, model_path: str, from_pretrained_kwargs: dict): tokenizer = LlamaTokenizer.from_pretrained(model_path) model = AutoModelForCausalLM.from_pretrained( - model_path, load_in_4bit=True, device_map={"": 0}, **from_pretrained_kwargs + model_path, load_in_4bit=True, **from_pretrained_kwargs ) return model, tokenizer @@ -126,7 +127,6 @@ class FalconAdapater(BaseLLMAdaper): model_path, load_in_4bit=True, # quantize quantization_config=bnb_config, - device_map={"": 0}, trust_remote_code=True, **from_pretrained_kwagrs, ) @@ -134,7 +134,6 @@ class FalconAdapater(BaseLLMAdaper): model = AutoModelForCausalLM.from_pretrained( model_path, trust_remote_code=True, - device_map={"": 0}, **from_pretrained_kwagrs, ) return model, tokenizer @@ -185,18 +184,26 @@ class RWKV4LLMAdapter(BaseLLMAdaper): class GPT4AllAdapter(BaseLLMAdaper): - """A light version for someone who want practise LLM use laptop.""" + """ + A light version for someone who want practise LLM use laptop. 
+ All model names see: https://gpt4all.io/models/models.json + """ def match(self, model_path: str): return "gpt4all" in model_path def loader(self, model_path: str, from_pretrained_kwargs: dict): - # TODO - pass + import gpt4all + + if model_path is None and from_pretrained_kwargs.get("model_name") is None: + model = gpt4all.GPT4All("ggml-gpt4all-j-v1.3-groovy") + else: + path, file = os.path.split(model_path) + model = gpt4all.GPT4All(model_path=path, model_name=file) + return model, None class ProxyllmAdapter(BaseLLMAdaper): - """The model adapter for local proxy""" def match(self, model_path: str): @@ -211,6 +218,7 @@ register_llm_model_adapters(ChatGLMAdapater) register_llm_model_adapters(GuanacoAdapter) register_llm_model_adapters(FalconAdapater) register_llm_model_adapters(GorillaAdapter) +register_llm_model_adapters(GPT4AllAdapter) # TODO Default support vicuna, other model need to tests and Evaluate # just for test_py, remove this later diff --git a/pilot/model/llm_out/gpt4all_llm.py b/pilot/model/llm_out/gpt4all_llm.py new file mode 100644 index 000000000..7a39a8012 --- /dev/null +++ b/pilot/model/llm_out/gpt4all_llm.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python3 +# -*- coding:utf-8 -*- +import threading +import sys +import time + + +def gpt4all_generate_stream(model, tokenizer, params, device, max_position_embeddings): + stop = params.get("stop", "###") + prompt = params["prompt"] + role, query = prompt.split(stop)[1].split(":") + print(f"gpt4all, role: {role}, query: {query}") + + def worker(): + model.generate(prompt=query, streaming=True) + + t = threading.Thread(target=worker) + t.start() + + while t.is_alive(): + yield sys.stdout.output + time.sleep(0.01) + t.join() diff --git a/pilot/model/llm_out/proxy_llm.py b/pilot/model/llm_out/proxy_llm.py index 8e98ed4c9..4336d43e3 100644 --- a/pilot/model/llm_out/proxy_llm.py +++ b/pilot/model/llm_out/proxy_llm.py @@ -51,7 +51,7 @@ def proxyllm_generate_stream(model, tokenizer, params, device, context_len=2048) } ) - # Move the last user's information to the end + # Move the last user's information to the end temp_his = history[::-1] last_user_input = None for m in temp_his: @@ -76,7 +76,7 @@ def proxyllm_generate_stream(model, tokenizer, params, device, context_len=2048) text = "" for line in res.iter_lines(): if line: - json_data = line.split(b': ', 1)[1] + json_data = line.split(b": ", 1)[1] decoded_line = json_data.decode("utf-8") if decoded_line.lower() != "[DONE]".lower(): obj = json.loads(json_data) diff --git a/pilot/model/loader.py b/pilot/model/loader.py index 6fd6143ff..6acbc9234 100644 --- a/pilot/model/loader.py +++ b/pilot/model/loader.py @@ -73,12 +73,12 @@ class ModelLoader(metaclass=Singleton): elif self.device == "cuda": kwargs = {"torch_dtype": torch.float16} - num_gpus = int(num_gpus) + num_gpus = torch.cuda.device_count() if num_gpus != 1: kwargs["device_map"] = "auto" - if max_gpu_memory is None: - kwargs["device_map"] = "sequential" + # if max_gpu_memory is None: + # kwargs["device_map"] = "sequential" available_gpu_memory = get_gpu_memory(num_gpus) kwargs["max_memory"] = { diff --git a/pilot/out_parser/base.py b/pilot/out_parser/base.py index a32ef736b..96ddf64a0 100644 --- a/pilot/out_parser/base.py +++ b/pilot/out_parser/base.py @@ -51,7 +51,7 @@ class BaseOutputParser(ABC): """ TODO Multi mode output handler, rewrite this for multi model, use adapter mode. 
""" - if data["error_code"] == 0: + if data.get("error_code", 0) == 0: if "vicuna" in CFG.LLM_MODEL: # output = data["text"][skip_echo_len + 11:].strip() output = data["text"][skip_echo_len:].strip() @@ -121,17 +121,17 @@ class BaseOutputParser(ABC): raise ValueError("Model server error!code=" + respObj_ex["error_code"]) def __extract_json(slef, s): - i = s.index('{') + i = s.index("{") count = 1 # 当前所在嵌套深度,即还没闭合的'{'个数 - for j, c in enumerate(s[i + 1:], start=i + 1): - if c == '}': + for j, c in enumerate(s[i + 1 :], start=i + 1): + if c == "}": count -= 1 - elif c == '{': + elif c == "{": count += 1 if count == 0: break - assert (count == 0) # 检查是否找到最后一个'}' - return s[i:j + 1] + assert count == 0 # 检查是否找到最后一个'}' + return s[i : j + 1] def parse_prompt_response(self, model_out_text) -> T: """ diff --git a/pilot/scene/base_chat.py b/pilot/scene/base_chat.py index 1c8da0db5..0120b9e86 100644 --- a/pilot/scene/base_chat.py +++ b/pilot/scene/base_chat.py @@ -134,6 +134,7 @@ class BaseChat(ABC): return payload def stream_call(self): + # TODO Retry when server connection error payload = self.__call_base() self.skip_echo_len = len(payload.get("prompt").replace("", " ")) + 11 @@ -187,19 +188,19 @@ class BaseChat(ABC): ) ) -# ### MOCK -# ai_response_text = """{ -# "thoughts": "可以从users表和tran_order表联合查询,按城市和订单数量进行分组统计,并使用柱状图展示。", -# "reasoning": "为了分析用户在不同城市的分布情况,需要查询users表和tran_order表,使用LEFT JOIN将两个表联合起来。按照城市进行分组,统计每个城市的订单数量。使用柱状图展示可以直观地看出每个城市的订单数量,方便比较。", -# "speak": "根据您的分析目标,我查询了用户表和订单表,统计了每个城市的订单数量,并生成了柱状图展示。", -# "command": { -# "name": "histogram-executor", -# "args": { -# "title": "订单城市分布柱状图", -# "sql": "SELECT users.city, COUNT(tran_order.order_id) AS order_count FROM users LEFT JOIN tran_order ON users.user_name = tran_order.user_name GROUP BY users.city" -# } -# } -# }""" + # ### MOCK + # ai_response_text = """{ + # "thoughts": "可以从users表和tran_order表联合查询,按城市和订单数量进行分组统计,并使用柱状图展示。", + # "reasoning": "为了分析用户在不同城市的分布情况,需要查询users表和tran_order表,使用LEFT JOIN将两个表联合起来。按照城市进行分组,统计每个城市的订单数量。使用柱状图展示可以直观地看出每个城市的订单数量,方便比较。", + # "speak": "根据您的分析目标,我查询了用户表和订单表,统计了每个城市的订单数量,并生成了柱状图展示。", + # "command": { + # "name": "histogram-executor", + # "args": { + # "title": "订单城市分布柱状图", + # "sql": "SELECT users.city, COUNT(tran_order.order_id) AS order_count FROM users LEFT JOIN tran_order ON users.user_name = tran_order.user_name GROUP BY users.city" + # } + # } + # }""" self.current_message.add_ai_message(ai_response_text) prompt_define_response = ( diff --git a/pilot/scene/chat_execution/chat.py b/pilot/scene/chat_execution/chat.py index f91af967c..1dcb4c6ed 100644 --- a/pilot/scene/chat_execution/chat.py +++ b/pilot/scene/chat_execution/chat.py @@ -80,7 +80,6 @@ class ChatWithPlugin(BaseChat): def __list_to_prompt_str(self, list: List) -> str: return "\n".join(f"{i + 1 + 1}. 
{item}" for i, item in enumerate(list)) - def generate(self, p) -> str: return super().generate(p) diff --git a/pilot/scene/chat_execution/out_parser.py b/pilot/scene/chat_execution/out_parser.py index 44f203d1e..565d54c5e 100644 --- a/pilot/scene/chat_execution/out_parser.py +++ b/pilot/scene/chat_execution/out_parser.py @@ -31,7 +31,7 @@ class PluginChatOutputParser(BaseOutputParser): command, thoughts, speak = ( response["command"], response["thoughts"], - response["speak"] + response["speak"], ) return PluginAction(command, speak, thoughts) diff --git a/pilot/scene/chat_knowledge/default/chat.py b/pilot/scene/chat_knowledge/default/chat.py index 3f21b828d..6116deecd 100644 --- a/pilot/scene/chat_knowledge/default/chat.py +++ b/pilot/scene/chat_knowledge/default/chat.py @@ -56,7 +56,9 @@ class ChatDefaultKnowledge(BaseChat): context = context[:2000] input_values = {"context": context, "question": self.current_user_input} except NoIndexException: - raise ValueError("you have no default knowledge store, please execute python knowledge_init.py") + raise ValueError( + "you have no default knowledge store, please execute python knowledge_init.py" + ) return input_values def do_with_prompt_response(self, prompt_response): diff --git a/pilot/server/__init__.py b/pilot/server/__init__.py index 55f525988..ac72fc637 100644 --- a/pilot/server/__init__.py +++ b/pilot/server/__init__.py @@ -5,7 +5,6 @@ import sys from dotenv import load_dotenv - if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"): print("Setting random seed to 42") random.seed(42) diff --git a/pilot/server/chat_adapter.py b/pilot/server/chat_adapter.py index e4f57cf46..ebab2d2d4 100644 --- a/pilot/server/chat_adapter.py +++ b/pilot/server/chat_adapter.py @@ -37,7 +37,6 @@ def get_llm_chat_adapter(model_path: str) -> BaseChatAdpter: class VicunaChatAdapter(BaseChatAdpter): - """Model chat Adapter for vicuna""" def match(self, model_path: str): @@ -60,7 +59,6 @@ class ChatGLMChatAdapter(BaseChatAdpter): class CodeT5ChatAdapter(BaseChatAdpter): - """Model chat adapter for CodeT5""" def match(self, model_path: str): @@ -72,7 +70,6 @@ class CodeT5ChatAdapter(BaseChatAdpter): class CodeGenChatAdapter(BaseChatAdpter): - """Model chat adapter for CodeGen""" def match(self, model_path: str): @@ -127,11 +124,22 @@ class GorillaChatAdapter(BaseChatAdpter): return generate_stream +class GPT4AllChatAdapter(BaseChatAdpter): + def match(self, model_path: str): + return "gpt4all" in model_path + + def get_generate_stream_func(self): + from pilot.model.llm_out.gpt4all_llm import gpt4all_generate_stream + + return gpt4all_generate_stream + + register_llm_model_chat_adapter(VicunaChatAdapter) register_llm_model_chat_adapter(ChatGLMChatAdapter) register_llm_model_chat_adapter(GuanacoChatAdapter) register_llm_model_chat_adapter(FalconChatAdapter) register_llm_model_chat_adapter(GorillaChatAdapter) +register_llm_model_chat_adapter(GPT4AllChatAdapter) # Proxy model for test and develop, it's cheap for us now. 
register_llm_model_chat_adapter(ProxyllmChatAdapter) diff --git a/pilot/server/llmserver.py b/pilot/server/llmserver.py index a1dba135b..1e3a4dcb3 100644 --- a/pilot/server/llmserver.py +++ b/pilot/server/llmserver.py @@ -39,9 +39,13 @@ class ModelWorker: ) if not isinstance(self.model, str): - if hasattr(self.model.config, "max_sequence_length"): + if hasattr(self.model, "config") and hasattr( + self.model.config, "max_sequence_length" + ): self.context_len = self.model.config.max_sequence_length - elif hasattr(self.model.config, "max_position_embeddings"): + elif hasattr(self.model, "config") and hasattr( + self.model.config, "max_position_embeddings" + ): self.context_len = self.model.config.max_position_embeddings else: @@ -69,7 +73,10 @@ class ModelWorker: for output in self.generate_stream_func( self.model, self.tokenizer, params, DEVICE, CFG.MAX_POSITION_EMBEDDINGS ): - print("output: ", output) + # Please do not open the output in production! + # The gpt4all thread shares stdout with the parent process, + # and opening it may affect the frontend output. + # print("output: ", output) ret = { "text": output, "error_code": 0, @@ -79,6 +86,12 @@ class ModelWorker: except torch.cuda.CudaError: ret = {"text": "**GPU OutOfMemory, Please Refresh.**", "error_code": 0} yield json.dumps(ret).encode() + b"\0" + except Exception as e: + ret = { + "text": f"**LLMServer Generate Error, Please CheckErrorInfo.**: {e}", + "error_code": 0, + } + yield json.dumps(ret).encode() + b"\0" def get_embeddings(self, prompt): return get_embeddings(self.model, self.tokenizer, prompt) diff --git a/pilot/server/webserver.py b/pilot/server/webserver.py index e76865550..761a239e7 100644 --- a/pilot/server/webserver.py +++ b/pilot/server/webserver.py @@ -667,8 +667,8 @@ if __name__ == "__main__": args = parser.parse_args() logger.info(f"args: {args}") - - # init config + + # init config cfg = Config() load_native_plugins(cfg) @@ -682,7 +682,7 @@ if __name__ == "__main__": "pilot.commands.built_in.audio_text", "pilot.commands.built_in.image_gen", ] - # exclude commands + # exclude commands command_categories = [ x for x in command_categories if x not in cfg.disabled_command_categories ] diff --git a/pilot/source_embedding/markdown_embedding.py b/pilot/source_embedding/markdown_embedding.py index 5f6d9526d..d8caee959 100644 --- a/pilot/source_embedding/markdown_embedding.py +++ b/pilot/source_embedding/markdown_embedding.py @@ -30,7 +30,11 @@ class MarkdownEmbedding(SourceEmbedding): def read(self): """Load from markdown path.""" loader = EncodeTextLoader(self.file_path) - textsplitter = SpacyTextSplitter(pipeline='zh_core_web_sm', chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE, chunk_overlap=200) + textsplitter = SpacyTextSplitter( + pipeline="zh_core_web_sm", + chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE, + chunk_overlap=100, + ) return loader.load_and_split(textsplitter) @register diff --git a/pilot/source_embedding/pdf_embedding.py b/pilot/source_embedding/pdf_embedding.py index 66b0963d9..dd8c39c03 100644 --- a/pilot/source_embedding/pdf_embedding.py +++ b/pilot/source_embedding/pdf_embedding.py @@ -29,7 +29,9 @@ class PDFEmbedding(SourceEmbedding): # pdf=True, sentence_size=CFG.KNOWLEDGE_CHUNK_SIZE # ) textsplitter = SpacyTextSplitter( - pipeline="zh_core_web_sm", chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE, chunk_overlap=200 + pipeline="zh_core_web_sm", + chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE, + chunk_overlap=100, ) return loader.load_and_split(textsplitter) diff --git a/pilot/source_embedding/ppt_embedding.py 
index 869e92395..583b29ed1 100644
--- a/pilot/source_embedding/ppt_embedding.py
+++ b/pilot/source_embedding/ppt_embedding.py
@@ -25,7 +25,11 @@ class PPTEmbedding(SourceEmbedding):
     def read(self):
         """Load from ppt path."""
         loader = UnstructuredPowerPointLoader(self.file_path)
-        textsplitter = SpacyTextSplitter(pipeline='zh_core_web_sm', chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE, chunk_overlap=200)
+        textsplitter = SpacyTextSplitter(
+            pipeline="zh_core_web_sm",
+            chunk_size=CFG.KNOWLEDGE_CHUNK_SIZE,
+            chunk_overlap=200,
+        )
         return loader.load_and_split(textsplitter)
 
     @register
diff --git a/pilot/summary/db_summary_client.py b/pilot/summary/db_summary_client.py
index 84fbf1550..5e551514b 100644
--- a/pilot/summary/db_summary_client.py
+++ b/pilot/summary/db_summary_client.py
@@ -78,7 +78,7 @@ class DBSummaryClient:
             model_name=LLM_MODEL_CONFIG[CFG.EMBEDDING_MODEL],
             vector_store_config=vector_store_config,
         )
-        table_docs =knowledge_embedding_client.similar_search(query, topk)
+        table_docs = knowledge_embedding_client.similar_search(query, topk)
         ans = [d.page_content for d in table_docs]
         return ans
 
@@ -147,8 +147,6 @@ class DBSummaryClient:
         logger.info("init db profile success...")
 
 
-
-
 def _get_llm_response(query, db_input, dbsummary):
     chat_param = {
         "temperature": 0.7,
diff --git a/pilot/summary/mysql_db_summary.py b/pilot/summary/mysql_db_summary.py
index 4a578fe2c..08a01c0fc 100644
--- a/pilot/summary/mysql_db_summary.py
+++ b/pilot/summary/mysql_db_summary.py
@@ -43,15 +43,14 @@ CFG = Config()
 #     "tps": 50
 # }
 
+
 class MysqlSummary(DBSummary):
     """Get mysql summary template."""
 
     def __init__(self, name):
         self.name = name
         self.type = "MYSQL"
-        self.summery = (
-            """{{"database_name": "{name}", "type": "{type}", "tables": "{tables}", "qps": "{qps}", "tps": {tps}}}"""
-        )
+        self.summery = """{{"database_name": "{name}", "type": "{type}", "tables": "{tables}", "qps": "{qps}", "tps": {tps}}}"""
         self.tables = {}
         self.tables_info = []
         self.vector_tables_info = []
@@ -92,9 +91,12 @@ class MysqlSummary(DBSummary):
             self.tables[table_name] = table_summary.get_columns()
             self.table_columns_info.append(table_summary.get_columns())
             # self.table_columns_json.append(table_summary.get_summary_json())
-            table_profile = "table name:{table_name},table description:{table_comment}".format(
-                table_name=table_name, table_comment=self.db.get_show_create_table(table_name)
+            table_profile = (
+                "table name:{table_name},table description:{table_comment}".format(
+                    table_name=table_name,
+                    table_comment=self.db.get_show_create_table(table_name),
                )
+            )
             self.table_columns_json.append(table_profile)
             # self.tables_info.append(table_summary.get_summery())
 
@@ -108,7 +110,11 @@
 
     def get_db_summery(self):
         return self.summery.format(
-            name=self.name, type=self.type, tables=";".join(self.vector_tables_info), qps=1000, tps=1000
+            name=self.name,
+            type=self.type,
+            tables=";".join(self.vector_tables_info),
+            qps=1000,
+            tps=1000,
         )
 
     def get_table_summary(self):
@@ -153,7 +159,12 @@
         self.indexes_info.append(index_summary.get_summery())
 
         self.json_summery = self.json_summery_template.format(
-            name=name, comment=comment_map[name], fields=self.fields_info, indexes=self.indexes_info, size_in_bytes=1000, rows=1000
+            name=name,
+            comment=comment_map[name],
+            fields=self.fields_info,
+            indexes=self.indexes_info,
+            size_in_bytes=1000,
+            rows=1000,
         )
 
     def get_summery(self):
@@ -203,7 +214,9 @@ class MysqlIndexSummary(IndexSummary):
         self.bind_fields = index[1]
 
     def get_summery(self):
-        return self.summery_template.format(name=self.name, bind_fields=self.bind_fields)
+        return self.summery_template.format(
+            name=self.name, bind_fields=self.bind_fields
+        )
 
 
 if __name__ == "__main__":
diff --git a/requirements.txt b/requirements.txt
index 6bf2538ef..0604e5130 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -49,6 +49,7 @@
 llama-index==0.5.27
 pymysql
 unstructured==0.6.3
 grpcio==1.47.5
+gpt4all==0.3.0
 auto-gpt-plugin-template
 pymdown-extensions
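For context on the GPT4All support threaded through this diff: the `gpt4all==0.3.0` pin above backs the new `GPT4AllChatAdapter` registered in `pilot/server/chat_adapter.py`. The snippet below is a minimal, self-contained sketch of the adapter-registry pattern that change relies on; the registry internals and the example model path are illustrative assumptions, and only the `match()` / `register_llm_model_chat_adapter()` flow mirrors the diff.

```python
# Illustrative sketch only -- a simplified stand-in for the registry in
# pilot/server/chat_adapter.py, not the project's actual implementation.
from typing import Callable, List, Type


class BaseChatAdpter:  # spelling matches the project's class name
    """Base adapter: subclasses claim a model by matching on its path."""

    def match(self, model_path: str) -> bool:
        return False

    def get_generate_stream_func(self) -> Callable:
        raise NotImplementedError


llm_model_chat_adapters: List[Type[BaseChatAdpter]] = []


def register_llm_model_chat_adapter(cls: Type[BaseChatAdpter]) -> None:
    llm_model_chat_adapters.append(cls)


def get_llm_chat_adapter(model_path: str) -> BaseChatAdpter:
    # First registered adapter whose match() accepts the path wins,
    # so registration order matters.
    for adapter_cls in llm_model_chat_adapters:
        adapter = adapter_cls()
        if adapter.match(model_path):
            return adapter
    raise ValueError(f"Invalid model adapter for {model_path}")


class GPT4AllChatAdapter(BaseChatAdpter):
    def match(self, model_path: str) -> bool:
        # Substring check, as in the diff: any path containing "gpt4all".
        return "gpt4all" in model_path


register_llm_model_chat_adapter(GPT4AllChatAdapter)

if __name__ == "__main__":
    # Hypothetical model path, for illustration only.
    adapter = get_llm_chat_adapter("models/ggml-gpt4all-j-v1.3-groovy")
    print(type(adapter).__name__)  # -> GPT4AllChatAdapter
```

Because matching is substring-based and first-match-wins, adapters for specific model names are registered before generic fallbacks, which is why the diff appends `GPT4AllChatAdapter` ahead of the catch-all `ProxyllmChatAdapter`.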