mirror of https://github.com/csunny/DB-GPT.git (synced 2025-08-11 21:22:28 +00:00)

Merge branch 'main' into dev (commit dd5fc529e2)

# Conflicts:
#	pilot/connections/mysql.py
#	pilot/prompts/prompt_generator.py
#	pilot/scene/base_chat.py
#	pilot/scene/chat_db/chat.py
#	pilot/scene/chat_db/out_parser.py
#	pilot/scene/chat_execution/chat.py
#	pilot/server/webserver.py
@@ -18,7 +18,7 @@
 #** LLM MODELS **#
 #*******************************************************************#
 LLM_MODEL=vicuna-13b
-MODEL_SERVER=http://120.79.27.110:8000
+MODEL_SERVER=http://127.0.0.1:8000
 LIMIT_MODEL_CONCURRENCY=5
 MAX_POSITION_EMBEDDINGS=4096
@@ -92,3 +92,7 @@ VECTOR_STORE_TYPE=Chroma
 #MILVUS_USERNAME
 #MILVUS_PASSWORD
 #MILVUS_SECURE=
+
+LANGUAGE=en
+#LANGUAGE=zh
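For orientation, a minimal sketch of how these .env values are typically consumed on the Python side (this assumes the python-dotenv package; the project's own loader may differ):

```python
import os

from dotenv import load_dotenv  # assumption: python-dotenv or similar is used

load_dotenv()  # copy key=value pairs from .env into the process environment

LLM_MODEL = os.getenv("LLM_MODEL", "vicuna-13b")
MODEL_SERVER = os.getenv("MODEL_SERVER", "http://127.0.0.1:8000")
LANGUAGE = os.getenv("LANGUAGE", "en")  # same default as pilot/configs/config.py below
print(LLM_MODEL, MODEL_SERVER, LANGUAGE)
```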
.gitignore (vendored, 5 changes)
@@ -140,4 +140,7 @@ dmypy.json
 logs
 nltk_data
 .vectordb
 pilot/data/
+
+logswebserver.log.*
+.history/*
README.md (26 changes)
@@ -44,6 +44,11 @@ Run on an RTX 4090 GPU. [YouTube](https://www.youtube.com/watch?v=1PWI6F89LPo)
 <img src="./assets/demo_en.gif" width="600px" />
 </p>

+### Run Plugin
+<p align="center">
+  <img src="./assets/auto_sql_en.gif" width="600px" />
+</p>
+
 ### SQL Generation

 1. Generate Create Table SQL
@@ -185,11 +190,18 @@ We provide a user interface for Gradio, which allows you to use DB-GPT through our user interface.
 To use multiple models, modify the LLM_MODEL parameter in the .env configuration file to switch between them.

-#### Create your own knowledge repository:
+### Multi Language Usage
+
+To use a different language model, modify the LLM_MODEL parameter in the .env configuration file to switch between the models.
+
+In the .env configuration file, modify the LANGUAGE parameter to switch between languages; the default is English (Chinese: zh, English: en, other languages to be added).
+
+### Create your own knowledge repository:

 1. Place personal knowledge files or folders in the pilot/datasets directory.

-2. Run the knowledge repository script in the tools directory.
+2. In the .env configuration file, set your vector store type, e.g. VECTOR_STORE_TYPE=Chroma (Chroma and Milvus are currently supported; Milvus requires version > 2.1).
+
+3. Run the knowledge repository script in the tools directory:

 ```bash
 $ python tools/knowledge_init.py

@@ -199,7 +211,7 @@
 ```

-3. Add the knowledge repository in the interface by entering the name of your knowledge repository (if not specified, enter "default") so you can use it for Q&A based on your knowledge base.
+4. Add the knowledge repository in the interface by entering the name of your knowledge repository (if not specified, enter "default") so you can use it for Q&A based on your knowledge base.

 Note that the default vector model used is text2vec-large-chinese (a large model; if your personal computer's configuration is not sufficient, text2vec-base-chinese is recommended). Ensure that you download the model and place it in the models directory.
@@ -213,7 +225,7 @@ Run the Python interpreter and type the commands:

 ## Acknowledgement

-The achievements of this project are thanks to the technical community, especially the following projects:
+This project stands on the shoulders of giants and would not work without the open-source communities. Special thanks to the following projects for their excellent contributions to the AI industry:
 - [FastChat](https://github.com/lm-sys/FastChat) for providing chat services
 - [vicuna-13b](https://lmsys.org/blog/2023-03-30-vicuna/) as the base model
 - [langchain](https://langchain.readthedocs.io/) tool chain
@@ -232,8 +244,8 @@

 ## Contributors

-|[<img src="https://avatars.githubusercontent.com/u/17919400?v=4" width="100px;"/><br/><sub><b>csunny</b></sub>](https://github.com/csunny)<br/>|[<img src="https://avatars.githubusercontent.com/u/1011681?v=4" width="100px;"/><br/><sub><b>xudafeng</b></sub>](https://github.com/xudafeng)<br/>|[<img src="https://avatars.githubusercontent.com/u/7636723?s=96&v=4" width="100px;"/><br/><sub><b>明天</b></sub>](https://github.com/yhjun1026)<br/> | [<img src="https://avatars.githubusercontent.com/u/13723926?v=4" width="100px;"/><br/><sub><b>Aries-ckt</b></sub>](https://github.com/Aries-ckt)<br/>|[<img src="https://avatars.githubusercontent.com/u/95130644?v=4" width="100px;"/><br/><sub><b>thebigbone</b></sub>](https://github.com/thebigbone)<br/>|
-| :---: | :---: | :---: | :---: |:---: |
+|[<img src="https://avatars.githubusercontent.com/u/17919400?v=4" width="100px;"/><br/><sub><b>csunny</b></sub>](https://github.com/csunny)<br/>|[<img src="https://avatars.githubusercontent.com/u/1011681?v=4" width="100px;"/><br/><sub><b>xudafeng</b></sub>](https://github.com/xudafeng)<br/>|[<img src="https://avatars.githubusercontent.com/u/7636723?s=96&v=4" width="100px;"/><br/><sub><b>明天</b></sub>](https://github.com/yhjun1026)<br/> | [<img src="https://avatars.githubusercontent.com/u/13723926?v=4" width="100px;"/><br/><sub><b>Aries-ckt</b></sub>](https://github.com/Aries-ckt)<br/>|[<img src="https://avatars.githubusercontent.com/u/95130644?v=4" width="100px;"/><br/><sub><b>thebigbone</b></sub>](https://github.com/thebigbone)<br/>|[<img src="https://avatars.githubusercontent.com/u/26043513?v=4" width="100px;"/><br/><sub><b>Shinexy</b></sub>](https://github.com/xuyuan23)<br/> |
+| :---: | :---: | :---: | :---: |:---: |:---: |

 This project follows the git-contributor [spec](https://github.com/xudafeng/git-contributor), auto updated at `Fri May 19 2023 00:24:18 GMT+0800`.
@@ -245,4 +257,4 @@ This project follows the git-contributor [spec](https://github.com/xudafeng/git-contributor)

 The MIT License (MIT)

 ## Contact Information
-We are working on building a community, if you have any ideas about building the community, feel free to contact us. [Discord](https://discord.com/invite/twmZk3vv)
+We are working on building a community, if you have any ideas about building the community, feel free to contact us. [Discord](https://discord.gg/kMFf77FH)
README.zh.md (32 changes)
@@ -39,8 +39,9 @@ DB-GPT is an open-source, database-centric GPT experiment project that uses localized GPT large models to interact with your data and environment.
 <img src="./assets/演示.gif" width="600px" />
 </p>

 ### Plugin-based SQL Execution
 <p align="center">
-  <img src="./assets/Auto-DB-GPT.gif" width="600px" />
+  <img src="./assets/auto_sql.gif" width="600px" />
 </p>

 ### SQL Generation
@@ -187,20 +188,35 @@ $ python webserver.py

 ### Multi-model Usage
 In the .env configuration file, modify the LLM_MODEL parameter to switch between models.

+### Multi-language UI Mode
+In the .env configuration file, modify the LANGUAGE parameter to switch between languages; the default is English (Chinese: zh, English: en, other languages to be added).
+
 ### Build your own knowledge base:

 1. Place personal knowledge files or folders in the pilot/datasets directory.

-2. Run the knowledge ingestion script in the tools directory.
+2. In the .env file, set your vector store type via VECTOR_STORE_TYPE (default Chroma). Chroma and Milvus are currently supported; for Milvus, also set MILVUS_URL and MILVUS_PORT.
+
+Note that the Milvus version must be > 2.1.
+
+3. Run the knowledge ingestion script in the tools directory. If you use the default knowledge base, --vector_name is not required (it defaults to "default"):

 ```
 python tools/knowledge_init.py

 --vector_name : your vector store name    default_value: default
 --append      : append mode, True: append, False: not append    default_value: False
 ```

+To create a new knowledge base instead, enter its name in the interface, then run:
+
 ```
-3. Add your knowledge base in the interface by entering its name (enter "default" if none was specified), and you can then ask questions against it.
+python tools/knowledge_init.py --vector_name=yourname
+
+--vector_name: vector_name    default_value: default
 ```
+You can then ask questions based on your knowledge base.

 Note that the default vector model is text2vec-large-chinese (a fairly large model; if your personal computer's configuration is limited, text2vec-base-chinese is recommended), so make sure to download the model and place it in the models directory.
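The two flags documented above suggest a small CLI surface for tools/knowledge_init.py. A hypothetical argparse sketch of that surface (only --vector_name and --append are taken from the README; the real script may accept an explicit True/False value for --append, and everything else here is illustrative):

```python
import argparse

parser = argparse.ArgumentParser(
    description="Ingest files from pilot/datasets into the configured vector store"
)
parser.add_argument("--vector_name", default="default",
                    help="target vector store name (default: default)")
parser.add_argument("--append", action="store_true",
                    help="append to an existing store instead of rebuilding")
args = parser.parse_args()
print(f"ingesting into '{args.vector_name}' (append={args.append})")
```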
@@ -233,8 +249,8 @@ Run the Python interpreter and type the commands:

 ## Contributors

-|[<img src="https://avatars.githubusercontent.com/u/17919400?v=4" width="100px;"/><br/><sub><b>csunny</b></sub>](https://github.com/csunny)<br/>|[<img src="https://avatars.githubusercontent.com/u/1011681?v=4" width="100px;"/><br/><sub><b>xudafeng</b></sub>](https://github.com/xudafeng)<br/>|[<img src="https://avatars.githubusercontent.com/u/7636723?s=96&v=4" width="100px;"/><br/><sub><b>明天</b></sub>](https://github.com/yhjun1026)<br/> | [<img src="https://avatars.githubusercontent.com/u/13723926?v=4" width="100px;"/><br/><sub><b>Aries-ckt</b></sub>](https://github.com/Aries-ckt)<br/>|[<img src="https://avatars.githubusercontent.com/u/95130644?v=4" width="100px;"/><br/><sub><b>thebigbone</b></sub>](https://github.com/thebigbone)<br/>|
-| :---: | :---: | :---: | :---: |:---: |
+|[<img src="https://avatars.githubusercontent.com/u/17919400?v=4" width="100px;"/><br/><sub><b>csunny</b></sub>](https://github.com/csunny)<br/>|[<img src="https://avatars.githubusercontent.com/u/1011681?v=4" width="100px;"/><br/><sub><b>xudafeng</b></sub>](https://github.com/xudafeng)<br/>|[<img src="https://avatars.githubusercontent.com/u/7636723?s=96&v=4" width="100px;"/><br/><sub><b>明天</b></sub>](https://github.com/yhjun1026)<br/> | [<img src="https://avatars.githubusercontent.com/u/13723926?v=4" width="100px;"/><br/><sub><b>Aries-ckt</b></sub>](https://github.com/Aries-ckt)<br/>|[<img src="https://avatars.githubusercontent.com/u/95130644?v=4" width="100px;"/><br/><sub><b>thebigbone</b></sub>](https://github.com/thebigbone)<br/>|[<img src="https://avatars.githubusercontent.com/u/26043513?v=4" width="100px;"/><br/><sub><b>Shinexy</b></sub>](https://github.com/xuyuan23)<br/> |
+| :---: | :---: | :---: | :---: |:---: |:---: |

 [git-contributor spec](https://github.com/xudafeng/git-contributor), auto-generated at `Fri May 19 2023 00:24:18 GMT+0800`.
BIN assets/auto_sql.gif (new file, binary not shown; after: 3.5 MiB)
BIN assets/auto_sql_en.gif (new file, binary not shown; after: 3.1 MiB)
@@ -7,16 +7,14 @@
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

-import toml
 import os
 import sys

 project = "DB-GPT"
 copyright = "2023, csunny"
 author = "csunny"

-with open("../pyproject.toml") as f:
-    data = toml.load(f)
-
-version = data["tool"]["poetry"]["version"]
-release = version
+version = "0.1.0"
 html_title = project + " " + version

 # -- General configuration ---------------------------------------------------
@@ -89,7 +89,7 @@ Use Cases

 | Best Practices and built-in implementations for common DB-GPT use cases:

-- `Sql generation and diagnosis <./use_cases/sql_generation_and_diagnosis.html>`: SQL generation and diagnosis.
+- `Sql generation and diagnosis <./use_cases/sql_generation_and_diagnosis.html>`_: SQL generation and diagnosis.

 - `knownledge Based QA <./use_cases/knownledge_based_qa.html>`_: An important scene for users to chat with database documents, codes, bugs and schemas.
@@ -10,6 +10,9 @@ As the knowledge base is currently the most significant user demand scenario, we

 1. Place personal knowledge files or folders in the pilot/datasets directory.

+2. Update your .env and set your vector store type, VECTOR_STORE_TYPE=Chroma
+   (currently only Chroma and Milvus are supported; if you set Milvus, also set MILVUS_URL and MILVUS_PORT).

-2. Run the knowledge repository script in the tools directory.
+3. Run the knowledge repository script in the tools directory.

 ```
@@ -17,6 +17,9 @@ class Config(metaclass=Singleton):
     def __init__(self) -> None:
         """Initialize the Config class"""

+        # Gradio language version: en, zh
+        self.LANGUAGE = os.getenv("LANGUAGE", "en")
+
         self.debug_mode = False
         self.skip_reprompt = False
         self.temperature = float(os.getenv("TEMPERATURE", 0.7))
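Config is declared with metaclass=Singleton, so the CFG = Config() instances created across modules (webserver.py, translation_handler.py, and so on) all share this LANGUAGE value. A minimal sketch of that pattern, assuming the usual registry-based singleton metaclass:

```python
import os


class Singleton(type):
    """Metaclass that hands back one shared instance per class (a sketch of
    the pattern implied by `class Config(metaclass=Singleton)` above)."""

    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]


class Config(metaclass=Singleton):
    def __init__(self) -> None:
        # Read once; every later Config() call returns this same object.
        self.LANGUAGE = os.getenv("LANGUAGE", "en")


assert Config() is Config()  # one process-wide configuration object
```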
@@ -5,6 +5,7 @@ import dataclasses
 import uuid
 from enum import auto, Enum
 from typing import List, Any
+from pilot.language.translation_handler import get_lang_text

 from pilot.configs.config import Config

@@ -263,15 +264,17 @@ conv_qa_prompt_template = """ 基于以下已知的信息, 专业、简要的回
 default_conversation = conv_one_shot

 conversation_sql_mode = {
-    "auto_execute_ai_response": "直接执行结果",
-    "dont_execute_ai_response": "不直接执行结果",
+    "auto_execute_ai_response": get_lang_text("sql_generate_mode_direct"),
+    "dont_execute_ai_response": get_lang_text("sql_generate_mode_none"),
 }

 conversation_types = {
-    "native": "LLM原生对话",
-    "default_knownledge": "默认知识库对话",
-    "custome": "新增知识库对话",
-    "auto_execute_plugin": "对话使用插件",
+    "native": get_lang_text("knowledge_qa_type_llm_native_dialogue"),
+    "default_knownledge": get_lang_text(
+        "knowledge_qa_type_default_knowledge_base_dialogue"
+    ),
+    "custome": get_lang_text("knowledge_qa_type_add_knowledge_base_dialogue"),
+    "auto_execute_plugin": get_lang_text("dialogue_use_plugin"),
 }

 conv_templates = {
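The effect of the swap: the label dictionaries are now resolved once at import time through the language table added below. For example:

```python
from pilot.language.translation_handler import get_lang_text

conversation_types = {"native": get_lang_text("knowledge_qa_type_llm_native_dialogue")}
# LANGUAGE=zh -> {"native": "LLM原生对话"}
# LANGUAGE=en -> {"native": "LLM native dialogue"}
# (values from lang_dicts in pilot/language/lang_content_mapping.py below)
```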
pilot/language/lang_content_mapping.py (new file, 74 lines)
@@ -0,0 +1,74 @@
+# For now the translations are configured in this file; in the long run they will be stored in the default database, with support for configuring multiple languages.
+
+lang_dicts = {
+    "zh": {
+        "unique_id": "中文内容",
+        "db_gpt_introduction": "[DB-GPT](https://github.com/csunny/DB-GPT) 是一个开源的以数据库为基础的GPT实验项目,使用本地化的GPT大模型与您的数据和环境进行交互,无数据泄露风险,100% 私密,100% 安全。",
+        "learn_more_markdown": "该服务是仅供非商业用途的研究预览。受 Vicuna-13B 模型 [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) 的约束",
+        "model_control_param": "模型参数",
+        "sql_generate_mode_direct": "直接执行结果",
+        "sql_generate_mode_none": "不直接执行结果",
+        "max_input_token_size": "最大输出Token数",
+        "please_choose_database": "请选择数据",
+        "sql_generate_diagnostics": "SQL生成与诊断",
+        "knowledge_qa_type_llm_native_dialogue": "LLM原生对话",
+        "knowledge_qa_type_default_knowledge_base_dialogue": "默认知识库对话",
+        "knowledge_qa_type_add_knowledge_base_dialogue": "新增知识库对话",
+        "dialogue_use_plugin": "对话使用插件",
+        "create_knowledge_base": "新建知识库",
+        "sql_schema_info": "数据库{}的Schema信息如下: {}\n",
+        "current_dialogue_mode": "当前对话模式",
+        "database_smart_assistant": "数据库智能助手",
+        "sql_vs_setting": "自动执行模式下, DB-GPT可以具备执行SQL、从网络读取知识自动化存储学习的能力",
+        "knowledge_qa": "知识问答",
+        "configure_knowledge_base": "配置知识库",
+        "new_klg_name": "新知识库名称",
+        "add_as_new_klg": "添加为新知识库",
+        "add_file_to_klg": "向知识库中添加文件",
+        "upload_file": "上传文件",
+        "add_file": "添加文件",
+        "upload_and_load_to_klg": "上传并加载到知识库",
+        "upload_folder": "上传文件夹",
+        "add_folder": "添加文件夹",
+        "send": "发送",
+        "regenerate": "重新生成",
+        "clear_box": "清理",
+    },
+    "en": {
+        "unique_id": "English Content",
+        "db_gpt_introduction": "[DB-GPT](https://github.com/csunny/DB-GPT) is an experimental open-source project that uses localized GPT large models to interact with your data and environment. With this solution, you can be assured that there is no risk of data leakage, and your data is 100% private and secure.",
+        "learn_more_markdown": "The service is a research preview intended for non-commercial use only. subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of Vicuna-13B",
+        "model_control_param": "Model Parameters",
+        "sql_generate_mode_direct": "Execute directly",
+        "sql_generate_mode_none": "Execute without model",
+        "max_input_token_size": "Maximum output token size",
+        "please_choose_database": "Please choose database",
+        "sql_generate_diagnostics": "SQL Generation & Diagnostics",
+        "knowledge_qa_type_llm_native_dialogue": "LLM native dialogue",
+        "knowledge_qa_type_default_knowledge_base_dialogue": "Default documents",
+        "knowledge_qa_type_add_knowledge_base_dialogue": "Added documents",
+        "dialogue_use_plugin": "Dialogue Extension",
+        "create_knowledge_base": "Create Knowledge Base",
+        "sql_schema_info": "the schema information of database {}: {}\n",
+        "current_dialogue_mode": "Current dialogue mode",
+        "database_smart_assistant": "Database smart assistant",
+        "sql_vs_setting": "In the automatic execution mode, DB-GPT can have the ability to execute SQL, read data from the network, automatically store and learn",
+        "knowledge_qa": "Documents QA",
+        "configure_knowledge_base": "Configure Documents",
+        "new_klg_name": "New document name",
+        "add_as_new_klg": "Add as new documents",
+        "add_file_to_klg": "Add file to documents",
+        "upload_file": "Upload file",
+        "add_file": "Add file",
+        "upload_and_load_to_klg": "Upload and load to documents",
+        "upload_folder": "Upload folder",
+        "add_folder": "Add folder",
+        "send": "Send",
+        "regenerate": "Regenerate",
+        "clear_box": "Clear",
+    },
+}
+
+
+def get_lang_content(key, language="zh"):
+    return lang_dicts.get(language, {}).get(key, "")
pilot/language/translation_handler.py (new file, 8 lines)
@@ -0,0 +1,8 @@
+from pilot.configs.config import Config
+from pilot.language.lang_content_mapping import get_lang_content
+
+CFG = Config()
+
+
+def get_lang_text(key):
+    return get_lang_content(key, CFG.LANGUAGE)
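Usage is a plain key lookup routed through the shared Config:

```python
from pilot.language.translation_handler import get_lang_text

# LANGUAGE=en -> get_lang_text("send") == "Send"
# LANGUAGE=zh -> get_lang_text("send") == "发送"
# Unknown keys (or an unknown language) fall back to "" via .get(key, "").
# Note: get_lang_content defaults its language argument to "zh", but
# get_lang_text always passes CFG.LANGUAGE, whose own default is "en".
print(get_lang_text("send"))
```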
@@ -5,6 +5,7 @@ from langchain.prompts import PromptTemplate

 from pilot.configs.model_config import VECTOR_SEARCH_TOP_K
 from pilot.conversation import conv_qa_prompt_template
+from pilot.logs import logger
 from pilot.model.vicuna_llm import VicunaLLM
 from pilot.vector_store.file_loader import KnownLedge2Vector

@@ -28,3 +29,27 @@ class KnownLedgeBaseQA:
         context = [d.page_content for d in docs]
         result = prompt.format(context="\n".join(context), question=query)
         return result
+
+    @staticmethod
+    def build_knowledge_prompt(query, docs, state):
+        prompt_template = PromptTemplate(
+            template=conv_qa_prompt_template, input_variables=["context", "question"]
+        )
+        context = [d.page_content for d in docs]
+        result = prompt_template.format(context="\n".join(context), question=query)
+        state.messages[-2][1] = result
+        prompt = state.get_prompt()
+
+        if len(prompt) > 4000:
+            logger.info("prompt length greater than 4000, rebuild")
+            context = context[:2000]
+            prompt_template = PromptTemplate(
+                template=conv_qa_prompt_template,
+                input_variables=["context", "question"],
+            )
+            result = prompt_template.format(context="\n".join(context), question=query)
+            state.messages[-2][1] = result
+            prompt = state.get_prompt()
+            print("new prompt length:" + str(len(prompt)))
+
+        return prompt
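One detail worth flagging in build_knowledge_prompt: `context = context[:2000]` shortens the list of page contents to 2,000 entries, not the joined text to 2,000 characters, so the rebuild only shrinks the prompt when more than 2,000 documents were retrieved. If the intent is a character budget, a sketch of that variant (an assumption about intent, not what this diff does):

```python
# Character-based truncation variant (assumption; the diff truncates the list):
joined = "\n".join(d.page_content for d in docs)[:2000]
result = prompt_template.format(context=joined, question=query)
```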
@@ -13,7 +13,7 @@ from urllib.parse import urljoin

 import gradio as gr
 import requests
-from langchain import PromptTemplate

 ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 sys.path.append(ROOT_PATH)

@@ -56,7 +56,11 @@ from pilot.vector_store.extract_tovec import (
 from pilot.commands.command import execute_ai_response_json
 from pilot.scene.base import ChatScene
 from pilot.scene.chat_factory import ChatFactory
+from pilot.language.translation_handler import get_lang_text

-# Load plugins
-CFG = Config()
 logger = build_logger("webserver", LOGDIR + "webserver.log")
 headers = {"User-Agent": "dbgpt Client"}
@@ -67,15 +71,13 @@ disable_btn = gr.Button.update(interactive=True)
 enable_moderation = False
 models = []
 dbs = []
-vs_list = ["新建知识库"] + get_vector_storelist()
+vs_list = [get_lang_text("create_knowledge_base")] + get_vector_storelist()
 autogpt = False
 vector_store_client = None
 vector_store_name = {"vs_name": ""}

 priority = {"vicuna-13b": "aaa"}

+# Load plugins
+CFG = Config()
 CHAT_FACTORY = ChatFactory()

 DB_SETTINGS = {
@@ -86,6 +88,20 @@ DB_SETTINGS = {
 }

+llm_native_dialogue = get_lang_text("knowledge_qa_type_llm_native_dialogue")
+default_knowledge_base_dialogue = get_lang_text(
+    "knowledge_qa_type_default_knowledge_base_dialogue"
+)
+add_knowledge_base_dialogue = get_lang_text(
+    "knowledge_qa_type_add_knowledge_base_dialogue"
+)
+knowledge_qa_type_list = [
+    llm_native_dialogue,
+    default_knowledge_base_dialogue,
+    add_knowledge_base_dialogue,
+]
+

 def get_simlar(q):
     docsearch = knownledge_tovec_st(os.path.join(DATASETS_DIR, "plan.md"))
     docs = docsearch.similarity_search_with_score(q, k=1)
@@ -100,7 +116,7 @@ def gen_sqlgen_conversation(dbname):
     schemas = CFG.local_db.table_simple_info(db_connect)
     for s in schemas:
         message += s["schema_info"] + ";"
-    return f"数据库{dbname}的Schema信息如下: {message}\n"
+    return get_lang_text("sql_schema_info").format(dbname, message)


 def plugins_select_info():
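Both language variants of sql_schema_info are positional format strings taking (dbname, message), so the swap is behavior-preserving:

```python
# zh: "数据库{}的Schema信息如下: {}\n"
# en: "the schema information of database {}: {}\n"
template = "the schema information of database {}: {}\n"
print(template.format("mydb", "users(id INT, name VARCHAR);"))
# -> the schema information of database mydb: users(id INT, name VARCHAR);
```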
@@ -127,6 +143,7 @@ function() {
 def load_demo(url_params, request: gr.Request):
     logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")

+    # dbs = get_database_list()
     dropdown_update = gr.Dropdown.update(visible=True)
     if dbs:
         gr.Dropdown.update(choices=dbs)
@@ -213,7 +230,7 @@ def http_bot(
 ):
     logger.info(f"User message send!{state.conv_id},{selected},{mode},{sql_mode},{db_selector},{plugin_selector}")
     start_tstamp = time.time()
-    scene: ChatScene = get_chat_mode(selected, mode, sql_mode, db_selector)
+    scene: ChatScene = get_chat_mode(mode, sql_mode, db_selector)
     print(f"当前对话模式:{scene.value}")
     model_name = CFG.LLM_MODEL
@@ -222,7 +239,7 @@
     chat_param = {
         "chat_session_id": state.conv_id,
         "db_name": db_selector,
-        "current_user_input": state.last_user_input,
+        "user_input": state.last_user_input,
     }
     chat: BaseChat = CHAT_FACTORY.get_implementation(scene.value, **chat_param)
     chat.call()
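ChatFactory.get_implementation dispatches on scene.value and forwards chat_param as keyword arguments. Its internals are not part of this diff; a registry-style sketch of the pattern, with hypothetical names:

```python
# Hypothetical sketch only; the real ChatFactory/BaseChat are not shown in this diff.
class BaseChat:
    def __init__(self, chat_session_id, db_name, user_input):
        self.chat_session_id = chat_session_id
        self.db_name = db_name
        self.user_input = user_input


class ChatFactory:
    _impls = {}  # maps a scene value to a BaseChat subclass

    @classmethod
    def register(cls, scene_value):
        def decorator(impl):
            cls._impls[scene_value] = impl
            return impl
        return decorator

    @classmethod
    def get_implementation(cls, scene_value, **chat_param):
        return cls._impls[scene_value](**chat_param)
```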
@@ -241,6 +258,7 @@
         yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5

     else:
         dbname = db_selector
         # TODO: the request here needs to incorporate the existing knowledge base so answers are grounded in it; the prompt still needs tuning
         if state.skip_next:
@@ -290,18 +308,25 @@
-                "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
-            },
-        )
-        query = state.messages[-2][1]
-        docs = knowledge_embedding_client.similar_search(query, 1)
-        context = [d.page_content for d in docs]
-        prompt_template = PromptTemplate(
-            template=conv_qa_prompt_template,
-            input_variables=["context", "question"],
-        )
-        result = prompt_template.format(context="\n".join(context), question=query)
-        state.messages[-2][1] = result
-        prompt = state.get_prompt()
+        print("vector store name: ", vector_store_name["vs_name"])
+        vector_store_config = {
+            "vector_store_name": vector_store_name["vs_name"],
+            "text_field": "content",
+            "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
+        }
+        knowledge_embedding_client = KnowledgeEmbedding(
+            file_path="",
+            model_name=LLM_MODEL_CONFIG["text2vec"],
+            local_persist=False,
+            vector_store_config=vector_store_config,
+        )
+        query = state.messages[-2][1]
+        docs = knowledge_embedding_client.similar_search(query, VECTOR_SEARCH_TOP_K)
+        prompt = KnownLedgeBaseQA.build_knowledge_prompt(query, docs, state)
+        state.messages[-2][1] = query

     skip_echo_len = len(prompt.replace("</s>", " ")) + 1

     # Make requests
     payload = {
         "model": model_name,
@@ -346,7 +371,56 @@
             enable_btn,
         )
         return

+    try:
+        # Stream output
+        response = requests.post(
+            urljoin(CFG.MODEL_SERVER, "generate_stream"),
+            headers=headers,
+            json=payload,
+            stream=True,
+            timeout=20,
+        )
+        for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
+            if chunk:
+                data = json.loads(chunk.decode())
+
+                """ TODO Multi mode output handler, rewrite this for multi model, use adapter mode.
+                """
+                if data["error_code"] == 0:
+                    if "vicuna" in CFG.LLM_MODEL:
+                        output = data["text"][skip_echo_len:].strip()
+                    else:
+                        output = data["text"].strip()
+
+                    output = post_process_code(output)
+                    state.messages[-1][-1] = output + "▌"
+                    yield (state, state.to_gradio_chatbot()) + (
+                        disable_btn,
+                    ) * 5
+                else:
+                    output = (
+                        data["text"] + f" (error_code: {data['error_code']})"
+                    )
+                    state.messages[-1][-1] = output
+                    yield (state, state.to_gradio_chatbot()) + (
+                        disable_btn,
+                        disable_btn,
+                        disable_btn,
+                        enable_btn,
+                        enable_btn,
+                    )
+                    return
+
+    except requests.exceptions.RequestException as e:
+        state.messages[-1][-1] = server_error_msg + f" (error_code: 4)"
+        yield (state, state.to_gradio_chatbot()) + (
+            disable_btn,
+            disable_btn,
+            disable_btn,
+            enable_btn,
+            enable_btn,
+        )
+        return
-    except requests.exceptions.RequestException as e:
-        state.messages[-1][-1] = server_error_msg + f" (error_code: 4)"
-        yield (state, state.to_gradio_chatbot()) + (
@@ -358,24 +432,24 @@
-            disable_btn,
-            disable_btn,
-            disable_btn,
-            enable_btn,
-            enable_btn,
-        )
-        return

-    state.messages[-1][-1] = state.messages[-1][-1][:-1]
-    yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5
+        state.messages[-1][-1] = state.messages[-1][-1][:-1]
+        yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5

-    # Log the run
-    finish_tstamp = time.time()
-    logger.info(f"{output}")
+        # Log the run
+        finish_tstamp = time.time()
+        logger.info(f"{output}")

-    with open(get_conv_log_filename(), "a") as fout:
-        data = {
-            "tstamp": round(finish_tstamp, 4),
-            "type": "chat",
-            "model": model_name,
-            "start": round(start_tstamp, 4),
-            "finish": round(start_tstamp, 4),
-            "state": state.dict(),
-            "ip": request.client.host,
-        }
-        fout.write(json.dumps(data) + "\n")
+        with open(get_conv_log_filename(), "a") as fout:
+            data = {
+                "tstamp": round(finish_tstamp, 4),
+                "type": "chat",
+                "model": model_name,
+                "start": round(start_tstamp, 4),
+                "finish": round(start_tstamp, 4),
+                "state": state.dict(),
+                "ip": request.client.host,
+            }
+            fout.write(json.dumps(data) + "\n")
@@ -396,14 +470,14 @@ block_css = (

 def change_sql_mode(sql_mode):
-    if sql_mode in ["直接执行结果"]:
+    if sql_mode in [get_lang_text("sql_generate_mode_direct")]:
         return gr.update(visible=True)
     else:
         return gr.update(visible=False)


 def change_mode(mode):
-    if mode in ["默认知识库对话", "LLM原生对话"]:
+    if mode in [default_knowledge_base_dialogue, llm_native_dialogue]:
         return gr.update(visible=False)
     else:
         return gr.update(visible=True)
@@ -413,27 +487,16 @@ def change_tab():
     autogpt = True


-def change_func(xx):
-    print("123")
-    print(str(xx))
-
-
 def build_single_model_ui():
-    notice_markdown = """
-    # DB-GPT
-
-    [DB-GPT](https://github.com/csunny/DB-GPT) 是一个开源的以数据库为基础的GPT实验项目,使用本地化的GPT大模型与您的数据和环境进行交互,无数据泄露风险,100% 私密,100% 安全。
-    """
-    learn_more_markdown = """
-    ### Licence
-    The service is a research preview intended for non-commercial use only. subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of Vicuna-13B
-    """
+    notice_markdown = get_lang_text("db_gpt_introduction")
+    learn_more_markdown = get_lang_text("learn_more_markdown")

     state = gr.State()

     gr.Markdown(notice_markdown, elem_id="notice_markdown")

-    with gr.Accordion("参数", open=False, visible=False) as parameter_row:
+    with gr.Accordion(
+        get_lang_text("model_control_param"), open=False, visible=False
+    ) as parameter_row:
         temperature = gr.Slider(
             minimum=0.0,
             maximum=1.0,
@@ -449,7 +512,7 @@ def build_single_model_ui():
             value=512,
             step=64,
             interactive=True,
-            label="最大输出Token数",
+            label=get_lang_text("max_input_token_size"),
         )

     tabs = gr.Tabs()
@@ -462,24 +525,30 @@ def build_single_model_ui():
     tabs.select(on_select, None, selected)

     with tabs:
-        tab_sql = gr.TabItem("SQL生成与诊断", elem_id="SQL")
-        tab_sql.select(on_select, None, None)
+        tab_sql = gr.TabItem(get_lang_text("sql_generate_diagnostics"), elem_id="SQL")
         with tab_sql:
             print("tab_sql in...")
             # TODO A selector to choose database
             with gr.Row(elem_id="db_selector"):
                 db_selector = gr.Dropdown(
-                    label="请选择数据库",
+                    label=get_lang_text("please_choose_database"),
                     choices=dbs,
                     value=dbs[0] if len(models) > 0 else "",
                     interactive=True,
                     show_label=True,
                 ).style(container=False)

-            sql_mode = gr.Radio(["直接执行结果", "不执行结果"], show_label=False, value="不执行结果")
-            sql_vs_setting = gr.Markdown("自动执行模式下, DB-GPT可以具备执行SQL、从网络读取知识自动化存储学习的能力")
+            sql_mode = gr.Radio(
+                [
+                    get_lang_text("sql_generate_mode_direct"),
+                    get_lang_text("sql_generate_mode_none"),
+                ],
+                show_label=False,
+                value=get_lang_text("sql_generate_mode_none"),
+            )
+            sql_vs_setting = gr.Markdown(get_lang_text("sql_vs_setting"))
             sql_mode.change(fn=change_sql_mode, inputs=sql_mode, outputs=sql_vs_setting)

+        tab_qa = gr.TabItem(get_lang_text("knowledge_qa"), elem_id="QA")
         tab_plugin = gr.TabItem("插件模式", elem_id="PLUGIN")
         # tab_plugin.select(change_func)
         with tab_plugin:
@@ -502,37 +571,50 @@ def build_single_model_ui():
             plugin_selected = gr.Textbox(show_label=False, visible=False, placeholder="Selected")
             plugin_selector.select(plugin_change, None, plugin_selected)

-        tab_qa = gr.TabItem("知识问答", elem_id="QA")
+        tab_qa = gr.TabItem(get_lang_text("knowledge_qa"), elem_id="QA")
         with tab_qa:
             print("tab_qa in...")
             mode = gr.Radio(
-                ["LLM原生对话", "默认知识库对话", "新增知识库对话"], show_label=False, value="LLM原生对话"
+                [
+                    llm_native_dialogue,
+                    default_knowledge_base_dialogue,
+                    add_knowledge_base_dialogue,
+                ],
+                show_label=False,
+                value=llm_native_dialogue,
             )
-            vs_setting = gr.Accordion("配置知识库", open=False)
+            vs_setting = gr.Accordion(
+                get_lang_text("configure_knowledge_base"), open=False
+            )
             mode.change(fn=change_mode, inputs=mode, outputs=vs_setting)
             with vs_setting:
-                vs_name = gr.Textbox(label="新知识库名称", lines=1, interactive=True)
-                vs_add = gr.Button("添加为新知识库")
+                vs_name = gr.Textbox(
+                    label=get_lang_text("new_klg_name"), lines=1, interactive=True
+                )
+                vs_add = gr.Button(get_lang_text("add_as_new_klg"))
                 with gr.Column() as doc2vec:
-                    gr.Markdown("向知识库中添加文件")
-                    with gr.Tab("上传文件"):
+                    gr.Markdown(get_lang_text("add_file_to_klg"))
+                    with gr.Tab(get_lang_text("upload_file")):
                         files = gr.File(
-                            label="添加文件",
+                            label=get_lang_text("add_file"),
                             file_types=[".txt", ".md", ".docx", ".pdf"],
                             file_count="multiple",
                             allow_flagged_uploads=True,
                             show_label=False,
                         )

-                        load_file_button = gr.Button("上传并加载到知识库")
-                    with gr.Tab("上传文件夹"):
+                        load_file_button = gr.Button(
+                            get_lang_text("upload_and_load_to_klg")
+                        )
+                    with gr.Tab(get_lang_text("upload_folder")):
                         folder_files = gr.File(
-                            label="添加文件夹",
+                            label=get_lang_text("add_folder"),
                             accept_multiple_files=True,
                             file_count="directory",
                             show_label=False,
                         )
-                        load_folder_button = gr.Button("上传并加载到知识库")
+                        load_folder_button = gr.Button(
+                            get_lang_text("upload_and_load_to_klg")
+                        )

     with gr.Blocks():
         chatbot = grChatbot(elem_id="chatbot", visible=False).style(height=550)
@@ -544,11 +626,11 @@ def build_single_model_ui():
                 visible=False,
             ).style(container=False)
         with gr.Column(scale=2, min_width=50):
-            send_btn = gr.Button(value="发送", visible=False)
+            send_btn = gr.Button(value=get_lang_text("send"), visible=False)

     with gr.Row(visible=False) as button_row:
-        regenerate_btn = gr.Button(value="重新生成", interactive=False)
-        clear_btn = gr.Button(value="清理", interactive=False)
+        regenerate_btn = gr.Button(value=get_lang_text("regenerate"), interactive=False)
+        clear_btn = gr.Button(value=get_lang_text("clear_box"), interactive=False)

     gr.Markdown(learn_more_markdown)
     btn_list = [regenerate_btn, clear_btn]
@@ -594,10 +676,10 @@ def build_single_model_ui():

 def build_webdemo():
     with gr.Blocks(
-        title="数据库智能助手",
+        title=get_lang_text("database_smart_assistant"),
         # theme=gr.themes.Base(),
         theme=gr.themes.Default(),
         css=block_css,
     ) as demo:
         url_params = gr.JSON(visible=False)
         (
@@ -2,7 +2,7 @@ import os

 import markdown
 from bs4 import BeautifulSoup
-from langchain.document_loaders import PyPDFLoader, TextLoader, markdown
+from langchain.document_loaders import PyPDFLoader, TextLoader
 from langchain.embeddings import HuggingFaceEmbeddings

 from pilot.configs.config import Config
@@ -2,9 +2,6 @@
 # -*- coding: utf-8 -*-
 from abc import ABC, abstractmethod
 from typing import Dict, List, Optional

-from langchain.embeddings import HuggingFaceEmbeddings
-
 from pilot.configs.config import Config
 from pilot.vector_store.connector import VectorStoreConnector

@@ -35,9 +32,7 @@ class SourceEmbedding(ABC):
         self.model_name = model_name
         self.vector_store_config = vector_store_config
         self.embedding_args = embedding_args
-        self.embeddings = HuggingFaceEmbeddings(model_name=self.model_name)
-
-        vector_store_config["embeddings"] = self.embeddings
+        self.embeddings = vector_store_config["embeddings"]
         self.vector_client = VectorStoreConnector(
             CFG.VECTOR_STORE_TYPE, vector_store_config
         )
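The net effect is dependency injection: instead of every SourceEmbedding constructing its own HuggingFaceEmbeddings, the caller places one shared embeddings object into vector_store_config and each consumer reads it back. A toy illustration (FakeEmbeddings is a stand-in, not a project class):

```python
class FakeEmbeddings:  # stand-in for a HuggingFaceEmbeddings instance
    def embed_query(self, text):
        return [0.0] * 768


shared = FakeEmbeddings()
config_a = {"embeddings": shared, "vector_store_name": "store_a"}
config_b = {"embeddings": shared, "vector_store_name": "store_b"}
# Two vector stores, one embedding model loaded once:
assert config_a["embeddings"] is config_b["embeddings"]
```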
@@ -1,11 +1,12 @@
 from typing import Any, Iterable, List, Optional, Tuple

 from langchain.docstore.document import Document
-from pymilvus import Collection, DataType, connections
+from pymilvus import Collection, DataType, connections, utility

 from pilot.configs.config import Config
 from pilot.vector_store.vector_store_base import VectorStoreBase


 CFG = Config()

@@ -29,6 +30,7 @@ class MilvusStore(VectorStoreBase):
         self.secure = ctx.get("secure", None)
         self.embedding = ctx.get("embeddings", None)
         self.fields = []
+        self.alias = "default"

         # use HNSW by default.
         self.index_params = {

@@ -48,7 +50,9 @@ class MilvusStore(VectorStoreBase):
             "IVF_HNSW": {"params": {"nprobe": 10, "ef": 10}},
             "ANNOY": {"params": {"search_k": 10}},
         }
+        # default collection schema
+        self.primary_field = "pk_id"
+        self.vector_field = "vector"
+        self.text_field = "content"

         if (self.username is None) != (self.password is None):
@@ -98,56 +102,43 @@ class MilvusStore(VectorStoreBase):
         texts = [d.page_content for d in documents]
         metadatas = [d.metadata for d in documents]
         embeddings = self.embedding.embed_query(texts[0])

+        if utility.has_collection(self.collection_name):
+            self.col = Collection(self.collection_name, using=self.alias)
+            self.fields = []
+            for x in self.col.schema.fields:
+                self.fields.append(x.name)
+                if x.auto_id:
+                    self.fields.remove(x.name)
+                if x.is_primary:
+                    self.primary_field = x.name
+                if (
+                    x.dtype == DataType.FLOAT_VECTOR
+                    or x.dtype == DataType.BINARY_VECTOR
+                ):
+                    self.vector_field = x.name
+            self._add_documents(texts, metadatas)
+            return self.collection_name
+
         dim = len(embeddings)
         # Generate unique names
-        primary_field = "pk_id"
-        vector_field = "vector"
-        text_field = "content"
-        self.text_field = text_field
+        primary_field = self.primary_field
+        vector_field = self.vector_field
+        text_field = self.text_field
+        # self.text_field = text_field
         collection_name = vector_name
         fields = []
         # Determine metadata schema
# Determine metadata schema
|
||||
# if metadatas:
|
||||
# # Check if all metadata keys line up
|
||||
# key = metadatas[0].keys()
|
||||
# for x in metadatas:
|
||||
# if key != x.keys():
|
||||
# raise ValueError(
|
||||
# "Mismatched metadata. "
|
||||
# "Make sure all metadata has the same keys and datatype."
|
||||
# )
|
||||
# # Create FieldSchema for each entry in singular metadata.
|
||||
# for key, value in metadatas[0].items():
|
||||
# # Infer the corresponding datatype of the metadata
|
||||
# dtype = infer_dtype_bydata(value)
|
||||
# if dtype == DataType.UNKNOWN:
|
||||
# raise ValueError(f"Unrecognized datatype for {key}.")
|
||||
# elif dtype == DataType.VARCHAR:
|
||||
# # Find out max length text based metadata
|
||||
# max_length = 0
|
||||
# for subvalues in metadatas:
|
||||
# max_length = max(max_length, len(subvalues[key]))
|
||||
# fields.append(
|
||||
# FieldSchema(key, DataType.VARCHAR, max_length=max_length + 1)
|
||||
# )
|
||||
# else:
|
||||
# fields.append(FieldSchema(key, dtype))
|
||||
|
||||
# Find out max length of texts
|
||||
max_length = 0
|
||||
for y in texts:
|
||||
max_length = max(max_length, len(y))
|
||||
# Create the text field
|
||||
fields.append(
|
||||
FieldSchema(text_field, DataType.VARCHAR, max_length=max_length + 1)
|
||||
)
|
||||
fields.append(FieldSchema(text_field, DataType.VARCHAR, max_length=65535))
|
||||
# primary key field
|
||||
fields.append(
|
||||
FieldSchema(primary_field, DataType.INT64, is_primary=True, auto_id=True)
|
||||
)
|
||||
# vector field
|
||||
fields.append(FieldSchema(vector_field, DataType.FLOAT_VECTOR, dim=dim))
|
||||
# milvus the schema for the collection
|
||||
schema = CollectionSchema(fields)
|
||||
# Create the collection
|
||||
collection = Collection(collection_name, schema)
|
||||
@@ -165,7 +156,7 @@ class MilvusStore(VectorStoreBase):
                 self.primary_field = x.name
             if x.dtype == DataType.FLOAT_VECTOR or x.dtype == DataType.BINARY_VECTOR:
                 self.vector_field = x.name
-        self._add_texts(texts, metadatas)
+        self._add_documents(texts, metadatas)

         return self.collection_name

@@ -224,7 +215,7 @@ class MilvusStore(VectorStoreBase):
     # )
     # return _text

-    def _add_texts(
+    def _add_documents(
         self,
         texts: Iterable[str],
         metadatas: Optional[List[dict]] = None,
|
||||
def load_document(self, documents) -> None:
|
||||
"""load document in vector database."""
|
||||
self.init_schema_and_load(self.collection_name, documents)
|
||||
# self.init_schema_and_load(self.collection_name, documents)
|
||||
batch_size = 500
|
||||
batched_list = [
|
||||
documents[i : i + batch_size] for i in range(0, len(documents), batch_size)
|
||||
]
|
||||
# docs = []
|
||||
for doc_batch in batched_list:
|
||||
self.init_schema_and_load(self.collection_name, doc_batch)
|
||||
|
||||
def similar_search(self, text, topk) -> None:
|
||||
"""similar_search in vector database."""
|
||||
@ -320,3 +318,6 @@ class MilvusStore(VectorStoreBase):
|
||||
)
|
||||
|
||||
return data[0], ret
|
||||
|
||||
def close(self):
|
||||
connections.disconnect()
|
||||
|
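load_document now feeds init_schema_and_load in fixed-size slices, presumably to keep each Milvus insert under the gRPC message-size limit. The comprehension handles the ragged tail automatically:

```python
docs = list(range(1203))
batch_size = 500
batches = [docs[i : i + batch_size] for i in range(0, len(docs), batch_size)]
assert [len(b) for b in batches] == [500, 500, 203]  # last batch is smaller
```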
pyproject.toml (deleted)
@@ -1,49 +0,0 @@
-[tool.poetry]
-name = "db-gpt"
-version = "0.0.6"
-description = "Interact with your data and environment privately"
-authors = []
-readme = "README.md"
-license = "MIT"
-packages = [{include = "db_gpt"}]
-repository = "https://www.github.com/csunny/DB-GPT"
-
-[tool.poetry.dependencies]
-python = "^3.10"
-accelerate = "^0.16"
-
-
-[tool.poetry.group.docs.dependencies]
-autodoc_pydantic = "^1.8.0"
-myst_parser = "^0.18.1"
-nbsphinx = "^0.8.9"
-sphinx = "^4.5.0"
-sphinx-autobuild = "^2021.3.14"
-sphinx_book_theme = "^0.3.3"
-sphinx_rtd_theme = "^1.0.0"
-sphinx-typlog-theme = "^0.8.0"
-sphinx-panels = "^0.6.0"
-toml = "^0.10.2"
-myst-nb = "^0.17.1"
-linkchecker = "^10.2.1"
-sphinx-copybutton = "^0.5.1"
-
-[tool.poetry.group.test.dependencies]
-# The only dependencies that should be added are
-# dependencies used for running tests (e.g., pytest, freezegun, response).
-# Any dependencies that do not meet that criteria will be removed.
-pytest = "^7.3.0"
-pytest-cov = "^4.0.0"
-pytest-dotenv = "^0.5.2"
-duckdb-engine = "^0.7.0"
-pytest-watcher = "^0.2.6"
-freezegun = "^1.2.2"
-responses = "^0.22.0"
-pytest-asyncio = "^0.20.3"
-lark = "^1.1.5"
-pytest-mock = "^3.10.0"
-pytest-socket = "^0.6.0"
-
-[build-system]
-requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
@@ -46,7 +46,7 @@ wandb
 llama-index==0.5.27
 pymysql
 unstructured==0.6.3
-grpcio==1.54.2
+grpcio==1.47.5

 auto-gpt-plugin-template
 pymdown-extensions
setup.py (new file, 38 lines)
@@ -0,0 +1,38 @@
+from typing import List
+
+import setuptools
+from setuptools import find_packages
+
+with open("README.md", "r") as fh:
+    long_description = fh.read()
+
+
+def parse_requirements(file_name: str) -> List[str]:
+    with open(file_name) as f:
+        return [
+            require.strip()
+            for require in f
+            if require.strip() and not require.startswith("#")
+        ]
+
+
+setuptools.setup(
+    name="DB-GPT",
+    packages=find_packages(),
+    version="0.1.0",
+    author="csunny",
+    author_email="cfqcsunny@gmail.com",
+    description="DB-GPT is an experimental open-source project that uses localized GPT large models to interact with your data and environment."
+    " With this solution, you can be assured that there is no risk of data leakage, and your data is 100% private and secure.",
+    long_description=long_description,
+    long_description_content_type="text/markdown",
+    install_requires=parse_requirements("requirements.txt"),
+    url="https://github.com/csunny/DB-GPT",
+    license="https://opensource.org/license/mit/",
+    python_requires=">=3.10",
+    entry_points={
+        "console_scripts": [
+            "dbgpt_server=pilot.server:webserver",
+        ],
+    },
+)
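parse_requirements skips blank lines and comment lines; note that it tests startswith("#") on the unstripped line, so an indented comment would slip through. A self-contained check of that behavior:

```python
import io


def parse_requirements_from(text):
    # same filtering rules as setup.py's parse_requirements, applied to a string
    return [
        line.strip()
        for line in io.StringIO(text)
        if line.strip() and not line.startswith("#")
    ]


sample = "# vector store\npymysql\n\ngrpcio==1.47.5\n"
assert parse_requirements_from(sample) == ["pymysql", "grpcio==1.47.5"]
```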