merge master

Author: csunny
Date:   2023-05-19 22:24:09 +08:00
Commit: 5acd3c1f3c

10 changed files with 180 additions and 33 deletions

.github/ISSUE_TEMPLATE/bug_report.md (new file, +38 lines)

@@ -0,0 +1,38 @@
---
name: Bug report
about: Create a report to help us improve
title: "[BUG]: "
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]
**Additional context**
Add any other context about the problem here.


@@ -0,0 +1,10 @@
---
name: Documentation Related
about: Describe this issue template's purpose here.
title: "[Doc]: "
labels: ''
assignees: ''
---


@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: "[Feature]:"
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.


@@ -215,7 +215,7 @@ The achievements of this project are thanks to the technical community, especially
| :---: | :---: | :---: | :---: |:---: |
This project follows the git-contributor [spec](https://github.com/xudafeng/git-contributor), auto updated at `Sun May 14 2023 23:02:43 GMT+0800`.
This project follows the git-contributor [spec](https://github.com/xudafeng/git-contributor), auto updated at `Fri May 19 2023 00:24:18 GMT+0800`.
<!-- GITCONTRIBUTOR_END -->


@@ -110,8 +110,6 @@ DB-GPT builds its large-model runtime on [FastChat](https://github.com/lm-sys/FastChat)
Users only need to organize their knowledge documents, and they can then use our existing capabilities to build the knowledge base that large models need.
### Large Model Management Capability
An open interface is designed at the underlying model-access layer to support plugging in multiple large models. At the same time, we apply a strict control and review process to the quality of integrated models: compared against ChatGPT, a model must reach at least 85% capability alignment in accuracy. We screen models with this higher standard so that users can skip the tedious testing and evaluation work up front.
@@ -214,13 +212,14 @@ python tools/knowledge_init.py
<!-- GITCONTRIBUTOR_START -->
## Contributors
## 贡献者 (Contributors)
|[<img src="https://avatars.githubusercontent.com/u/17919400?v=4" width="100px;"/><br/><sub><b>csunny</b></sub>](https://github.com/csunny)<br/>|[<img src="https://avatars.githubusercontent.com/u/1011681?v=4" width="100px;"/><br/><sub><b>xudafeng</b></sub>](https://github.com/xudafeng)<br/>|[<img src="https://avatars.githubusercontent.com/u/7636723?s=96&v=4" width="100px;"/><br/><sub><b>明天</b></sub>](https://github.com/yhjun1026)<br/> | [<img src="https://avatars.githubusercontent.com/u/13723926?v=4" width="100px;"/><br/><sub><b>Aries-ckt</b></sub>](https://github.com/Aries-ckt)<br/>|[<img src="https://avatars.githubusercontent.com/u/95130644?v=4" width="100px;"/><br/><sub><b>thebigbone</b></sub>](https://github.com/thebigbone)<br/>|
| :---: | :---: | :---: | :---: |:---: |
This project follows the git-contributor [spec](https://github.com/xudafeng/git-contributor), auto updated at `Sun May 14 2023 23:02:43 GMT+0800`.
This project follows the [git-contributor spec](https://github.com/xudafeng/git-contributor); auto-generated at `Fri May 19 2023 00:24:18 GMT+0800`.
<!-- GITCONTRIBUTOR_END -->


@@ -25,26 +25,11 @@ LLM_MODEL_CONFIG = {
    "sentence-transforms": os.path.join(MODEL_PATH, "all-MiniLM-L6-v2")
}
VECTOR_SEARCH_TOP_K = 20
LLM_MODEL = "vicuna-13b"
LIMIT_MODEL_CONCURRENCY = 5
MAX_POSITION_EMBEDDINGS = 4096
# VICUNA_MODEL_SERVER = "http://121.41.227.141:8000"
VICUNA_MODEL_SERVER = "http://120.79.27.110:8000"
# Load model config
ISLOAD_8BIT = True
ISDEBUG = False
DB_SETTINGS = {
    "user": "root",
    "password": "aa123456",
    "host": "127.0.0.1",
    "port": 3306
}
VECTOR_SEARCH_TOP_K = 10
VS_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "vs_store")
KNOWLEDGE_UPLOAD_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data")
KNOWLEDGE_CHUNK_SPLIT_SIZE = 100
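The DB_SETTINGS block in this hunk is a plain connection dict (user, password, host, port). As a minimal sketch of how such a dict is typically consumed, it can be unpacked straight into a MySQL client; the pymysql dependency and the target database name below are assumptions for illustration, not part of this commit.

import pymysql

# DB_SETTINGS-style dict, copied from the config shown above.
db_settings = {
    "user": "root",
    "password": "aa123456",
    "host": "127.0.0.1",
    "port": 3306,
}

# "mysql" is just the built-in system database, used here as a placeholder target.
connection = pymysql.connect(database="mysql", **db_settings)
try:
    with connection.cursor() as cursor:
        cursor.execute("SELECT VERSION()")
        print(cursor.fetchone())
finally:
    connection.close()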

pilot/model/adapter.py (new file, +96 lines)

@@ -0,0 +1,96 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from typing import List
from functools import cache

from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    AutoModel
)


class BaseLLMAdaper:
    """The base class for multi-model support in our project.
    We will support models whose performance resembles ChatGPT."""

    def match(self, model_path: str):
        return True

    def loader(self, model_path: str, from_pretrained_kwargs: dict):
        tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
        model = AutoModelForCausalLM.from_pretrained(
            model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs
        )
        return model, tokenizer


llm_model_adapters: List[BaseLLMAdaper] = []


# Register llm models to adapters; this is how we support multiple models.
def register_llm_model_adapters(cls):
    """Register a llm model adapter."""
    llm_model_adapters.append(cls())


@cache
def get_llm_model_adapter(model_path: str) -> BaseLLMAdaper:
    for adapter in llm_model_adapters:
        if adapter.match(model_path):
            return adapter

    raise ValueError(f"Invalid model adapter for {model_path}")


# TODO: support CPU? For practice we could support gpt4all or chatglm-6b-int4.
class VicunaLLMAdapater(BaseLLMAdaper):
    """Vicuna Adapter"""

    def match(self, model_path: str):
        return "vicuna" in model_path

    def loader(self, model_path: str, from_pretrained_kwagrs: dict):
        tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
        model = AutoModelForCausalLM.from_pretrained(
            model_path,
            low_cpu_mem_usage=True,
            **from_pretrained_kwagrs
        )
        return model, tokenizer


class ChatGLMAdapater(BaseLLMAdaper):
    """LLM Adapter for THUDM/chatglm-6b"""

    def match(self, model_path: str):
        return "chatglm" in model_path

    def loader(self, model_path: str, from_pretrained_kwargs: dict):
        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        model = AutoModel.from_pretrained(
            model_path, trust_remote_code=True, **from_pretrained_kwargs
        ).half().cuda()
        return model, tokenizer


class KoalaLLMAdapter(BaseLLMAdaper):
    """Koala LLM Adapter, which is based on LLaMA."""

    def match(self, model_path: str):
        return "koala" in model_path


class RWKV4LLMAdapter(BaseLLMAdaper):
    """LLM Adapter for RWKV-4."""

    def match(self, model_path: str):
        return "RWKV-4" in model_path

    def loader(self, model_path: str, from_pretrained_kwargs: dict):
        # TODO
        pass


class GPT4AllAdapter(BaseLLMAdaper):
    """A light version for someone who wants to practise LLMs on a laptop."""

    def match(self, model_path: str):
        return "gpt4all" in model_path


register_llm_model_adapters(VicunaLLMAdapater)
# TODO: vicuna is supported by default; other models still need to be tested and evaluated.
register_llm_model_adapters(BaseLLMAdaper)
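As a usage sketch: get_llm_model_adapter walks the registered adapters in order and returns the first whose match() accepts the path, falling back to the catch-all BaseLLMAdaper registered last. The model path below is a made-up local checkpoint, not one shipped with this commit.

from pilot.model.adapter import get_llm_model_adapter

# Hypothetical local Vicuna checkout; any path containing "vicuna" resolves the same way.
model_path = "/data/models/vicuna-13b"

adapter = get_llm_model_adapter(model_path)        # -> VicunaLLMAdapater
model, tokenizer = adapter.loader(model_path, {})  # empty from_pretrained kwargs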


@@ -19,7 +19,7 @@ from langchain import PromptTemplate
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(ROOT_PATH)
from pilot.configs.model_config import DB_SETTINGS, KNOWLEDGE_UPLOAD_ROOT_PATH, LLM_MODEL_CONFIG, VECTOR_SEARCH_TOP_K
from pilot.configs.model_config import KNOWLEDGE_UPLOAD_ROOT_PATH, LLM_MODEL_CONFIG, VECTOR_SEARCH_TOP_K
from pilot.server.vectordb_qa import KnownLedgeBaseQA
from pilot.connections.mysql import MySQLOperator
from pilot.source_embedding.knowledge_embedding import KnowledgeEmbedding
@@ -440,7 +440,7 @@ def build_single_model_ui():
notice_markdown = """
# DB-GPT
[DB-GPT](https://github.com/csunny/DB-GPT) is an experimental open-source application built on [FastChat](https://github.com/lm-sys/FastChat), using vicuna-13b as the base model. It also combines langchain and llama-index to enhance the model with database-related knowledge through In-Context Learning over an existing knowledge base. It can handle SQL generation, SQL diagnosis, database knowledge Q&A, and a range of related tasks. In short, it is a sophisticated and innovative AI tool for databases. If you have any specific questions about using or deploying DB-GPT in your work, please contact me and I will do my best to help; everyone is also welcome to join the project and build something interesting.
[DB-GPT](https://github.com/csunny/DB-GPT) is an open-source, database-centric GPT experiment. It uses locally hosted GPT models to interact with your data and environment, with no risk of data leakage: 100% private, 100% secure.
"""
learn_more_markdown = """
### Licence
@@ -641,7 +641,6 @@ if __name__ == "__main__":
cfg = Config()
# dbs = get_database_list()
cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode))
# Load the plugins' executable commands
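A rough sketch of that call site follows; it is not the real implementation, and it assumes set_plugins simply stores the discovered list on the config object, which is an assumption here.

# Illustrative only; Config and scan_plugins come from the project's own modules.
cfg = Config()
plugins = scan_plugins(cfg, cfg.debug_mode)  # discover plugins, honoring debug mode
cfg.set_plugins(plugins)

# Hypothetical follow-up: report what was loaded.
for plugin in plugins:
    print(f"loaded plugin: {plugin.__class__.__name__}")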


@@ -48,7 +48,7 @@ class KnownLedge2Vector:
# vector_store.add_documents(documents=documents)
else:
documents = self.load_knownlege()
# reinit
# reinit
vector_store = Chroma.from_documents(documents=documents,
embedding=self.embeddings,
persist_directory=persist_dir)
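The branch shown here rebuilds the Chroma collection from freshly loaded documents; the sibling branch (outside this hunk) would reopen an already persisted store instead. A minimal load-or-build sketch of that pattern, with a hypothetical helper name and persist directory, might look like this:

import os

from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma

def load_or_build_store(persist_dir: str, embeddings: HuggingFaceEmbeddings, documents=None):
    """Reopen a persisted Chroma store if it exists; otherwise embed the documents and persist them."""
    if os.path.exists(persist_dir):
        # Reopen without re-embedding anything.
        return Chroma(persist_directory=persist_dir, embedding_function=embeddings)
    # reinit: build the collection from scratch and persist it for next time.
    store = Chroma.from_documents(documents=documents,
                                  embedding=embeddings,
                                  persist_directory=persist_dir)
    store.persist()
    return store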


@@ -60,6 +60,13 @@ gTTS==2.3.1
langchain
nltk
python-dotenv==1.0.0
vcrpy
chromadb
markdown2
colorama
playsound
distro
pypdf
# Testing dependencies
pytest
@@ -69,11 +76,4 @@ pytest-benchmark
pytest-cov
pytest-integration
pytest-mock
vcrpy
pytest-recording
chromadb
markdown2
colorama
playsound
distro
pypdf
pytest-recording