diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 000000000..a2998b85e
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,38 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: "[BUG]: "
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Desktop (please complete the following information):**
+ - OS: [e.g. iOS]
+ - Browser [e.g. chrome, safari]
+ - Version [e.g. 22]
+
+**Smartphone (please complete the following information):**
+ - Device: [e.g. iPhone6]
+ - OS: [e.g. iOS8.1]
+ - Browser [e.g. stock browser, safari]
+ - Version [e.g. 22]
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/documentation-related.md b/.github/ISSUE_TEMPLATE/documentation-related.md
new file mode 100644
index 000000000..5506434f7
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/documentation-related.md
@@ -0,0 +1,10 @@
+---
+name: Documentation Related
+about: Report missing, unclear, or incorrect documentation
+title: "[Doc]: "
+labels: ''
+assignees: ''
+
+---
+**Describe the documentation issue**
+A clear and concise description of what is missing, unclear, or incorrect in the documentation. Link the affected pages or files if possible.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 000000000..0ccd363af
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: "[Feature]:"
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/README.md b/README.md
index 986867ec8..17d1577b6 100644
--- a/README.md
+++ b/README.md
@@ -215,7 +215,7 @@ The achievements of this project are thanks to the technical community, especial
| :---: | :---: | :---: | :---: |:---: |
-This project follows the git-contributor [spec](https://github.com/xudafeng/git-contributor), auto updated at `Sun May 14 2023 23:02:43 GMT+0800`.
+This project follows the git-contributor [spec](https://github.com/xudafeng/git-contributor), auto updated at `Fri May 19 2023 00:24:18 GMT+0800`.
diff --git a/README.zh.md b/README.zh.md
index b24288d1b..6d7b5ad49 100644
--- a/README.zh.md
+++ b/README.zh.md
@@ -110,8 +110,6 @@ DB-GPT builds the LLM runtime on [FastChat](https://github.com/lm-sys/FastChat)
Users only need to organize their knowledge documents, and our existing capabilities will build the knowledge base an LLM needs.
-
-
### LLM Management Capability
For the underlying LLM integration, we designed an open interface that supports plugging in multiple LLMs. Every integrated model goes through a strict control and review process: measured against ChatGPT, a model must reach at least 85% capability alignment in accuracy. We screen models against this higher bar so that users can skip the tedious testing and evaluation work up front.
@@ -214,13 +212,14 @@ python tools/knowledge_init.py
-## Contributors
+
+## Contributors
|[csunny](https://github.com/csunny)|[xudafeng](https://github.com/xudafeng)|[明天](https://github.com/yhjun1026)|[Aries-ckt](https://github.com/Aries-ckt)|[thebigbone](https://github.com/thebigbone)|
| :---: | :---: | :---: | :---: |:---: |
-This project follows the git-contributor [spec](https://github.com/xudafeng/git-contributor), auto updated at `Sun May 14 2023 23:02:43 GMT+0800`.
+This project follows the [git-contributor spec](https://github.com/xudafeng/git-contributor); auto-generated at `Fri May 19 2023 00:24:18 GMT+0800`.
diff --git a/pilot/configs/model_config.py b/pilot/configs/model_config.py
index da68ab332..1368b8045 100644
--- a/pilot/configs/model_config.py
+++ b/pilot/configs/model_config.py
@@ -25,26 +25,11 @@ LLM_MODEL_CONFIG = {
"sentence-transforms": os.path.join(MODEL_PATH, "all-MiniLM-L6-v2")
}
-
-VECTOR_SEARCH_TOP_K = 20
-LLM_MODEL = "vicuna-13b"
-LIMIT_MODEL_CONCURRENCY = 5
-MAX_POSITION_EMBEDDINGS = 4096
-# VICUNA_MODEL_SERVER = "http://121.41.227.141:8000"
-VICUNA_MODEL_SERVER = "http://120.79.27.110:8000"
-
-# Load model config
ISLOAD_8BIT = True
ISDEBUG = False
-DB_SETTINGS = {
- "user": "root",
- "password": "aa123456",
- "host": "127.0.0.1",
- "port": 3306
-}
-
+VECTOR_SEARCH_TOP_K = 10
VS_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "vs_store")
KNOWLEDGE_UPLOAD_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data")
KNOWLEDGE_CHUNK_SPLIT_SIZE = 100
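The removed `DB_SETTINGS` block committed plaintext database credentials to version control. Below is a minimal sketch of one way to supply those values instead, via environment variables with `python-dotenv` (already listed in requirements.txt); the variable names `DB_USER`, `DB_PASSWORD`, `DB_HOST`, and `DB_PORT` are assumptions, not part of this diff:

```python
import os

from dotenv import load_dotenv

load_dotenv()  # read a local .env file into the environment, if one exists

# Hypothetical replacement for the removed hardcoded DB_SETTINGS dict.
DB_SETTINGS = {
    "user": os.getenv("DB_USER", "root"),
    "password": os.getenv("DB_PASSWORD", ""),
    "host": os.getenv("DB_HOST", "127.0.0.1"),
    "port": int(os.getenv("DB_PORT", "3306")),
}
```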
diff --git a/pilot/model/adapter.py b/pilot/model/adapter.py
new file mode 100644
index 000000000..9afd2c01f
--- /dev/null
+++ b/pilot/model/adapter.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+from typing import List
+from functools import cache
+
+from transformers import (
+    AutoTokenizer,
+    AutoModelForCausalLM,
+    AutoModel
+)
+
+class BaseLLMAdaper:
+    """The base class for multi-model support in our project.
+    We will support models whose performance resembles ChatGPT's."""
+
+    def match(self, model_path: str):
+        return True
+
+    def loader(self, model_path: str, from_pretrained_kwargs: dict):
+        tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
+        model = AutoModelForCausalLM.from_pretrained(
+            model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs
+        )
+        return model, tokenizer
+
+
+llm_model_adapters: List[BaseLLMAdaper] = []
+
+# Register LLM model adapters; this is how we support multiple models.
+def register_llm_model_adapters(cls):
+    """Register an LLM model adapter."""
+    llm_model_adapters.append(cls())
+
+
+@cache
+def get_llm_model_adapter(model_path: str) -> BaseLLMAdaper:
+    for adapter in llm_model_adapters:
+        if adapter.match(model_path):
+            return adapter
+
+    raise ValueError(f"Invalid model adapter for {model_path}")
+
+
+# TODO: support CPU? For practice we could support gpt4all or chatglm-6b-int4.
+
+class VicunaLLMAdapater(BaseLLMAdaper):
+    """Vicuna adapter."""
+    def match(self, model_path: str):
+        return "vicuna" in model_path
+
+    def loader(self, model_path: str, from_pretrained_kwargs: dict):
+        tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
+        model = AutoModelForCausalLM.from_pretrained(
+            model_path,
+            low_cpu_mem_usage=True,
+            **from_pretrained_kwargs
+        )
+        return model, tokenizer
+
+class ChatGLMAdapater(BaseLLMAdaper):
+    """LLM adapter for THUDM/chatglm-6b."""
+    def match(self, model_path: str):
+        return "chatglm" in model_path
+
+    def loader(self, model_path: str, from_pretrained_kwargs: dict):
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+        model = AutoModel.from_pretrained(
+            model_path, trust_remote_code=True, **from_pretrained_kwargs
+        ).half().cuda()
+        return model, tokenizer
+
+class KoalaLLMAdapter(BaseLLMAdaper):
+    """Koala LLM adapter, based on LLaMA."""
+    def match(self, model_path: str):
+        return "koala" in model_path
+
+
+class RWKV4LLMAdapter(BaseLLMAdaper):
+    """LLM adapter for RWKV-4."""
+    def match(self, model_path: str):
+        return "RWKV-4" in model_path
+
+    def loader(self, model_path: str, from_pretrained_kwargs: dict):
+        # TODO: implement RWKV-4 loading.
+        raise NotImplementedError
+
+class GPT4AllAdapter(BaseLLMAdaper):
+    """A lightweight option for anyone who wants to try out LLMs on a laptop."""
+    def match(self, model_path: str):
+        return "gpt4all" in model_path
+
+
+register_llm_model_adapters(VicunaLLMAdapater)
+# TODO: vicuna is supported by default; other models still need testing and
+# evaluation. BaseLLMAdaper matches everything, so it is registered last as a
+# catch-all fallback.
+register_llm_model_adapters(BaseLLMAdaper)
\ No newline at end of file
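To make the registry mechanics in adapter.py concrete, a hedged usage sketch follows; `ExampleAdapter` and the model path are invented for illustration and do not appear in this diff:

```python
from pilot.model.adapter import (
    BaseLLMAdaper,
    get_llm_model_adapter,
    register_llm_model_adapters,
)

class ExampleAdapter(BaseLLMAdaper):
    """Hypothetical adapter that claims any path containing 'example'."""

    def match(self, model_path: str):
        return "example" in model_path

# Adapters are tried in registration order. Because the catch-all
# BaseLLMAdaper is registered at import time, anything registered after it,
# like this adapter, is never reached; a new adapter must be registered
# inside adapter.py before the catch-all to take effect.
register_llm_model_adapters(ExampleAdapter)

adapter = get_llm_model_adapter("/models/vicuna-13b-v1.1")
print(type(adapter).__name__)  # VicunaLLMAdapater, since "vicuna" is in the path

# @cache memoizes the lookup, so the same path resolves to the same instance.
assert get_llm_model_adapter("/models/vicuna-13b-v1.1") is adapter
```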
diff --git a/pilot/server/webserver.py b/pilot/server/webserver.py
index 25940a437..f43016904 100644
--- a/pilot/server/webserver.py
+++ b/pilot/server/webserver.py
@@ -19,7 +19,7 @@ from langchain import PromptTemplate
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(ROOT_PATH)
-from pilot.configs.model_config import DB_SETTINGS, KNOWLEDGE_UPLOAD_ROOT_PATH, LLM_MODEL_CONFIG, VECTOR_SEARCH_TOP_K
+from pilot.configs.model_config import KNOWLEDGE_UPLOAD_ROOT_PATH, LLM_MODEL_CONFIG, VECTOR_SEARCH_TOP_K
from pilot.server.vectordb_qa import KnownLedgeBaseQA
from pilot.connections.mysql import MySQLOperator
from pilot.source_embedding.knowledge_embedding import KnowledgeEmbedding
@@ -440,7 +440,7 @@ def build_single_model_ui():
notice_markdown = """
# DB-GPT
- [DB-GPT](https://github.com/csunny/DB-GPT) is an experimental open-source application built on [FastChat](https://github.com/lm-sys/FastChat) with vicuna-13b as the base model. It combines langchain and llama-index to run In-Context Learning over an existing knowledge base, augmenting the model with database-specific knowledge. It can handle tasks such as SQL generation, SQL diagnosis, and database knowledge Q&A. Overall, it is a sophisticated and innovative AI tool for databases. If you have specific questions about using or deploying DB-GPT in your work, please reach out and I will do my best to help; everyone is also welcome to join the project and build something interesting.
+ [DB-GPT](https://github.com/csunny/DB-GPT) is an open-source experimental GPT project built around databases. It uses a locally deployed GPT model to interact with your data and environment, with no risk of data leakage: 100% private and 100% secure.
"""
learn_more_markdown = """
### Licence
@@ -641,7 +641,6 @@ if __name__ == "__main__":
cfg = Config()
# dbs = get_database_list()
-
cfg.set_plugins(scan_plugins(cfg, cfg.debug_mode))
    # Load the plugins' executable commands
diff --git a/pilot/vector_store/file_loader.py b/pilot/vector_store/file_loader.py
index 8703e2e4c..279d5343c 100644
--- a/pilot/vector_store/file_loader.py
+++ b/pilot/vector_store/file_loader.py
@@ -48,7 +48,7 @@ class KnownLedge2Vector:
# vector_store.add_documents(documents=documents)
else:
documents = self.load_knownlege()
- # reinit
+ # reinit
vector_store = Chroma.from_documents(documents=documents,
embedding=self.embeddings,
persist_directory=persist_dir)
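For context on the hunk above: the `else` branch rebuilds the Chroma index from scratch, while the (elided) `if` branch reloads a previously persisted one. Below is a rough sketch of that load-or-rebuild pattern with langchain's `Chroma`, assuming a local `persist_dir` and the all-MiniLM-L6-v2 embeddings already referenced in model_config.py; the document list is a stand-in:

```python
import os

from langchain.embeddings import HuggingFaceEmbeddings
from langchain.schema import Document
from langchain.vectorstores import Chroma

persist_dir = "vs_store"  # assumed location; DB-GPT derives it from VS_ROOT_PATH
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

if os.path.exists(persist_dir):
    # Reload the persisted index instead of re-embedding every document.
    vector_store = Chroma(persist_directory=persist_dir, embedding_function=embeddings)
else:
    documents = [Document(page_content="example knowledge chunk")]  # stand-in
    vector_store = Chroma.from_documents(
        documents=documents, embedding=embeddings, persist_directory=persist_dir
    )
    vector_store.persist()  # write the index to disk for the next run
```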
diff --git a/requirements.txt b/requirements.txt
index eac927c3d..410d3129c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -60,6 +60,13 @@ gTTS==2.3.1
langchain
nltk
python-dotenv==1.0.0
+vcrpy
+chromadb
+markdown2
+colorama
+playsound
+distro
+pypdf
# Testing dependencies
pytest
@@ -69,11 +76,4 @@ pytest-benchmark
pytest-cov
pytest-integration
pytest-mock
-vcrpy
-pytest-recording
-chromadb
-markdown2
-colorama
-playsound
-distro
-pypdf
\ No newline at end of file
+pytest-recording
\ No newline at end of file