Merge remote-tracking branch 'origin/dev' into oconfig_major

# Conflicts:
#	README.en.md
#	README.md
@@ -107,7 +107,7 @@ def gen_sqlgen_conversation(dbname):
    schemas = mo.get_schema(dbname)
    for s in schemas:
        message += s["schema_info"] + ";"
    return f"数据库{dbname}的Schema信息如下: {message}\n"
    return f"Database {dbname} Schema information as follows: {message}\n"


conv_one_shot = Conversation(
@@ -170,7 +170,7 @@ auto_dbgpt_one_shot = Conversation(


Schema:
    数据库gpt-user的Schema信息如下: users(city,create_time,email,last_login_time,phone,user_name);
    Database gpt-user Schema information as follows: users(city,create_time,email,last_login_time,phone,user_name);


Commands:
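For reference, a worked example of what the changed return line produces. The database name "demo" and the single users(id, name) table are hypothetical, and mo.get_schema is assumed to return dicts with a "schema_info" key, as the loop above suggests:

message = ""
for s in [{"schema_info": "users(id, name)"}]:   # stand-in for mo.get_schema("demo")
    message += s["schema_info"] + ";"
print(f"Database demo Schema information as follows: {message}")
# -> Database demo Schema information as follows: users(id, name);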
96  pilot/model/adapter.py  Normal file
@@ -0,0 +1,96 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from typing import List
from functools import cache

from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    AutoModel
)


class BaseLLMAdaper:
    """The base class for all model adapters in this project.

    We aim to support models whose performance resembles ChatGPT."""

    def match(self, model_path: str):
        return True

    def loader(self, model_path: str, from_pretrained_kwargs: dict):
        tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
        model = AutoModelForCausalLM.from_pretrained(
            model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs
        )
        return model, tokenizer


llm_model_adapters: List[BaseLLMAdaper] = []


# Register LLM models to adapters; this is what lets us support multiple models.
def register_llm_model_adapters(cls):
    """Register an LLM model adapter."""
    llm_model_adapters.append(cls())


@cache
def get_llm_model_adapter(model_path: str) -> BaseLLMAdaper:
    for adapter in llm_model_adapters:
        if adapter.match(model_path):
            return adapter

    raise ValueError(f"Invalid model adapter for {model_path}")


# TODO: support CPU? For practice we could support gpt4all or chatglm-6b-int4.

class VicunaLLMAdapater(BaseLLMAdaper):
    """Vicuna adapter."""
    def match(self, model_path: str):
        return "vicuna" in model_path

    def loader(self, model_path: str, from_pretrained_kwargs: dict):
        tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
        model = AutoModelForCausalLM.from_pretrained(
            model_path,
            low_cpu_mem_usage=True,
            **from_pretrained_kwargs
        )
        return model, tokenizer


class ChatGLMAdapater(BaseLLMAdaper):
    """LLM adapter for THUDM/chatglm-6b."""
    def match(self, model_path: str):
        return "chatglm" in model_path

    def loader(self, model_path: str, from_pretrained_kwargs: dict):
        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        model = AutoModel.from_pretrained(
            model_path, trust_remote_code=True, **from_pretrained_kwargs
        ).half().cuda()
        return model, tokenizer


class KoalaLLMAdapter(BaseLLMAdaper):
    """Koala LLM adapter, based on LLaMA."""
    def match(self, model_path: str):
        return "koala" in model_path


class RWKV4LLMAdapter(BaseLLMAdaper):
    """LLM adapter for RWKV-4."""
    def match(self, model_path: str):
        return "RWKV-4" in model_path

    def loader(self, model_path: str, from_pretrained_kwargs: dict):
        # TODO
        pass


class GPT4AllAdapter(BaseLLMAdaper):
    """A lightweight adapter for anyone who wants to experiment with an LLM on a laptop."""
    def match(self, model_path: str):
        return "gpt4all" in model_path


register_llm_model_adapters(VicunaLLMAdapater)
# TODO: Vicuna is supported by default; other models still need testing and evaluation.

# BaseLLMAdaper matches everything, so it stays registered last as the fallback.
register_llm_model_adapters(BaseLLMAdaper)
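For context, a minimal sketch of how this registry is meant to be consumed; the model path below is a hypothetical example:

from pilot.model.adapter import get_llm_model_adapter

# "/models/vicuna-13b" is a made-up path; VicunaLLMAdapater matches it.
llm_adapter = get_llm_model_adapter("/models/vicuna-13b")
model, tokenizer = llm_adapter.loader("/models/vicuna-13b", {})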
@@ -2,21 +2,19 @@
# -*- coding: utf-8 -*-

import torch
import warnings
from pilot.singleton import Singleton

from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    AutoModel
)

from pilot.model.compression import compress_module
from pilot.model.adapter import get_llm_model_adapter


class ModelLoader(metaclass=Singleton):
    """ModelLoader is a class for loading models.

    Args: model_path

    TODO: multi-model support.
    """

    kwargs = {}
@@ -31,9 +29,11 @@ class ModelLoader(metaclass=Singleton):
        "device_map": "auto",
    }

    # TODO multi gpu support
    def loader(self, num_gpus, load_8bit=False, debug=False):
        if self.device == "cpu":
            kwargs = {}

        elif self.device == "cuda":
            kwargs = {"torch_dtype": torch.float16}
            if num_gpus == "auto":
@@ -46,18 +46,20 @@ class ModelLoader(metaclass=Singleton):
                "max_memory": {i: "13GiB" for i in range(num_gpus)},
            })
        else:
            # TODO: support mps for practice
            raise ValueError(f"Invalid device: {self.device}")

        if "chatglm" in self.model_path:
            tokenizer = AutoTokenizer.from_pretrained(self.model_path, trust_remote_code=True)
            model = AutoModel.from_pretrained(self.model_path, trust_remote_code=True).half().cuda()
        else:
            tokenizer = AutoTokenizer.from_pretrained(self.model_path, use_fast=False)
            model = AutoModelForCausalLM.from_pretrained(self.model_path,
                                                         low_cpu_mem_usage=True, **kwargs)

        llm_adapter = get_llm_model_adapter(self.model_path)
        model, tokenizer = llm_adapter.loader(self.model_path, kwargs)

        if load_8bit:
            compress_module(model, self.device)
            if num_gpus != 1:
                warnings.warn(
                    "8-bit quantization is not supported for multi-gpu inference"
                )
            else:
                compress_module(model, self.device)

        if (self.device == "cuda" and num_gpus == 1):
            model.to(self.device)
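A minimal sketch of driving the refactored loader. The module path, constructor signature, and values below are assumptions, since none of them appear in this hunk, and loader() is assumed to return the (model, tokenizer) pair:

from pilot.model.loader import ModelLoader                     # assumed module path

loader = ModelLoader(model_path="/models/vicuna-13b")          # hypothetical path and constructor args
model, tokenizer = loader.loader(num_gpus=1, load_8bit=False)  # assumes loader() returns (model, tokenizer)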
@@ -17,7 +17,7 @@ from pilot.logs import logger

def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
    """
    加载zip文件的插件,完全兼容Auto_gpt_plugin
    Load plugins from a zip file; natively compatible with Auto_gpt_plugin.

    Args:
        zip_path (str): Path to the zipfile.
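The body of inspect_zip_for_modules lies outside this hunk; for orientation, a minimal sketch of what such an inspection typically looks like using only the stdlib zipfile module (the helper name is hypothetical and not part of the project):

import zipfile

def example_inspect_zip(zip_path: str) -> list[str]:
    # Collect entries that look like Python packages inside the archive.
    with zipfile.ZipFile(zip_path, "r") as zfile:
        return [name for name in zfile.namelist() if name.endswith("__init__.py")]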
@@ -40,8 +40,8 @@ def knownledge_tovec_st(filename):


def load_knownledge_from_doc():
    """从数据集当中加载知识
    # TODO 如果向量存储已经存在, 则无需初始化
    """Load knowledge from the current datasets.
    # TODO: if the vector store already exists, just use it.
    """

    if not os.path.exists(DATASETS_DIR):
@@ -40,15 +40,15 @@ class KnownLedge2Vector:

    def init_vector_store(self):
        persist_dir = os.path.join(VECTORE_PATH, ".vectordb")
        print("向量数据库持久化地址: ", persist_dir)
        print("Vector store persist directory:", persist_dir)
        if os.path.exists(persist_dir):
            # 从本地持久化文件中Load
            print("从本地向量加载数据...")
            # Load from the locally persisted file.
            print("Loading data from the locally persisted vector store...")
            vector_store = Chroma(persist_directory=persist_dir, embedding_function=self.embeddings)
            # vector_store.add_documents(documents=documents)
        else:
            documents = self.load_knownlege()
            # 重新初始化
            # Re-initialize the store.
            vector_store = Chroma.from_documents(documents=documents,
                                                 embedding=self.embeddings,
                                                 persist_directory=persist_dir)
@@ -61,17 +61,17 @@ class KnownLedge2Vector:
            for file in files:
                filename = os.path.join(root, file)
                docs = self._load_file(filename)
                # 更新metadata数据
                # Update metadata.
                new_docs = []
                for doc in docs:
                    doc.metadata = {"source": doc.metadata["source"].replace(DATASETS_DIR, "")}
                    print("文档2向量初始化中, 请稍等...", doc.metadata)
                    print("Embedding documents into vectors, please wait...", doc.metadata)
                    new_docs.append(doc)
                docments += new_docs
        return docments

    def _load_file(self, filename):
        # 加载文件
        # Load the file.
        if filename.lower().endswith(".pdf"):
            loader = UnstructuredFileLoader(filename)
            text_splitor = CharacterTextSplitter()
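A minimal usage sketch of the store built above, assuming KnownLedge2Vector can be constructed without arguments and that init_vector_store returns the Chroma instance; the query string is a hypothetical example and the lookup uses LangChain's standard similarity_search:

kv = KnownLedge2Vector()                  # assumed no-arg construction
vector_store = kv.init_vector_store()     # assumed to return the Chroma store
docs = vector_store.similarity_search("How do I configure DB-GPT?", k=4)
for doc in docs:
    print(doc.metadata["source"])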