commit 2bdfcdec93
parent ee94242d3c
repo:  https://github.com/csunny/DB-GPT.git

    update: replace embedding model
@@ -20,6 +20,7 @@ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 LLM_MODEL_CONFIG = {
     "flan-t5-base": os.path.join(MODEL_PATH, "flan-t5-base"),
     "vicuna-13b": os.path.join(MODEL_PATH, "vicuna-13b"),
+    "text2vec": os.path.join(MODEL_PATH, "text2vec"),
     "sentence-transforms": os.path.join(MODEL_PATH, "all-MiniLM-L6-v2")
 }
 
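The new "text2vec" entry follows the file's existing convention: each key is a model alias that resolves to a local checkout under MODEL_PATH. A minimal sketch of that lookup (the MODEL_PATH value here is a placeholder, not the project's real setting):

    import os

    MODEL_PATH = "/data/models"  # placeholder root; model_config.py defines the real one

    LLM_MODEL_CONFIG = {
        "text2vec": os.path.join(MODEL_PATH, "text2vec"),
        "sentence-transforms": os.path.join(MODEL_PATH, "all-MiniLM-L6-v2"),
    }

    # Callers resolve the alias and hand the path to the embedding loader.
    print(LLM_MODEL_CONFIG["text2vec"])  # /data/models/text2vec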
@@ -28,7 +29,7 @@ VECTOR_SEARCH_TOP_K = 3
 LLM_MODEL = "vicuna-13b"
 LIMIT_MODEL_CONCURRENCY = 5
 MAX_POSITION_EMBEDDINGS = 4096
-VICUNA_MODEL_SERVER = "http://121.41.167.183:8000"
+VICUNA_MODEL_SERVER = "http://121.41.227.141:8000"
 
 # Load model config
 ISLOAD_8BIT = True
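Only the host behind VICUNA_MODEL_SERVER changes in this hunk. A stale or unreachable IP is the usual failure mode after such an edit, so a bare TCP probe (standard library only, no assumptions about the server's API) is a cheap sanity check:

    import socket
    from urllib.parse import urlparse

    VICUNA_MODEL_SERVER = "http://121.41.227.141:8000"

    def is_reachable(url: str, timeout: float = 3.0) -> bool:
        """Attempt a plain TCP connect to the host:port of the model server."""
        parsed = urlparse(url)
        try:
            with socket.create_connection((parsed.hostname, parsed.port or 80), timeout=timeout):
                return True
        except OSError:
            return False

    print(is_reachable(VICUNA_MODEL_SERVER))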
@@ -242,10 +242,10 @@ def http_bot(state, mode, sql_mode, db_selector, temperature, max_new_tokens, re
     if mode == conversation_types["custome"] and not db_selector:
         persist_dir = os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vector_store_name["vs_name"] + ".vectordb")
         print("向量数据库持久化地址: ", persist_dir)
-        knowledge_embedding_client = KnowledgeEmbedding(file_path="", model_name=LLM_MODEL_CONFIG["sentence-transforms"], vector_store_config={"vector_store_name": vector_store_name["vs_name"],
+        knowledge_embedding_client = KnowledgeEmbedding(file_path="", model_name=LLM_MODEL_CONFIG["text2vec"], vector_store_config={"vector_store_name": vector_store_name["vs_name"],
                                                                                                 "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH})
         query = state.messages[-2][1]
-        docs = knowledge_embedding_client.similar_search(query, 1)
+        docs = knowledge_embedding_client.similar_search(query, 10)
         context = [d.page_content for d in docs]
         prompt_template = PromptTemplate(
             template=conv_qa_prompt_template,
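The print above logs the vector database persistence path (the Chinese string reads "vector database persistence path"). Functionally, this hunk swaps the query-time encoder to text2vec and widens retrieval from 1 to 10 passages, so the QA prompt is assembled from more context. As an illustration of what similar_search does under the hood, here is a NumPy stand-in for top-k cosine retrieval (not DB-GPT's actual implementation):

    import numpy as np

    def top_k_similar(query_vec: np.ndarray, doc_vecs: np.ndarray, k: int) -> np.ndarray:
        """Return indices of the k most cosine-similar rows of doc_vecs."""
        q = query_vec / np.linalg.norm(query_vec)
        d = doc_vecs / np.linalg.norm(doc_vecs, axis=1, keepdims=True)
        scores = d @ q
        return np.argsort(scores)[::-1][:k]

    rng = np.random.default_rng(0)
    docs = rng.normal(size=(100, 384))   # 384 dims, as in MiniLM-style encoders
    query = rng.normal(size=384)
    print(top_k_similar(query, docs, k=10))  # the diff raises k from 1 to 10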
@@ -254,6 +254,18 @@ def http_bot(state, mode, sql_mode, db_selector, temperature, max_new_tokens, re
         result = prompt_template.format(context="\n".join(context), question=query)
         state.messages[-2][1] = result
         prompt = state.get_prompt()
+        if len(prompt) > 4000:
+            logger.info("prompt length greater than 4000, rebuild")
+            docs = knowledge_embedding_client.similar_search(query, 5)
+            context = [d.page_content for d in docs]
+            prompt_template = PromptTemplate(
+                template=conv_qa_prompt_template,
+                input_variables=["context", "question"]
+            )
+            result = prompt_template.format(context="\n".join(context), question=query)
+            state.messages[-2][1] = result
+            prompt = state.get_prompt()
+            print(len(prompt))
         state.messages[-2][1] = query
         skip_echo_len = len(prompt.replace("</s>", " ")) + 1
 
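The added block rebuilds the prompt once with 5 documents instead of 10 whenever the rendered prompt exceeds 4000 characters. The same idea generalizes to a loop that halves k until the prompt fits; this is a sketch with stand-in names (search and build_prompt are hypothetical), not the committed code:

    def fit_prompt(search, query: str, build_prompt, k: int = 10, limit: int = 4000) -> str:
        """Halve the number of retrieved docs until the rendered prompt fits."""
        prompt = ""
        while k >= 1:
            docs = search(query, k)
            prompt = build_prompt([d.page_content for d in docs], query)
            if len(prompt) <= limit:
                return prompt
            k //= 2
        return prompt[:limit]  # last resort: hard truncate

    # Usage with the names from the diff (illustrative):
    # fit_prompt(knowledge_embedding_client.similar_search, query, make_qa_prompt)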
@@ -420,7 +432,7 @@ def build_single_model_ui():
        max_output_tokens = gr.Slider(
            minimum=0,
            maximum=1024,
-           value=1024,
+           value=512,
            step=64,
            interactive=True,
            label="最大输出Token数",
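Only the default changes here: the slider (labeled 最大输出Token数, "maximum output tokens") now starts at 512 instead of 1024, while still allowing up to 1024. A self-contained Gradio sketch with the same slider settings (the demo wiring is illustrative, not webserver.py):

    import gradio as gr

    def echo_settings(tokens: int) -> str:
        return f"max_new_tokens = {tokens}"

    with gr.Blocks() as demo:
        max_output_tokens = gr.Slider(
            minimum=0,
            maximum=1024,
            value=512,       # new default; was 1024
            step=64,
            interactive=True,
            label="Max output tokens",
        )
        out = gr.Textbox()
        max_output_tokens.change(echo_settings, inputs=max_output_tokens, outputs=out)

    # demo.launch()  # uncomment to run locally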
@@ -570,7 +582,7 @@ def knowledge_embedding_store(vs_id, files):
         shutil.move(file.name, os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vs_id, filename))
     knowledge_embedding_client = KnowledgeEmbedding(
         file_path=os.path.join(KNOWLEDGE_UPLOAD_ROOT_PATH, vs_id, filename),
-        model_name=LLM_MODEL_CONFIG["sentence-transforms"],
+        model_name=LLM_MODEL_CONFIG["text2vec"],
         vector_store_config={
             "vector_store_name": vector_store_name["vs_name"],
             "vector_store_path": KNOWLEDGE_UPLOAD_ROOT_PATH})
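After this hunk, both KnowledgeEmbedding call sites (the query path in http_bot and this ingestion path) use the text2vec checkpoint, which matters because documents must be indexed and queried with the same encoder for similarity scores to be meaningful. If the loader follows the common LangChain pattern, it would look roughly like this (an assumption about DB-GPT internals, with a placeholder model path):

    from langchain.embeddings import HuggingFaceEmbeddings

    # Path as would be resolved from LLM_MODEL_CONFIG["text2vec"]; placeholder here.
    embeddings = HuggingFaceEmbeddings(model_name="/data/models/text2vec")

    vectors = embeddings.embed_documents(["DB-GPT indexes this passage."])
    query_vec = embeddings.embed_query("what does DB-GPT index?")
    print(len(vectors[0]), len(query_vec))  # both vectors come from the same encoder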
@@ -17,8 +17,6 @@ class PDFEmbedding(SourceEmbedding):
         self.file_path = file_path
         self.model_name = model_name
         self.vector_store_config = vector_store_config
-        # SourceEmbedding(file_path =file_path, );
-        SourceEmbedding(file_path, model_name, vector_store_config)
 
     @register
     def read(self):
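The two deleted lines constructed a SourceEmbedding instance whose result was never used, after the same attributes had already been set by hand, so they were dead code. If base-class initialization was the intent, the idiomatic form is super().__init__(); a sketch with the constructor signature inferred from the deleted call:

    class SourceEmbedding:
        def __init__(self, file_path: str, model_name: str, vector_store_config: dict):
            self.file_path = file_path
            self.model_name = model_name
            self.vector_store_config = vector_store_config

    class PDFEmbedding(SourceEmbedding):
        def __init__(self, file_path: str, model_name: str, vector_store_config: dict):
            # Delegate instead of re-assigning the same attributes by hand.
            super().__init__(file_path, model_name, vector_store_config)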
@@ -30,7 +28,7 @@ class PDFEmbedding(SourceEmbedding):
     def data_process(self, documents: List[Document]):
         i = 0
         for d in documents:
-            documents[i].page_content = d.page_content.replace(" ", "").replace("\n", "")
+            documents[i].page_content = d.page_content.replace("\n", "")
             i += 1
         return documents
 
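The old cleanup deleted every space as well as every newline, which fuses adjacent words in space-delimited languages (tolerable for Chinese PDF text, destructive for English). The new code drops only newlines. A quick before/after:

    text = "retrieval augmented\ngeneration"

    old = text.replace(" ", "").replace("\n", "")  # 'retrievalaugmentedgeneration'
    new = text.replace("\n", "")                   # 'retrieval augmentedgeneration'
    print(old, new, sep="\n")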
@@ -73,3 +73,4 @@ markdown2
 colorama
 playsound
 distro
+pypdf
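pypdf is the parser PDF ingestion relies on; for example, LangChain's PyPDFLoader imports it at load time. A plausible consumer of the new dependency (the file name is hypothetical; the diff itself only adds the requirement):

    from langchain.document_loaders import PyPDFLoader  # requires pypdf

    loader = PyPDFLoader("example.pdf")  # hypothetical file
    documents = loader.load()            # one Document per page
    print(len(documents))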