fix: chatglm not working in doc qa, meta qa and plugin (#318)

Closes #316

Commit: cc167af5ae

Most of the diff is black-style reformatting; the functional change is in chatglm_generate_stream, which now collects human/system/ai segments into HistoryEntry objects and, when no history can be built, merges the system messages into the user query via prompt_adaptation, so doc QA, meta QA and plugin prompts reach ChatGLM intact.
@@ -28,7 +28,7 @@ class Config(metaclass=Singleton):
         self.skip_reprompt = False
         self.temperature = float(os.getenv("TEMPERATURE", 0.7))

-        self.NUM_GPUS = int(os.getenv("NUM_GPUS",1))
+        self.NUM_GPUS = int(os.getenv("NUM_GPUS", 1))

         self.execute_local_commands = (
             os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
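Not part of the diff: a minimal, hypothetical sketch of how the NUM_GPUS knob is expected to reach the adapter, assuming it is exported in .env (only the names NUM_GPUS and the int/default handling come from the hunk above; everything else is illustrative):

    import os

    # Hypothetical standalone check: mirror how Config picks up NUM_GPUS.
    os.environ["NUM_GPUS"] = "2"              # e.g. NUM_GPUS=2 in .env
    num_gpus = int(os.getenv("NUM_GPUS", 1))  # falls back to a single GPU
    print(num_gpus)                           # -> 2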
@@ -73,6 +73,7 @@ class VicunaLLMAdapater(BaseLLMAdaper):
         )
         return model, tokenizer

+
 def auto_configure_device_map(num_gpus):
     """handling multi gpu calls"""
     # transformer.word_embeddings occupies 1 layer
@@ -81,18 +82,18 @@ def auto_configure_device_map(num_gpus):
     # Allocate a total of 30 layers across num_gpus cards
     num_trans_layers = 28
     per_gpu_layers = 30 / num_gpus
-    #Bugfix: on Linux, torch.embedding can be called with weight and input on different devices, which raises a RuntimeError
-    #Under Windows, model.device is set to transformer.word_embeddings.device
-    #Under Linux, model.device is set to lm_head.device
-    #When calling chat or stream_chat, input_ids is placed on model.device
-    #If transformer.word_embeddings.device and model.device differ, this causes a RuntimeError
-    #Therefore transformer.word_embeddings, transformer.final_layernorm and lm_head are all placed on the first card
+    # Bugfix: on Linux, torch.embedding can be called with weight and input on different devices, which raises a RuntimeError
+    # Under Windows, model.device is set to transformer.word_embeddings.device
+    # Under Linux, model.device is set to lm_head.device
+    # When calling chat or stream_chat, input_ids is placed on model.device
+    # If transformer.word_embeddings.device and model.device differ, this causes a RuntimeError
+    # Therefore transformer.word_embeddings, transformer.final_layernorm and lm_head are all placed on the first card
     device_map = {
-        'transformer.embedding.word_embeddings': 0,
-        'transformer.encoder.final_layernorm': 0,
-        'transformer.output_layer': 0,
-        'transformer.rotary_pos_emb': 0,
-        'lm_head': 0
+        "transformer.embedding.word_embeddings": 0,
+        "transformer.encoder.final_layernorm": 0,
+        "transformer.output_layer": 0,
+        "transformer.rotary_pos_emb": 0,
+        "lm_head": 0,
     }

     used = 2
@@ -102,7 +103,7 @@ def auto_configure_device_map(num_gpus):
             gpu_target += 1
             used = 0
         assert gpu_target < num_gpus
-        device_map[f'transformer.encoder.layers.{i}'] = gpu_target
+        device_map[f"transformer.encoder.layers.{i}"] = gpu_target
         used += 1

     return device_map
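Not part of the commit: a self-contained sketch that re-creates the allocation logic visible in the two hunks above, so the layer split can be sanity-checked offline (the helper name sketch_device_map is made up for illustration):

    from collections import Counter

    def sketch_device_map(num_gpus: int) -> dict:
        # Re-creates the allocation shown in the diff: 28 transformer layers plus the
        # embedding/final_layernorm/output_layer/rotary_pos_emb/lm_head modules pinned
        # to GPU 0 (counted as 2 "slots"), spread over roughly 30 / num_gpus slots per card.
        num_trans_layers = 28
        per_gpu_layers = 30 / num_gpus
        device_map = {
            "transformer.embedding.word_embeddings": 0,
            "transformer.encoder.final_layernorm": 0,
            "transformer.output_layer": 0,
            "transformer.rotary_pos_emb": 0,
            "lm_head": 0,
        }
        used, gpu_target = 2, 0
        for i in range(num_trans_layers):
            if used >= per_gpu_layers:
                gpu_target += 1
                used = 0
            assert gpu_target < num_gpus
            device_map[f"transformer.encoder.layers.{i}"] = gpu_target
            used += 1
        return device_map

    if __name__ == "__main__":
        # Count how many modules land on each card for a 2-GPU run.
        print(Counter(sketch_device_map(2).values()))  # -> Counter({0: 18, 1: 15})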
@@ -114,7 +115,13 @@ class ChatGLMAdapater(BaseLLMAdaper):
     def match(self, model_path: str):
         return "chatglm" in model_path

-    def loader(self, model_path: str, from_pretrained_kwargs: dict, device_map=None, num_gpus=CFG.NUM_GPUS):
+    def loader(
+        self,
+        model_path: str,
+        from_pretrained_kwargs: dict,
+        device_map=None,
+        num_gpus=CFG.NUM_GPUS,
+    ):
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

         if DEVICE != "cuda":
@@ -125,10 +132,8 @@ class ChatGLMAdapater(BaseLLMAdaper):
         else:
             model = (
                 AutoModel.from_pretrained(
-                    model_path, trust_remote_code=True,
-                    **from_pretrained_kwargs
-                )
-                .half()
+                    model_path, trust_remote_code=True, **from_pretrained_kwargs
+                ).half()
                 # .cuda()
             )
             from accelerate import dispatch_model
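For orientation, a hedged sketch of how the multi-GPU branch of ChatGLMAdapater.loader fits together, based only on the hunks above: the fp16 model is presumably dispatched with accelerate's dispatch_model, which the diff imports. The wrapper name load_chatglm_multi_gpu is illustrative, and auto_configure_device_map is assumed to be in scope as in the diff:

    from accelerate import dispatch_model
    from transformers import AutoModel, AutoTokenizer

    def load_chatglm_multi_gpu(model_path: str, num_gpus: int):
        # Tokenizer and fp16 model, as in the diff (trust_remote_code for ChatGLM).
        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half()
        # Spread the 28 transformer layers across the cards while keeping the
        # embeddings, final layernorm and lm_head on GPU 0 (see auto_configure_device_map).
        device_map = auto_configure_device_map(num_gpus)  # assumed importable from the diff
        model = dispatch_model(model, device_map=device_map)
        return model, tokenizer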
@@ -1,5 +1,8 @@
 #!/usr/bin/env python3
 # -*- coding:utf-8 -*-

+from typing import List
+import re
+import copy

 import torch
@@ -33,34 +36,36 @@ def chatglm_generate_stream(
     messages = prompt.split(stop)
     #
     # # Add history conversation
-    hist = []
-    once_conversation = []
+    hist = [HistoryEntry()]
+    system_messages = []
     for message in messages[:-2]:
         if len(message) <= 0:
             continue

         if "human:" in message:
-            once_conversation.append(message.split("human:")[1])
-        # elif "system:" in message:
-        #     once_conversation.append(f"""###system:{message.split("system:")[1]} """)
+            hist[-1].add_question(message.split("human:")[1])
+        elif "system:" in message:
+            msg = message.split("system:")[1]
+            hist[-1].add_question(msg)
+            system_messages.append(msg)
         elif "ai:" in message:
-            once_conversation.append(message.split("ai:")[1])
-            last_conversation = copy.deepcopy(once_conversation)
-            hist.append(last_conversation)
-            once_conversation = []
-        # else:
-        #     once_conversation.append(f"""###system:{message} """)
+            hist[-1].add_answer(message.split("ai:")[1])
+            hist.append(HistoryEntry())
+        else:
+            # TODO
+            # hist[-1].add_question(message.split("system:")[1])
+            # once_conversation.append(f"""###system:{message} """)
+            pass

     try:
         query = messages[-2].split("human:")[1]
     except IndexError:
         # fix doc qa: https://github.com/csunny/DB-GPT/issues/274
         doc_qa_message = messages[-2]
         if "system:" in doc_qa_message:
             query = doc_qa_message.split("system:")[1]
         else:
             query = messages[-3].split("human:")[1]
+    hist = build_history(hist)
+    if not hist:
+        # No history conversation, but there are system messages; merge them into the user's query
+        query = prompt_adaptation(system_messages, query)
     print("Query Message: ", query)
     print("hist: ", hist)
     # output = ""
     # i = 0
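To illustrate the new parsing path (not in the commit): a rough walk-through of what the loop above produces for a typical DB-GPT prompt, assuming the stop string splits the prompt into "human:"/"system:"/"ai:" segments and using HistoryEntry and build_history from the hunk below. The sample strings are invented:

    # Hypothetical walk-through of the parsing loop above (sample strings invented).
    # The last two segments (the current human query and a trailing empty piece)
    # are skipped by messages[:-2] and handled separately.
    segments = [
        "system:You are a SQL assistant. ",
        "human:show all tables ",
        "ai:SELECT name FROM sqlite_master; ",
        "human:count users ",
        "",
    ]

    hist = [HistoryEntry()]          # HistoryEntry / build_history from the hunk below
    system_messages = []
    for message in segments[:-2]:
        if "human:" in message:
            hist[-1].add_question(message.split("human:")[1])
        elif "system:" in message:
            msg = message.split("system:")[1]
            hist[-1].add_question(msg)
            system_messages.append(msg)
        elif "ai:" in message:
            hist[-1].add_answer(message.split("ai:")[1])
            hist.append(HistoryEntry())

    print(build_history(hist))
    # -> [['You are a SQL assistant. show all tables ', 'SELECT name FROM sqlite_master; ']]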
@@ -75,3 +80,43 @@ def chatglm_generate_stream(
             yield output

     yield output
+
+
+class HistoryEntry:
+    def __init__(self, question: str = "", answer: str = ""):
+        self.question = question
+        self.answer = answer
+
+    def add_question(self, question: str):
+        self.question += question
+
+    def add_answer(self, answer: str):
+        self.answer += answer
+
+    def to_list(self):
+        if self.question == "" or self.answer == "":
+            return None
+        return [self.question, self.answer]
+
+
+def build_history(hist: List[HistoryEntry]) -> List[List[str]]:
+    return list(filter(lambda hl: hl is not None, map(lambda h: h.to_list(), hist)))
+
+
+def prompt_adaptation(system_messages: List[str], human_message: str) -> str:
+    if not system_messages:
+        return human_message
+    system_messages_str = " ".join(system_messages)
+    adaptation_rules = [
+        r"Question:\s*{}\s*",  # chat_db scene
+        r"Goals:\s*{}\s*",  # chat_execution scene
+        r"问题:\s*{}\s*",  # chat_knowledge scene (zh)
+        r"question:\s*{}\s*",  # chat_knowledge scene (en)
+    ]
+    # The system message may already contain the human question
+    for rule in adaptation_rules:
+        pattern = re.compile(rule.format(re.escape(human_message)))
+        if re.search(pattern, system_messages_str):
+            return system_messages_str
+    # https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
+    return f"{system_messages_str}\n\n问:{human_message}\n\n答:"
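Finally, a small usage sketch (not part of the commit) showing why prompt_adaptation matters for the doc-QA path: when the scene prompt already embeds the question, the merged system text is sent as-is; otherwise the question is appended in ChatGLM's native 问/答 format. It assumes prompt_adaptation from the hunk above is in scope, and the sample strings are invented:

    # Case 1: the knowledge-base scene prompt already contains "question: <query>",
    # so the merged system text is returned unchanged.
    sys_msgs = ["Based on the known information, answer concisely. question: What is DB-GPT?  "]
    print(prompt_adaptation(sys_msgs, "What is DB-GPT?"))  # prints the system text as-is

    # Case 2: no rule matches, so the query is appended using ChatGLM's 问/答 convention.
    print(prompt_adaptation(["You are a helpful assistant."], "What is DB-GPT?"))
    # -> "You are a helpful assistant.\n\n问:What is DB-GPT?\n\n答:"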