mirror of https://github.com/csunny/DB-GPT.git
Merge branch 'llm_fxp' into falcon
commit a644d3ac6c
@@ -36,6 +36,7 @@ LLM_MODEL_CONFIG = {
     "text2vec-base": os.path.join(MODEL_PATH, "text2vec-base-chinese"),
     "guanaco-33b-merged": os.path.join(MODEL_PATH, "guanaco-33b-merged"),
     "falcon-40b": os.path.join(MODEL_PATH, "falcon-40b"),
+    "gorilla-7b": os.path.join(MODEL_PATH, "gorilla-7b"),
     "proxyllm": "proxyllm",
 }
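A minimal sketch of how the entry added here resolves at load time, assuming MODEL_PATH points at a local model directory (the value below is illustrative, not the repo's):

    import os

    MODEL_PATH = "/data/models"  # assumed location, for illustration only
    LLM_MODEL_CONFIG = {
        "gorilla-7b": os.path.join(MODEL_PATH, "gorilla-7b"),
    }

    model_path = LLM_MODEL_CONFIG["gorilla-7b"]  # -> "/data/models/gorilla-7b"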
@@ -123,6 +123,20 @@ class FalconAdapater(BaseLLMAdaper):
             **from_pretrained_kwagrs
         )
         return model, tokenizer


+class GorillaAdapter(BaseLLMAdaper):
+    """TODO Support guanaco"""
+
+    def match(self, model_path: str):
+        return "gorilla" in model_path
+
+    def loader(self, model_path: str, from_pretrained_kwargs: dict):
+        tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
+        model = AutoModelForCausalLM.from_pretrained(
+            model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs
+        )
+        return model, tokenizer
+
+
 class CodeGenAdapter(BaseLLMAdaper):
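The new adapter can be exercised on its own; a minimal sketch, assuming a local Gorilla checkpoint and an installed transformers library (the path and kwargs below are illustrative):

    import torch

    adapter = GorillaAdapter()
    assert adapter.match("/data/models/gorilla-7b")  # match() keys on the path

    # from_pretrained_kwargs is forwarded verbatim to from_pretrained
    model, tokenizer = adapter.loader(
        "/data/models/gorilla-7b",
        {"torch_dtype": torch.float16, "device_map": "auto"},
    )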
@@ -181,6 +195,7 @@ register_llm_model_adapters(VicunaLLMAdapater)
 register_llm_model_adapters(ChatGLMAdapater)
 register_llm_model_adapters(GuanacoAdapter)
 register_llm_model_adapters(FalconAdapater)
+register_llm_model_adapters(GorillaAdapter)
 # TODO Default support vicuna, other model need to tests and Evaluate

 # just for test, remove this later
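These registration calls feed a first-match dispatch. The registry itself is outside this diff; the pattern is roughly the following sketch, where llm_model_adapters and get_llm_model_adapter are assumed names, not confirmed by this commit:

    llm_model_adapters = []

    def register_llm_model_adapters(cls):
        llm_model_adapters.append(cls())

    def get_llm_model_adapter(model_path: str):
        # The first adapter whose match() accepts the path wins,
        # so registration order matters for overlapping patterns.
        for adapter in llm_model_adapters:
            if adapter.match(model_path):
                return adapter
        raise ValueError(f"Invalid model adapter for {model_path}")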
pilot/model/llm_out/gorilla_llm.py (new file, 58 lines)
@@ -0,0 +1,58 @@
+import torch
+
+
+@torch.inference_mode()
+def generate_stream(
+    model, tokenizer, params, device, context_len=42048, stream_interval=2
+):
+    """Fork from https://github.com/ShishirPatil/gorilla/blob/main/inference/serve/gorilla_cli.py"""
+    prompt = params["prompt"]
+    max_new_tokens = int(params.get("max_new_tokens", 1024))
+    stop_str = params.get("stop", None)
+
+    input_ids = tokenizer(prompt).input_ids
+    output_ids = list(input_ids)
+    input_echo_len = len(input_ids)
+    # Truncate the prompt from the left so it fits the context window
+    # with room left for generation.
+    max_src_len = context_len - max_new_tokens - 8
+    input_ids = input_ids[-max_src_len:]
+    past_key_values = out = None
+
+    for i in range(max_new_tokens):
+        if i == 0:
+            # First step: run the full prompt and prime the KV cache.
+            out = model(torch.as_tensor([input_ids], device=device), use_cache=True)
+            logits = out.logits
+            past_key_values = out.past_key_values
+        else:
+            # Later steps: feed only the last sampled token, reusing the cache.
+            out = model(
+                input_ids=torch.as_tensor([[token]], device=device),
+                use_cache=True,
+                past_key_values=past_key_values,
+            )
+            logits = out.logits
+            past_key_values = out.past_key_values
+
+        last_token_logits = logits[0][-1]
+
+        probs = torch.softmax(last_token_logits, dim=-1)
+        token = int(torch.multinomial(probs, num_samples=1))
+        output_ids.append(token)
+
+        stopped = token == tokenizer.eos_token_id
+
+        if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped:
+            tmp_output_ids = output_ids[input_echo_len:]
+            output = tokenizer.decode(
+                tmp_output_ids,
+                skip_special_tokens=True,
+                spaces_between_special_tokens=False,
+            )
+            # stop_str may be None; the decoded text excludes the prompt echo,
+            # so search the whole completion for the stop marker.
+            if stop_str:
+                pos = output.rfind(stop_str)
+                if pos != -1:
+                    output = output[:pos]
+                    stopped = True
+            yield output
+
+        if stopped:
+            break
+
+    del past_key_values
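generate_stream re-yields the full decoded completion at every interval, so a caller streams the intermediate values and keeps the last one; a minimal sketch, assuming model and tokenizer came from GorillaAdapter.loader and the model sits on "cuda" (the prompt and stop string are illustrative):

    params = {
        "prompt": "I would like to translate English to French.",
        "max_new_tokens": 256,
        "stop": "###",
    }

    output = ""
    for output in generate_stream(model, tokenizer, params, device="cuda"):
        ...  # each yield is the full text so far; push it to the UI here
    print(output)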
@@ -94,6 +94,7 @@ class GuanacoChatAdapter(BaseChatAdpter):

         return guanaco_generate_stream


 class FalconChatAdapter(BaseChatAdpter):
     """Model chat adapter for Guanaco"""
@@ -119,6 +120,7 @@ register_llm_model_chat_adapter(VicunaChatAdapter)
 register_llm_model_chat_adapter(ChatGLMChatAdapter)
 register_llm_model_chat_adapter(GuanacoChatAdapter)
 register_llm_model_adapters(FalconChatAdapter)
+register_llm_model_chat_adapter(GorillaChatAdapter)

 # Proxy model for test and develop, it's cheap for us now.
 register_llm_model_chat_adapter(ProxyllmChatAdapter)
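GorillaChatAdapter itself is defined outside this fragment. Following the pattern of the other chat adapters in this file, it plausibly matches on the path and returns the new generate_stream; a sketch under that assumption (the method names mirror GuanacoChatAdapter and are not confirmed by this hunk):

    class GorillaChatAdapter(BaseChatAdpter):
        def match(self, model_path: str):
            return "gorilla" in model_path

        def get_generate_stream_func(self):
            from pilot.model.llm_out.gorilla_llm import generate_stream

            return generate_stream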