mirror of https://github.com/csunny/DB-GPT.git (synced 2025-08-09 12:18:12 +00:00)
support gorilla
commit c2bfab11e0
parent b24d2fe0c0
@@ -35,6 +35,7 @@ LLM_MODEL_CONFIG = {
     "chatglm-6b": os.path.join(MODEL_PATH, "chatglm-6b"),
     "text2vec-base": os.path.join(MODEL_PATH, "text2vec-base-chinese"),
     "guanaco-33b-merged": os.path.join(MODEL_PATH, "guanaco-33b-merged"),
+    "gorilla-7b": os.path.join(MODEL_PATH, "gorilla-7b"),
     "proxyllm": "proxyllm",
 }
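With this entry in place, the local Gorilla weights resolve like any other model in LLM_MODEL_CONFIG. A minimal lookup sketch; the MODEL_PATH value is an assumption for illustration, only the dict entry above comes from the diff:

# Hypothetical lookup of the new entry; MODEL_PATH is assumed to point at
# the directory holding local checkpoints, as elsewhere in LLM_MODEL_CONFIG.
import os

MODEL_PATH = "./models"  # assumed value for illustration
LLM_MODEL_CONFIG = {
    "gorilla-7b": os.path.join(MODEL_PATH, "gorilla-7b"),
}

model_name = "gorilla-7b"
model_path = LLM_MODEL_CONFIG[model_name]
print(model_path)  # ./models/gorilla-7b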
@@ -111,6 +111,20 @@ class GuanacoAdapter(BaseLLMAdaper):
         return model, tokenizer
 
 
+class GorillaAdapter(BaseLLMAdaper):
+    """TODO Support gorilla"""
+
+    def match(self, model_path: str):
+        return "gorilla" in model_path
+
+    def loader(self, model_path: str, from_pretrained_kwargs: dict):
+        tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
+        model = AutoModelForCausalLM.from_pretrained(
+            model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs
+        )
+        return model, tokenizer
+
+
 class CodeGenAdapter(BaseLLMAdaper):
     pass
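For illustration, a rough sketch of how an adapter of this shape could be exercised directly; the import path, model directory, and dtype kwarg are assumptions, not part of the diff:

# Hypothetical direct use of the new adapter (sketch, not the project's API).
import torch
from pilot.model.adapter import GorillaAdapter  # assumed module path

model_path = "/data/models/gorilla-7b"  # assumed local checkout
adapter = GorillaAdapter()

if adapter.match(model_path):  # substring match on "gorilla"
    model, tokenizer = adapter.loader(
        model_path,
        {"torch_dtype": torch.float16},  # forwarded to from_pretrained(...)
    )
    model.to("cuda").eval()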
@@ -166,6 +180,7 @@ class ProxyllmAdapter(BaseLLMAdaper):
 register_llm_model_adapters(VicunaLLMAdapater)
 register_llm_model_adapters(ChatGLMAdapater)
 register_llm_model_adapters(GuanacoAdapter)
+register_llm_model_adapters(GorillaAdapter)
 # TODO Default support vicuna, other model need to tests and Evaluate
 
 # just for test, remove this later
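The register_llm_model_adapters calls above feed a simple registry. A minimal sketch of that pattern, assuming a module-level list and a lookup helper; the diff itself only shows the registration calls:

# Sketch of the registry pattern behind register_llm_model_adapters.
# The names below are illustrative, not taken from this diff.
llm_model_adapters = []

def register_llm_model_adapters(cls) -> None:
    """Instantiate an adapter class and remember it."""
    llm_model_adapters.append(cls())

def get_llm_model_adapter(model_path: str):
    """Return the first registered adapter whose match() accepts the path."""
    for adapter in llm_model_adapters:
        if adapter.match(model_path):
            return adapter
    raise ValueError(f"Invalid model adapter for {model_path}")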
pilot/model/llm_out/gorilla_llm.py (new file, 58 lines)
@@ -0,0 +1,58 @@
+import torch
+
+@torch.inference_mode()
+def generate_stream(
+    model, tokenizer, params, device, context_len=42048, stream_interval=2
+):
+    """Fork from https://github.com/ShishirPatil/gorilla/blob/main/inference/serve/gorilla_cli.py"""
+    prompt = params["prompt"]
+    l_prompt = len(prompt)
+    max_new_tokens = int(params.get("max_new_tokens", 1024))
+    stop_str = params.get("stop", None)
+
+    input_ids = tokenizer(prompt).input_ids
+    output_ids = list(input_ids)
+    input_echo_len = len(input_ids)
+    max_src_len = context_len - max_new_tokens - 8
+    input_ids = input_ids[-max_src_len:]
+    past_key_values = out = None
+
+    for i in range(max_new_tokens):
+        if i == 0:
+            out = model(torch.as_tensor([input_ids], device=device), use_cache=True)
+            logits = out.logits
+            past_key_values = out.past_key_values
+        else:
+            out = model(
+                input_ids=torch.as_tensor([[token]], device=device),
+                use_cache=True,
+                past_key_values=past_key_values,
+            )
+            logits = out.logits
+            past_key_values = out.past_key_values
+
+        last_token_logits = logits[0][-1]
+
+        probs = torch.softmax(last_token_logits, dim=-1)
+        token = int(torch.multinomial(probs, num_samples=1))
+        output_ids.append(token)
+
+
+        if token == tokenizer.eos_token_id:
+            stopped = True
+        else:
+            stopped = False
+
+        if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped:
+            tmp_output_ids = output_ids[input_echo_len:]
+            output = tokenizer.decode(tmp_output_ids, skip_special_tokens=True, spaces_between_special_tokens=False,)
+            pos = output.rfind(stop_str, l_prompt)
+            if pos != -1:
+                output = output[:pos]
+                stopped = True
+            yield output
+
+        if stopped:
+            break
+
+    del past_key_values
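A minimal consumer sketch for the generator above; the prompt, stop string, and device are illustrative, and model/tokenizer are assumed to come from the adapter's loader rather than shown here:

# Hypothetical driver for generate_stream (sketch only).
from pilot.model.llm_out.gorilla_llm import generate_stream

# model and tokenizer are assumed to come from GorillaAdapter().loader(...),
# as in the adapter sketch earlier.
params = {
    "prompt": "I want an API to classify images of animals.",  # illustrative
    "max_new_tokens": 256,
    "stop": "###",  # generate_stream calls str.rfind on this, so pass a string
}

printed = ""
for partial in generate_stream(model, tokenizer, params, device="cuda"):
    # Each yield is the full decoded completion so far; emit only the new tail.
    print(partial[len(printed):], end="", flush=True)
    printed = partial
print()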
@@ -95,6 +95,16 @@ class GuanacoChatAdapter(BaseChatAdpter):
 
         return guanaco_generate_stream
 
 
+class GorillaChatAdapter(BaseChatAdpter):
+    """Model chat adapter for Gorilla"""
+
+    def match(self, model_path: str):
+        return "gorilla" in model_path
+
+    def get_generate_stream_func(self):
+        from pilot.model.llm_out.gorilla_llm import generate_stream
+
+        return generate_stream
+
+
 class ProxyllmChatAdapter(BaseChatAdpter):
     def match(self, model_path: str):
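Taken together, the two adapters give a hypothetical end-to-end path from a model path to streamed output. In the sketch below, the lookup helpers and their module paths are assumptions about surrounding code that this diff does not show:

# Hypothetical end-to-end flow (sketch; helper names are assumptions).
from pilot.model.adapter import get_llm_model_adapter        # assumed helper
from pilot.server.chat_adapter import get_llm_chat_adapter   # assumed helper

model_path = "/data/models/gorilla-7b"                       # assumed path

llm_adapter = get_llm_model_adapter(model_path)               # -> GorillaAdapter
model, tokenizer = llm_adapter.loader(model_path, {})

chat_adapter = get_llm_chat_adapter(model_path)               # -> GorillaChatAdapter
stream_func = chat_adapter.get_generate_stream_func()         # -> gorilla_llm.generate_stream

params = {"prompt": "Find me an API for object detection.", "stop": "###"}
for output in stream_func(model, tokenizer, params, device="cuda"):
    pass  # each iteration yields the completion decoded so far
print(output)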
@@ -109,7 +119,7 @@ class ProxyllmChatAdapter(BaseChatAdpter):
 register_llm_model_chat_adapter(VicunaChatAdapter)
 register_llm_model_chat_adapter(ChatGLMChatAdapter)
 register_llm_model_chat_adapter(GuanacoChatAdapter)
+register_llm_model_chat_adapter(GorillaChatAdapter)
 
 # Proxy model for test and develop, it's cheap for us now.
 register_llm_model_chat_adapter(ProxyllmChatAdapter)