Mirror of https://github.com/csunny/DB-GPT.git (synced 2025-09-16 06:30:02 +00:00)

Commit: fork file, replace import
@@ -3,6 +3,71 @@
import torch


@torch.inference_mode()
def generate_stream(model, tokenizer, params, device,
                    context_len=2048, stream_interval=2):
    """Fork from fastchat: https://github.com/lm-sys/FastChat/blob/main/fastchat/serve/inference.py """
    prompt = params["prompt"]
    l_prompt = len(prompt)
    temperature = float(params.get("temperature", 1.0))
    max_new_tokens = int(params.get("max_new_tokens", 256))
    stop_str = params.get("stop", None)

    input_ids = tokenizer(prompt).input_ids
    output_ids = list(input_ids)

    max_src_len = context_len - max_new_tokens - 8
    input_ids = input_ids[-max_src_len:]

    for i in range(max_new_tokens):
        if i == 0:
            out = model(
                torch.as_tensor([input_ids], device=device), use_cache=True)
            logits = out.logits
            past_key_values = out.past_key_values
        else:
            attention_mask = torch.ones(
                1, past_key_values[0][0].shape[-2] + 1, device=device)
            out = model(input_ids=torch.as_tensor([[token]], device=device),
                        use_cache=True,
                        attention_mask=attention_mask,
                        past_key_values=past_key_values)
            logits = out.logits
            past_key_values = out.past_key_values

        last_token_logits = logits[0][-1]

        if device == "mps":
            # Switch to CPU by avoiding some bugs in mps backend.
            last_token_logits = last_token_logits.float().to("cpu")

        if temperature < 1e-4:
            token = int(torch.argmax(last_token_logits))
        else:
            probs = torch.softmax(last_token_logits / temperature, dim=-1)
            token = int(torch.multinomial(probs, num_samples=1))

        output_ids.append(token)

        if token == tokenizer.eos_token_id:
            stopped = True
        else:
            stopped = False

        if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped:
            output = tokenizer.decode(output_ids, skip_special_tokens=True)
            pos = output.rfind(stop_str, l_prompt)
            if pos != -1:
                output = output[:pos]
                stopped = True
            yield output

        if stopped:
            break

    del past_key_values


@torch.inference_mode()
def generate_output(model, tokenizer, params, device, context_len=2048, stream_interval=2):
    prompt = params["prompt"]
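For orientation, a minimal sketch of how the forked generate_stream generator could be driven. Everything below is illustrative rather than part of the commit: the pilot.model.inference import path, the gpt2 stand-in checkpoint, and the params values are assumptions.

# Hypothetical driver for the forked generate_stream() above; module path,
# checkpoint name, and params values are assumptions, not part of this commit.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

from pilot.model.inference import generate_stream  # assumed module path

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained("gpt2")                 # small stand-in model
model = AutoModelForCausalLM.from_pretrained("gpt2").to(device)

params = {
    "prompt": "Q: What does DB-GPT do?\nA:",
    "temperature": 0.7,
    "max_new_tokens": 64,
    # generate_stream calls output.rfind(stop_str, ...), so pass a real stop
    # string; leaving it as None would raise a TypeError inside the loop.
    "stop": "\nQ:",
}

# Each yield is the full decoded text so far (prompt included), refreshed
# every stream_interval steps or when generation stops.
for partial in generate_stream(model, tokenizer, params, device):
    print(partial)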
@@ -5,6 +5,7 @@ import torch
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    AutoModel
)

from fastchat.serve.compression import compress_module
@@ -23,20 +24,39 @@ class ModerLoader:
         "device_map": "auto",
     }

-    def loader(self, load_8bit=False, debug=False):
-
-        tokenizer = AutoTokenizer.from_pretrained(self.model_path, use_fast=False)
-        model = AutoModelForCausalLM.from_pretrained(self.model_path, low_cpu_mem_usage=True, **self.kwargs)
+    def loader(self, num_gpus, load_8bit=False, debug=False):
+        if self.device == "cpu":
+            kwargs = {}
+        elif self.device == "cuda":
+            kwargs = {"torch_dtype": torch.float16}
+            if num_gpus == "auto":
+                kwargs["device_map"] = "auto"
+            else:
+                num_gpus = int(num_gpus)
+                if num_gpus != 1:
+                    kwargs.update({
+                        "device_map": "auto",
+                        "max_memory": {i: "13GiB" for i in range(num_gpus)},
+                    })
+        else:
+            raise ValueError(f"Invalid device: {self.device}")
+
+        if "chatglm" in self.model_path:
+            tokenizer = AutoTokenizer.from_pretrained(self.model_path, trust_remote_code=True)
+            model = AutoModel.from_pretrained(self.model_path, trust_remote_code=True).half().cuda()
+        else:
+            tokenizer = AutoTokenizer.from_pretrained(self.model_path, use_fast=False)
+            model = AutoModelForCausalLM.from_pretrained(self.model_path,
+                                                         low_cpu_mem_usage=True, **kwargs)
+
+        if load_8bit:
+            compress_module(model, self.device)
+
+        if (self.device == "cuda" and num_gpus == 1):
+            model.to(self.device)

         if debug:
             print(model)

-        if load_8bit:
-            compress_module(model, self.device)
-
-        # if self.device == "cuda":
-        #     model.to(self.device)
-
         return model, tokenizer
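A hypothetical call site for the reworked loader. The module path, the constructor signature, and the checkpoint path are not shown in this hunk and are assumed here purely for illustration.

# Hypothetical call site; module path, constructor signature, checkpoint path,
# and the num_gpus value are assumptions, not shown in this diff.
from pilot.model.loader import ModerLoader  # assumed module path

loader = ModerLoader("/data/models/vicuna-13b")  # assumed: constructor takes the checkpoint path
model, tokenizer = loader.loader(num_gpus="auto", load_8bit=False, debug=True)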
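The multi-GPU branch of the new loader amounts to handing transformers a device_map plus a per-GPU memory cap. The standalone sketch below shows the same pattern; the two-GPU setup and the checkpoint path are assumptions.

# Standalone sketch of the multi-GPU kwargs built when num_gpus > 1:
# device_map="auto" lets accelerate shard the fp16 weights across GPUs,
# and max_memory caps each device at 13 GiB, mirroring the loader above.
# The checkpoint path and num_gpus value are assumptions.
import torch
from transformers import AutoModelForCausalLM

num_gpus = 2  # assumed
kwargs = {
    "torch_dtype": torch.float16,
    "device_map": "auto",
    "max_memory": {i: "13GiB" for i in range(num_gpus)},
}

model = AutoModelForCausalLM.from_pretrained(
    "/data/models/vicuna-13b",  # assumed checkpoint path
    low_cpu_mem_usage=True,
    **kwargs,
)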