commit af0b974398
parent 7423a87423
Author: csunny
Date:   2023-05-03 15:49:29 +08:00

5 changed files with 216 additions and 15 deletions


@@ -4,10 +4,10 @@
 import torch
 
 @torch.inference_mode()
-def generate_output(model, tokenizer, params, device, context_len=2048):
+def generate_output(model, tokenizer, params, device, context_len=2048, stream_interval=2):
     prompt = params["prompt"]
     temperature = float(params.get("temperature", 1.0))
-    max_new_tokens = int(params.get("max_new_tokens", 256))
+    max_new_tokens = int(params.get("max_new_tokens", 1024))
     stop_parameter = params.get("stop", None)
     if stop_parameter == tokenizer.eos_token:
         stop_parameter = None
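
The first hunk raises the default token budget from 256 to 1024 and adds a stream_interval parameter to the signature. A minimal sketch of a caller, showing the params keys the function actually reads; the checkpoint name and device here are assumptions for illustration, not part of the commit:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Assumed small model just to make the sketch runnable.
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    params = {
        "prompt": "Q: What is a KV cache?\nA:",
        "temperature": 0.7,     # falls back to 1.0 if omitted
        "max_new_tokens": 128,  # falls back to the new default of 1024
        "stop": "\nQ:",         # a string or a list of strings
    }
    print(generate_output(model, tokenizer, params, device="cpu"))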
@@ -21,29 +21,29 @@ def generate_output(model, tokenizer, params, device, context_len=2048):
     else:
         raise TypeError("Stop parameter must be string or list of strings.")
 
     pos = -1
     input_ids = tokenizer(prompt).input_ids
     output_ids = []
 
     max_src_len = context_len - max_new_tokens - 8
     input_ids = input_ids[-max_src_len:]
+    stop_word = None
 
     for i in range(max_new_tokens):
         if i == 0:
-            out = model(torch.as_tensor([input_ids], device=device), use_cache=True)
+            out = model(
+                torch.as_tensor([input_ids], device=device), use_cache=True)
             logits = out.logits
             past_key_values = out.past_key_values
         else:
-            out = model(
-                input_ids=torch.as_tensor([[token]], device=device),
-                use_cache=True,
-                past_key_values=past_key_values,
-            )
+            out = model(input_ids=torch.as_tensor([[token]], device=device),
+                        use_cache=True,
+                        past_key_values=past_key_values)
             logits = out.logits
             past_key_values = out.past_key_values
 
         last_token_logits = logits[0][-1]
         if temperature < 1e-4:
             token = int(torch.argmax(last_token_logits))
         else:
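
The loop runs the full prompt through the model once with use_cache=True, then feeds back a single token per step while reusing past_key_values, so every step after the first is a one-token forward pass. The hunk boundary cuts the sampling branch off at the else:; a sketch of the usual continuation, which is an assumption based on the greedy branch above rather than text from this diff:

    import torch

    def sample_next_token(last_token_logits, temperature):
        # Near-zero temperature degenerates to greedy argmax, as in the diff.
        if temperature < 1e-4:
            return int(torch.argmax(last_token_logits))
        # Assumed else-branch: temperature-scaled softmax sampling.
        probs = torch.softmax(last_token_logits / temperature, dim=-1)
        return int(torch.multinomial(probs, num_samples=1))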
@@ -57,15 +57,22 @@ def generate_output(model, tokenizer, params, device, context_len=2048):
         else:
             stopped = False
 
         output = tokenizer.decode(output_ids, skip_special_tokens=True)
+        # print("Partial output:", output)
         for stop_str in stop_strings:
+            # print(f"Looking for '{stop_str}' in '{output[:l_prompt]}'#END")
             pos = output.rfind(stop_str)
             if pos != -1:
+                # print("Found stop str: ", output)
                 output = output[:pos]
+                # print("Trimmed output: ", output)
                 stopped = True
+                stop_word = stop_str
                 break
+            else:
+                pass
+                # print("Not found")
 
         if stopped:
             break
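
This hunk adds the commented-out debug prints and records the matching stop string in stop_word; the behavior is otherwise unchanged, and the trim itself is a plain str.rfind over the decoded text. The same technique as a self-contained helper (the function name is mine, not from the commit):

    def trim_at_stop(output, stop_strings):
        # Cut `output` at the last occurrence of any stop string; return the
        # trimmed text plus the stop string that fired, or None if none did.
        for stop_str in stop_strings:
            pos = output.rfind(stop_str)
            if pos != -1:
                return output[:pos], stop_str
        return output, None

    # trim_at_stop("A: 42\nQ: next", ["\nQ:"]) -> ("A: 42", "\nQ:")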
@@ -73,7 +80,7 @@ def generate_output(model, tokenizer, params, device, context_len=2048):
     del past_key_values
     if pos != -1:
         return output[:pos]
-    return output
+    return output
 
 
 @torch.inference_mode()
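
One note on the tail: when a stop string matched, output was already cut to output[:pos] inside the loop, so len(output) == pos and the final output[:pos] slices off nothing; when nothing matched, pos is -1 and the plain return runs. As far as I can tell the two paths are equivalent, so a terser tail would behave the same (a sketch, not what the commit does):

    del past_key_values
    return output  # already trimmed inside the loop when a stop string fired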