[Inference] User Experience: update the logic of default tokenizer and generation config. (#5337)

* add

* fix

* fix

* pause

* fix

* fix pytest

* align

* fix

* license

* fix

* fix

* fix readme

* fix some bugs

* remove tokenizer config
Author: Jianghai
Date: 2024-02-07 17:55:48 +08:00
Committed by: GitHub
Parent: 6fb4bcbb24
Commit: 1f8c7e7046
7 changed files with 62 additions and 23 deletions

@@ -31,7 +31,6 @@ def check_inference_engine(use_engine=False, prompt_template=None):
         .cuda()
         .half()
     )
     model = model.eval()
     inputs = [
@@ -47,6 +46,7 @@ def check_inference_engine(use_engine=False, prompt_template=None):
     if use_engine:
         inference_config = InferenceConfig(max_output_len=output_len, prompt_template=prompt_template)
         inference_engine = InferenceEngine(model, tokenizer, inference_config, verbose=True)
+        assert inference_engine.generation_config.max_new_tokens == output_len
         inference_engine.add_request(prompts=inputs)
         assert inference_engine.request_handler._has_waiting()
         generation_config = GenerationConfig(do_sample=do_sample, top_p=top_p, top_k=top_k)
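The added assertion checks that, when the caller does not pass a GenerationConfig, the engine's default generation config picks up max_new_tokens from InferenceConfig.max_output_len. Below is a minimal sketch of that expected behavior, not taken from the diff: the import paths, the tokenizer checkpoint, and the tiny model dimensions are assumptions made for illustration, and the distributed launch that wraps the real test is omitted.

# Sketch of the intended default-generation-config behavior after this change.
# Module paths and the tokenizer checkpoint below are assumptions, not from the diff.
from transformers import AutoTokenizer, GenerationConfig, LlamaConfig, LlamaForCausalLM

from colossalai.inference.config import InferenceConfig  # assumed module path
from colossalai.inference.core.engine import InferenceEngine  # assumed module path

output_len = 128

# Tiny randomly initialized Llama model, just to have something to hand to the engine.
# The real test runs inside a colossalai.launch / spawn setup, which is omitted here.
model = (
    LlamaForCausalLM(
        LlamaConfig(vocab_size=32000, hidden_size=64, num_attention_heads=4, num_hidden_layers=2)
    )
    .cuda()
    .half()
    .eval()
)
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")  # placeholder checkpoint

# No GenerationConfig is passed; the engine is expected to derive a default one from
# the InferenceConfig, so max_new_tokens should equal max_output_len (as the test asserts).
inference_config = InferenceConfig(max_output_len=output_len)
engine = InferenceEngine(model, tokenizer, inference_config, verbose=True)
assert engine.generation_config.max_new_tokens == output_len

# A caller can still build an explicit GenerationConfig for sampling, as the test does later.
generation_config = GenerationConfig(do_sample=False, top_p=0.5, top_k=50)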