fix(model): Fix vllm new tokenizer error (#1601)

Author: Fangyin Cheng
Date: 2024-06-05 15:27:58 +08:00
Committed by: GitHub
Parent: c3c063683c
Commit: 43b5821ce4
2 changed files with 7 additions and 5 deletions


@@ -61,9 +61,7 @@ async def generate_stream(
         **gen_params
     )
-    results_generator = model.generate(
-        prompt, sampling_params, request_id, prompt_token_ids=prompt_token_ids
-    )
+    results_generator = model.generate(prompt, sampling_params, request_id)
     async for request_output in results_generator:
         prompt = request_output.prompt
         if echo:
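
For context (not part of the commit): the change drops the separate prompt_token_ids keyword from the model.generate() call, matching newer vLLM releases where that argument is no longer accepted. If one wanted to support both older and newer vLLM versions, a minimal sketch could probe the installed signature before calling generate(); the helper name and the signature check below are assumptions for illustration, not code from this repository.

import inspect

def build_results_generator(model, prompt, sampling_params, request_id,
                            prompt_token_ids=None):
    # Hypothetical compatibility shim: check whether the installed vLLM
    # still accepts prompt_token_ids as a keyword on generate().
    params = inspect.signature(model.generate).parameters
    if prompt_token_ids is not None and "prompt_token_ids" in params:
        # Older vLLM: the pre-tokenized prompt is passed as a separate keyword.
        return model.generate(
            prompt, sampling_params, request_id,
            prompt_token_ids=prompt_token_ids,
        )
    # Newer vLLM: only the prompt, sampling params, and request id are passed.
    return model.generate(prompt, sampling_params, request_id)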