feat(model): Passing stop parameter to proxyllm (#2077)

Author: Fangyin Cheng
Date: 2024-10-18 14:02:53 +08:00
Committed by: GitHub
Parent: cf192a5fb7
Commit: 53ba6259d2
13 changed files with 31 additions and 4 deletions


@@ -39,6 +39,7 @@ async def chatgpt_generate_stream(
         temperature=params.get("temperature"),
         context=context,
         max_new_tokens=params.get("max_new_tokens"),
+        stop=params.get("stop"),
     )
     async for r in client.generate_stream(request):
         yield r
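
The hunk above forwards the caller's stop sequences instead of silently dropping them. A minimal sketch of the data flow, with a hypothetical params dict (the values are illustrative, not taken from this commit):

    # Hypothetical incoming params; with this change the "stop" entry
    # now reaches the built model request instead of being discarded.
    params = {
        "temperature": 0.7,
        "max_new_tokens": 256,
        "stop": ["\nHuman:", "<|endoftext|>"],
    }

    # Mirrors the diff: each optional generation setting is read off
    # params and passed through to the request builder.
    request_kwargs = dict(
        temperature=params.get("temperature"),
        max_new_tokens=params.get("max_new_tokens"),
        stop=params.get("stop"),  # the newly forwarded field
    )
    assert request_kwargs["stop"] == ["\nHuman:", "<|endoftext|>"]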
@@ -188,6 +189,8 @@ class OpenAILLMClient(ProxyLLMClient):
payload["temperature"] = request.temperature
if request.max_new_tokens:
payload["max_tokens"] = request.max_new_tokens
if request.stop:
payload["stop"] = request.stop
return payload
async def generate(
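
The second hunk applies the same guard pattern when the request is translated into an OpenAI-style payload: "stop" is only set when the request carries stop sequences, so the API default (no stop sequences) is preserved otherwise. A self-contained sketch of that pattern; the _Request dataclass here is a hypothetical stand-in for the real ModelRequest, not part of this commit:

    from dataclasses import dataclass
    from typing import List, Optional

    @dataclass
    class _Request:
        # Hypothetical stand-in for ModelRequest, for illustration only.
        temperature: Optional[float] = None
        max_new_tokens: Optional[int] = None
        stop: Optional[List[str]] = None

    def build_payload(request: _Request) -> dict:
        # Same guards as the diff: a field is added to the payload
        # only when it is set on the request.
        payload = {}
        if request.temperature:
            payload["temperature"] = request.temperature
        if request.max_new_tokens:
            payload["max_tokens"] = request.max_new_tokens
        if request.stop:
            payload["stop"] = request.stop
        return payload

    assert build_payload(_Request(stop=["\n"])) == {"stop": ["\n"]}
    assert build_payload(_Request()) == {}

Note that these are truthiness checks: falsy values (a temperature of 0, an empty stop list) are treated as unset, which matches the diff's behavior.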