Mirror of https://github.com/csunny/DB-GPT.git, synced 2025-09-13 13:10:29 +00:00
feat(model): Passing stop parameter to proxyllm (#2077)
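The commit threads a user-supplied stop sequence from each proxy provider's generate_stream params into the request object, and from there into the provider payload. Below is a minimal, self-contained sketch of that flow, assuming only the params keys visible in the hunks that follow; build_request_kwargs is a hypothetical helper written for illustration, not code from the repository. Because params.get("stop") returns None when the caller does not set it, existing callers are unaffected.

# Illustrative only: mirrors the params.get(...) pattern added across the
# *_generate_stream helpers below. build_request_kwargs is a hypothetical helper.
from typing import Any, Dict


def build_request_kwargs(params: Dict[str, Any]) -> Dict[str, Any]:
    return {
        "temperature": params.get("temperature"),
        "max_new_tokens": params.get("max_new_tokens"),
        "stop": params.get("stop"),  # new: None when the caller does not set it
    }


print(build_request_kwargs({"temperature": 0.7, "stop": ["\nHuman:"]}))
# -> {'temperature': 0.7, 'max_new_tokens': None, 'stop': ['\nHuman:']}
print(build_request_kwargs({"temperature": 0.7}))
# -> {'temperature': 0.7, 'max_new_tokens': None, 'stop': None}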
@@ -39,6 +39,7 @@ async def chatgpt_generate_stream(
         temperature=params.get("temperature"),
         context=context,
         max_new_tokens=params.get("max_new_tokens"),
+        stop=params.get("stop"),
     )
     async for r in client.generate_stream(request):
         yield r
@@ -188,6 +189,8 @@ class OpenAILLMClient(ProxyLLMClient):
             payload["temperature"] = request.temperature
         if request.max_new_tokens:
             payload["max_tokens"] = request.max_new_tokens
+        if request.stop:
+            payload["stop"] = request.stop
         return payload

     async def generate(
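The OpenAI-style client above only adds the key when a stop value is actually present. The following standalone sketch reproduces that payload-building pattern; FakeRequest and build_payload are illustrative stand-ins, and only the temperature / max_tokens / stop mapping mirrors the diff.

# Standalone sketch of the payload construction shown above; names are hypothetical.
from dataclasses import dataclass
from typing import List, Optional, Union


@dataclass
class FakeRequest:
    """Hypothetical stand-in for the proxy request object used above."""
    temperature: Optional[float] = None
    max_new_tokens: Optional[int] = None
    stop: Optional[Union[str, List[str]]] = None


def build_payload(request: FakeRequest) -> dict:
    payload = {}
    if request.temperature:
        payload["temperature"] = request.temperature
    if request.max_new_tokens:
        payload["max_tokens"] = request.max_new_tokens  # OpenAI-style key
    if request.stop:
        payload["stop"] = request.stop                  # new in this commit
    return payload


print(build_payload(FakeRequest(temperature=0.7, max_new_tokens=256, stop=["\nUser:"])))
# -> {'temperature': 0.7, 'max_tokens': 256, 'stop': ['\nUser:']}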
@@ -27,6 +27,7 @@ async def deepseek_generate_stream(
         temperature=params.get("temperature"),
         context=context,
         max_new_tokens=params.get("max_new_tokens"),
+        stop=params.get("stop"),
     )
     async for r in client.generate_stream(request):
         yield r
@@ -46,6 +46,7 @@ def gemini_generate_stream(
         temperature=params.get("temperature"),
         context=context,
         max_new_tokens=params.get("max_new_tokens"),
+        stop=params.get("stop"),
     )
     for r in client.sync_generate_stream(request):
         yield r
@@ -26,6 +26,7 @@ async def moonshot_generate_stream(
         temperature=params.get("temperature"),
         context=context,
         max_new_tokens=params.get("max_new_tokens"),
+        stop=params.get("stop"),
     )
     async for r in client.generate_stream(request):
         yield r
@@ -47,6 +47,7 @@ def spark_generate_stream(
         temperature=params.get("temperature"),
         context=context,
         max_new_tokens=params.get("max_new_tokens"),
+        stop=params.get("stop"),
     )
     for r in client.sync_generate_stream(request):
         yield r
@@ -21,6 +21,7 @@ def tongyi_generate_stream(
         temperature=params.get("temperature"),
         context=context,
         max_new_tokens=params.get("max_new_tokens"),
+        stop=params.get("stop"),
     )
     for r in client.sync_generate_stream(request):
         yield r
@@ -96,6 +97,7 @@ class TongyiLLMClient(ProxyLLMClient):
                 top_p=0.8,
                 stream=True,
                 result_format="message",
+                stop=request.stop,
             )
             for r in res:
                 if r:
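Unlike the OpenAI-style client, the Tongyi hunk above forwards request.stop unconditionally as a keyword of the vendor call, alongside top_p, stream, and result_format. A minimal sketch of that forwarding pattern follows; vendor_call and call_like_tongyi are hypothetical stand-ins, not the real SDK.

# Hypothetical stand-in for the vendor SDK call used above; only the
# keyword-forwarding pattern mirrors the diff (stop is passed even when None).
from typing import List, Optional, Union


def vendor_call(**kwargs):
    """Pretend SDK entry point: just echoes the keyword arguments it received."""
    return kwargs


def call_like_tongyi(stop: Optional[Union[str, List[str]]] = None):
    # Mirrors the hunk: stop is forwarded unconditionally with the other options.
    return vendor_call(
        top_p=0.8,
        stream=True,
        result_format="message",
        stop=stop,
    )


print(call_like_tongyi(["Observation:"]))  # stop list forwarded as-is
print(call_like_tongyi())                  # stop=None is forwarded too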
@@ -26,6 +26,7 @@ async def yi_generate_stream(
         temperature=params.get("temperature"),
         context=context,
         max_new_tokens=params.get("max_new_tokens"),
+        stop=params.get("stop"),
     )
     async for r in client.generate_stream(request):
         yield r
@@ -28,6 +28,7 @@ def zhipu_generate_stream(
         temperature=params.get("temperature"),
         context=context,
         max_new_tokens=params.get("max_new_tokens"),
+        stop=params.get("stop"),
     )
     for r in client.sync_generate_stream(request):
         yield r