diff --git a/assets/wechat.jpg b/assets/wechat.jpg
index cfc353527..3dde06d24 100644
Binary files a/assets/wechat.jpg and b/assets/wechat.jpg differ
diff --git a/dbgpt/app/openapi/api_v1/api_v1.py b/dbgpt/app/openapi/api_v1/api_v1.py
index 6c3dd0d86..87b69634e 100644
--- a/dbgpt/app/openapi/api_v1/api_v1.py
+++ b/dbgpt/app/openapi/api_v1/api_v1.py
@@ -439,9 +439,7 @@ async def stream_generator(chat, incremental: bool, model_name: str):
         _type_: streaming responses
     """
     span = root_tracer.start_span("stream_generator")
-    msg = "[LLM_ERROR]: llm server has no output, maybe your prompt template is wrong."
 
-    stream_id = f"chatcmpl-{str(uuid.uuid1())}"
     previous_response = ""
     async for chunk in chat.stream_call():
         if chunk:
@@ -453,7 +451,7 @@ async def stream_generator(chat, incremental: bool, model_name: str):
                 delta=DeltaMessage(role="assistant", content=incremental_output),
             )
             chunk = ChatCompletionStreamResponse(
-                id=stream_id, choices=[choice_data], model=model_name
+                id=chat.chat_session_id, choices=[choice_data], model=model_name
             )
             yield f"data: {chunk.json(exclude_unset=True, ensure_ascii=False)}\n\n"
         else:
diff --git a/dbgpt/client/client.py b/dbgpt/client/client.py
index bb064f227..eccbd26ae 100644
--- a/dbgpt/client/client.py
+++ b/dbgpt/client/client.py
@@ -247,7 +247,7 @@ class Client:
         if response.status_code == 200:
             async for line in response.aiter_lines():
                 try:
-                    if line == "data: [DONE]\n":
+                    if line.strip() == "data: [DONE]":
                         break
                     if line.startswith("data:"):
                         json_data = json.loads(line[len("data: ") :])
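
For context, a minimal sketch of the client-side behavior the last hunk fixes, assuming a generic OpenAI-style SSE endpoint (the `consume_sse` helper, URL, and payload below are placeholders, not part of this change). httpx's `aiter_lines()` yields each line with its line ending already stripped, so the old comparison against `"data: [DONE]\n"` could never match and the loop never broke on the sentinel:

```python
import json

import httpx


async def consume_sse(url: str, payload: dict):
    """Hypothetical helper mirroring the fixed Client loop: stream an
    OpenAI-style SSE response and stop at the [DONE] sentinel."""
    async with httpx.AsyncClient() as client:
        async with client.stream("POST", url, json=payload) as response:
            if response.status_code == 200:
                async for line in response.aiter_lines():
                    # aiter_lines() strips the trailing newline, so compare
                    # against the bare sentinel; strip() also tolerates any
                    # stray surrounding whitespace.
                    if line.strip() == "data: [DONE]":
                        break
                    if line.startswith("data:"):
                        yield json.loads(line[len("data: ") :])
```

The server-side hunks are the same idea from the other direction: reusing `chat.chat_session_id` as the `id` of every `ChatCompletionStreamResponse` keeps all chunks of one conversation under a single stable identifier, instead of minting a fresh `chatcmpl-{uuid}` per request, and the now-unused `msg`/`stream_id` locals are dropped.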