fix(agent): agent's llmclient bug (#2298)

Co-authored-by: dongzhancai1 <dongzhancai1@jd.com>
Cooper authored 2025-02-07 11:35:53 +08:00, committed by GitHub
parent bb5a078550
commit 0310ce9fa3


@@ -199,7 +199,7 @@ class AIWrapper:
         model_request = _build_model_request(payload)
         str_prompt = model_request.messages_to_string()
         model_output = None
+        sep = "########S#E#P#########"
         async for output in self._llm_client.generate_stream(model_request.copy()):  # type: ignore # noqa
             model_output = output
             if memory and stream_out:
@@ -210,7 +210,7 @@ class AIWrapper:
                         "receiver": "?",
                         "model": llm_model,
                         "markdown": self._output_parser.parse_model_nostream_resp(
-                            model_output, "###"
+                            model_output, sep
                         ),
                     }
@@ -221,7 +221,7 @@ class AIWrapper:
         if not model_output:
             raise ValueError("LLM generate stream is null!")
         parsed_output = self._output_parser.parse_model_nostream_resp(
-            model_output, "###"
+            model_output, sep
         )
         parsed_output = parsed_output.strip().replace("\\n", "\n")
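
For context, a minimal sketch of why the hardcoded "###" separator was fragile. The snippet below uses a hypothetical stand-in for parse_model_nostream_resp, assuming the parser splits the accumulated text on the separator and keeps the last segment (the real DB-GPT output parser differs in detail). Since "###" is also an ordinary Markdown heading prefix that models frequently emit, real output could be cut off at the wrong point; a long, improbable sentinel such as the one this commit introduces avoids the collision.

# Hypothetical stand-in for BaseOutputParser.parse_model_nostream_resp:
# assume it splits the accumulated stream text on `sep` and keeps the
# final segment. Illustration only, not the actual DB-GPT implementation.
def parse_nostream(text: str, sep: str) -> str:
    return text.split(sep)[-1].strip()


STREAM_TEXT = "### Plan\n1. Query the database\n### Result\nDone."

# Old behavior: any Markdown heading in the model's own output collides
# with the hardcoded separator and truncates the parsed answer.
print(parse_nostream(STREAM_TEXT, "###"))
# -> "Result\nDone."  (everything before the last "###" is lost)

# New behavior: the unique sentinel never occurs in normal model output,
# so the text survives intact.
sep = "########S#E#P#########"
print(parse_nostream(STREAM_TEXT, sep))
# -> the full text, unchanged

Choosing an improbable sentinel is a lightweight alternative to structured framing (e.g., length-prefixed or JSON-wrapped chunks) when the payload is free-form text, which is presumably why the fix threads one `sep` variable through every parse_model_nostream_resp call instead of reworking the protocol.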