mirror of
https://github.com/hwchase17/langchain.git
synced 2025-09-15 22:44:36 +00:00
community[patch]: callback before yield for _stream/_astream (#17907)
- Description: callback on_llm_new_token before yield chunk for _stream/_astream for some chat models, make all chat models in a consistent behaviour. - Issue: N/A - Dependencies: N/A
This commit is contained in:
@@ -206,12 +206,10 @@ class ChatEdenAI(BaseChatModel):
         for chunk_response in response.iter_lines():
             chunk = json.loads(chunk_response.decode())
             token = chunk["text"]
-            chat_generatio_chunk = ChatGenerationChunk(
-                message=AIMessageChunk(content=token)
-            )
-            yield chat_generatio_chunk
+            cg_chunk = ChatGenerationChunk(message=AIMessageChunk(content=token))
             if run_manager:
-                run_manager.on_llm_new_token(token, chunk=chat_generatio_chunk)
+                run_manager.on_llm_new_token(token, chunk=cg_chunk)
+            yield cg_chunk
 
     async def _astream(
         self,
@@ -246,14 +244,14 @@ class ChatEdenAI(BaseChatModel):
         async for chunk_response in response.content:
             chunk = json.loads(chunk_response.decode())
             token = chunk["text"]
-            chat_generation_chunk = ChatGenerationChunk(
+            cg_chunk = ChatGenerationChunk(
                 message=AIMessageChunk(content=token)
             )
-            yield chat_generation_chunk
             if run_manager:
                 await run_manager.on_llm_new_token(
-                    token=chunk["text"], chunk=chat_generation_chunk
+                    token=chunk["text"], chunk=cg_chunk
                 )
+            yield cg_chunk
 
     def _generate(
         self,
|
Reference in New Issue
Block a user