langchain_openai[patch]: Invoke callback prior to yielding token (#18269)
## PR title
langchain_openai[patch]: Invoke callback prior to yielding token

## PR message
- Description: Invoke callback prior to yielding token in the `_stream` and `_astream` methods for `langchain_openai`.
- Issue: https://github.com/langchain-ai/langchain/issues/16913
- Dependencies: None
- Twitter handle: None
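To illustrate the ordering this PR establishes, here is a minimal sketch of a streaming generator that notifies a callback before handing each token to the consumer. The names (`stream_tokens`, `on_new_token`) are hypothetical and are not the actual `langchain_openai` implementation; the sketch only mirrors the change in the diff below, where `run_manager.on_llm_new_token(...)` now runs before `yield chunk`.

```python
from typing import Callable, Iterator, Optional


def stream_tokens(
    tokens: Iterator[str],
    on_new_token: Optional[Callable[[str], None]] = None,
) -> Iterator[str]:
    """Yield tokens, invoking the callback before each yield."""
    for token in tokens:
        if on_new_token:
            on_new_token(token)  # callback observes the token first
        yield token  # before this PR, the yield happened before the callback


if __name__ == "__main__":
    seen = []
    for tok in stream_tokens(iter(["Hello", ",", " world"]), seen.append):
        print(tok, end="")
    print()
    print(seen)  # the callback saw every token before the consumer printed it
```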
This commit is contained in:
parent 5ee76fccd5
commit 0486404a74
```diff
@@ -253,7 +253,7 @@ class BaseOpenAI(BaseLLM):
             if not isinstance(stream_resp, dict):
                 stream_resp = stream_resp.model_dump()
             chunk = _stream_response_to_generation_chunk(stream_resp)
-            yield chunk
+
             if run_manager:
                 run_manager.on_llm_new_token(
                     chunk.text,
@@ -265,6 +265,7 @@ class BaseOpenAI(BaseLLM):
                         else None
                     ),
                 )
+            yield chunk
 
     async def _astream(
         self,
@@ -281,7 +282,7 @@ class BaseOpenAI(BaseLLM):
             if not isinstance(stream_resp, dict):
                 stream_resp = stream_resp.model_dump()
             chunk = _stream_response_to_generation_chunk(stream_resp)
-            yield chunk
+
             if run_manager:
                 await run_manager.on_llm_new_token(
                     chunk.text,
@@ -293,6 +294,7 @@ class BaseOpenAI(BaseLLM):
                         else None
                     ),
                 )
+            yield chunk
 
     def _generate(
         self,
```
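The `_astream` hunks apply the same reordering on the async path. A minimal sketch of that pattern, again with hypothetical names rather than the `langchain_openai` source, where the callback is awaited before the chunk is yielded:

```python
import asyncio
from typing import AsyncIterator, Awaitable, Callable, List, Optional


async def astream_tokens(
    tokens: List[str],
    on_new_token: Optional[Callable[[str], Awaitable[None]]] = None,
) -> AsyncIterator[str]:
    # Same ordering as the sync path: await the callback, then yield.
    for token in tokens:
        if on_new_token:
            await on_new_token(token)
        yield token


async def main() -> None:
    async def record(token: str) -> None:
        print(f"callback saw: {token!r}")

    async for tok in astream_tokens(["Hello", " world"], record):
        print(f"consumer got: {tok!r}")


if __name__ == "__main__":
    asyncio.run(main())
```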