community[patch]: callback before yield for _stream/_astream (#17907)

- Description: call on_llm_new_token before yielding the chunk in
_stream/_astream for the chat models that still yielded first, so that all
chat models stream with a consistent callback ordering (a minimal sketch of
the pattern follows this list).
- Issue: N/A
- Dependencies: N/A
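
Below is a minimal sketch, not code touched by this PR: a hypothetical
EchoStreamingChatModel built on langchain_core's BaseChatModel, showing the
ordering this patch standardizes. Only the callback-before-yield step inside
_stream mirrors the change; the class and its echo behavior are illustrative.

```python
# Minimal sketch (hypothetical model, not part of this PR) of the pattern the
# patch standardizes: fire on_llm_new_token BEFORE yielding the chunk, so a
# callback handler never lags behind the consumer of the generator.
from typing import Any, Iterator, List, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult


class EchoStreamingChatModel(BaseChatModel):
    """Toy chat model that streams the last input message back word by word."""

    @property
    def _llm_type(self) -> str:
        return "echo-streaming"

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        # Non-streaming path: echo the last message back in one piece.
        message = AIMessage(content=str(messages[-1].content))
        return ChatResult(generations=[ChatGeneration(message=message)])

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        for token in str(messages[-1].content).split():
            chunk = ChatGenerationChunk(message=AIMessageChunk(content=token))
            # Callback first ...
            if run_manager:
                run_manager.on_llm_new_token(token=token, chunk=chunk)
            # ... then hand the chunk to whoever is iterating the stream.
            yield chunk
```

With this ordering, an attached callback handler observes each token no later
than the caller iterating model.stream(...) receives the corresponding chunk.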
mackong
2024-02-23 08:15:21 +08:00
committed by GitHub
parent 15e42f1799
commit 9678797625
22 changed files with 66 additions and 55 deletions


@@ -325,13 +325,13 @@ class GPTRouter(BaseChatModel):
                 chunk.data, default_chunk_class
             )
-            yield chunk
             if run_manager:
                 run_manager.on_llm_new_token(
                     token=chunk.message.content, chunk=chunk.message
                 )
+            yield chunk
 
     async def _astream(
         self,
         messages: List[BaseMessage],
@@ -358,13 +358,13 @@ class GPTRouter(BaseChatModel):
                 chunk.data, default_chunk_class
             )
-            yield chunk
             if run_manager:
                 await run_manager.on_llm_new_token(
                     token=chunk.message.content, chunk=chunk.message
                 )
+            yield chunk
 
     def _create_message_dicts(
         self, messages: List[BaseMessage], stop: Optional[List[str]]
     ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]: