Mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-19 21:33:51 +00:00)
docs: Update doc-string in base callback managers (#15885)
Update doc-strings with a comment about on_llm_start vs. on_chat_model_start.
This commit is contained in: parent 881dc28d2c, commit ef2bd745cb
@@ -166,7 +166,12 @@ class CallbackManagerMixin:
         metadata: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
     ) -> Any:
-        """Run when LLM starts running."""
+        """Run when LLM starts running.
+
+        **ATTENTION**: This method is called for non-chat models (regular LLMs). If
+            you're implementing a handler for a chat model,
+            you should use on_chat_model_start instead.
+        """

     def on_chat_model_start(
         self,
@@ -179,7 +184,13 @@ class CallbackManagerMixin:
         metadata: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
     ) -> Any:
-        """Run when a chat model starts running."""
+        """Run when a chat model starts running.
+
+        **ATTENTION**: This method is called for chat models. If you're implementing
+            a handler for a non-chat model, you should use on_llm_start instead.
+        """
+        # NotImplementedError is thrown intentionally
+        # Callback handler will fall back to on_llm_start if this exception is thrown
         raise NotImplementedError(
             f"{self.__class__.__name__} does not implement `on_chat_model_start`"
         )
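The distinction the new doc-strings draw can be illustrated with a small custom handler. This is only a sketch, assuming `langchain_core` is installed and using the hook signatures from the file edited above; the class name and print statements are hypothetical:

```python
from typing import Any, Dict, List

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage


class StartLoggingHandler(BaseCallbackHandler):
    """Hypothetical handler that logs starts for both model kinds."""

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> Any:
        # Called for non-chat (regular) LLMs, per the doc-string above.
        print(f"LLM start: {len(prompts)} prompt(s)")

    def on_chat_model_start(
        self,
        serialized: Dict[str, Any],
        messages: List[List[BaseMessage]],
        **kwargs: Any,
    ) -> Any:
        # Called for chat models. If this override were absent, the base class
        # would raise NotImplementedError and the callback machinery would fall
        # back to on_llm_start instead.
        print(f"Chat model start: {len(messages)} conversation(s)")
```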
@@ -308,7 +319,12 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         metadata: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
     ) -> None:
-        """Run when LLM starts running."""
+        """Run when LLM starts running.
+
+        **ATTENTION**: This method is called for non-chat models (regular LLMs). If
+            you're implementing a handler for a chat model,
+            you should use on_chat_model_start instead.
+        """

     async def on_chat_model_start(
         self,
@@ -321,7 +337,13 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         metadata: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
     ) -> Any:
-        """Run when a chat model starts running."""
+        """Run when a chat model starts running.
+
+        **ATTENTION**: This method is called for chat models. If you're implementing
+            a handler for a non-chat model, you should use on_llm_start instead.
+        """
+        # NotImplementedError is thrown intentionally
+        # Callback handler will fall back to on_llm_start if this exception is thrown
         raise NotImplementedError(
             f"{self.__class__.__name__} does not implement `on_chat_model_start`"
         )
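The same note applies to the async variant. A minimal sketch of an async handler that opts into the chat-model hook (again assuming `langchain_core` is installed; the class name is hypothetical):

```python
from typing import Any, Dict, List

from langchain_core.callbacks import AsyncCallbackHandler
from langchain_core.messages import BaseMessage


class AsyncStartLoggingHandler(AsyncCallbackHandler):
    """Hypothetical async handler mirroring the sync sketch above."""

    async def on_chat_model_start(
        self,
        serialized: Dict[str, Any],
        messages: List[List[BaseMessage]],
        **kwargs: Any,
    ) -> Any:
        # Overriding this avoids the intentional NotImplementedError in the
        # base class, so no fallback to on_llm_start occurs for chat models.
        print(f"[async] chat model start: {len(messages)} conversation(s)")
```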
@@ -359,8 +381,9 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         **kwargs: Any,
     ) -> None:
         """Run when LLM errors.

         Args:
-            error (BaseException): The error that occurred.
+            error: The error that occurred.
             kwargs (Any): Additional keyword arguments.
                 - response (LLMResult): The response which was generated before
                     the error occurred.
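The on_llm_error doc-string also documents that a partial LLMResult may be passed through kwargs. A hedged sketch of a handler reading it (the class name and logging are hypothetical):

```python
from typing import Any

from langchain_core.callbacks import AsyncCallbackHandler


class ErrorReportingHandler(AsyncCallbackHandler):
    """Hypothetical handler that inspects the documented on_llm_error kwargs."""

    async def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
        # Per the updated doc-string, an LLMResult generated before the error
        # may be passed under the "response" key.
        partial = kwargs.get("response")
        print(f"LLM error: {error!r} (partial response: {partial is not None})")
```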