fix(core): AsyncCallbackHandler docstring cleanup (#32897)

Also fixes IDE warnings.
Author: Mason Daugherty
Date: 2025-09-10 21:31:45 -04:00
Committed by: GitHub
Parent: a8828b1bda
Commit: 8e213c9f1a
2 changed files with 30 additions and 26 deletions

File: libs/core/langchain_core/callbacks/base.py

@@ -71,7 +71,9 @@ class LLMManagerMixin:
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,
     ) -> Any:
-        """Run on new LLM token. Only available when streaming is enabled.
+        """Run on new output token. Only available when streaming is enabled.
+
+        For both chat models and non-chat models (legacy LLMs).

         Args:
             token (str): The new token.
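
For reference, a handler consumes these token callbacks by overriding `on_llm_new_token`. A minimal sketch (the class name is illustrative):

```python
from typing import Any

from langchain_core.callbacks import AsyncCallbackHandler


class TokenPrinter(AsyncCallbackHandler):
    """Illustrative handler: print each streamed token as it arrives."""

    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        # Fires for chat models and non-chat models (legacy LLMs) alike,
        # but only when the run is invoked with streaming enabled.
        print(token, end="", flush=True)
```
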
@@ -243,7 +245,7 @@ class CallbackManagerMixin:
     ) -> Any:
         """Run when LLM starts running.

-        .. ATTENTION::
+        .. warning::
             This method is called for non-chat models (regular LLMs). If you're
             implementing a handler for a chat model, you should use
             ``on_chat_model_start`` instead.
@@ -271,8 +273,9 @@ class CallbackManagerMixin:
     ) -> Any:
         """Run when a chat model starts running.

-        **ATTENTION**: This method is called for chat models. If you're implementing
-            a handler for a non-chat model, you should use ``on_llm_start`` instead.
+        .. warning::
+            This method is called for chat models. If you're implementing a handler for
+            a non-chat model, you should use ``on_llm_start`` instead.

         Args:
             serialized (dict[str, Any]): The serialized chat model.
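
The distinction these warnings describe, as a sketch; the handler below (name illustrative) implements both start hooks to show which one fires for which model type:

```python
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage


class StartLogger(BaseCallbackHandler):
    """Illustrative handler: log starts for both model flavours."""

    def on_llm_start(
        self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
    ) -> None:
        # Non-chat models (regular LLMs): input arrives as prompt strings.
        print(f"llm start: {len(prompts)} prompt(s)")

    def on_chat_model_start(
        self,
        serialized: dict[str, Any],
        messages: list[list[BaseMessage]],
        **kwargs: Any,
    ) -> None:
        # Chat models: input arrives as lists of messages instead.
        print(f"chat model start: {len(messages)} message list(s)")
```
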
@@ -489,9 +492,9 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         metadata: Optional[dict[str, Any]] = None,
         **kwargs: Any,
     ) -> None:
-        """Run when LLM starts running.
+        """Run when the model starts running.

-        .. ATTENTION::
+        .. warning::
             This method is called for non-chat models (regular LLMs). If you're
             implementing a handler for a chat model, you should use
             ``on_chat_model_start`` instead.
@@ -519,8 +522,9 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     ) -> Any:
         """Run when a chat model starts running.

-        **ATTENTION**: This method is called for chat models. If you're implementing
-            a handler for a non-chat model, you should use ``on_llm_start`` instead.
+        .. warning::
+            This method is called for chat models. If you're implementing a handler for
+            a non-chat model, you should use ``on_llm_start`` instead.

         Args:
             serialized (dict[str, Any]): The serialized chat model.
@@ -546,7 +550,9 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         tags: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> None:
-        """Run on new LLM token. Only available when streaming is enabled.
+        """Run on new output token. Only available when streaming is enabled.
+
+        For both chat models and non-chat models (legacy LLMs).

         Args:
             token (str): The new token.
@@ -567,7 +573,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         tags: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> None:
-        """Run when LLM ends running.
+        """Run when the model ends running.

         Args:
             response (LLMResult): The response which was generated.
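
A minimal async sketch using this end hook (class name illustrative):

```python
from typing import Any, Optional

from langchain_core.callbacks import AsyncCallbackHandler
from langchain_core.outputs import LLMResult


class ResponseCollector(AsyncCallbackHandler):
    """Illustrative handler: keep the last completed response."""

    def __init__(self) -> None:
        self.last_response: Optional[LLMResult] = None

    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        # Fires once per run, for chat models and legacy LLMs alike.
        self.last_response = response
```
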
@@ -867,7 +873,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         metadata: Optional[dict[str, Any]] = None,
         **kwargs: Any,
     ) -> None:
-        """Override to define a handler for a custom event.
+        """Override to define a handler for custom events.

         Args:
             name: The name of the custom event.
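
Custom events are emitted with `adispatch_custom_event` and received by this hook. A sketch; the event name `"progress"` and the payload are made up for illustration (on Python < 3.11, the `RunnableConfig` has to be threaded through explicitly for the dispatch to reach handlers):

```python
from typing import Any
from uuid import UUID

from langchain_core.callbacks import AsyncCallbackHandler, adispatch_custom_event
from langchain_core.runnables import RunnableLambda


class CustomEventLogger(AsyncCallbackHandler):
    """Illustrative handler: print every dispatched custom event."""

    async def on_custom_event(
        self, name: str, data: Any, *, run_id: UUID, **kwargs: Any
    ) -> None:
        print(f"custom event {name!r}: {data!r}")


async def step(x: int) -> int:
    # Events must be dispatched from inside a runnable so they can
    # propagate to the handlers attached to the invocation.
    await adispatch_custom_event("progress", {"x": x})
    return x + 1


# Usage: await RunnableLambda(step).ainvoke(1, {"callbacks": [CustomEventLogger()]})
```
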

File: libs/core/langchain_core/tracers/event_stream.py

@@ -224,7 +224,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
             yield chunk

     def tap_output_iter(self, run_id: UUID, output: Iterator[T]) -> Iterator[T]:
-        """Tap the output aiter.
+        """Tap the output iter.

         Args:
             run_id: The ID of the run.
@@ -315,7 +315,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
         name: Optional[str] = None,
         **kwargs: Any,
     ) -> None:
-        """Start a trace for an LLM run."""
+        """Start a trace for a chat model run."""
         name_ = _assign_name(name, serialized)
         run_type = "chat_model"
@@ -357,7 +357,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
         name: Optional[str] = None,
         **kwargs: Any,
     ) -> None:
-        """Start a trace for an LLM run."""
+        """Start a trace for a (non-chat model) LLM run."""
         name_ = _assign_name(name, serialized)
         run_type = "llm"
@@ -421,6 +421,10 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,
     ) -> None:
+        """Run on new output token. Only available when streaming is enabled.
+
+        For both chat models and non-chat models (legacy LLMs).
+        """
         run_info = self.run_map.get(run_id)
         chunk_: Union[GenerationChunk, BaseMessageChunk]
@@ -466,17 +470,15 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
async def on_llm_end(
self, response: LLMResult, *, run_id: UUID, **kwargs: Any
) -> None:
"""End a trace for an LLM run.
"""End a trace for a model run.
Args:
response (LLMResult): The response which was generated.
run_id (UUID): The run ID. This is the ID of the current run.
For both chat models and non-chat models (legacy LLMs).
Raises:
ValueError: If the run type is not ``'llm'`` or ``'chat_model'``.
"""
run_info = self.run_map.pop(run_id)
inputs_ = run_info["inputs"]
inputs_ = run_info.get("inputs")
generations: Union[list[list[GenerationChunk]], list[list[ChatGenerationChunk]]]
output: Union[dict, BaseMessage] = {}
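
The `["inputs"]` → `.get("inputs")` changes in this file are the defensive-access part of the fix: a run record that never captured its inputs no longer raises `KeyError`. The difference in plain Python:

```python
run_info: dict = {"name": "my_llm"}  # hypothetical run record with no captured inputs

inputs = run_info.get("inputs")  # None, instead of the KeyError that [] indexing raises
if inputs is not None:  # mirrors the guard used downstream in the handler
    print(inputs)
```
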
@@ -654,10 +656,6 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
async def on_tool_end(self, output: Any, *, run_id: UUID, **kwargs: Any) -> None:
"""End a trace for a tool run.
Args:
output: The output of the tool.
run_id: The run ID. This is the ID of the current run.
Raises:
AssertionError: If the run ID is a tool call and does not have inputs
"""
@@ -742,7 +740,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
                 "event": "on_retriever_end",
                 "data": {
                     "output": documents,
-                    "input": run_info["inputs"],
+                    "input": run_info.get("inputs"),
                 },
                 "run_id": str(run_id),
                 "name": run_info["name"],
@@ -854,12 +852,12 @@ async def _astream_events_implementation_v1(
                 # Usually they will NOT be available for components that operate
                 # on streams, since those components stream the input and
                 # don't know its final value until the end of the stream.
-                inputs = log_entry["inputs"]
+                inputs = log_entry.get("inputs")
                 if inputs is not None:
                     data["input"] = inputs

            if event_type == "end":
-                inputs = log_entry["inputs"]
+                inputs = log_entry.get("inputs")
                if inputs is not None:
                    data["input"] = inputs