Mirror of https://github.com/hwchase17/langchain.git, synced 2025-09-13 21:47:12 +00:00
fix(core): AsyncCallbackHandler docstring cleanup (#32897), plus IDE warning fixes
@@ -71,7 +71,9 @@ class LLMManagerMixin:
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,
     ) -> Any:
-        """Run on new LLM token. Only available when streaming is enabled.
+        """Run on new output token. Only available when streaming is enabled.
 
+        For both chat models and non-chat models (legacy LLMs).
+
         Args:
             token (str): The new token.
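For context, the hook this hunk documents can be overridden as in the following minimal sketch (illustrative only; the handler name is an assumption, not part of this commit):

from typing import Any, Optional
from uuid import UUID

from langchain_core.callbacks import AsyncCallbackHandler


class TokenCollector(AsyncCallbackHandler):
    """Collects streamed tokens; works for chat models and legacy LLMs alike."""

    def __init__(self) -> None:
        self.tokens: list[str] = []

    async def on_llm_new_token(
        self,
        token: str,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> None:
        # Fires once per output token, and only when streaming is enabled.
        self.tokens.append(token)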
@@ -243,7 +245,7 @@ class CallbackManagerMixin:
     ) -> Any:
         """Run when LLM starts running.
 
-        .. ATTENTION::
+        .. warning::
             This method is called for non-chat models (regular LLMs). If you're
             implementing a handler for a chat model, you should use
             ``on_chat_model_start`` instead.
@@ -271,8 +273,9 @@ class CallbackManagerMixin:
     ) -> Any:
         """Run when a chat model starts running.
 
-        **ATTENTION**: This method is called for chat models. If you're implementing
-        a handler for a non-chat model, you should use ``on_llm_start`` instead.
+        .. warning::
+            This method is called for chat models. If you're implementing a handler for
+            a non-chat model, you should use ``on_llm_start`` instead.
 
         Args:
             serialized (dict[str, Any]): The serialized chat model.
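The distinction the reworded warning draws is easiest to see in a handler that implements both hooks, as in this minimal sketch (the class name and print statements are assumptions, not from the diff):

from typing import Any

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage


class StartLogger(BaseCallbackHandler):
    """Logs run starts for both model kinds."""

    def on_llm_start(
        self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
    ) -> Any:
        # Fired only by non-chat models, which receive plain string prompts.
        print(f"LLM start: {len(prompts)} prompt(s)")

    def on_chat_model_start(
        self,
        serialized: dict[str, Any],
        messages: list[list[BaseMessage]],
        **kwargs: Any,
    ) -> Any:
        # Fired only by chat models, which receive batches of message lists.
        print(f"chat model start: {len(messages)} batch(es)")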
@@ -489,9 +492,9 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         metadata: Optional[dict[str, Any]] = None,
         **kwargs: Any,
     ) -> None:
-        """Run when LLM starts running.
+        """Run when the model starts running.
 
-        .. ATTENTION::
+        .. warning::
             This method is called for non-chat models (regular LLMs). If you're
             implementing a handler for a chat model, you should use
             ``on_chat_model_start`` instead.
@@ -519,8 +522,9 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     ) -> Any:
         """Run when a chat model starts running.
 
-        **ATTENTION**: This method is called for chat models. If you're implementing
-        a handler for a non-chat model, you should use ``on_llm_start`` instead.
+        .. warning::
+            This method is called for chat models. If you're implementing a handler for
+            a non-chat model, you should use ``on_llm_start`` instead.
 
         Args:
             serialized (dict[str, Any]): The serialized chat model.
@@ -546,7 +550,9 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         tags: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> None:
-        """Run on new LLM token. Only available when streaming is enabled.
+        """Run on new output token. Only available when streaming is enabled.
 
+        For both chat models and non-chat models (legacy LLMs).
+
         Args:
             token (str): The new token.
@@ -567,7 +573,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         tags: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> None:
-        """Run when LLM ends running.
+        """Run when the model ends running.
 
         Args:
             response (LLMResult): The response which was generated.
@@ -867,7 +873,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         metadata: Optional[dict[str, Any]] = None,
         **kwargs: Any,
     ) -> None:
-        """Override to define a handler for a custom event.
+        """Override to define a handler for custom events.
 
         Args:
             name: The name of the custom event.
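For reference, on_custom_event receives events that user code dispatches from inside a running chain or tool. A minimal sketch (the handler and step names are assumptions; only adispatch_custom_event and the hook itself come from langchain_core):

import asyncio
from typing import Any
from uuid import UUID

from langchain_core.callbacks import AsyncCallbackHandler, adispatch_custom_event
from langchain_core.runnables import RunnableLambda


class CustomEventEcho(AsyncCallbackHandler):
    async def on_custom_event(
        self, name: str, data: Any, *, run_id: UUID, **kwargs: Any
    ) -> None:
        # Receives whatever name/data the dispatcher sent.
        print(f"custom event {name!r}: {data}")


async def step(x: int) -> int:
    # Must be called inside a runnable so the event attaches to the current run
    # (on Python < 3.11, pass the run's config to adispatch_custom_event explicitly).
    await adispatch_custom_event("progress", {"done": x})
    return x + 1


asyncio.run(
    RunnableLambda(step).ainvoke(1, config={"callbacks": [CustomEventEcho()]})
)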
@@ -224,7 +224,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
             yield chunk
 
     def tap_output_iter(self, run_id: UUID, output: Iterator[T]) -> Iterator[T]:
-        """Tap the output aiter.
+        """Tap the output iter.
 
         Args:
             run_id: The ID of the run.
@@ -315,7 +315,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
         name: Optional[str] = None,
         **kwargs: Any,
     ) -> None:
-        """Start a trace for an LLM run."""
+        """Start a trace for a chat model run."""
         name_ = _assign_name(name, serialized)
         run_type = "chat_model"
 
@@ -357,7 +357,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
         name: Optional[str] = None,
         **kwargs: Any,
     ) -> None:
-        """Start a trace for an LLM run."""
+        """Start a trace for a (non-chat model) LLM run."""
         name_ = _assign_name(name, serialized)
         run_type = "llm"
 
@@ -421,6 +421,10 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,
     ) -> None:
+        """Run on new output token. Only available when streaming is enabled.
+
+        For both chat models and non-chat models (legacy LLMs).
+        """
         run_info = self.run_map.get(run_id)
         chunk_: Union[GenerationChunk, BaseMessageChunk]
 
@@ -466,17 +470,15 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
     async def on_llm_end(
         self, response: LLMResult, *, run_id: UUID, **kwargs: Any
     ) -> None:
-        """End a trace for an LLM run.
+        """End a trace for a model run.
 
-        Args:
-            response (LLMResult): The response which was generated.
-            run_id (UUID): The run ID. This is the ID of the current run.
+        For both chat models and non-chat models (legacy LLMs).
 
         Raises:
             ValueError: If the run type is not ``'llm'`` or ``'chat_model'``.
         """
         run_info = self.run_map.pop(run_id)
-        inputs_ = run_info["inputs"]
+        inputs_ = run_info.get("inputs")
 
         generations: Union[list[list[GenerationChunk]], list[list[ChatGenerationChunk]]]
         output: Union[dict, BaseMessage] = {}
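The run_info["inputs"] -> run_info.get("inputs") change in this hunk (and the two below) swaps indexing for dict.get: a streaming run may never record its inputs, in which case .get returns None instead of raising KeyError, and the v1 implementation further down explicitly checks "if inputs is not None". A tiny illustration of the difference:

run_info: dict = {"name": "my_llm", "run_type": "llm"}  # "inputs" never recorded

inputs_ = run_info.get("inputs")  # None, where run_info["inputs"] would raise KeyError
assert inputs_ is None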
@@ -654,10 +656,6 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
     async def on_tool_end(self, output: Any, *, run_id: UUID, **kwargs: Any) -> None:
         """End a trace for a tool run.
 
-        Args:
-            output: The output of the tool.
-            run_id: The run ID. This is the ID of the current run.
-
         Raises:
             AssertionError: If the run ID is a tool call and does not have inputs
         """
@@ -742,7 +740,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
                     "event": "on_retriever_end",
                     "data": {
                         "output": documents,
-                        "input": run_info["inputs"],
+                        "input": run_info.get("inputs"),
                     },
                     "run_id": str(run_id),
                     "name": run_info["name"],
@@ -854,12 +852,12 @@ async def _astream_events_implementation_v1(
                 # Usually they will NOT be available for components that operate
                 # on streams, since those components stream the input and
                 # don't know its final value until the end of the stream.
-                inputs = log_entry["inputs"]
+                inputs = log_entry.get("inputs")
                 if inputs is not None:
                     data["input"] = inputs
 
             if event_type == "end":
-                inputs = log_entry["inputs"]
+                inputs = log_entry.get("inputs")
                 if inputs is not None:
                     data["input"] = inputs
 
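The events traced by these handlers surface to users through Runnable.astream_events. A minimal consumption sketch (the uppercase chain is a stand-in, not from this commit):

import asyncio

from langchain_core.runnables import RunnableLambda


async def main() -> None:
    chain = RunnableLambda(lambda x: x.upper())
    # Each event is a dict carrying "event", "name", "run_id", and "data" keys;
    # model runs emit on_llm_*/on_chat_model_* events depending on run_type above.
    async for event in chain.astream_events("hello", version="v2"):
        print(event["event"], event["data"])


asyncio.run(main())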