core[patch]: docstrings agents (#23502)

Added missing docstrings. Reformatted existing docstrings into a consistent form.
Leonid Ganeline 2024-06-26 14:50:48 -07:00 committed by GitHub
parent 79d8556c22
commit 2c9b84c3a8
8 changed files with 113 additions and 41 deletions

View File

@@ -5,10 +5,10 @@
New agents should be built using the langgraph library
(https://github.com/langchain-ai/langgraph)), which provides a simpler
and more flexible way to define agents.

Please see the migration guide for information on how to migrate existing
agents to modern langgraph agents:
https://python.langchain.com/v0.2/docs/how_to/migrate_agent/

Agents use language models to choose a sequence of actions to take.
@@ -21,6 +21,7 @@ A basic agent works in the following manner:
The schemas for the agents themselves are defined in langchain.agents.agent.
"""  # noqa: E501
+
from __future__ import annotations

import json
@@ -193,9 +194,11 @@ def _create_function_message(
    agent_action: AgentAction, observation: Any
) -> FunctionMessage:
    """Convert agent action and observation into a function message.
+
    Args:
        agent_action: the tool invocation request from the agent
        observation: the result of the tool invocation
+
    Returns:
        FunctionMessage that corresponds to the original tool invocation
    """

View File

@@ -19,6 +19,7 @@ Cache directly competes with Memory. See documentation for Pros and Cons.
    BaseCache --> <name>Cache  # Examples: InMemoryCache, RedisCache, GPTCache
"""
+
from __future__ import annotations

from abc import ABC, abstractmethod
@@ -31,7 +32,7 @@ RETURN_VAL_TYPE = Sequence[Generation]
class BaseCache(ABC):
-    """This interfaces provides a caching layer for LLMs and Chat models.
+    """Interface for a caching layer for LLMs and Chat models.

    The cache interface consists of the following methods:
@@ -73,7 +74,7 @@ class BaseCache(ABC):
        """Update cache based on prompt and llm_string.

        The prompt and llm_string are used to generate a key for the cache.
-        The key should match that of the look up method.
+        The key should match that of the lookup method.

        Args:
            prompt: a string representation of the prompt.
@@ -93,7 +94,7 @@ class BaseCache(ABC):
        """Clear cache that can take additional keyword arguments."""

    async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Look up based on prompt and llm_string.
+        """Async look up based on prompt and llm_string.

        A cache implementation is expected to generate a key from the 2-tuple
        of prompt and llm_string (e.g., by concatenating them with a delimiter).
@@ -117,7 +118,7 @@ class BaseCache(ABC):
    async def aupdate(
        self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) -> None:
-        """Update cache based on prompt and llm_string.
+        """Async update cache based on prompt and llm_string.

        The prompt and llm_string are used to generate a key for the cache.
        The key should match that of the look up method.
@@ -137,7 +138,7 @@ class BaseCache(ABC):
        return await run_in_executor(None, self.update, prompt, llm_string, return_val)

    async def aclear(self, **kwargs: Any) -> None:
-        """Clear cache that can take additional keyword arguments."""
+        """Async clear cache that can take additional keyword arguments."""
        return await run_in_executor(None, self.clear, **kwargs)
@@ -149,11 +150,30 @@ class InMemoryCache(BaseCache):
        self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Look up based on prompt and llm_string."""
+        """Look up based on prompt and llm_string.
+
+        Args:
+            prompt: a string representation of the prompt.
+                In the case of a Chat model, the prompt is a non-trivial
+                serialization of the prompt into the language model.
+            llm_string: A string representation of the LLM configuration.
+
+        Returns:
+            On a cache miss, return None. On a cache hit, return the cached value.
+        """
        return self._cache.get((prompt, llm_string), None)

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
-        """Update cache based on prompt and llm_string."""
+        """Update cache based on prompt and llm_string.
+
+        Args:
+            prompt: a string representation of the prompt.
+                In the case of a Chat model, the prompt is a non-trivial
+                serialization of the prompt into the language model.
+            llm_string: A string representation of the LLM configuration.
+            return_val: The value to be cached. The value is a list of Generations
+                (or subclasses).
+        """
        self._cache[(prompt, llm_string)] = return_val

    def clear(self, **kwargs: Any) -> None:
@@ -161,15 +181,34 @@ class InMemoryCache(BaseCache):
        self._cache = {}

    async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Look up based on prompt and llm_string."""
+        """Async look up based on prompt and llm_string.
+
+        Args:
+            prompt: a string representation of the prompt.
+                In the case of a Chat model, the prompt is a non-trivial
+                serialization of the prompt into the language model.
+            llm_string: A string representation of the LLM configuration.
+
+        Returns:
+            On a cache miss, return None. On a cache hit, return the cached value.
+        """
        return self.lookup(prompt, llm_string)

    async def aupdate(
        self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) -> None:
-        """Update cache based on prompt and llm_string."""
+        """Async update cache based on prompt and llm_string.
+
+        Args:
+            prompt: a string representation of the prompt.
+                In the case of a Chat model, the prompt is a non-trivial
+                serialization of the prompt into the language model.
+            llm_string: A string representation of the LLM configuration.
+            return_val: The value to be cached. The value is a list of Generations
+                (or subclasses).
+        """
        self.update(prompt, llm_string, return_val)

    async def aclear(self, **kwargs: Any) -> None:
-        """Clear cache."""
+        """Async clear cache."""
        self.clear()
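The updated docstrings describe the (prompt, llm_string) keying scheme; here is a small usage sketch of InMemoryCache (the prompt and configuration strings below are made up):

from langchain_core.caches import InMemoryCache
from langchain_core.outputs import Generation

cache = InMemoryCache()

# The (prompt, llm_string) pair forms the cache key.
cache.update("Tell me a joke", "fake-llm-config", [Generation(text="...")])

print(cache.lookup("Tell me a joke", "fake-llm-config"))  # cache hit
print(cache.lookup("Tell me a joke", "other-config"))     # None: different key, miss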

View File

@@ -1,4 +1,5 @@
-"""Custom **exceptions** for LangChain. """
+"""Custom **exceptions** for LangChain."""
+
from typing import Any, Optional
@@ -18,7 +19,7 @@ class OutputParserException(ValueError, LangChainException):
    available to catch and handle in ways to fix the parsing error, while other
    errors will be raised.

-    Args:
+    Parameters:
        error: The error that's being re-raised or an error message.
        observation: String explanation of error which can be passed to a
            model to try and remediate the issue.
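To illustrate the documented parameters, a hedged sketch of raising the exception from a hypothetical parser (parse_number is made up; the keyword names follow the docstring above):

from langchain_core.exceptions import OutputParserException

def parse_number(text: str) -> int:
    """Hypothetical parser that expects a bare integer."""
    try:
        return int(text.strip())
    except ValueError as e:
        # observation can be fed back to the model to remediate the issue;
        # send_to_llm=True requires observation and llm_output to be set.
        raise OutputParserException(
            error=e,
            observation="The output must be a bare integer.",
            llm_output=text,
            send_to_llm=True,
        )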

View File

@@ -1,5 +1,6 @@
# flake8: noqa
"""Global values and configuration that apply to all of LangChain."""
+
import warnings
from typing import TYPE_CHECKING, Optional
@@ -135,7 +136,11 @@ def get_debug() -> bool:
def set_llm_cache(value: Optional["BaseCache"]) -> None:
-    """Set a new LLM cache, overwriting the previous value, if any."""
+    """Set a new LLM cache, overwriting the previous value, if any.
+
+    Args:
+        value: The new LLM cache to use. If `None`, the LLM cache is disabled.
+    """
    try:
        import langchain  # type: ignore[import]
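Usage is a one-liner; for example (a sketch pairing it with the InMemoryCache from langchain_core.caches):

from langchain_core.caches import InMemoryCache
from langchain_core.globals import set_llm_cache

set_llm_cache(InMemoryCache())  # enable a process-wide LLM cache
set_llm_cache(None)             # disable the LLM cache again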

View File

@@ -7,6 +7,7 @@
    BaseMemory --> <name>Memory --> <name>Memory  # Examples: BaseChatMemory -> MotorheadMemory
"""  # noqa: E501
+
from __future__ import annotations

from abc import ABC, abstractmethod
@@ -58,20 +59,37 @@ class BaseMemory(Serializable, ABC):
    @abstractmethod
    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
-        """Return key-value pairs given the text input to the chain."""
+        """Return key-value pairs given the text input to the chain.
+
+        Args:
+            inputs: The inputs to the chain."""

    async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
-        """Return key-value pairs given the text input to the chain."""
+        """Async return key-value pairs given the text input to the chain.
+
+        Args:
+            inputs: The inputs to the chain.
+        """
        return await run_in_executor(None, self.load_memory_variables, inputs)

    @abstractmethod
    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
-        """Save the context of this chain run to memory."""
+        """Save the context of this chain run to memory.
+
+        Args:
+            inputs: The inputs to the chain.
+            outputs: The outputs of the chain.
+        """

    async def asave_context(
        self, inputs: Dict[str, Any], outputs: Dict[str, str]
    ) -> None:
-        """Save the context of this chain run to memory."""
+        """Async save the context of this chain run to memory.
+
+        Args:
+            inputs: The inputs to the chain.
+            outputs: The outputs of the chain.
+        """
        await run_in_executor(None, self.save_context, inputs, outputs)

    @abstractmethod
@@ -79,5 +97,5 @@ class BaseMemory(Serializable, ABC):
        """Clear memory contents."""

    async def aclear(self) -> None:
-        """Clear memory contents."""
+        """Async clear memory contents."""
        await run_in_executor(None, self.clear)
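A minimal sketch of implementing this interface (RecentInputMemory is a made-up example class; real memories usually track full conversation history):

from typing import Any, Dict, List

from langchain_core.memory import BaseMemory


class RecentInputMemory(BaseMemory):
    """Made-up example: remembers only the most recent chain input."""

    last_input: str = ""

    @property
    def memory_variables(self) -> List[str]:
        return ["last_input"]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        return {"last_input": self.last_input}

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        # Keep only the first input value from this run.
        self.last_input = str(next(iter(inputs.values()), ""))

    def clear(self) -> None:
        self.last_input = ""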

View File

@@ -3,6 +3,7 @@
Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
+
from __future__ import annotations

from abc import ABC, abstractmethod
@@ -103,15 +104,15 @@ class ImagePromptValue(PromptValue):
    """Image prompt value."""

    image_url: ImageURL
-    """Prompt image."""
+    """Image URL."""

    type: Literal["ImagePromptValue"] = "ImagePromptValue"

    def to_string(self) -> str:
-        """Return prompt as string."""
+        """Return prompt (image URL) as string."""
        return self.image_url["url"]

    def to_messages(self) -> List[BaseMessage]:
-        """Return prompt as messages."""
+        """Return prompt (image URL) as messages."""
        return [HumanMessage(content=[cast(dict, self.image_url)])]
@@ -120,6 +121,7 @@ class ChatPromptValueConcrete(ChatPromptValue):
    For use in external schemas."""

    messages: Sequence[AnyMessage]
+    """Sequence of messages."""

    type: Literal["ChatPromptValueConcrete"] = "ChatPromptValueConcrete"
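A short sketch of the ImagePromptValue behavior documented above (the URL is made up):

from langchain_core.prompt_values import ImagePromptValue

value = ImagePromptValue(image_url={"url": "https://example.com/cat.png"})

print(value.to_string())    # "https://example.com/cat.png"
print(value.to_messages())  # [HumanMessage(content=[{"url": ...}])]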

View File

@@ -18,6 +18,7 @@ the backbone of a retriever, but there are other types of retrievers as well.
    Document, Serializable, Callbacks,
    CallbackManagerForRetrieverRun, AsyncCallbackManagerForRetrieverRun
"""
+
from __future__ import annotations

import warnings
@@ -119,14 +120,14 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
    _new_arg_supported: bool = False
    _expects_other_args: bool = False
    tags: Optional[List[str]] = None
-    """Optional list of tags associated with the retriever. Defaults to None
+    """Optional list of tags associated with the retriever. Defaults to None.

    These tags will be associated with each call to this retriever,
    and passed as arguments to the handlers defined in `callbacks`.
    You can use these to eg identify a specific instance of a retriever with its
    use case.
    """
    metadata: Optional[Dict[str, Any]] = None
-    """Optional metadata associated with the retriever. Defaults to None
+    """Optional metadata associated with the retriever. Defaults to None.

    This metadata will be associated with each call to this retriever,
    and passed as arguments to the handlers defined in `callbacks`.
    You can use these to eg identify a specific instance of a retriever with its
@@ -289,9 +290,10 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Get documents relevant to a query.
+
        Args:
            query: String to find relevant documents for
-            run_manager: The callbacks handler to use
+            run_manager: The callback handler to use
        Returns:
            List of relevant documents
        """
@@ -300,9 +302,10 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Asynchronously get documents relevant to a query.
+
        Args:
            query: String to find relevant documents for
-            run_manager: The callbacks handler to use
+            run_manager: The callback handler to use
        Returns:
            List of relevant documents
        """

View File

@@ -5,6 +5,7 @@ to a simple key-value interface.
The primary goal of these storages is to support implementation of caching.
"""
+
from abc import ABC, abstractmethod
from typing import (
    Any,
@@ -95,7 +96,7 @@ class BaseStore(Generic[K, V], ABC):
        """

    async def amget(self, keys: Sequence[K]) -> List[Optional[V]]:
-        """Get the values associated with the given keys.
+        """Async get the values associated with the given keys.

        Args:
            keys (Sequence[K]): A sequence of keys.
@@ -115,7 +116,7 @@ class BaseStore(Generic[K, V], ABC):
        """

    async def amset(self, key_value_pairs: Sequence[Tuple[K, V]]) -> None:
-        """Set the values for the given keys.
+        """Async set the values for the given keys.

        Args:
            key_value_pairs (Sequence[Tuple[K, V]]): A sequence of key-value pairs.
@@ -131,7 +132,7 @@ class BaseStore(Generic[K, V], ABC):
        """

    async def amdelete(self, keys: Sequence[K]) -> None:
-        """Delete the given keys and their associated values.
+        """Async delete the given keys and their associated values.

        Args:
            keys (Sequence[K]): A sequence of keys to delete.
@@ -147,7 +148,7 @@ class BaseStore(Generic[K, V], ABC):
        Args:
            prefix (str): The prefix to match.

-        Returns:
+        Yields:
            Iterator[K | str]: An iterator over keys that match the given prefix.

            This method is allowed to return an iterator over either K or str
@@ -157,12 +158,12 @@ class BaseStore(Generic[K, V], ABC):
    async def ayield_keys(
        self, *, prefix: Optional[str] = None
    ) -> Union[AsyncIterator[K], AsyncIterator[str]]:
-        """Get an iterator over keys that match the given prefix.
+        """Async get an iterator over keys that match the given prefix.

        Args:
            prefix (str): The prefix to match.

-        Returns:
+        Yields:
            Iterator[K | str]: An iterator over keys that match the given prefix.

            This method is allowed to return an iterator over either K or str
@@ -200,7 +201,7 @@ class InMemoryBaseStore(BaseStore[str, V], Generic[V]):
        return [self.store.get(key) for key in keys]

    async def amget(self, keys: Sequence[str]) -> List[Optional[V]]:
-        """Get the values associated with the given keys.
+        """Async get the values associated with the given keys.

        Args:
            keys (Sequence[str]): A sequence of keys.
@@ -224,7 +225,7 @@ class InMemoryBaseStore(BaseStore[str, V], Generic[V]):
            self.store[key] = value

    async def amset(self, key_value_pairs: Sequence[Tuple[str, V]]) -> None:
-        """Set the values for the given keys.
+        """Async set the values for the given keys.

        Args:
            key_value_pairs (Sequence[Tuple[str, V]]): A sequence of key-value pairs.
@@ -245,7 +246,7 @@ class InMemoryBaseStore(BaseStore[str, V], Generic[V]):
            del self.store[key]

    async def amdelete(self, keys: Sequence[str]) -> None:
-        """Delete the given keys and their associated values.
+        """Async delete the given keys and their associated values.

        Args:
            keys (Sequence[str]): A sequence of keys to delete.
@@ -258,7 +259,7 @@ class InMemoryBaseStore(BaseStore[str, V], Generic[V]):
        Args:
            prefix (str, optional): The prefix to match. Defaults to None.

-        Returns:
+        Yields:
            Iterator[str]: An iterator over keys that match the given prefix.
        """
        if prefix is None:
@@ -269,12 +270,12 @@ class InMemoryBaseStore(BaseStore[str, V], Generic[V]):
                yield key

    async def ayield_keys(self, prefix: Optional[str] = None) -> AsyncIterator[str]:
-        """Get an async iterator over keys that match the given prefix.
+        """Async get an async iterator over keys that match the given prefix.

        Args:
            prefix (str, optional): The prefix to match. Defaults to None.

-        Returns:
+        Yields:
            AsyncIterator[str]: An async iterator over keys that match the given prefix.
        """
        if prefix is None:
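To round out the key-value interface these docstrings describe, a usage sketch with the in-memory implementation (the keys and values are made up; InMemoryStore is the concrete str-keyed store exported from langchain_core.stores):

from langchain_core.stores import InMemoryStore

store = InMemoryStore()
store.mset([("k1", "v1"), ("k2", "v2")])    # set several key-value pairs at once

print(store.mget(["k1", "k2", "missing"]))  # ['v1', 'v2', None]
print(list(store.yield_keys(prefix="k")))   # ['k1', 'k2']

store.mdelete(["k1"])
print(store.mget(["k1"]))                   # [None]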