mirror of https://github.com/hwchase17/langchain.git
synced 2025-07-18 02:33:19 +00:00

docs: community docstring updates (#21040)

Added missing docstrings. Updated docstrings to a consistent format.

This commit is contained in:
parent 90f19028e5
commit 85094cbb3a
@@ -20,7 +20,7 @@ def import_aim() -> Any:


 class BaseMetadataCallbackHandler:
-    """This class handles the metadata and associated function states for callbacks.
+    """Callback handler for the metadata and associated function states for callbacks.

     Attributes:
         step (int): The current step.
@@ -74,6 +74,7 @@ logger.addHandler(handler)


 def import_uptrain() -> Any:
+    """Import the `uptrain` package."""
     try:
         import uptrain
     except ImportError as e:
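Note: all of these import helpers follow the same guard pattern. A minimal runnable sketch of the full shape, with a hypothetical error message since the `except` body is truncated in this hunk:

from typing import Any


def import_uptrain() -> Any:
    """Import the `uptrain` package."""
    try:
        import uptrain
    except ImportError as e:
        # Hypothetical message; the real except body is cut off in the diff.
        raise ImportError(
            "Could not import uptrain. Please install it with `pip install uptrain`."
        ) from e
    return uptrain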
@@ -65,7 +65,7 @@ def _flatten_dict(
 def flatten_dict(
     nested_dict: Dict[str, Any], parent_key: str = "", sep: str = "_"
 ) -> Dict[str, Any]:
-    """Flattens a nested dictionary into a flat dictionary.
+    """Flatten a nested dictionary into a flat dictionary.

     Parameters:
         nested_dict (dict): The nested dictionary to flatten.
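Note: for readers unfamiliar with this helper, a minimal sketch matching the documented signature (not the code from this commit):

from typing import Any, Dict


def flatten_dict_sketch(
    nested_dict: Dict[str, Any], parent_key: str = "", sep: str = "_"
) -> Dict[str, Any]:
    """Flatten a nested dictionary, joining key paths with `sep`."""
    flat: Dict[str, Any] = {}
    for key, value in nested_dict.items():
        new_key = f"{parent_key}{sep}{key}" if parent_key else key
        if isinstance(value, dict):
            # Recurse into nested dictionaries, carrying the prefix along.
            flat.update(flatten_dict_sketch(value, new_key, sep=sep))
        else:
            flat[new_key] = value
    return flat


assert flatten_dict_sketch({"a": {"b": 1}, "c": 2}) == {"a_b": 1, "c": 2}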
@@ -108,7 +108,7 @@ def load_json(json_path: Union[str, Path]) -> str:


 class BaseMetadataCallbackHandler:
-    """This class handles the metadata and associated function states for callbacks.
+    """Handle the metadata and associated function states for callbacks.

     Attributes:
         step (int): The current step.
@@ -19,7 +19,7 @@ logger = logging.getLogger(__name__)


 class SearchScope(str, Enum):
-    """Which documents to search. Messages or Summaries?"""
+    """Scope for the document search. Messages or Summaries?"""

     messages = "messages"
     """Search chat history messages."""
@@ -27,8 +27,7 @@ DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful, and honest assistant."


 class ChatMLX(BaseChatModel):
-    """
-    Wrapper for using MLX LLM's as ChatModels.
+    """MLX chat models.

     Works with `MLXPipeline` LLM.

@@ -10,7 +10,7 @@ from langchain_community.document_loaders.parsers import (


 class AzureAIDocumentIntelligenceLoader(BaseLoader):
-    """Loads a PDF with Azure Document Intelligence"""
+    """Load a PDF with Azure Document Intelligence."""

     def __init__(
         self,
@@ -28,7 +28,7 @@ logger = logging.getLogger(__name__)

 @dataclass
 class DocAIParsingResults:
-    """A dataclass to store Document AI parsing results."""
+    """Dataclass to store Document AI parsing results."""

     source_path: str
     parsed_path: str
@@ -12,7 +12,7 @@ logger = logging.getLogger(__name__)


 class BS4HTMLParser(BaseBlobParser):
-    """Pparse HTML files using `Beautiful Soup`."""
+    """Parse HTML files using `Beautiful Soup`."""

     def __init__(
         self,
@@ -731,7 +731,7 @@ class AmazonTextractPDFLoader(BasePDFLoader):


 class DocumentIntelligenceLoader(BasePDFLoader):
-    """Loads a PDF with Azure Document Intelligence"""
+    """Load a PDF with Azure Document Intelligence"""

     def __init__(
         self,
@@ -57,7 +57,7 @@ class PlaywrightEvaluator(ABC):


 class UnstructuredHtmlEvaluator(PlaywrightEvaluator):
-    """Evaluates the page HTML content using the `unstructured` library."""
+    """Evaluate the page HTML content using the `unstructured` library."""

     def __init__(self, remove_selectors: Optional[List[str]] = None):
         """Initialize UnstructuredHtmlEvaluator."""
@@ -15,11 +15,13 @@ __all__ = ["InfinityEmbeddings"]


 class InfinityEmbeddings(BaseModel, Embeddings):
-    """Embedding models for self-hosted https://github.com/michaelfeil/infinity
-    This should also work for text-embeddings-inference and other
+    """Self-hosted embedding models for `infinity` package.
+
+    See https://github.com/michaelfeil/infinity
+    This also works for text-embeddings-inference and other
     self-hosted openai-compatible servers.

-    Infinity is a class to interact with Embedding Models on https://github.com/michaelfeil/infinity
+    Infinity is a package to interact with Embedding Models on https://github.com/michaelfeil/infinity


     Example:
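Note: a hedged usage sketch for this class. The field names `model` and `infinity_api_url` are assumed to match the class's Example section; the URL and model values are placeholders for a locally running infinity server.

from langchain_community.embeddings import InfinityEmbeddings

# Assumed field names; placeholders for a local infinity deployment.
embeddings = InfinityEmbeddings(
    model="BAAI/bge-small-en-v1.5",
    infinity_api_url="http://localhost:7997",
)
vectors = embeddings.embed_documents(["hello", "world"])
query_vector = embeddings.embed_query("greeting")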
@@ -115,7 +117,9 @@ class InfinityEmbeddings(BaseModel, Embeddings):


 class TinyAsyncOpenAIInfinityEmbeddingClient:  #: :meta private:
-    """A helper tool to embed Infinity. Not part of Langchain's stable API,
+    """Helper tool to embed Infinity.
+
+    It is not a part of Langchain's stable API,
     direct use discouraged.

     Example:
@@ -72,6 +72,15 @@ def create_prem_retry_decorator(
     *,
     max_retries: int = 1,
 ) -> Callable[[Any], Any]:
+    """Create a retry decorator for PremAIEmbeddings.
+
+    Args:
+        embedder (PremAIEmbeddings): The PremAIEmbeddings instance
+        max_retries (int): The maximum number of retries
+
+    Returns:
+        Callable[[Any], Any]: The retry decorator
+    """
     import premai.models

     errors = [
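Note: the decorator itself is built further down in the file. A plausible tenacity-based sketch of what such a factory looks like; since the premai error list is truncated in this hunk, this version accepts generic error types instead.

import logging
from typing import Any, Callable, Type

from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

logger = logging.getLogger(__name__)


def create_retry_decorator_sketch(
    *errors: Type[BaseException], max_retries: int = 1
) -> Callable[[Any], Any]:
    """Build a tenacity retry decorator for the given error types."""
    return retry(
        reraise=True,
        stop=stop_after_attempt(max_retries),
        wait=wait_exponential(multiplier=1, min=1, max=10),
        retry=retry_if_exception_type(errors),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )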
@@ -187,5 +187,7 @@ class SparkLLMTextEmbeddings(BaseModel, Embeddings):


 class AssembleHeaderException(Exception):
+    """Exception raised for errors in the header assembly."""
+
     def __init__(self, msg: str) -> None:
         self.message = msg
@@ -6,22 +6,24 @@ from langchain_core.pydantic_v1 import BaseModel


 class TakeoffEmbeddingException(Exception):
-    """Exceptions experienced with interfacing with Takeoff Embedding Wrapper"""
+    """Custom exception for interfacing with Takeoff Embedding class."""


 class MissingConsumerGroup(TakeoffEmbeddingException):
     """Exception raised when no consumer group is provided on initialization of
-    TitanTakeoffEmbed or in embed request"""
+    TitanTakeoffEmbed or in embed request."""


 class Device(str, Enum):
-    """The device to use for inference, cuda or cpu"""
+    """Device to use for inference, cuda or cpu."""

     cuda = "cuda"
     cpu = "cpu"


 class ReaderConfig(BaseModel):
+    """Configuration for the reader to be deployed in Takeoff."""
+
     class Config:
         protected_namespaces = ()

@@ -36,10 +38,9 @@ class ReaderConfig(BaseModel):


 class TitanTakeoffEmbed(Embeddings):
-    """Titan Takeoff Embed is a wrapper to interface with Takeoff Inference API
-    for embedding models
+    """Interface with Takeoff Inference API for embedding models.

-    You can use this wrapper to send embedding requests and to deploy embedding
+    Use it to send embedding requests and to deploy embedding
     readers with Takeoff.

     Examples:
@@ -5,18 +5,18 @@ from langchain_community.graphs.graph_document import GraphDocument


 class GraphStore:
-    """An abstract class wrapper for graph operations."""
+    """Abstract class for graph operations."""

     @property
     @abstractmethod
     def get_schema(self) -> str:
-        """Returns the schema of the Graph database"""
+        """Return the schema of the Graph database"""
         pass

     @property
     @abstractmethod
     def get_structured_schema(self) -> Dict[str, Any]:
-        """Returns the schema of the Graph database"""
+        """Return the schema of the Graph database"""
         pass

     @abstractmethod
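Note: the `@property`/`@abstractmethod` stacking above means subclasses expose the schema as an attribute-style read. A small illustration; `ABC` is added here so the contract is actually enforced, whereas `GraphStore` itself is a plain class:

from abc import ABC, abstractmethod


class GraphStoreSketch(ABC):
    """Illustrative re-creation of the interface, not the library class."""

    @property
    @abstractmethod
    def get_schema(self) -> str:
        """Return the schema of the Graph database"""


class InMemoryGraph(GraphStoreSketch):
    @property
    def get_schema(self) -> str:
        # Reads like an attribute: graph.get_schema, no call parentheses.
        return "Node(name) -[:LINKS]-> Node(name)"


print(InMemoryGraph().get_schema)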
@@ -26,7 +26,7 @@ class GraphStore:

     @abstractmethod
     def refresh_schema(self) -> None:
-        """Refreshes the graph schema information."""
+        """Refresh the graph schema information."""
         pass

     @abstractmethod
@@ -10,6 +10,7 @@ from langchain_community.graphs.graph_store import GraphStore

 class GremlinGraph(GraphStore):
     """Gremlin wrapper for graph operations.
+
     Parameters:
     url (Optional[str]): The URL of the Gremlin database server or env GREMLIN_URI
     username (Optional[str]): The collection-identifier like '/dbs/database/colls/graph'
@@ -22,9 +22,11 @@ class NeptuneQueryException(Exception):


 class BaseNeptuneGraph(ABC):
+    """Abstract base class for Neptune"""
+
     @property
     def get_schema(self) -> str:
-        """Returns the schema of the Neptune database"""
+        """Return the schema of the Neptune database"""
         return self.schema

     @abstractmethod
@@ -17,7 +17,7 @@ logger = logging.getLogger(__name__)

 class BaichuanLLM(LLM):
     # TODO: Adding streaming support.
-    """Wrapper around Baichuan large language models."""
+    """Baichuan large language models."""

     model: str = "Baichuan2-Turbo-192k"
     """
@@ -23,7 +23,7 @@ logger = logging.getLogger(__name__)


 class _MinimaxEndpointClient(BaseModel):
-    """An API client that talks to a Minimax llm endpoint."""
+    """API client for the Minimax LLM endpoint."""

     host: str
     group_id: str
@@ -117,7 +117,8 @@ class MinimaxCommon(BaseModel):


 class Minimax(MinimaxCommon, LLM):
-    """Wrapper around Minimax large language models.
+    """Minimax large language models.
+
     To use, you should have the environment variable
     ``MINIMAX_API_KEY`` and ``MINIMAX_GROUP_ID`` set with your API key,
     or pass them as a named parameter to the constructor.
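Note: a short usage sketch matching the docstring's environment-variable contract; the credential values are placeholders.

import os

from langchain_community.llms import Minimax

# Placeholder credentials; real values come from your Minimax account.
os.environ["MINIMAX_API_KEY"] = "your-api-key"
os.environ["MINIMAX_GROUP_ID"] = "your-group-id"

llm = Minimax()
print(llm.invoke("Hello"))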
@@ -17,6 +17,8 @@ class Device(str, Enum):


 class ReaderConfig(BaseModel):
+    """Configuration for the reader to be deployed in Titan Takeoff API."""
+
     class Config:
         protected_namespaces = ()

@@ -27,7 +27,7 @@ def interleave(inter, f, seq):


 class Unparser:
-    """Methods in this class recursively traverse an AST and
+    """Traverse an AST and
     output source code for the abstract syntax; original formatting
     is disregarded."""

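Note: since Python 3.9 the standard library offers the same capability directly via `ast.unparse`. A quick round-trip showing that original formatting is indeed discarded:

import ast

source = "x   =  1 +    2  # a comment"
tree = ast.parse(source)
# Comments and spacing are not part of the AST, so they disappear.
print(ast.unparse(tree))  # -> x = 1 + 2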
@@ -11,6 +11,16 @@ if TYPE_CHECKING:
 async def wrapped_response_future(
     func: Callable[..., ResponseFuture], *args: Any, **kwargs: Any
 ) -> Any:
+    """Wrap a Cassandra response future in an asyncio future.
+
+    Args:
+        func: The Cassandra function to call.
+        *args: The arguments to pass to the Cassandra function.
+        **kwargs: The keyword arguments to pass to the Cassandra function.
+
+    Returns:
+        The result of the Cassandra function.
+    """
     loop = asyncio.get_event_loop()
     asyncio_future = loop.create_future()
     response_future = func(*args, **kwargs)
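Note: the hunk cuts off before the callback wiring. One plausible continuation of the bridge, using the driver's `ResponseFuture.add_callbacks` and a fake future object so the sketch runs standalone:

import asyncio
from typing import Any, Callable


async def wrapped_response_future_sketch(
    func: Callable[..., Any], *args: Any, **kwargs: Any
) -> Any:
    loop = asyncio.get_running_loop()
    asyncio_future = loop.create_future()
    response_future = func(*args, **kwargs)

    def on_success(result: Any) -> None:
        # Driver callbacks fire on a driver thread; hop back to the loop.
        loop.call_soon_threadsafe(asyncio_future.set_result, result)

    def on_error(exc: BaseException) -> None:
        loop.call_soon_threadsafe(asyncio_future.set_exception, exc)

    # cassandra-driver's ResponseFuture exposes add_callbacks(callback, errback).
    response_future.add_callbacks(on_success, on_error)
    return await asyncio_future


class FakeResponseFuture:
    """Stand-in for cassandra.cluster.ResponseFuture so the sketch runs."""

    def add_callbacks(self, callback: Callable, errback: Callable) -> None:
        callback("row-data")


print(asyncio.run(wrapped_response_future_sketch(FakeResponseFuture)))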
@@ -196,12 +196,15 @@ def extract_dict_elements_from_component_fields(
 def load_query(
     query: str, fault_tolerant: bool = False
 ) -> Tuple[Optional[Dict], Optional[str]]:
-    """Attempts to parse a JSON string and return the parsed object.
+    """Parse a JSON string and return the parsed object.

     If parsing fails, returns an error message.

     :param query: The JSON string to parse.
     :return: A tuple containing the parsed object or None and an error message or None.
+
+    Exceptions:
+        json.JSONDecodeError: If the input is not a valid JSON string.
     """
     try:
         return json.loads(query), None
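Note: a self-contained sketch of the documented behavior; the `fault_tolerant` branch is an assumption, since the `except` body is not shown in this hunk.

import json
from typing import Dict, Optional, Tuple


def load_query_sketch(
    query: str, fault_tolerant: bool = False
) -> Tuple[Optional[Dict], Optional[str]]:
    """Parse a JSON string; on failure, optionally return the error instead of raising."""
    try:
        return json.loads(query), None
    except json.JSONDecodeError as e:
        if fault_tolerant:
            return None, f"Invalid JSON: {e}"
        raise


assert load_query_sketch('{"a": 1}') == ({"a": 1}, None)
assert load_query_sketch("not json", fault_tolerant=True)[0] is None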
@@ -9,6 +9,8 @@ from langchain_core.utils import get_from_dict_or_env


 class NoDiskStorage:
+    """Mixin to prevent storing on disk."""
+
     @final
     def __getstate__(self) -> None:
         raise AttributeError("Do not store on disk.")
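Note: the mixin works because `pickle` consults `__getstate__` when serializing. A runnable illustration, with the class re-created from the hunk above:

import pickle
from typing import final


class NoDiskStorageSketch:
    """Mixin to prevent storing on disk (re-created for illustration)."""

    @final
    def __getstate__(self) -> None:
        raise AttributeError("Do not store on disk.")


class Credentials(NoDiskStorageSketch):
    def __init__(self, token: str) -> None:
        self.token = token


try:
    pickle.dumps(Credentials("secret"))  # pickle calls __getstate__ first
except AttributeError as e:
    print(e)  # -> Do not store on disk.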
@@ -46,11 +48,12 @@ except ImportError:


 def is_http_retryable(rsp: requests.Response) -> bool:
+    """Check if a HTTP response is retryable."""
     return bool(rsp) and rsp.status_code in [408, 425, 429, 500, 502, 503, 504]


 class ManagedPassioLifeAuth(NoDiskStorage):
-    """Manages the token for the NutritionAI API."""
+    """Manage the token for the NutritionAI API."""

     _access_token_expiry: Optional[datetime]

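Note: the library pairs this predicate with a retry policy elsewhere. A hypothetical standalone loop showing how such a status list is typically used:

import time

import requests


def get_with_retries_sketch(url: str, max_attempts: int = 3) -> requests.Response:
    """Retry a GET while the response status is in the retryable set."""
    retryable = [408, 425, 429, 500, 502, 503, 504]
    for attempt in range(max_attempts):
        rsp = requests.get(url, timeout=10)
        if rsp.status_code not in retryable:
            return rsp
        time.sleep(2**attempt)  # simple exponential backoff
    return rsp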
@@ -192,7 +192,7 @@ def get_loader_type(loader: str) -> str:

 def get_loader_full_path(loader: BaseLoader) -> str:
     """Return an absolute source path of source of loader based on the
-    keys present in Document object from loader.
+    keys present in Document.

     Args:
         loader (BaseLoader): Langchain document loader, derived from Baseloader.
@@ -251,7 +251,7 @@ def json_to_md(
     json_contents: List[Dict[str, Union[str, int, float]]],
     table_name: Optional[str] = None,
 ) -> str:
-    """Converts a JSON object to a markdown table."""
+    """Convert a JSON object to a markdown table."""
     if len(json_contents) == 0:
         return ""
     output_md = ""
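Note: a minimal sketch of such a converter, matching the documented signature minus the optional `table_name` (illustrative, not this module's code):

from typing import Dict, List, Union


def json_to_md_sketch(json_contents: List[Dict[str, Union[str, int, float]]]) -> str:
    """Render a list of flat dicts as a GitHub-style markdown table."""
    if len(json_contents) == 0:
        return ""
    headers = list(json_contents[0].keys())
    lines = [
        "| " + " | ".join(headers) + " |",
        "| " + " | ".join("---" for _ in headers) + " |",
    ]
    for row in json_contents:
        lines.append("| " + " | ".join(str(row.get(h, "")) for h in headers) + " |")
    return "\n".join(lines)


print(json_to_md_sketch([{"name": "aim", "stars": 4}, {"name": "uptrain", "stars": 5}]))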
@@ -35,7 +35,7 @@ _NOT_SET = object()


 class Cassandra(VectorStore):
-    """Wrapper around Apache Cassandra(R) for vector-store workloads.
+    """Apache Cassandra(R) for vector-store workloads.

     To use it, you need a recent installation of the `cassio` library
     and a Cassandra cluster / Astra DB instance supporting vector capabilities.
@@ -11,6 +11,8 @@ from langchain_core.vectorstores import VectorStore


 def import_lancedb() -> Any:
+    """Import lancedb package."""
+
     try:
         import lancedb
     except ImportError as e:
@@ -109,6 +109,15 @@ class MetaField(BaseModel):
 def translate_filter(
     lc_filter: str, allowed_fields: Optional[Sequence[str]] = None
 ) -> str:
+    """Translate LangChain filter to Tencent VectorDB filter.
+
+    Args:
+        lc_filter (str): LangChain filter.
+        allowed_fields (Optional[Sequence[str]]): Allowed fields for filter.
+
+    Returns:
+        str: Translated filter.
+    """
     from langchain.chains.query_constructor.base import fix_filter_directive
     from langchain.chains.query_constructor.parser import get_parser
     from langchain.retrievers.self_query.tencentvectordb import (
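Note: a hedged usage example. The import path is assumed from this hunk's location in the Tencent VectorDB vector-store module, and the filter string uses the comparator syntax LangChain's self-query parser accepts; the translated output is illustrative only.

from langchain_community.vectorstores.tencentvectordb import translate_filter

# Hypothetical self-query filter; requires `langchain` installed, since the
# function imports the query-constructor parser at call time.
print(
    translate_filter(
        'and(eq("artist", "Taylor Swift"), gt("year", 2000))',
        allowed_fields=["artist", "year"],
    )
)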