Mirror of https://github.com/hwchase17/langchain.git (synced 2025-07-31 00:29:57 +00:00)
community[patch]: docstrings (#16810)
- added missed docstrings
- formatted docstrings to a consistent form
parent ae66bcbc10
commit 932c52c333
@@ -40,25 +40,33 @@ async def aenumerate(


 class IndexableBaseModel(BaseModel):
-    """Allows a BaseModel to return its fields by string variable indexing"""
+    """Allows a BaseModel to return its fields by string variable indexing."""

     def __getitem__(self, item: str) -> Any:
         return getattr(self, item)


 class Choice(IndexableBaseModel):
+    """Choice."""
+
     message: dict


 class ChatCompletions(IndexableBaseModel):
+    """Chat completions."""
+
     choices: List[Choice]


 class ChoiceChunk(IndexableBaseModel):
+    """Choice chunk."""
+
     delta: dict


 class ChatCompletionChunk(IndexableBaseModel):
+    """Chat completion chunk."""
+
     choices: List[ChoiceChunk]
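The `__getitem__` override is what lets these response models be indexed by field name like dicts while remaining pydantic models. A minimal sketch of the behavior (the sample payload is made up):

from typing import Any

from pydantic import BaseModel


class IndexableBaseModel(BaseModel):
    """Allows a BaseModel to return its fields by string variable indexing."""

    def __getitem__(self, item: str) -> Any:
        return getattr(self, item)


class Choice(IndexableBaseModel):
    """Choice."""

    message: dict


choice = Choice(message={"role": "assistant", "content": "hi"})
# Attribute access and string indexing hit the same field.
assert choice.message == choice["message"]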
@@ -301,7 +309,7 @@ def convert_messages_for_finetuning(


 class Completions:
-    """Completion."""
+    """Completions."""

     @overload
     @staticmethod
@@ -399,6 +407,8 @@ class Completions:


 class Chat:
+    """Chat."""
+
     def __init__(self) -> None:
         self.completions = Completions()
@@ -191,7 +191,7 @@ class RequestsPutToolWithParsing(BaseRequestsTool, BaseTool):


 class RequestsDeleteToolWithParsing(BaseRequestsTool, BaseTool):
-    """A tool that sends a DELETE request and parses the response."""
+    """Tool that sends a DELETE request and parses the response."""

     name: str = "requests_delete"
     """The name of the tool."""
@@ -39,6 +39,7 @@ def import_mlflow() -> Any:


 def mlflow_callback_metrics() -> List[str]:
+    """Get the metrics to log to MLFlow."""
     return [
         "step",
         "starts",
@@ -59,6 +60,7 @@ def mlflow_callback_metrics() -> List[str]:


 def get_text_complexity_metrics() -> List[str]:
+    """Get the text complexity metrics from textstat."""
     return [
         "flesch_reading_ease",
         "flesch_kincaid_grade",
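Both helpers return plain lists of metric names. In the textstat case the names double as function names on the `textstat` module, so scores can be resolved dynamically; a hedged sketch of that lookup (the sample sentence is made up, and whether the callback resolves metrics exactly this way is an assumption):

import textstat  # pip install textstat

text = "LangChain callbacks can log text complexity metrics to MLflow."

for metric in ["flesch_reading_ease", "flesch_kincaid_grade"]:
    # Each metric name is also a textstat function name.
    score = getattr(textstat, metric)(text)
    print(f"{metric}: {score}")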
@@ -225,7 +225,7 @@ class LLMThought:


 class StreamlitCallbackHandler(BaseCallbackHandler):
-    """A callback handler that writes to a Streamlit app."""
+    """Callback handler that writes to a Streamlit app."""

     def __init__(
         self,
@@ -21,7 +21,7 @@ logger = logging.getLogger(__name__)


 class BaseMessageConverter(ABC):
-    """The class responsible for converting BaseMessage to your SQLAlchemy model."""
+    """Class that converts BaseMessage to the SQLAlchemy model."""

     @abstractmethod
     def from_sql_model(self, sql_message: Any) -> BaseMessage:
@@ -20,6 +20,8 @@ from langchain_community.llms.azureml_endpoint import (


 class LlamaContentFormatter(ContentFormatterBase):
+    """Content formatter for `LLaMA`."""
+
     def __init__(self) -> None:
         raise TypeError(
             "`LlamaContentFormatter` is deprecated for chat models. Use "
@@ -34,7 +36,7 @@ class LlamaChatContentFormatter(ContentFormatterBase):

     @staticmethod
     def _convert_message_to_dict(message: BaseMessage) -> Dict:
-        """Converts message to a dict according to role"""
+        """Converts a message to a dict according to a role"""
         content = cast(str, message.content)
         if isinstance(message, HumanMessage):
             return {
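The truncated body above maps message classes to chat-role dicts. A minimal sketch of that dispatch pattern (the exact role strings this formatter emits are an assumption; only the HumanMessage branch is visible above):

from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage


def convert_message_to_dict(message: BaseMessage) -> dict:
    # Role strings are assumed for illustration.
    if isinstance(message, HumanMessage):
        return {"role": "user", "content": message.content}
    if isinstance(message, AIMessage):
        return {"role": "assistant", "content": message.content}
    if isinstance(message, SystemMessage):
        return {"role": "system", "content": message.content}
    raise TypeError(f"Got unknown type {type(message)}")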
@@ -58,6 +58,8 @@ logger = logging.getLogger(__name__)


 class ChatDeepInfraException(Exception):
+    """Exception raised when the DeepInfra API returns an error."""
+
     pass
@@ -67,7 +69,7 @@ def _create_retry_decorator(
         Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
     ] = None,
 ) -> Callable[[Any], Any]:
-    """Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions"""
+    """Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions."""
     return create_base_retry_decorator(
         error_types=[requests.exceptions.ConnectTimeout, ChatDeepInfraException],
         max_retries=llm.max_retries,
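`create_base_retry_decorator` is built on tenacity. Stripped of LangChain's callback plumbing, the effect is roughly the following (a sketch using tenacity directly; the wait strategy and attempt count are assumptions, and the URL is a placeholder):

import requests
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential


class ChatDeepInfraException(Exception):
    """Exception raised when the DeepInfra API returns an error."""


@retry(
    # Retry only on the configured error types.
    retry=retry_if_exception_type(
        (requests.exceptions.ConnectTimeout, ChatDeepInfraException)
    ),
    stop=stop_after_attempt(6),  # plays the role of max_retries
    wait=wait_exponential(multiplier=1, min=1, max=10),
)
def call_api() -> str:
    # Placeholder endpoint; a timeout or API error triggers a retry.
    response = requests.get("https://example.com/api", timeout=10)
    return response.text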
@@ -53,6 +53,8 @@ class GPTRouterException(Exception):


 class GPTRouterModel(BaseModel):
+    """GPTRouter model."""
+
     name: str
     provider_name: str
@@ -39,8 +39,8 @@ def convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage:


 class VolcEngineMaasChat(BaseChatModel, VolcEngineMaasBase):
-    """volc engine maas hosts a plethora of models.
+    """Volc Engine Maas hosts a plethora of models.
     You can utilize these models through this class.

     To use, you should have the ``volcengine`` python package installed.
@@ -20,11 +20,15 @@ logger = logging.getLogger(__name__)


 class ref(BaseModel):
+    """Reference used in CharacterGLM."""
+
     enable: bool = Field(True)
     search_query: str = Field("")


 class meta(BaseModel):
+    """Metadata used in CharacterGLM."""
+
     user_info: str = Field("")
     bot_info: str = Field("")
     bot_name: str = Field("")
@@ -9,7 +9,7 @@ if TYPE_CHECKING:

 class UnstructuredCHMLoader(UnstructuredFileLoader):
     """Load `CHM` files using `Unstructured`.

-    CHM mean Microsoft Compiled HTML Help.
+    CHM means Microsoft Compiled HTML Help.

     Examples
     --------
@@ -35,6 +35,8 @@ class UnstructuredCHMLoader(UnstructuredFileLoader):


 class CHMParser(object):
+    """Microsoft Compiled HTML Help (CHM) Parser."""
+
     path: str
     file: "chm.CHMFile"
@@ -11,6 +11,8 @@ from langchain_community.document_loaders.blob_loaders import Blob


 class VsdxParser(BaseBlobParser, ABC):
+    """Parser for vsdx files."""
+
     def parse(self, blob: Blob) -> Iterator[Document]:  # type: ignore[override]
         """Parse a vsdx file."""
         return self.lazy_parse(blob)
@@ -141,6 +141,8 @@ def _parse_video_id(url: str) -> Optional[str]:


 class TranscriptFormat(Enum):
+    """Transcript format."""
+
     TEXT = "text"
     LINES = "lines"
@@ -13,7 +13,7 @@ def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:


 class MlflowEmbeddings(Embeddings, BaseModel):
-    """Wrapper around embeddings LLMs in MLflow.
+    """Embedding LLMs in MLflow.

     To use, you should have the `mlflow[genai]` python package installed.
     For more information, see https://mlflow.org/docs/latest/llms/deployments/server.html.
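The `_chunk` helper named in the hunk header batches texts before they are sent to the MLflow endpoint. Its body is not shown here; a sketch of what the signature implies (the implementation is an assumption):

from typing import Iterator, List


def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:
    # Yield consecutive slices of at most `size` texts.
    for i in range(0, len(texts), size):
        yield texts[i : i + size]


# list(_chunk(["a", "b", "c"], 2)) == [["a", "b"], ["c"]]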
@@ -85,5 +85,7 @@ class MlflowEmbeddings(Embeddings, BaseModel):


 class MlflowCohereEmbeddings(MlflowEmbeddings):
+    """Cohere embedding LLMs in MLflow."""
+
     query_params: Dict[str, str] = {"input_type": "search_query"}
     documents_params: Dict[str, str] = {"input_type": "search_document"}
@@ -8,6 +8,8 @@ CUSTOM_ENDPOINT_PREFIX = "ocid1.generativeaiendpoint"


 class OCIAuthType(Enum):
+    """OCI authentication types as enumerator."""
+
     API_KEY = 1
     SECURITY_TOKEN = 2
     INSTANCE_PRINCIPAL = 3
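Modeling the auth types as an Enum lets a user-supplied name be validated in a single lookup; for example:

from enum import Enum


class OCIAuthType(Enum):
    """OCI authentication types as enumerator."""

    API_KEY = 1
    SECURITY_TOKEN = 2
    INSTANCE_PRINCIPAL = 3


# Lookup by member name; an unknown name raises KeyError immediately.
auth = OCIAuthType["API_KEY"]
assert auth is OCIAuthType.API_KEY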
@@ -32,7 +32,8 @@ RETURN {start: label, type: property, end: toString(other_node)} AS output


 def value_sanitize(d: Dict[str, Any]) -> Dict[str, Any]:
-    """
+    """Sanitize the input dictionary.
+
     Sanitizes the input dictionary by removing embedding-like values,
     lists with more than 128 elements, that are mostly irrelevant for
     generating answers in a LLM context. These properties, if left in
|
||||
|
||||
|
||||
class Neo4jGraph(GraphStore):
|
||||
"""Provides a connection to a Neo4j database for various graph operations.
|
||||
"""Neo4j database wrapper for various graph operations.
|
||||
|
||||
Parameters:
|
||||
url (Optional[str]): The URL of the Neo4j database server.
|
||||
username (Optional[str]): The username for database authentication.
|
||||
|
@@ -2,7 +2,7 @@ from typing import Any, Dict, List, Optional, Tuple, Union


 class NeptuneQueryException(Exception):
-    """A class to handle queries that fail to execute"""
+    """Exception for the Neptune queries."""

     def __init__(self, exception: Union[str, Dict]):
         if isinstance(exception, dict):
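The constructor accepts either a plain string or a structured error payload; a hedged sketch of how the two cases might be normalized (attribute and key names are assumptions, since the body is truncated above):

from typing import Dict, Union


class NeptuneQueryException(Exception):
    """Exception for the Neptune queries."""

    def __init__(self, exception: Union[str, Dict]):
        if isinstance(exception, dict):
            # Structured payload: pull out message/details if present (keys assumed).
            self.message = exception.get("message", "unknown")
            self.details = exception.get("details", "unknown")
        else:
            self.message = exception
            self.details = "unknown"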
@@ -15,6 +15,8 @@ VALID_PROVIDERS = ("cohere", "meta")


 class OCIAuthType(Enum):
+    """OCI authentication types as enumerator."""
+
     API_KEY = 1
     SECURITY_TOKEN = 2
     INSTANCE_PRINCIPAL = 3
@@ -91,7 +91,9 @@ def stream_generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:


 async def astream_generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:
-    """Because the dashscope SDK doesn't provide an async API,
+    """Async version of `stream_generate_with_retry`.
+
+    Because the dashscope SDK doesn't provide an async API,
     we wrap `stream_generate_with_retry` with an async generator."""

     class _AioTongyiGenerator:
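Bridging a sync generator into `async for` is the core trick here: pull each item in a worker thread so the event loop never blocks. A self-contained sketch of the pattern (independent of the dashscope SDK, which this hunk only references):

import asyncio
from typing import AsyncIterator, Iterator


async def aiter_from_sync(sync_gen: Iterator[str]) -> AsyncIterator[str]:
    # next() runs in a thread; a sentinel signals exhaustion.
    sentinel = object()
    while True:
        item = await asyncio.to_thread(next, sync_gen, sentinel)
        if item is sentinel:
            break
        yield item


async def main() -> None:
    async for chunk in aiter_from_sync(iter(["Hello", ", ", "world"])):
        print(chunk, end="")


asyncio.run(main())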
@@ -10,7 +10,7 @@ from langchain_community.utilities.arcee import ArceeWrapper, DALMFilter


 class ArceeRetriever(BaseRetriever):
-    """Document retriever for Arcee's Domain Adapted Language Models (DALMs).
+    """Retriever for Arcee's Domain Adapted Language Models (DALMs).

     To use, set the ``ARCEE_API_KEY`` environment variable with your Arcee API key,
     or pass ``arcee_api_key`` as a named parameter.
@@ -25,6 +25,8 @@ V = TypeVar("V")


 class AstraDBBaseStore(Generic[V], BaseStore[str, V], ABC):
+    """Base class for the DataStax AstraDB data store."""
+
     def __init__(
         self,
         collection_name: str,
@@ -79,6 +81,7 @@ class AstraDBBaseStore(Generic[V], BaseStore[str, V], ABC):

 class AstraDBStore(AstraDBBaseStore[Any]):
     """BaseStore implementation using DataStax AstraDB as the underlying store.
+
     The value type can be any type serializable by json.dumps.
     Can be used to store embeddings with the CacheBackedEmbeddings.
     Documents in the AstraDB collection will have the format
@@ -97,6 +100,7 @@ class AstraDBStore(AstraDBBaseStore[Any]):

 class AstraDBByteStore(AstraDBBaseStore[bytes], ByteStore):
     """ByteStore implementation using DataStax AstraDB as the underlying store.
+
     The bytes values are converted to base64 encoded strings
     Documents in the AstraDB collection will have the format
     {
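Base64 is what keeps the byte values JSON-serializable inside the collection documents. The round trip is two standard-library calls (the field name is illustrative):

import base64

raw = b"\x00\x01 binary payload"

# Encode for storage in a JSON document...
doc = {"value": base64.b64encode(raw).decode("ascii")}

# ...and decode on the way back out.
assert base64.b64decode(doc["value"]) == raw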
@@ -8,6 +8,8 @@ from langchain_community.utilities.polygon import PolygonAPIWrapper


 class Inputs(BaseModel):
+    """Inputs for Polygon's Last Quote API"""
+
     query: str
@@ -57,7 +57,7 @@ def init_vertexai(
     location: Optional[str] = None,
     credentials: Optional["Credentials"] = None,
 ) -> None:
-    """Init vertexai.
+    """Init Vertex AI.

     Args:
         project: The default GCP project to use when making Vertex API calls.
@@ -16,6 +16,8 @@ logger = logging.getLogger(__name__)

 class KDBAI(VectorStore):
     """`KDB.AI` vector store.
+
+    See [https://kdb.ai](https://kdb.ai)

     To use, you should have the `kdbai_client` python package installed.

     Args:
@@ -25,7 +27,7 @@ class KDBAI(VectorStore):
         distance_strategy: One option from DistanceStrategy.EUCLIDEAN_DISTANCE,
             DistanceStrategy.DOT_PRODUCT or DistanceStrategy.COSINE.

-    See the example https://github.com/KxSystems/langchain/blob/KDB.AI/docs/docs/integrations/vectorstores/kdbai.ipynb.
+    See the example [notebook](https://github.com/KxSystems/langchain/blob/KDB.AI/docs/docs/integrations/vectorstores/kdbai.ipynb).
     """

     def __init__(
@@ -47,12 +47,14 @@ def _results_to_docs(docs_and_scores: Any) -> List[Document]:


 class BaseEmbeddingStore:
-    """Embedding store."""
+    """Base class for the Lantern embedding store."""


 def get_embedding_store(
     distance_strategy: DistanceStrategy, collection_name: str
 ) -> Any:
+    """Get the embedding store class."""
+
     embedding_type = None

     if distance_strategy == DistanceStrategy.HAMMING:
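`get_embedding_store` dispatches on the distance strategy to pick an embedding column type (Hamming distance implies binary vectors) and builds a store class for the collection. A hedged sketch of that shape (the extra enum members, type names, and dynamic-class detail are assumptions):

from enum import Enum
from typing import Any


class DistanceStrategy(Enum):
    EUCLIDEAN = "l2"
    COSINE = "cosine"
    HAMMING = "hamming"


class BaseEmbeddingStore:
    """Base class for the Lantern embedding store."""


def get_embedding_store(
    distance_strategy: DistanceStrategy, collection_name: str
) -> Any:
    """Get the embedding store class."""
    # Hamming distance works on bit vectors; other strategies use floats.
    embedding_type = (
        "BIT" if distance_strategy == DistanceStrategy.HAMMING else "REAL[]"
    )
    # One subclass per collection, so each table maps to its own class.
    return type(
        f"EmbeddingStore_{collection_name}",
        (BaseEmbeddingStore,),
        {"embedding_type": embedding_type},
    )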