Mirror of https://github.com/hwchase17/langchain.git
Synced 2025-06-23 15:19:33 +00:00

docstrings cleanup (#8311)

- added missed docstrings
- changed docstrings into consistent format

@baskaryan

This commit is contained in:
parent ceab0a7c1f
commit ee6ff96e28
@@ -13,6 +13,8 @@ from langchain_experimental.autonomous_agents.autogpt.prompt_generator import ge
 
 
 class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):
+    """Prompt for AutoGPT."""
+
     ai_name: str
     ai_role: str
     tools: List[BaseTool]
@@ -38,6 +38,8 @@ logger = logging.getLogger(__name__)
 
 
 class BaseAgentExecutorIterator(ABC):
+    """Base class for AgentExecutorIterator."""
+
     @abstractmethod
     def build_callback_manager(self) -> None:
         pass
@@ -57,6 +59,8 @@ def rebuild_callback_manager_on_set(
 
 
 class AgentExecutorIterator(BaseAgentExecutorIterator):
+    """Iterator for AgentExecutor."""
+
     def __init__(
         self,
         agent_executor: AgentExecutor,
@@ -468,7 +468,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
 
 
 class BaseCallbackManager(CallbackManagerMixin):
-    """Base callback manager that can be used to handle callbacks from LangChain."""
+    """Base callback manager that handles callbacks from LangChain."""
 
     def __init__(
         self,
@@ -93,7 +93,7 @@ def analyze_text(
 
 
 class FlyteCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
-    """This callback handler is designed specifically for usage within a Flyte task."""
+    """This callback handler that is used within a Flyte task."""
 
     def __init__(self) -> None:
        """Initialize callback handler."""
@@ -1004,7 +1004,7 @@ class AsyncCallbackManagerForRetrieverRun(
 
 
 class CallbackManager(BaseCallbackManager):
-    """Callback manager that can be used to handle callbacks from langchain."""
+    """Callback manager that handles callbacks from langchain."""
 
     def on_llm_start(
         self,
@@ -1273,7 +1273,7 @@ class CallbackManager(BaseCallbackManager):
 
 
 class AsyncCallbackManager(BaseCallbackManager):
-    """Async callback manager that can be used to handle callbacks from LangChain."""
+    """Async callback manager that handles callbacks from LangChain."""
 
     @property
     def is_async(self) -> bool:
@@ -22,7 +22,9 @@ def StreamlitCallbackHandler(
     collapse_completed_thoughts: bool = True,
     thought_labeler: Optional[LLMThoughtLabeler] = None,
 ) -> BaseCallbackHandler:
-    """Construct a new StreamlitCallbackHandler. This CallbackHandler is geared towards
+    """Callback Handler that writes to a Streamlit app.
+
+    This CallbackHandler is geared towards
     use with a LangChain Agent; it displays the Agent's LLM and tool-usage "thoughts"
     inside a series of Streamlit expanders.
 
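A minimal usage sketch for the Streamlit handler documented in this hunk, assuming an already-constructed LangChain agent (the agent itself and the prompt widget are placeholders, not part of the diff):

    import streamlit as st
    from langchain.callbacks import StreamlitCallbackHandler

    # `agent` is assumed to be an already-initialized LangChain agent.
    prompt = st.text_input("Ask a question")
    if prompt:
        # LLM and tool-usage "thoughts" are rendered as expanders in this container.
        st_callback = StreamlitCallbackHandler(st.container())
        response = agent.run(prompt, callbacks=[st_callback])
        st.write(response)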
@@ -31,7 +31,7 @@ class TracerSessionV1(TracerSessionV1Base):
 
 
 class TracerSessionBase(TracerSessionV1Base):
-    """A creation class for TracerSession."""
+    """Base class for TracerSession."""
 
     tenant_id: UUID
 
@@ -20,6 +20,7 @@ INTERMEDIATE_STEPS_KEY = "intermediate_steps"
 
 
 def extract_cypher(text: str) -> str:
+    """Extract Cypher code from text using Regex."""
     # The pattern to find Cypher code enclosed in triple backticks
     pattern = r"```(.*?)```"
 
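The regex in this hunk is enough to reproduce the extraction on its own; a stand-alone sketch (the fallback of returning the raw text when no fenced block is found is an assumption, not shown in the hunk):

    import re

    def extract_cypher(text: str) -> str:
        """Extract Cypher code from text using Regex."""
        # The pattern to find Cypher code enclosed in triple backticks
        pattern = r"```(.*?)```"
        matches = re.findall(pattern, text, re.DOTALL)
        # Assumption: return the raw text when no fenced block is present.
        return matches[0] if matches else text

    llm_output = "Here is the query:\n```\nMATCH (m:Movie) RETURN m.title LIMIT 5\n```"
    print(extract_cypher(llm_output))  # prints the fenced Cypher statement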
@@ -13,7 +13,7 @@ logger = logging.getLogger(__name__)
 
 
 class TransformChain(Chain):
-    """Chain transform chain output.
+    """Chain that transforms the chain output.
 
     Example:
         .. code-block:: python
@@ -22,7 +22,7 @@ from langchain.schema.output import ChatGenerationChunk
 
 
 class ChatAnthropic(BaseChatModel, _AnthropicCommon):
-    r"""Wrapper around Anthropic's large language model.
+    """Anthropic's large language chat model.
 
     To use, you should have the ``anthropic`` python package installed, and the
     environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
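A hedged usage sketch for the chat model described above; the model name and the message-based call are illustrative and assume ANTHROPIC_API_KEY is set, as the docstring requires:

    from langchain.chat_models import ChatAnthropic
    from langchain.schema import HumanMessage

    # Requires the `anthropic` package; "claude-2" is an illustrative model name.
    chat = ChatAnthropic(model="claude-2", temperature=0)
    reply = chat([HumanMessage(content="Summarize LangChain in one sentence.")])
    print(reply.content)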
@@ -14,7 +14,9 @@ logger = logging.getLogger(__name__)
 
 
 class AzureChatOpenAI(ChatOpenAI):
-    """Wrapper around Azure OpenAI Chat Completion API. To use this class you
+    """Wrapper around Azure OpenAI Chat Completion API.
+
+    To use this class you
     must have a deployed model on Azure OpenAI. Use `deployment_name` in the
     constructor to refer to the "Model deployment name" in the Azure portal.
 
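A hedged construction sketch for the Azure wrapper described above; every value is a placeholder, and `deployment_name` must match the "Model deployment name" in the Azure portal, as the docstring notes:

    from langchain.chat_models import AzureChatOpenAI
    from langchain.schema import HumanMessage

    # All values are placeholders for an actual Azure OpenAI deployment.
    chat = AzureChatOpenAI(
        deployment_name="my-gpt-35-deployment",
        openai_api_base="https://my-resource.openai.azure.com/",
        openai_api_version="2023-05-15",
        openai_api_key="...",
    )
    print(chat([HumanMessage(content="Hello!")]).content)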
@@ -67,7 +67,7 @@ def _collect_yaml_input(
 
 
 class HumanInputChatModel(BaseChatModel):
-    """ChatModel wrapper which returns user input as the response.."""
+    """ChatModel which returns user input as the response."""
 
     input_func: Callable = Field(default_factory=lambda: _collect_yaml_input)
     message_func: Callable = Field(default_factory=lambda: _display_messages)
@@ -133,8 +133,8 @@ def _convert_message_to_dict(message: BaseMessage) -> dict:
 
 
 class JinaChat(BaseChatModel):
-    """JinaChat is a wrapper for Jina AI's LLM service, providing cost-effective
-    image chat capabilities in comparison to other LLM APIs.
+    """Wrapper for Jina AI's LLM service, providing cost-effective
+    image chat capabilities.
 
     To use, you should have the ``openai`` python package installed, and the
     environment variable ``JINACHAT_API_KEY`` set to your API key, which you
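A hedged usage sketch for JinaChat; it assumes the ``openai`` package is installed and JINACHAT_API_KEY is set, per the docstring, and the temperature argument is illustrative:

    from langchain.chat_models import JinaChat
    from langchain.schema import HumanMessage

    # Assumes JINACHAT_API_KEY is set in the environment.
    chat = JinaChat(temperature=0)
    print(chat([HumanMessage(content="Write a one-line product tagline.")]).content)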
@@ -8,7 +8,8 @@ from langchain.document_loaders.unstructured import (
 
 
 class UnstructuredEPubLoader(UnstructuredFileLoader):
-    """UnstructuredEPubLoader uses unstructured to load EPUB files.
+    """Loader that uses Unstructured to load EPUB files.
+
     You can run the loader in one of two modes: "single" and "elements".
     If you use "single" mode, the document will be returned as a single
     langchain Document object. If you use "elements" mode, the unstructured
@@ -5,7 +5,8 @@ from langchain.document_loaders.unstructured import UnstructuredFileLoader
 
 
 class UnstructuredHTMLLoader(UnstructuredFileLoader):
-    """UnstructuredHTMLLoader uses unstructured to load HTML files.
+    """Loader that uses Unstructured to load HTML files.
+
     You can run the loader in one of two modes: "single" and "elements".
     If you use "single" mode, the document will be returned as a single
     langchain Document object. If you use "elements" mode, the unstructured
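The "single" versus "elements" behaviour described in these loader docstrings can be seen with a short sketch; the file path is a placeholder, and the same pattern applies to the EPub, image and markdown loaders in this commit:

    from langchain.document_loaders import UnstructuredHTMLLoader

    # "example.html" is a placeholder; requires the `unstructured` package.
    single_docs = UnstructuredHTMLLoader("example.html", mode="single").load()
    element_docs = UnstructuredHTMLLoader("example.html", mode="elements").load()
    print(len(single_docs), len(element_docs))  # 1 vs. one Document per element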
@@ -5,7 +5,8 @@ from langchain.document_loaders.unstructured import UnstructuredFileLoader
 
 
 class UnstructuredImageLoader(UnstructuredFileLoader):
-    """UnstructuredImageLoader uses unstructured to load PNG and JPG files.
+    """Loader that uses Unstructured to load PNG and JPG files.
+
     You can run the loader in one of two modes: "single" and "elements".
     If you use "single" mode, the document will be returned as a single
     langchain Document object. If you use "elements" mode, the unstructured
@@ -5,7 +5,8 @@ from langchain.document_loaders.unstructured import UnstructuredFileLoader
 
 
 class UnstructuredMarkdownLoader(UnstructuredFileLoader):
-    """UnstructuredMarkdownLoader uses unstructured to load markdown files.
+    """Loader that uses Unstructured to load markdown files.
+
     You can run the loader in one of two modes: "single" and "elements".
     If you use "single" mode, the document will be returned as a single
     langchain Document object. If you use "elements" mode, the unstructured
@@ -7,7 +7,7 @@ from langchain.document_loaders.tencent_cos_file import TencentCOSFileLoader
 
 
 class TencentCOSDirectoryLoader(BaseLoader):
-    """Loading logic for loading documents from Tencent Cloud COS."""
+    """Loader for Tencent Cloud COS directory."""
 
     def __init__(self, conf: Any, bucket: str, prefix: str = ""):
         """Initialize with COS config, bucket and prefix.
@@ -9,7 +9,7 @@ from langchain.document_loaders.unstructured import UnstructuredFileLoader
 
 
 class TencentCOSFileLoader(BaseLoader):
-    """Loading logic for loading documents from Tencent Cloud COS."""
+    """Loader for Tencent Cloud COS file."""
 
     def __init__(self, conf: Any, bucket: str, key: str):
         """Initialize with COS config, bucket and key name.
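A heavily hedged sketch of the Tencent COS loaders above; the CosConfig construction assumes the cos-python-sdk-v5 package, and the credentials, region, bucket and key values are all placeholders:

    from qcloud_cos import CosConfig  # assumed dependency: cos-python-sdk-v5
    from langchain.document_loaders import TencentCOSFileLoader

    # Placeholder COS configuration.
    conf = CosConfig(
        Region="ap-guangzhou",
        SecretId="YOUR_SECRET_ID",
        SecretKey="YOUR_SECRET_KEY",
    )
    loader = TencentCOSFileLoader(conf=conf, bucket="my-bucket", key="docs/report.pdf")
    docs = loader.load()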
@@ -34,7 +34,7 @@ def validate_unstructured_version(min_unstructured_version: str) -> None:
 
 
 class UnstructuredBaseLoader(BaseLoader, ABC):
-    """Loader that uses unstructured to load files."""
+    """Loader that uses Unstructured to load files."""
 
     def __init__(
         self,
@@ -130,7 +130,9 @@ class UnstructuredBaseLoader(BaseLoader, ABC):
 
 
 class UnstructuredFileLoader(UnstructuredBaseLoader):
-    """UnstructuredFileLoader uses unstructured to load files. The file loader uses the
+    """Loader that uses Unstructured to load files.
+
+    The file loader uses the
     unstructured partition function and will automatically detect the file
     type. You can run the loader in one of two modes: "single" and "elements".
     If you use "single" mode, the document will be returned as a single
@@ -209,7 +211,8 @@ def get_elements_from_api(
 
 
 class UnstructuredAPIFileLoader(UnstructuredFileLoader):
-    """UnstructuredAPIFileLoader uses the Unstructured API to load files.
+    """Loader that uses the Unstructured API to load files.
+
     By default, the loader makes a call to the hosted Unstructured API.
     If you are running the unstructured API locally, you can change the
     API rule by passing in the url parameter when you initialize the loader.
@@ -272,7 +275,9 @@ class UnstructuredAPIFileLoader(UnstructuredFileLoader):
 
 
 class UnstructuredFileIOLoader(UnstructuredBaseLoader):
-    """UnstructuredFileIOLoader uses unstructured to load files. The file loader
+    """Loader that uses Unstructured to load files.
+
+    The file loader
     uses the unstructured partition function and will automatically detect the file
     type. You can run the loader in one of two modes: "single" and "elements".
     If you use "single" mode, the document will be returned as a single
@@ -317,7 +322,8 @@ class UnstructuredFileIOLoader(UnstructuredBaseLoader):
 
 
 class UnstructuredAPIFileIOLoader(UnstructuredFileIOLoader):
-    """UnstructuredAPIFileIOLoader uses the Unstructured API to load files.
+    """Loader that uses the Unstructured API to load files.
+
     By default, the loader makes a call to the hosted Unstructured API.
     If you are running the unstructured API locally, you can change the
     API rule by passing in the url parameter when you initialize the loader.
@@ -5,7 +5,7 @@ from langchain.utils import get_from_env
 
 
 class DoctranPropertyExtractor(BaseDocumentTransformer):
-    """Extracts properties from text documents using doctran.
+    """Extract properties from text documents using doctran.
 
     Arguments:
         properties: A list of the properties to extract.
@@ -5,7 +5,7 @@ from langchain.utils import get_from_env
 
 
 class DoctranQATransformer(BaseDocumentTransformer):
-    """Extracts QA from text documents using doctran.
+    """Extract QA from text documents using doctran.
 
     Arguments:
         openai_api_key: OpenAI API key. Can also be specified via environment variable
@@ -5,7 +5,7 @@ from langchain.utils import get_from_env
 
 
 class DoctranTextTranslator(BaseDocumentTransformer):
-    """Translates text documents using doctran.
+    """Translate text documents using doctran.
 
     Arguments:
         openai_api_key: OpenAI API key. Can also be specified via environment variable
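A hedged sketch of driving one of the doctran transformers above; the `language` argument and the use of the async transform method are assumptions about the interface rather than something shown in this diff, and OPENAI_API_KEY is expected in the environment:

    import asyncio

    from langchain.document_transformers import DoctranTextTranslator
    from langchain.schema import Document

    docs = [Document(page_content="LangChain makes it easy to compose LLM apps.")]
    translator = DoctranTextTranslator(language="spanish")  # assumed parameter
    translated = asyncio.run(translator.atransform_documents(docs))
    print(translated[0].page_content)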
@@ -1,4 +1,4 @@
-"""LLM Chain specifically for evaluating question answering."""
+"""LLM Chains for evaluating question answering."""
 from __future__ import annotations
 
 import re
@@ -50,7 +50,7 @@ def _parse_string_eval_output(text: str) -> dict:
 
 
 class QAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
-    """LLM Chain specifically for evaluating question answering."""
+    """LLM Chain for evaluating question answering."""
 
     output_key: str = "results"  #: :meta private:
 
@@ -184,7 +184,7 @@ class QAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
 
 
 class ContextQAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
-    """LLM Chain specifically for evaluating QA w/o GT based on context"""
+    """LLM Chain for evaluating QA w/o GT based on context"""
 
     @property
     def requires_reference(self) -> bool:
@@ -308,7 +308,7 @@ class ContextQAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
 
 
 class CotQAEvalChain(ContextQAEvalChain):
-    """LLM Chain specifically for evaluating QA using chain of thought reasoning."""
+    """LLM Chain for evaluating QA using chain of thought reasoning."""
 
     @property
     def evaluation_name(self) -> str:
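These evaluation hunks only touch docstrings; for context, a hedged sketch of how QAEvalChain is typically driven (the example data and the ChatOpenAI choice are illustrative, and the key arguments mirror the dictionaries used here):

    from langchain.chat_models import ChatOpenAI
    from langchain.evaluation.qa import QAEvalChain

    examples = [{"query": "What is 2 + 2?", "answer": "4"}]
    predictions = [{"result": "2 + 2 equals 4."}]

    eval_chain = QAEvalChain.from_llm(ChatOpenAI(temperature=0))
    graded = eval_chain.evaluate(
        examples,
        predictions,
        question_key="query",
        answer_key="answer",
        prediction_key="result",
    )
    print(graded)  # e.g. [{"results": "CORRECT"}]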
@@ -1,4 +1,4 @@
-"""LLM Chain specifically for generating examples for question answering."""
+"""LLM Chain for generating examples for question answering."""
 from __future__ import annotations
 
 from typing import Any
@@ -9,7 +9,7 @@ from langchain.schema.language_model import BaseLanguageModel
 
 
 class QAGenerateChain(LLMChain):
-    """LLM Chain specifically for generating examples for question answering."""
+    """LLM Chain for generating examples for question answering."""
 
     @classmethod
     def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) -> QAGenerateChain:
@@ -7,11 +7,15 @@ from langchain.schema import BaseOutputParser
 
 
 class AutoGPTAction(NamedTuple):
+    """Action returned by AutoGPTOutputParser."""
+
     name: str
     args: Dict
 
 
 class BaseAutoGPTOutputParser(BaseOutputParser):
+    """Base Output parser for AutoGPT."""
+
     @abstractmethod
     def parse(self, text: str) -> AutoGPTAction:
         """Return AutoGPTAction"""
@@ -36,6 +40,8 @@ def preprocess_json_input(input_str: str) -> str:
 
 
 class AutoGPTOutputParser(BaseAutoGPTOutputParser):
+    """Output parser for AutoGPT."""
+
     def parse(self, text: str) -> AutoGPTAction:
         try:
             parsed = json.loads(text, strict=False)
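A hedged sketch of the parser in action; the {"command": {"name": ..., "args": ...}} layout is the AutoGPT response format the parser expects rather than something shown in this hunk, and the import path assumes the langchain_experimental copy of the module:

    import json

    from langchain_experimental.autonomous_agents.autogpt.output_parser import (
        AutoGPTOutputParser,
    )

    # Illustrative LLM response in the assumed AutoGPT JSON format.
    llm_response = json.dumps(
        {"command": {"name": "search", "args": {"query": "latest LangChain release"}}}
    )
    action = AutoGPTOutputParser().parse(llm_response)
    print(action.name, action.args)  # search {'query': 'latest LangChain release'}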
@@ -13,6 +13,8 @@ from langchain.vectorstores.base import VectorStoreRetriever
 
 
 class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):
+    """Prompt for AutoGPT."""
+
     ai_name: str
     ai_role: str
     tools: List[BaseTool]
@@ -123,7 +123,7 @@ class PromptGenerator:
 
 
 def get_prompt(tools: List[BaseTool]) -> str:
-    """This function generates a prompt string.
+    """Generates a prompt string.
 
     It includes various constraints, commands, resources, and performance evaluations.
 
@@ -2,6 +2,8 @@ from enum import Enum
 
 
 class Constant(Enum):
+    """Enum for constants used in the CPAL."""
+
     narrative_input = "narrative_input"
     chain_answer = "chain_answer"  # natural language answer
    chain_data = "chain_data"  # pydantic instance
@@ -136,7 +136,7 @@ def get_arangodb_client(
     username: Optional[str] = None,
     password: Optional[str] = None,
 ) -> Any:
-    """Convenience method that gets Arango DB from credentials.
+    """Get the Arango DB client from credentials.
 
     Args:
         url: Arango DB url. Can be passed in as named arg or set as environment
@@ -110,7 +110,7 @@ class StringPromptValue(PromptValue):
 
 
 class StringPromptTemplate(BasePromptTemplate, ABC):
-    """String prompt should expose the format method, returning a prompt."""
+    """String prompt that exposes the format method, returning a prompt."""
 
     def format_prompt(self, **kwargs: Any) -> PromptValue:
         """Create Chat Messages."""
@@ -18,7 +18,8 @@ if TYPE_CHECKING:
 
 
 class GoogleCloudEnterpriseSearchRetriever(BaseRetriever):
-    """Wrapper around Google Cloud Enterprise Search Service API.
+    """Retriever for the Google Cloud Enterprise Search Service API.
+
     For the detailed explanation of the Enterprise Search concepts
     and configuration parameters refer to the product documentation.
 
@@ -26,6 +26,7 @@ OPERATOR_TO_TQL = {
 
 
 def can_cast_to_float(string: str) -> bool:
+    """Check if a string can be cast to a float."""
     try:
         float(string)
         return True
@@ -67,6 +67,8 @@ class QuestionListOutputParser(PydanticOutputParser):
 
 
 class WebResearchRetriever(BaseRetriever):
+    """Retriever for web research based on the Google Search API."""
+
     # Inputs
     vectorstore: VectorStore = Field(
         ..., description="Vector store for storing web pages"
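A hedged setup sketch for the web research retriever documented above; the from_llm constructor mirrors its documented interface, while the vector store, embeddings and search-wrapper choices (and their API keys) are illustrative:

    from langchain.chat_models import ChatOpenAI
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.retrievers.web_research import WebResearchRetriever
    from langchain.utilities import GoogleSearchAPIWrapper
    from langchain.vectorstores import Chroma

    # Assumes OPENAI_API_KEY, GOOGLE_API_KEY and GOOGLE_CSE_ID are set.
    vectorstore = Chroma(embedding_function=OpenAIEmbeddings(), persist_directory="./db")
    retriever = WebResearchRetriever.from_llm(
        vectorstore=vectorstore,
        llm=ChatOpenAI(temperature=0),
        search=GoogleSearchAPIWrapper(),
    )
    docs = retriever.get_relevant_documents("How does LangChain handle callbacks?")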
@@ -13,4 +13,6 @@ if TYPE_CHECKING:
 
 
 class AmadeusBaseTool(BaseTool):
+    """Base Tool for Amadeus."""
+
     client: Client = Field(default_factory=authenticate)
@@ -12,6 +12,8 @@ from langchain.tools.amadeus.base import AmadeusBaseTool
 
 
 class ClosestAirportSchema(BaseModel):
+    """Schema for the AmadeusClosestAirport tool."""
+
     location: str = Field(
         description=(
             " The location for which you would like to find the nearest airport "
@@ -29,6 +31,8 @@ class ClosestAirportSchema(BaseModel):
 
 
 class AmadeusClosestAirport(AmadeusBaseTool):
+    """Tool for finding the closest airport to a particular location."""
+
     name: str = "closest_airport"
     description: str = (
         "Use this tool to find the closest airport to a particular location."
@@ -14,6 +14,8 @@ logger = logging.getLogger(__name__)
 
 
 class FlightSearchSchema(BaseModel):
+    """Schema for the AmadeusFlightSearch tool."""
+
     originLocationCode: str = Field(
         description=(
             " The three letter International Air Transport "
@@ -53,6 +55,8 @@ class FlightSearchSchema(BaseModel):
 
 
 class AmadeusFlightSearch(AmadeusBaseTool):
+    """Tool for searching for a single flight between two airports."""
+
     name: str = "single_flight_search"
     description: str = (
         " Use this tool to search for a single flight between the origin and "
@@ -20,6 +20,8 @@ from langchain.utilities.github import GitHubAPIWrapper
 
 
 class GitHubAction(BaseTool):
+    """Tool for interacting with the GitHub API."""
+
     api_wrapper: GitHubAPIWrapper = Field(default_factory=GitHubAPIWrapper)
     mode: str
     name = ""
@@ -36,4 +36,5 @@ def stringify_dict(data: dict) -> str:
 
 
 def comma_list(items: List[Any]) -> str:
+    """Convert a list to a comma-separated string."""
     return ", ".join(str(item) for item in items)