docstrings top level update (#7173)

Updated docstrings so that the [API
Reference](https://api.python.langchain.com/en/latest/api_reference.html)
page has text in the second column (the class/function/... description).
Leonid Ganeline 2023-07-06 23:42:28 -07:00 committed by GitHub
parent 8d961b9e33
commit 284d40b7af
23 changed files with 103 additions and 9 deletions

View File

@@ -1,4 +1,3 @@
-"""Base class for all language models."""
 from __future__ import annotations

 from abc import ABC, abstractmethod
@@ -30,6 +29,8 @@ def _get_token_ids_default_method(text: str) -> List[int]:
 class BaseLanguageModel(Serializable, ABC):
+    """Base class for all language models."""
+
     @abstractmethod
     def generate_prompt(
         self,
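
These one-line class docstrings are exactly what the API Reference table picks up: Sphinx fills the description column with the first line of an object's `__doc__`. A quick check of what the table will show, as a sketch (assuming the class still lives at `langchain.base_language`):

```python
from langchain.base_language import BaseLanguageModel

# Sphinx autosummary uses the first line of the docstring as the
# short description shown in the API Reference table.
first_line = (BaseLanguageModel.__doc__ or "").strip().splitlines()[0]
print(first_line)  # -> Base class for all language models.
```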

View File

@@ -18,6 +18,7 @@ LANGCHAIN_MODEL_NAME = "langchain-model"
 def import_comet_ml() -> Any:
+    """Import comet_ml and raise an error if it is not installed."""
     try:
         import comet_ml  # noqa: F401
     except ImportError:
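
`import_comet_ml` is the optional-dependency guard used across these callback modules: import inside a try block and fail with an actionable message. A minimal sketch of the pattern; the exact error wording is an assumption, not a quote from the module:

```python
from typing import Any


def import_comet_ml() -> Any:
    """Import comet_ml and raise an error if it is not installed."""
    try:
        import comet_ml  # noqa: F401
    except ImportError:
        # Assumed wording; the real module raises a similarly actionable message.
        raise ImportError(
            "comet_ml is not installed. Install it with `pip install comet_ml`."
        )
    return comet_ml
```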

View File

@@ -23,6 +23,7 @@ logger = logging.getLogger(__name__)
 def import_flytekit() -> Tuple[flytekit, renderer]:
+    """Import flytekit and flytekitplugins-deck-standard."""
     try:
         import flytekit  # noqa: F401
         from flytekitplugins.deck import renderer  # noqa: F401

View File

@@ -6,6 +6,7 @@ from langchain.schema import AgentAction, AgentFinish, LLMResult
 def import_infino() -> Any:
+    """Import the infino client."""
     try:
         from infinopy import InfinoClient
     except ImportError:

View File

@@ -9,11 +9,15 @@ if TYPE_CHECKING:
 class ChildType(Enum):
+    """The enumerator of the child type."""
+
     MARKDOWN = "MARKDOWN"
     EXCEPTION = "EXCEPTION"


 class ChildRecord(NamedTuple):
+    """The child record as a NamedTuple."""
+
     type: ChildType
     kwargs: Dict[str, Any]
     dg: DeltaGenerator

View File

@@ -27,6 +27,8 @@ EXCEPTION_EMOJI = "⚠️"
 class LLMThoughtState(Enum):
+    """Enumerator of the LLMThought state."""
+
     # The LLM is thinking about what to do next. We don't know which tool we'll run.
     THINKING = "THINKING"
     # The LLM has decided to run a tool. We don't have results from the tool yet.
@@ -36,6 +38,8 @@ class LLMThoughtState(Enum):
 class ToolRecord(NamedTuple):
+    """The tool record as a NamedTuple."""
+
     name: str
     input_str: str
@@ -100,6 +104,8 @@ class LLMThoughtLabeler:
 class LLMThought:
+    """A thought in the LLM's thought stream."""
+
     def __init__(
         self,
         parent_container: DeltaGenerator,
@@ -107,6 +113,14 @@ class LLMThought:
         expanded: bool,
         collapse_on_complete: bool,
     ):
+        """Initialize the LLMThought.
+
+        Args:
+            parent_container: The container we're writing into.
+            labeler: The labeler to use for this thought.
+            expanded: Whether the thought should be expanded by default.
+            collapse_on_complete: Whether the thought should be collapsed.
+        """
         self._container = MutableExpander(
             parent_container=parent_container,
             label=labeler.get_initial_label(),
@@ -213,6 +227,8 @@ class LLMThought:
 class StreamlitCallbackHandler(BaseCallbackHandler):
+    """A callback handler that writes to a Streamlit app."""
+
     def __init__(
         self,
         parent_container: DeltaGenerator,
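
The handler is typically constructed with a Streamlit container and passed through the `callbacks` argument. A hedged wiring sketch (the full constructor signature is truncated in the hunk above):

```python
import streamlit as st
from langchain.callbacks import StreamlitCallbackHandler

# Agent thoughts and tool runs render into this container as they happen.
st_callback = StreamlitCallbackHandler(parent_container=st.container())
# Then, for example:
#   agent.run(user_prompt, callbacks=[st_callback])
```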

View File

@@ -31,6 +31,7 @@ def log_error_once(method: str, exception: Exception) -> None:
 def wait_for_all_tracers() -> None:
+    """Wait for all tracers to finish."""
     global _TRACERS
     for tracer in _TRACERS:
         tracer.wait_for_futures()

View File

@@ -123,8 +123,8 @@ def load_query_constructor_chain(
     enable_limit: bool = False,
     **kwargs: Any,
 ) -> LLMChain:
-    """
-    Load a query constructor chain.
+    """Load a query constructor chain.
+
     Args:
         llm: BaseLanguageModel to use for the chain.
         document_contents: The contents of the document to be queried.

View File

@@ -1,4 +1,4 @@
-"""LangChain+ Client."""
+"""LangChain + Client."""
 from langchain.client.runner_utils import (
     arun_on_dataset,
     arun_on_examples,

View File

@@ -3,6 +3,8 @@ from typing import List
 class CodeSegmenter(ABC):
+    """The abstract class for the code segmenter."""
+
     def __init__(self, code: str):
         self.code = code

View File

@@ -4,6 +4,8 @@ from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter
 class JavaScriptSegmenter(CodeSegmenter):
+    """The code segmenter for JavaScript."""
+
     def __init__(self, code: str):
         super().__init__(code)
         self.source_lines = self.code.splitlines()

View File

@@ -5,6 +5,8 @@ from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter
 class PythonSegmenter(CodeSegmenter):
+    """The code segmenter for Python."""
+
     def __init__(self, code: str):
         super().__init__(code)
         self.source_lines = self.code.splitlines()

View File

@@ -12,7 +12,14 @@ from langchain.evaluation.schema import EvaluatorType, LLMEvalChain
 def load_dataset(uri: str) -> List[Dict]:
-    """Load a dataset from the LangChainDatasets HuggingFace org."""
+    """Load a dataset from the LangChainDatasets HuggingFace org.
+
+    Args:
+        uri: The uri of the dataset to load.
+
+    Returns:
+        A list of dictionaries, each representing a row in the dataset.
+    """
     try:
         from datasets import load_dataset
     except ImportError:
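
With the new Args/Returns sections the call shape is explicit; a usage sketch (requires the `datasets` package, and "llm-math" is an illustrative dataset name):

```python
from langchain.evaluation.loading import load_dataset

# Any dataset published under the LangChainDatasets HuggingFace org works here.
rows = load_dataset("llm-math")
print(len(rows), rows[0])  # a list of dicts, one per dataset row
```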

View File

@@ -32,6 +32,8 @@ ServerType = Literal["http", "grpc"]
 class IdentifyingParams(TypedDict):
+    """Parameters for identifying a model as a typed dict."""
+
     model_name: str
     model_id: Optional[str]
     server_url: Optional[str]

View File

@@ -23,6 +23,13 @@ if TYPE_CHECKING:
 def is_codey_model(model_name: str) -> bool:
+    """Returns True if the model name is a Codey model.
+
+    Args:
+        model_name: The model name to check.
+
+    Returns: True if the model name is a Codey model.
+    """
     return "code" in model_name

View File

@@ -7,6 +7,8 @@ from langchain.load.serializable import Serializable
 class Reviver:
+    """Reviver for JSON objects."""
+
     def __init__(self, secrets_map: Optional[Dict[str, str]] = None) -> None:
         self.secrets_map = secrets_map or dict()
@@ -65,4 +67,13 @@ class Reviver:
 def loads(text: str, *, secrets_map: Optional[Dict[str, str]] = None) -> Any:
+    """Load a JSON object from a string.
+
+    Args:
+        text: The string to load.
+        secrets_map: A map of secrets to load.
+
+    Returns:
+    """
     return json.loads(text, object_hook=Reviver(secrets_map))
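
A round-trip sketch of the documented `loads`, paired with its counterpart `dumps` from `langchain.load.dump`; the secret value is a placeholder and unused for this object:

```python
from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.prompts import PromptTemplate

prompt = PromptTemplate.from_template("Tell me a joke about {topic}")
text = dumps(prompt)

# secrets_map lets the Reviver re-insert values masked at dump time.
restored = loads(text, secrets_map={"OPENAI_API_KEY": "placeholder"})
assert restored.template == prompt.template
```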

View File

@@ -12,6 +12,15 @@ from langchain.schema import BaseRetriever
 def clean_excerpt(excerpt: str) -> str:
+    """Cleans an excerpt from Kendra.
+
+    Args:
+        excerpt: The excerpt to clean.
+
+    Returns:
+        The cleaned excerpt.
+    """
     if not excerpt:
         return excerpt
     res = re.sub("\s+", " ", excerpt).replace("...", "")
@@ -19,6 +28,16 @@ def clean_excerpt(excerpt: str) -> str:
 def combined_text(title: str, excerpt: str) -> str:
+    """Combines a title and an excerpt into a single string.
+
+    Args:
+        title: The title of the document.
+        excerpt: The excerpt of the document.
+
+    Returns:
+        The combined text.
+    """
     if not title or not excerpt:
         return ""
     return f"Document Title: {title} \nDocument Excerpt: \n{excerpt}\n"

View File

@@ -16,13 +16,22 @@ from langchain.schema import BaseRetriever
 class WeaviateHybridSearchRetriever(BaseRetriever):
+    """Retriever that uses Weaviate's hybrid search to retrieve documents."""
+
     client: Any
+    """keyword arguments to pass to the Weaviate client."""
     index_name: str
+    """The name of the index to use."""
     text_key: str
+    """The name of the text key to use."""
     alpha: float = 0.5
+    """The weight of the text key in the hybrid search."""
     k: int = 4
+    """The number of results to return."""
     attributes: List[str]
+    """The attributes to return in the results."""
     create_schema_if_missing: bool = True
+    """Whether to create the schema if it doesn't exist."""

     @root_validator(pre=True)
     def validate_client(
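
With the field docstrings above, the retriever is nearly self-describing; a minimal construction sketch with placeholder connection details:

```python
import weaviate
from langchain.retrievers.weaviate_hybrid_search import WeaviateHybridSearchRetriever

client = weaviate.Client(url="http://localhost:8080")  # placeholder endpoint

retriever = WeaviateHybridSearchRetriever(
    client=client,
    index_name="LangChain",  # the Weaviate class (index) to search
    text_key="text",         # property that holds the document text
    alpha=0.5,               # 0.0 = pure keyword search, 1.0 = pure vector search
    k=4,
    attributes=[],           # extra properties to surface as metadata
)
docs = retriever.get_relevant_documents("hybrid search")
```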

View File

@@ -561,6 +561,8 @@ class SentenceTransformersTokenTextSplitter(TextSplitter):
 class Language(str, Enum):
+    """Enum of the programming languages."""
+
     CPP = "cpp"
     GO = "go"
     JAVA = "java"

View File

@@ -52,7 +52,7 @@ def get_current_page(browser: SyncBrowser) -> SyncPage:
 def create_async_playwright_browser(headless: bool = True) -> AsyncBrowser:
     """
-    Create a async playwright browser.
+    Create an async playwright browser.

     Args:
         headless: Whether to run the browser in headless mode. Defaults to True.
@@ -86,7 +86,7 @@ T = TypeVar("T")
 def run_async(coro: Coroutine[Any, Any, T]) -> T:
-    """
+    """Run an async coroutine.

     Args:
         coro: The coroutine to run. Coroutine[Any, Any, T]
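
Typical use of the corrected helper, sketched (requires `pip install playwright` followed by `playwright install`):

```python
from langchain.agents.agent_toolkits import PlayWrightBrowserToolkit
from langchain.tools.playwright.utils import create_async_playwright_browser

async_browser = create_async_playwright_browser()  # headless=True by default
toolkit = PlayWrightBrowserToolkit.from_browser(async_browser=async_browser)
tools = toolkit.get_tools()
```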

View File

@@ -26,7 +26,7 @@ logger = logging.getLogger(__name__)
 class HTTPVerb(str, Enum):
-    """HTTP verbs."""
+    """Enumerator of the HTTP verbs."""

     GET = "get"
     PUT = "put"

View File

@@ -108,12 +108,15 @@ def comma_list(items: List[Any]) -> str:
 @contextlib.contextmanager
 def mock_now(dt_value):  # type: ignore
     """Context manager for mocking out datetime.now() in unit tests.

     Example:
     with mock_now(datetime.datetime(2011, 2, 3, 10, 11)):
         assert datetime.datetime.now() == datetime.datetime(2011, 2, 3, 10, 11)
     """

     class MockDateTime(datetime.datetime):
+        """Mock datetime.datetime.now() with a fixed datetime."""
+
         @classmethod
         def now(cls):  # type: ignore
             # Create a copy of dt_value.
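
The hunk cuts off inside `now`; for completeness, a self-contained sketch of the technique (a close paraphrase, not necessarily the exact LangChain body):

```python
import contextlib
import datetime


@contextlib.contextmanager
def mock_now(dt_value):  # type: ignore
    """Context manager for mocking out datetime.now() in unit tests."""

    class MockDateTime(datetime.datetime):
        """Mock datetime.datetime.now() with a fixed datetime."""

        @classmethod
        def now(cls):  # type: ignore
            # Create a copy of dt_value rather than handing out the original.
            return dt_value.replace()

    real_datetime = datetime.datetime
    datetime.datetime = MockDateTime
    try:
        yield datetime.datetime
    finally:
        # Restore the real class even if the test body raises.
        datetime.datetime = real_datetime


with mock_now(datetime.datetime(2011, 2, 3, 10, 11)):
    assert datetime.datetime.now() == datetime.datetime(2011, 2, 3, 10, 11)
```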

View File

@@ -16,7 +16,10 @@ from langchain.vectorstores.base import VectorStore, VectorStoreRetriever
 class Vectara(VectorStore):
-    """Implementation of Vector Store using Vectara (https://vectara.com).
+    """Implementation of Vector Store using Vectara.
+
+    See (https://vectara.com).
+
     Example:
         .. code-block:: python