experimental[patch]: update module doc strings (#19539)

Added missed module descriptions. Fixed format.
Leonid Ganeline 2024-03-26 07:38:10 -07:00 committed by GitHub
parent 72ba738bf5
commit 4159a4723c
30 changed files with 154 additions and 40 deletions

View File

@@ -1,3 +1,12 @@
"""**Agent** is a class that uses an LLM to choose
a sequence of actions to take.
In Chains, a sequence of actions is hardcoded. In Agents,
a language model is used as a reasoning engine to determine which actions
to take and in which order.
Agents select and use **Tools** and **Toolkits** for actions.
""" # noqa: E501
from langchain_experimental.agents.agent_toolkits import (
create_csv_agent,
create_pandas_dataframe_agent,

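As a minimal usage sketch (not part of this commit), one of the exported constructors can be wired up roughly like this; the DataFrame, model choice, and question are illustrative assumptions:

    import pandas as pd
    from langchain_openai import ChatOpenAI  # assumes the langchain-openai package is installed
    from langchain_experimental.agents import create_pandas_dataframe_agent

    # Toy data; any pandas DataFrame works here.
    df = pd.DataFrame({"country": ["US", "DE"], "population_millions": [331, 83]})
    agent = create_pandas_dataframe_agent(ChatOpenAI(temperature=0), df, verbose=True)
    agent.run("Which country has the larger population?")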
View File

@@ -1,3 +1,15 @@
"""**Autonomous agents** in the LangChain experimental package include
[AutoGPT](https://github.com/Significant-Gravitas/AutoGPT),
[BabyAGI](https://github.com/yoheinakajima/babyagi),
and [HuggingGPT](https://arxiv.org/abs/2303.17580) agents that
interact with language models autonomously.
These agents have specific functionalities like memory management,
task creation, execution chains, and response generation.
They differ from ordinary agents by their autonomous decision-making capabilities,
memory handling, and specialized functionalities for tasks and responses.
"""
from langchain_experimental.autonomous_agents.autogpt.agent import AutoGPT
from langchain_experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI
from langchain_experimental.autonomous_agents.hugginggpt.hugginggpt import HuggingGPT

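A rough sketch of wiring up `BabyAGI`; the FAISS vector store, OpenAI models, and objective below are assumptions added for illustration, not part of this diff:

    from langchain_community.vectorstores import FAISS  # requires the faiss-cpu package
    from langchain_openai import OpenAI, OpenAIEmbeddings
    from langchain_experimental.autonomous_agents import BabyAGI

    # Vector store used by BabyAGI to keep track of task results.
    vectorstore = FAISS.from_texts(["placeholder"], OpenAIEmbeddings())
    baby_agi = BabyAGI.from_llm(llm=OpenAI(temperature=0), vectorstore=vectorstore, max_iterations=3)
    baby_agi({"objective": "Write a short weather report for San Francisco"})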
View File

@@ -1,3 +1,23 @@
"""
**Comprehend Moderation** is used to detect and handle `Personally Identifiable Information (PII)`,
`toxicity`, and `prompt safety` in text.
The LangChain experimental package includes the **AmazonComprehendModerationChain** class
for comprehend moderation tasks. It is based on the `Amazon Comprehend` service.
This class can be configured with specific moderation settings like PII labels, redaction,
toxicity thresholds, and prompt safety thresholds.
See more at https://aws.amazon.com/comprehend/
The `Amazon Comprehend` service is used by several other classes:
- **ComprehendToxicity** class is used to check the toxicity of text prompts using
`AWS Comprehend service` and take actions based on the configuration
- **ComprehendPromptSafety** class is used to validate the safety of given prompt
text, raising an error if unsafe content is detected based on the specified threshold
- **ComprehendPII** class is designed to handle
`Personally Identifiable Information (PII)` moderation tasks,
detecting and managing PII entities in text inputs
""" # noqa: E501
from langchain_experimental.comprehend_moderation.amazon_comprehend_moderation import (
AmazonComprehendModerationChain,
)

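A hedged sketch modeled on the documented LCEL composition; the `boto3` client, region, fake LLM, and the `input`/`output` keys are assumptions made for illustration:

    import boto3
    from langchain_core.prompts import PromptTemplate
    from langchain_community.llms.fake import FakeListLLM
    from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain

    comprehend_client = boto3.client("comprehend", region_name="us-east-1")  # placeholder region
    moderation = AmazonComprehendModerationChain(client=comprehend_client, verbose=True)

    prompt = PromptTemplate.from_template("{question}")
    llm = FakeListLLM(responses=["You can reach John Doe at 313-666-7440."])

    # Moderate both the rendered prompt and the model response.
    chain = (
        prompt
        | moderation
        | {"input": (lambda x: x["output"]) | llm}
        | moderation
    )
    try:
        print(chain.invoke({"question": "What is John Doe's phone number?"})["output"])
    except Exception as error:  # PII detection raises a moderation error by default
        print(error)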
View File

@@ -0,0 +1,17 @@
"""
**Causal program-aided language (CPAL)** is a concept implemented in LangChain as
a chain for causal modeling and narrative decomposition.
CPAL improves upon the program-aided language (**PAL**) approach by incorporating
a causal structure to prevent hallucination in language models,
particularly when dealing with complex narratives and math
problems with nested dependencies.
CPAL involves translating causal narratives into a stack of operations,
setting hypothetical conditions for causal models, and decomposing
narratives into story elements.
It allows for the creation of causal chains that define the relationships
between different elements in a narrative, enabling the modeling and analysis
of causal relationships within a given context.
"""

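A brief sketch using the univariate constructor documented for simple narratives; the OpenAI model and question are assumptions:

    from langchain_openai import OpenAI
    from langchain_experimental.cpal.base import CPALChain

    llm = OpenAI(temperature=0, max_tokens=512)
    cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)
    cpal_chain.run(
        "Jan has three times the number of pets as Marcia. "
        "Marcia has two more pets than Cindy. "
        "If Cindy has four pets, how many total pets do the three have?"
    )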
View File

@@ -1,4 +1,13 @@
"""Data anonymizer package"""
"""**Data anonymizer** contains both Anonymizers and Deanonymizers.
It uses the [Microsoft Presidio](https://microsoft.github.io/presidio/) library.
**Anonymizers** are used to replace `Personally Identifiable Information (PII)`
entities in a text with some other
value by applying a certain operator (e.g. replace, mask, redact, encrypt).
**Deanonymizers** are used to revert the anonymization operation
(e.g. to decrypt an encrypted text).
"""
from langchain_experimental.data_anonymizer.presidio import (
PresidioAnonymizer,
PresidioReversibleAnonymizer,

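A short usage sketch; it assumes the `presidio-analyzer`, `presidio-anonymizer`, and `faker` packages are installed:

    from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer

    anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON", "PHONE_NUMBER"])
    anonymized = anonymizer.anonymize("My name is John Doe, call me at 313-666-7440")
    print(anonymized)                          # PII replaced with generated fake values
    print(anonymizer.deanonymize(anonymized))  # original values restored from the mapping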
View File

@@ -1,4 +1,7 @@
"""The Chain runs a self-review of logical fallacies as determined by this paper \
categorizing and defining logical fallacies https://arxiv.org/pdf/2212.07425.pdf. \
Modeled after Constitutional AI and in same format, but applying logical \
fallacies as generalized rules to remove in output"""
"""**Fallacy Removal** Chain runs a self-review of logical fallacies
as determined by the paper
[Robust and Explainable Identification of Logical Fallacies in Natural
Language Arguments](https://arxiv.org/pdf/2212.07425.pdf).
It is modeled after `Constitutional AI` and uses the same format, but applies logical
fallacies as generalized rules to remove them from the output.
"""

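A hedged sketch following the documented pattern; the wrapped `LLMChain`, model, and chosen fallacy name are assumptions:

    from langchain.chains.llm import LLMChain
    from langchain_core.prompts import PromptTemplate
    from langchain_openai import OpenAI
    from langchain_experimental.fallacy_removal.base import FallacyChain

    llm = OpenAI(temperature=0)
    qa_chain = LLMChain(llm=llm, prompt=PromptTemplate.from_template("Answer concisely: {question}"))

    fallacies = FallacyChain.get_fallacies(["correlation_causation"])
    fallacy_chain = FallacyChain.from_llm(
        chain=qa_chain, logical_fallacies=fallacies, llm=llm, verbose=True
    )
    fallacy_chain.run(question="Ice cream sales and drowning both peak in summer. Does ice cream cause drowning?")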
View File

@@ -1,4 +1,4 @@
"""Generative Agents primitives."""
"""**Generative Agent** primitives."""
from langchain_experimental.generative_agents.generative_agent import GenerativeAgent
from langchain_experimental.generative_agents.memory import GenerativeAgentMemory

View File

@@ -1,3 +1,4 @@
"""**Graph Transformers** transform Documents into Graph Documents."""
from langchain_experimental.graph_transformers.diffbot import DiffbotGraphTransformer
from langchain_experimental.graph_transformers.llm import LLMGraphTransformer

View File

@@ -143,24 +143,26 @@ def map_to_base_relationship(rel: Any) -> Relationship:
class LLMGraphTransformer:
"""
A class designed to transform documents into graph-based documents using a LLM.
"""Transform documents into graph-based documents using a LLM.
It allows specifying constraints on the types of nodes and relationships to include
in the output graph. The class doesn't support neither extract and node or
relationship properties
Args:
llm (BaseLanguageModel): An instance of a language model supporting structured
output. allowed_nodes (List[str], optional): Specifies which node types are
allowed in the graph. Defaults to an empty list, allowing all node types.
output.
allowed_nodes (List[str], optional): Specifies which node types are
allowed in the graph. Defaults to an empty list, allowing all node types.
allowed_relationships (List[str], optional): Specifies which relationship types
are allowed in the graph. Defaults to an empty list, allowing all relationship
types.
prompt (Optional[ChatPromptTemplate], optional): The prompt to pass to the to
the LLM with additional instructions.
are allowed in the graph. Defaults to an empty list, allowing all relationship
types.
prompt (Optional[ChatPromptTemplate], optional): The prompt to pass to
the LLM with additional instructions.
strict_mode (bool, optional): Determines whether the transformer should apply
filtering to strictly adhere to `allowed_nodes` and `allowed_relationships`.
Defaults to True.
filtering to strictly adhere to `allowed_nodes` and `allowed_relationships`.
Defaults to True.
Example:
.. code-block:: python
from langchain_experimental.graph_transformers import LLMGraphTransformer

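A fuller sketch in the spirit of the truncated example above; the chat model and the constraint values are assumptions:

    from langchain_core.documents import Document
    from langchain_openai import ChatOpenAI
    from langchain_experimental.graph_transformers import LLMGraphTransformer

    transformer = LLMGraphTransformer(
        llm=ChatOpenAI(temperature=0),
        allowed_nodes=["Person", "Organization"],
        allowed_relationships=["WORKS_AT"],
    )
    docs = [Document(page_content="Marie Curie worked at the University of Paris.")]
    graph_documents = transformer.convert_to_graph_documents(docs)
    print(graph_documents[0].nodes, graph_documents[0].relationships)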
View File

@@ -1 +1,2 @@
"""Chain that interprets a prompt and executes bash code to perform bash operations."""
"""**LLM bash** is a chain that uses LLM to interpret a prompt and
executes **bash** code."""

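A small sketch of `LLMBashChain` from this module; the model is an assumption, and the generated commands run on the local shell, so use with care:

    from langchain_openai import OpenAI
    from langchain_experimental.llm_bash.base import LLMBashChain

    bash_chain = LLMBashChain.from_llm(OpenAI(temperature=0), verbose=True)
    bash_chain.run("Please write a bash command that lists the files in the current directory.")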
View File

@@ -1,4 +1,4 @@
"""Chain that interprets a prompt and executes python code to do math.
"""Chain that interprets a prompt and **executes python code to do math**.
Heavily borrowed from llm_math, wrapper for SymPy
Heavily borrowed from `llm_math`; it uses the [SymPy](https://www.sympy.org/) package.
"""

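A minimal sketch; requires the `sympy` package, and the model choice is an assumption:

    from langchain_openai import OpenAI
    from langchain_experimental.llm_symbolic_math.base import LLMSymbolicMathChain

    symbolic_math = LLMSymbolicMathChain.from_llm(OpenAI(temperature=0))
    symbolic_math.run("What is the derivative of sin(x) * exp(x) with respect to x?")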
View File

@@ -1,5 +1,6 @@
"""Experimental LLM wrappers."""
"""Experimental **LLM** classes provide
access to large language model (**LLM**) APIs and services.
"""
from langchain_experimental.llms.jsonformer_decoder import JsonFormer
from langchain_experimental.llms.llamaapi import ChatLlamaAPI
from langchain_experimental.llms.lmformatenforcer_decoder import LMFormatEnforcer

View File

@@ -1,3 +1,11 @@
"""**OpenCLIP Embeddings** model.
OpenCLIP is a multimodal model that can encode text and images into a shared space.
See the paper https://arxiv.org/abs/2103.00020
and [this repository](https://github.com/mlfoundations/open_clip) for more details.
"""
from .open_clip import OpenCLIPEmbeddings
__all__ = ["OpenCLIPEmbeddings"]

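A brief sketch; it assumes the `open_clip_torch` and `pillow` packages, the model/checkpoint names shown, and a placeholder image path:

    from langchain_experimental.open_clip import OpenCLIPEmbeddings

    clip_embd = OpenCLIPEmbeddings(model_name="ViT-B-32", checkpoint="laion2b_s34b_b79k")
    text_vectors = clip_embd.embed_documents(["a photo of a cat", "a photo of a dog"])
    image_vectors = clip_embd.embed_image(["path/to/photo.jpg"])  # placeholder path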
View File

@@ -1,10 +1,9 @@
"""Implements Program-Aided Language Models.
"""**PAL Chain** implements **Program-Aided Language** Models.
As in https://arxiv.org/pdf/2211.10435.pdf.
See the paper: https://arxiv.org/pdf/2211.10435.pdf.
This is vulnerable to arbitrary code execution:
https://github.com/langchain-ai/langchain/issues/5872
"""
This chain is vulnerable to [arbitrary code execution](https://github.com/langchain-ai/langchain/issues/5872).
""" # noqa: E501
from langchain_experimental.pal_chain.base import PALChain
__all__ = ["PALChain"]

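A minimal sketch of the math variant; given the warning above, run it only in a sandboxed environment (recent releases may additionally require an explicit opt-in flag, not shown here):

    from langchain_openai import OpenAI
    from langchain_experimental.pal_chain import PALChain

    pal_chain = PALChain.from_math_prompt(OpenAI(temperature=0, max_tokens=512), verbose=True)
    pal_chain.run(
        "Olivia has $23. She bought five bagels for $3 each. How much money does she have left?"
    )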
View File

@@ -1,3 +1,7 @@
"""**Plan-and-execute agents** are planning tasks with a language model (LLM) and
executing them with a separate agent.
"""
from langchain_experimental.plan_and_execute.agent_executor import PlanAndExecute
from langchain_experimental.plan_and_execute.executors.agent_executor import (
load_agent_executor,

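A rough wiring sketch; the chat model and the toy tool are assumptions added for illustration:

    from langchain_core.tools import tool
    from langchain_openai import ChatOpenAI
    from langchain_experimental.plan_and_execute import (
        PlanAndExecute,
        load_agent_executor,
        load_chat_planner,
    )

    @tool
    def word_length(word: str) -> int:
        """Return the number of characters in a word."""
        return len(word)

    llm = ChatOpenAI(temperature=0)
    agent = PlanAndExecute(
        planner=load_chat_planner(llm),
        executor=load_agent_executor(llm, [word_length], verbose=True),
    )
    agent.run("How many characters are in the word 'experimental'?")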
View File

@@ -1,4 +1,7 @@
"""HuggingFace Security toolkit."""
"""**HuggingFace Injection Identifier** is a tool that uses
[HuggingFace Prompt Injection model](https://huggingface.co/deepset/deberta-v3-base-injection)
to detect prompt injection attacks.
"""
from langchain_experimental.prompt_injection_identifier.hugging_face_identifier import (
HuggingFaceInjectionIdentifier,

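A small sketch; downloading the model needs the `transformers` package and network access, and the example prompt is illustrative:

    from langchain_experimental.prompt_injection_identifier import HuggingFaceInjectionIdentifier

    injection_identifier = HuggingFaceInjectionIdentifier()
    # Benign input is returned unchanged; an injection attempt raises a ValueError.
    injection_identifier.run("Name five cities with the largest population.")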
View File

@@ -50,7 +50,7 @@ def _model_default_factory(
class HuggingFaceInjectionIdentifier(BaseTool):
"""Tool that uses HuggingFace Prompt Injection to
"""Tool that uses HuggingFace Prompt Injection model to
detect prompt injection attacks."""
name: str = "hugging_face_injection_identifier"

View File

@@ -1,3 +1,5 @@
"""Unified method for **loading a prompt** from LangChainHub or local file system.
"""
from langchain_experimental.prompts.load import load_prompt
__all__ = ["load_prompt"]

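A one-line sketch; the file path is a placeholder for a prompt serialized in the standard JSON/YAML format:

    from langchain_experimental.prompts import load_prompt

    prompt = load_prompt("path/to/prompt.yaml")  # placeholder path
    print(prompt.input_variables)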
View File

@@ -1,4 +1,9 @@
"""Amazon Personalize primitives."""
"""**Amazon Personalize** primitives.
[Amazon Personalize](https://docs.aws.amazon.com/personalize/latest/dg/what-is-personalize.html)
is a fully managed machine learning service that uses your data to generate
item recommendations for your users.
""" # noqa: E501
from langchain_experimental.recommenders.amazon_personalize import AmazonPersonalize
from langchain_experimental.recommenders.amazon_personalize_chain import (
AmazonPersonalizeChain,

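A heavily hedged sketch modeled on the documented client usage; the AWS profile, region, recommender ARN, and user id are all placeholders:

    from langchain_experimental.recommenders import AmazonPersonalize

    client = AmazonPersonalize(
        credentials_profile_name="default",         # placeholder AWS profile
        region_name="us-west-2",                    # placeholder region
        recommender_arn="arn:aws:personalize:...",  # placeholder recommender ARN
    )
    client.get_recommendations(user_id="1")         # placeholder user id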
View File

@@ -2,8 +2,9 @@ from typing import Any, List, Mapping, Optional, Sequence
class AmazonPersonalize:
"""Amazon Personalize Runtime wrapper for executing real-time operations:
https://docs.aws.amazon.com/personalize/latest/dg/API_Operations_Amazon_Personalize_Runtime.html
"""Amazon Personalize Runtime wrapper for executing real-time operations.
See [this link for more details](https://docs.aws.amazon.com/personalize/latest/dg/API_Operations_Amazon_Personalize_Runtime.html).
Args:
campaign_arn: str, Optional: The Amazon Resource Name (ARN) of the campaign

View File

@@ -0,0 +1,5 @@
"""**Retriever** class returns Documents given a text **query**.
It is more general than a vector store. A retriever does not need to be able to
store documents, only to return (or retrieve) them.
"""

View File

@@ -1,3 +1,12 @@
"""
**RL (Reinforcement Learning) Chain** leverages `Vowpal Wabbit (VW)` models
for contextual reinforcement learning, with the goal of modifying
the prompt before the LLM call.
[Vowpal Wabbit](https://vowpalwabbit.org/) provides fast, efficient,
and flexible online machine learning techniques for reinforcement learning,
supervised learning, and more.
"""
import logging
from langchain_experimental.rl_chain.base import (

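A hedged sketch of the `PickBest` chain exported from this package, following the documented pattern; it needs the `vowpal_wabbit_next` package, and the prompt, model, and options are assumptions:

    from langchain_core.prompts import PromptTemplate
    from langchain_openai import OpenAI
    import langchain_experimental.rl_chain as rl_chain

    prompt = PromptTemplate.from_template(
        "Suggest one dinner for {user}, choosing from these meals: {meal}."
    )
    chain = rl_chain.PickBest.from_llm(llm=OpenAI(temperature=0), prompt=prompt)
    response = chain.run(
        meal=rl_chain.ToSelectFrom(["pizza", "salad", "ramen"]),
        user=rl_chain.BasedOn("a vegetarian who likes spicy food"),
    )
    print(response)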
View File

@@ -1,13 +1,13 @@
"""Chain for applying self-critique using the SmartGPT workflow.
"""**SmartGPT** chain is applying self-critique using the `SmartGPT` workflow.
See details at https://youtu.be/wVzuvf9D9BU
The workflow performs these 3 steps:
1. **Ideate**: Pass the user prompt to an ideation LLM n_ideas times,
1. **Ideate**: Pass the user prompt to an `Ideation LLM` n_ideas times,
each result is an "idea"
2. **Critique**: Pass the ideas to a critique LLM which looks for flaws in the ideas
2. **Critique**: Pass the ideas to a `Critique LLM` which looks for flaws in the ideas
& picks the best one
3. **Resolve**: Pass the critique to a resolver LLM which improves upon the best idea
3. **Resolve**: Pass the critique to a `Resolver LLM` which improves upon the best idea
& outputs only the (improved version of) the best output
In total, the SmartGPT workflow will use n_ideas+2 LLM calls

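A minimal sketch of `SmartLLMChain` with the three-step workflow above; the model and `n_ideas` value are assumptions:

    from langchain_core.prompts import PromptTemplate
    from langchain_openai import ChatOpenAI
    from langchain_experimental.smart_llm import SmartLLMChain

    prompt = PromptTemplate.from_template("{question}")
    chain = SmartLLMChain(llm=ChatOpenAI(temperature=0), prompt=prompt, n_ideas=3, verbose=True)
    chain.run(question="I have a 12 liter jug and a 6 liter jug. How do I measure exactly 6 liters?")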
View File

@@ -1,4 +1,4 @@
"""Chain for interacting with SQL Database."""
"""**SQL Chain** interacts with `SQL` Database."""
from langchain_experimental.sql.base import SQLDatabaseChain, SQLDatabaseSequentialChain
__all__ = ["SQLDatabaseChain", "SQLDatabaseSequentialChain"]

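A short sketch; the SQLite URI is a placeholder and the model is an assumption:

    from langchain_community.utilities import SQLDatabase
    from langchain_openai import OpenAI
    from langchain_experimental.sql import SQLDatabaseChain

    db = SQLDatabase.from_uri("sqlite:///Chinook.db")  # placeholder database
    db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db, verbose=True)
    db_chain.run("How many employees are there?")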
View File

@@ -1,3 +1,4 @@
"""Generate **synthetic data** using LLM and few-shot template."""
from typing import Any, Dict, List, Optional
from langchain.chains.base import Chain

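A hedged sketch of the data-generation chain defined in this module; the model, fields, and preferences are illustrative assumptions:

    from langchain_openai import ChatOpenAI
    from langchain_experimental.synthetic_data import create_data_generation_chain

    chain = create_data_generation_chain(ChatOpenAI(temperature=0.7))
    chain({"fields": ["blue", "yellow"], "preferences": {"style": "like a weather forecast"}})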
View File

@@ -0,0 +1 @@
"""Generate **tabular synthetic data** using LLM and few-shot template."""

View File

@@ -1,3 +1,4 @@
"""Experimental **text splitter** based on semantic similarity."""
import copy
import re
from typing import Any, Dict, Iterable, List, Literal, Optional, Sequence, Tuple, cast

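A minimal sketch of `SemanticChunker` from this module; the embeddings model is an assumption:

    from langchain_openai import OpenAIEmbeddings
    from langchain_experimental.text_splitter import SemanticChunker

    text_splitter = SemanticChunker(OpenAIEmbeddings())
    docs = text_splitter.create_documents(
        ["Cats sleep most of the day. Cats also purr. Interest rates rose again this quarter."]
    )
    print(len(docs))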
View File

@@ -1,3 +1,4 @@
"""Experimental **Python REPL** tools."""
from langchain_experimental.tools.python.tool import PythonAstREPLTool, PythonREPLTool
__all__ = ["PythonREPLTool", "PythonAstREPLTool"]

View File

@@ -1,7 +1,5 @@
"""Implementation of a Tree of Thought (ToT) chain based on the paper
"Large Language Model Guided Tree-of-Thought"
https://arxiv.org/pdf/2305.08291.pdf
"""Implementation of a **Tree of Thought (ToT)** chain based on the paper
[Large Language Model Guided Tree-of-Thought](https://arxiv.org/pdf/2305.08291.pdf).
The Tree of Thought (ToT) chain uses a tree structure to explore the space of
possible solutions to a problem.

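A hedged sketch; the toy checker below is a placeholder for real domain validation logic, and the model and search parameters are assumptions:

    from typing import Tuple
    from langchain_openai import OpenAI
    from langchain_experimental.tot.base import ToTChain
    from langchain_experimental.tot.checker import ToTChecker
    from langchain_experimental.tot.thought import ThoughtValidity

    class ToyChecker(ToTChecker):
        """Placeholder checker: treats any thought mentioning '42' as a final answer."""

        def evaluate(
            self, problem_description: str, thoughts: Tuple[str, ...] = ()
        ) -> ThoughtValidity:
            if thoughts and "42" in thoughts[-1]:
                return ThoughtValidity.VALID_FINAL
            return ThoughtValidity.VALID_INTERMEDIATE

    tot_chain = ToTChain(llm=OpenAI(temperature=1, max_tokens=256), checker=ToyChecker(), k=10, c=3)
    tot_chain.run(problem_description="What is the answer to life, the universe and everything?")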
View File

@@ -1,3 +1,4 @@
"""Utility that simulates a standalone **Python REPL**."""
from langchain_experimental.utilities.python import PythonREPL
__all__ = ["PythonREPL"]
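A tiny sketch of the simulated REPL; as with the other code-executing utilities in this package, treat its input as untrusted:

    from langchain_experimental.utilities import PythonREPL

    python_repl = PythonREPL()
    print(python_repl.run("print(17 * 3)"))  # captures stdout and returns '51\n'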