Mirror of https://github.com/hwchase17/langchain.git, synced 2025-06-25 08:03:39 +00:00
docs: modules descriptions (#17844)
Several `core` modules do not have descriptions, like the [agent](https://api.python.langchain.com/en/latest/core_api_reference.html#module-langchain_core.agents) module.

- Added the missing module descriptions. The descriptions are mostly copied from the corresponding `langchain` or `community` package modules.

This commit is contained in: parent d9aa11d589, commit 2f2b77602e
@@ -1,3 +1,33 @@
"""
**Agent** is a class that uses an LLM to choose a sequence of actions to take.

In Chains, a sequence of actions is hardcoded. In Agents,
a language model is used as a reasoning engine to determine which actions
to take and in which order.

Agents select and use **Tools** and **Toolkits** for actions.

**Class hierarchy:**

.. code-block::

    BaseSingleActionAgent --> LLMSingleActionAgent
                              OpenAIFunctionsAgent
                              XMLAgent
                              Agent --> <name>Agent  # Examples: ZeroShotAgent, ChatAgent


    BaseMultiActionAgent --> OpenAIMultiFunctionsAgent


**Main helpers:**

.. code-block::

    AgentType, AgentExecutor, AgentOutputParser, AgentExecutorIterator,
    AgentAction, AgentFinish, AgentStep

"""  # noqa: E501
from __future__ import annotations

import json
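For illustration of the main helpers named above, here is a minimal sketch of how an agent step and a final answer are commonly represented with `AgentAction` and `AgentFinish` from `langchain_core.agents`; the tool name and inputs are made up for the example.

.. code-block:: python

    from langchain_core.agents import AgentAction, AgentFinish

    # One intermediate step: the agent decides to call a (hypothetical) "search" tool.
    step = AgentAction(
        tool="search",
        tool_input="weather in San Francisco",
        log="I should look up the current weather.",
    )

    # The final step: the agent returns its answer to the caller.
    finish = AgentFinish(
        return_values={"output": "It is sunny in San Francisco."},
        log="I now know the final answer.",
    )

    print(step.tool, step.tool_input)
    print(finish.return_values["output"])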
@@ -0,0 +1 @@
"""Some **beta** features that are not yet ready for production."""
@@ -1,3 +1,24 @@
"""
.. warning::
  Beta Feature!

**Cache** provides an optional caching layer for LLMs.

Cache is useful for two reasons:

- It can save you money by reducing the number of API calls you make to the LLM
  provider if you're often requesting the same completion multiple times.
- It can speed up your application by reducing the number of API calls you make
  to the LLM provider.

Cache directly competes with Memory. See documentation for Pros and Cons.

**Class hierarchy:**

.. code-block::

    BaseCache --> <name>Cache  # Examples: InMemoryCache, RedisCache, GPTCache
"""
from __future__ import annotations

from abc import ABC, abstractmethod
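As a minimal sketch of the interface, a dict-backed cache conforming to `BaseCache`; it assumes the `lookup`/`update`/`clear` signatures from `langchain_core.caches`, and the class name is made up.

.. code-block:: python

    from typing import Any, Dict, Optional, Sequence, Tuple

    from langchain_core.caches import BaseCache
    from langchain_core.outputs import Generation


    class SimpleDictCache(BaseCache):
        """Cache completions in a plain dict, keyed by (prompt, llm_string)."""

        def __init__(self) -> None:
            self._store: Dict[Tuple[str, str], Sequence[Generation]] = {}

        def lookup(self, prompt: str, llm_string: str) -> Optional[Sequence[Generation]]:
            return self._store.get((prompt, llm_string))

        def update(
            self, prompt: str, llm_string: str, return_val: Sequence[Generation]
        ) -> None:
            self._store[(prompt, llm_string)] = return_val

        def clear(self, **kwargs: Any) -> None:
            self._store = {}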
@@ -1,3 +1,11 @@
"""**Callback handlers** allow listening to events in LangChain.

**Class hierarchy:**

.. code-block::

    BaseCallbackHandler --> <name>CallbackHandler  # Example: AimCallbackHandler
"""
from langchain_core.callbacks.base import (
    AsyncCallbackHandler,
    BaseCallbackHandler,
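A minimal sketch of a custom handler that listens to LLM events; it assumes the `on_llm_start`/`on_llm_end` hooks of `BaseCallbackHandler`, and the handler itself is purely illustrative.

.. code-block:: python

    from typing import Any, Dict, List

    from langchain_core.callbacks import BaseCallbackHandler
    from langchain_core.outputs import LLMResult


    class PrintingCallbackHandler(BaseCallbackHandler):
        """Print a line when an LLM call starts and ends."""

        def on_llm_start(
            self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
        ) -> None:
            print(f"LLM started with {len(prompts)} prompt(s)")

        def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
            print(f"LLM finished with {len(response.generations)} generation(s)")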
@@ -1,3 +1,19 @@
"""**Chat message history** stores a history of the message interactions in a chat.


**Class hierarchy:**

.. code-block::

    BaseChatMessageHistory --> <name>ChatMessageHistory  # Examples: FileChatMessageHistory, PostgresChatMessageHistory

**Main helpers:**

.. code-block::

    AIMessage, HumanMessage, BaseMessage

"""  # noqa: E501
from __future__ import annotations

from abc import ABC, abstractmethod
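As a sketch, an in-memory implementation of the interface; it assumes `BaseChatMessageHistory` expects `add_message` and `clear` plus a `messages` list, and the class name is made up.

.. code-block:: python

    from typing import List

    from langchain_core.chat_history import BaseChatMessageHistory
    from langchain_core.messages import BaseMessage, HumanMessage


    class InMemoryChatMessageHistory(BaseChatMessageHistory):
        """Keep the conversation in a plain Python list."""

        def __init__(self) -> None:
            self.messages: List[BaseMessage] = []

        def add_message(self, message: BaseMessage) -> None:
            self.messages.append(message)

        def clear(self) -> None:
            self.messages = []


    history = InMemoryChatMessageHistory()
    history.add_message(HumanMessage(content="Hello!"))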
@@ -1,3 +1,6 @@
"""**Chat Sessions** are a collection of messages and function calls.

"""
from typing import Sequence, TypedDict

from langchain_core.messages import BaseMessage
@@ -1,3 +1,7 @@
"""**Document** module is a collection of classes that handle documents
and their transformations.

"""
from langchain_core.documents.base import Document
from langchain_core.documents.transformers import BaseDocumentTransformer

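For example, a `Document` pairs text content with arbitrary metadata (the metadata keys below are made up):

.. code-block:: python

    from langchain_core.documents import Document

    doc = Document(
        page_content="LangChain is a framework for developing LLM applications.",
        metadata={"source": "example.txt", "page": 1},
    )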
@@ -1,3 +1,4 @@
"""**Embeddings** interface."""
from abc import ABC, abstractmethod
from typing import List

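A minimal sketch of the interface: a deterministic fake embedding model implementing the two abstract methods, `embed_documents` and `embed_query` (the hashing scheme is only for illustration).

.. code-block:: python

    import hashlib
    from typing import List

    from langchain_core.embeddings import Embeddings


    class HashEmbeddings(Embeddings):
        """Map text to a small deterministic vector (illustration only)."""

        def __init__(self, size: int = 8) -> None:
            self.size = size

        def _embed(self, text: str) -> List[float]:
            digest = hashlib.sha256(text.encode("utf-8")).digest()
            return [byte / 255 for byte in digest[: self.size]]

        def embed_documents(self, texts: List[str]) -> List[List[float]]:
            return [self._embed(text) for text in texts]

        def embed_query(self, text: str) -> List[float]:
            return self._embed(text)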
@@ -1,4 +1,7 @@
-"""Logic for selecting examples to include in prompts."""
"""**Example selector** implements logic for selecting examples to include
in prompts.
This allows us to select examples that are most relevant to the input.
"""
from langchain_core.example_selectors.base import BaseExampleSelector
from langchain_core.example_selectors.length_based import (
    LengthBasedExampleSelector,
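A sketch of a custom selector; it assumes `BaseExampleSelector` expects `add_example` and `select_examples`, and the word-overlap heuristic is made up for illustration.

.. code-block:: python

    from typing import Dict, List

    from langchain_core.example_selectors import BaseExampleSelector


    class WordOverlapExampleSelector(BaseExampleSelector):
        """Pick the examples that share the most words with the input."""

        def __init__(self, examples: List[Dict[str, str]], k: int = 2) -> None:
            self.examples = examples
            self.k = k

        def add_example(self, example: Dict[str, str]) -> None:
            self.examples.append(example)

        def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
            query_words = set(" ".join(input_variables.values()).lower().split())

            def overlap(example: Dict[str, str]) -> int:
                example_words = set(" ".join(example.values()).lower().split())
                return len(query_words & example_words)

            return sorted(self.examples, key=overlap, reverse=True)[: self.k]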
@@ -1,3 +1,4 @@
"""Custom **exceptions** for LangChain. """
from typing import Any, Optional

@@ -1,3 +1,27 @@
"""**Language Model** is a type of model that can generate text or complete
text prompts.

LangChain has two main classes to work with language models:
- **LLM** classes provide access to the large language model (**LLM**) APIs and services.
- **Chat Models** are a variation on language models.

**Class hierarchy:**

.. code-block::

    BaseLanguageModel --> BaseLLM --> LLM --> <name>  # Examples: AI21, HuggingFaceHub, OpenAI
                      --> BaseChatModel --> <name>    # Examples: ChatOpenAI, ChatGooglePalm

**Main helpers:**

.. code-block::

    LLMResult, PromptValue,
    CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
    CallbackManager, AsyncCallbackManager,
    AIMessage, BaseMessage, HumanMessage
"""  # noqa: E501

from langchain_core.language_models.base import (
    BaseLanguageModel,
    LanguageModelInput,
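A common pattern is to subclass `LLM` and implement `_call` and `_llm_type`; below is a minimal sketch of a fake LLM that just echoes the prompt (the class name and behavior are made up).

.. code-block:: python

    from typing import Any, List, Optional

    from langchain_core.callbacks import CallbackManagerForLLMRun
    from langchain_core.language_models.llms import LLM


    class EchoLLM(LLM):
        """A toy LLM that returns the prompt unchanged."""

        @property
        def _llm_type(self) -> str:
            return "echo"

        def _call(
            self,
            prompt: str,
            stop: Optional[List[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
        ) -> str:
            return prompt


    llm = EchoLLM()
    print(llm.invoke("Hello, world!"))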
@@ -1,4 +1,4 @@
-"""Serialization and deserialization."""
"""**Load** module helps with serialization and deserialization."""
from langchain_core.load.dump import dumpd, dumps
from langchain_core.load.load import load, loads
from langchain_core.load.serializable import Serializable
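A sketch of a round trip through the serialization helpers; it assumes `dumps`/`loads` accept a `Serializable` object such as an `AIMessage`.

.. code-block:: python

    from langchain_core.load import dumps, loads
    from langchain_core.messages import AIMessage

    message = AIMessage(content="Hello!")
    serialized = dumps(message)   # JSON string
    restored = loads(serialized)  # back to an AIMessage
    assert restored.content == message.content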
@@ -1,3 +1,12 @@
"""**Memory** maintains Chain state, incorporating context from past runs.

**Class hierarchy for Memory:**

.. code-block::

    BaseMemory --> <name>Memory --> <name>Memory  # Examples: BaseChatMemory -> MotorheadMemory

"""  # noqa: E501
from __future__ import annotations

from abc import ABC, abstractmethod
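A minimal sketch of a custom memory class; it assumes the abstract surface of `BaseMemory` (`memory_variables`, `load_memory_variables`, `save_context`, `clear`), and simply remembers the last output.

.. code-block:: python

    from typing import Any, Dict, List

    from langchain_core.memory import BaseMemory


    class LastOutputMemory(BaseMemory):
        """Remember only the output of the most recent run."""

        last_output: str = ""

        @property
        def memory_variables(self) -> List[str]:
            return ["last_output"]

        def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
            return {"last_output": self.last_output}

        def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
            # Keep whatever value the chain produced last.
            self.last_output = next(iter(outputs.values()), "")

        def clear(self) -> None:
            self.last_output = ""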
@@ -1,3 +1,19 @@
"""**Messages** are objects used in prompts and chat conversations.

**Class hierarchy:**

.. code-block::

    BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
                --> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChunk, FunctionMessageChunk, ToolMessageChunk

**Main helpers:**

.. code-block::

    ChatPromptTemplate

"""  # noqa: E501
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union

from langchain_core.messages.ai import AIMessage, AIMessageChunk
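For example, a short conversation is just a list of message objects:

.. code-block:: python

    from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

    conversation = [
        SystemMessage(content="You are a helpful assistant."),
        HumanMessage(content="What is the capital of France?"),
        AIMessage(content="The capital of France is Paris."),
    ]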
@@ -1,3 +1,17 @@
"""**OutputParser** classes parse the output of an LLM call.

**Class hierarchy:**

.. code-block::

    BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser  # ListOutputParser, PydanticOutputParser

**Main helpers:**

.. code-block::

    Serializable, Generation, PromptValue
"""  # noqa: E501
from langchain_core.output_parsers.base import (
    BaseGenerationOutputParser,
    BaseLLMOutputParser,
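A sketch of a custom parser that subclasses `BaseOutputParser` and implements `parse`; the comma-splitting behavior and class name are just an example (langchain_core already ships list parsers).

.. code-block:: python

    from typing import List

    from langchain_core.output_parsers import BaseOutputParser


    class SimpleCommaListParser(BaseOutputParser[List[str]]):
        """Parse an LLM completion into a list of strings."""

        def parse(self, text: str) -> List[str]:
            return [item.strip() for item in text.strip().split(",") if item.strip()]


    parser = SimpleCommaListParser()
    print(parser.parse("red, green, blue"))  # ['red', 'green', 'blue']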
@@ -1,3 +1,7 @@
"""**Output** classes are used to represent the output of a language model call
and the output of a chat.

"""
from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk
from langchain_core.outputs.chat_result import ChatResult
from langchain_core.outputs.generation import Generation, GenerationChunk
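For illustration, a minimal sketch of building the most common output containers directly (the text content is made up):

.. code-block:: python

    from langchain_core.messages import AIMessage
    from langchain_core.outputs import ChatGeneration, Generation, LLMResult

    # A plain text completion and the result wrapper an LLM returns.
    generation = Generation(text="Paris")
    result = LLMResult(generations=[[generation]])

    # A chat model wraps a message instead of raw text.
    chat_generation = ChatGeneration(message=AIMessage(content="Paris"))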
@@ -1,3 +1,8 @@
"""**Prompt values** for language model prompts.

Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
from __future__ import annotations

from abc import ABC, abstractmethod
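A sketch of how a prompt value bridges the two model types: the same value can render as a string or as chat messages (the template text is made up).

.. code-block:: python

    from langchain_core.prompts import ChatPromptTemplate

    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful assistant."),
            ("human", "{question}"),
        ]
    )

    prompt_value = prompt.invoke({"question": "What is LangChain?"})
    print(prompt_value.to_string())    # single formatted string, for LLMs
    print(prompt_value.to_messages())  # list of chat messages, for chat models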
@@ -1,7 +1,7 @@
"""**Prompt** is the input to the model.

Prompt is often constructed
-from multiple components. Prompt classes and functions make constructing
from multiple components and prompt values. Prompt classes and functions make constructing
and working with prompts easy.

**Class hierarchy:**
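For example, a minimal sketch of composing a prompt from a template and input variables (the template text is made up):

.. code-block:: python

    from langchain_core.prompts import PromptTemplate

    prompt = PromptTemplate.from_template("Tell me a {adjective} joke about {topic}.")
    print(prompt.format(adjective="short", topic="bears"))
    # -> "Tell me a short joke about bears."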
@@ -1,3 +1,23 @@
"""**Retriever** class returns Documents given a text **query**.

It is more general than a vector store. A retriever does not need to be able to
store documents, only to return (or retrieve) them. Vector stores can be used as
the backbone of a retriever, but there are other types of retrievers as well.

**Class hierarchy:**

.. code-block::

    BaseRetriever --> <name>Retriever  # Examples: ArxivRetriever, MergerRetriever

**Main helpers:**

.. code-block::

    RetrieverInput, RetrieverOutput, RetrieverLike, RetrieverOutputLike,
    Document, Serializable, Callbacks,
    CallbackManagerForRetrieverRun, AsyncCallbackManagerForRetrieverRun
"""
from __future__ import annotations

import warnings
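A sketch of a retriever that is not backed by a vector store; it assumes the `_get_relevant_documents(query, *, run_manager)` hook of `BaseRetriever`, and the keyword matching is only illustrative.

.. code-block:: python

    from typing import List

    from langchain_core.callbacks import CallbackManagerForRetrieverRun
    from langchain_core.documents import Document
    from langchain_core.retrievers import BaseRetriever


    class KeywordRetriever(BaseRetriever):
        """Return stored documents whose text contains the query string."""

        documents: List[Document]
        k: int = 3

        def _get_relevant_documents(
            self, query: str, *, run_manager: CallbackManagerForRetrieverRun
        ) -> List[Document]:
            matches = [
                doc for doc in self.documents if query.lower() in doc.page_content.lower()
            ]
            return matches[: self.k]


    retriever = KeywordRetriever(documents=[Document(page_content="LangChain retrievers")])
    print(retriever.invoke("langchain"))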
@@ -1,3 +1,10 @@
"""**Store** implements the key-value stores and storage helpers.

The module provides implementations of various key-value stores that conform
to a simple key-value interface.

The primary goal of these stores is to support the implementation of caching.
"""
from abc import ABC, abstractmethod
from typing import (
    AsyncIterator,
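A sketch of a dict-backed store; it assumes the `mget`/`mset`/`mdelete`/`yield_keys` abstract methods of `BaseStore`, and the class name is made up.

.. code-block:: python

    from typing import Dict, Iterator, List, Optional, Sequence, Tuple

    from langchain_core.stores import BaseStore


    class DictStore(BaseStore[str, bytes]):
        """Store bytes values under string keys in a plain dict."""

        def __init__(self) -> None:
            self._data: Dict[str, bytes] = {}

        def mget(self, keys: Sequence[str]) -> List[Optional[bytes]]:
            return [self._data.get(key) for key in keys]

        def mset(self, key_value_pairs: Sequence[Tuple[str, bytes]]) -> None:
            self._data.update(key_value_pairs)

        def mdelete(self, keys: Sequence[str]) -> None:
            for key in keys:
                self._data.pop(key, None)

        def yield_keys(self, *, prefix: Optional[str] = None) -> Iterator[str]:
            for key in self._data:
                if prefix is None or key.startswith(prefix):
                    yield key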
@@ -1,4 +1,6 @@
-"""Print information about the system and langchain packages for debugging purposes."""
"""**sys_info** prints information about the system and langchain packages
for debugging purposes.
"""

from typing import Sequence

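Usage is a single call (output varies by environment); this assumes the module exposes a `print_sys_info` helper.

.. code-block:: python

    from langchain_core.sys_info import print_sys_info

    print_sys_info()  # prints OS, Python, and langchain package versions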
@@ -1,4 +1,22 @@
-"""Base implementation for tools or skills."""
"""**Tools** are classes that an Agent uses to interact with the world.

Each tool has a **description**. The agent uses the description to choose the right
tool for the job.

**Class hierarchy:**

.. code-block::

    RunnableSerializable --> BaseTool --> <name>Tool  # Examples: AIPluginTool, BaseGraphQLTool
                                          <name>      # Examples: BraveSearch, HumanInputRun

**Main helpers:**

.. code-block::

    CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
"""  # noqa: E501

from __future__ import annotations

import inspect
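A sketch using the `@tool` decorator from `langchain_core.tools`, which turns a typed, docstringed function into a `BaseTool`; the function itself is made up.

.. code-block:: python

    from langchain_core.tools import tool


    @tool
    def multiply(a: int, b: int) -> int:
        """Multiply two integers."""
        return a * b


    print(multiply.name)         # "multiply"
    print(multiply.description)  # derived from the signature and docstring
    print(multiply.invoke({"a": 3, "b": 4}))  # 12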
@@ -1,3 +1,13 @@
"""**Tracers** are classes for tracing runs.

**Class hierarchy:**

.. code-block::

    BaseCallbackHandler --> BaseTracer --> <name>Tracer  # Examples: LangChainTracer, RootListenersTracer
                                       --> <name>        # Examples: LogStreamCallbackHandler
"""  # noqa: E501

__all__ = [
    "BaseTracer",
    "EvaluatorCallbackHandler",
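A sketch of a custom tracer; it assumes `_persist_run` is the hook a `BaseTracer` subclass must implement, and the printing behavior is just an example.

.. code-block:: python

    from langchain_core.tracers.base import BaseTracer
    from langchain_core.tracers.schemas import Run


    class PrintTracer(BaseTracer):
        """Print a summary line when a run tree finishes."""

        def _persist_run(self, run: Run) -> None:
            print(f"Finished {run.run_type} run: {run.name} ({run.id})")


    # Used like any other callback handler, e.g.:
    # runnable.invoke(inputs, config={"callbacks": [PrintTracer()]})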
@@ -1,3 +1,23 @@
"""**Vector store** stores embedded data and performs vector search.

One of the most common ways to store and search over unstructured data is to
embed it and store the resulting embedding vectors, and then query the store
and retrieve the data that are 'most similar' to the embedded query.

**Class hierarchy:**

.. code-block::

    VectorStore --> <name>  # Examples: Annoy, FAISS, Milvus

    BaseRetriever --> VectorStoreRetriever --> <name>Retriever  # Example: VespaRetriever

**Main helpers:**

.. code-block::

    Embeddings, Document
"""  # noqa: E501
from __future__ import annotations

import logging
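A sketch of the typical flow (embed, store, search, wrap as a retriever); it assumes the FAISS integration and the `FakeEmbeddings` test helper from the separate `langchain_community` package, plus the `faiss` library, are installed.

.. code-block:: python

    from langchain_community.embeddings import FakeEmbeddings
    from langchain_community.vectorstores import FAISS

    vectorstore = FAISS.from_texts(
        ["LangChain helps build LLM applications.", "FAISS performs vector search."],
        embedding=FakeEmbeddings(size=16),
    )

    print(vectorstore.similarity_search("vector search", k=1))

    retriever = vectorstore.as_retriever(search_kwargs={"k": 1})
    print(retriever.invoke("vector search"))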