Mirror of https://github.com/hwchase17/langchain.git

Compare commits: eugene/cal ... bagatur/mo (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 2506110b15 |  |
@@ -1,4 +1,29 @@
-"""Interface for agents."""
+"""
+**Agent** is a class that uses an LLM to choose a sequence of actions to take.
+
+In Chains, a sequence of actions is hardcoded. In Agents,
+a language model is used as a reasoning engine to determine which actions
+to take and in which order.
+
+Agents select and use **Tools** and **Toolkits** for actions.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    BaseSingleActionAgent, BaseMultiActionAgent
+    Agent(BaseSingleActionAgent)
+    ...Agent(Agent)
+    ...ActionAgent(BaseSingleActionAgent OR BaseMultiActionAgent)
+
+**Main helpers:**
+
+.. code-block::
+
+    AgentType, AgentExecutor, AgentOutputParser, AgentExecutorIterator,
+    AgentAction, AgentFinish
+
+"""  # noqa: E501
 from langchain.agents.agent import (
     Agent,
     AgentExecutor,
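A minimal sketch of driving the agent loop described in this docstring, assuming the 0.0.x-era `initialize_agent` helper and an `OPENAI_API_KEY` in the environment; the `word_length` tool is made up for illustration:

```python
from langchain.agents import AgentType, Tool, initialize_agent
from langchain.llms import OpenAI


def word_length(word: str) -> str:
    """Hypothetical helper used as a tool: return the length of a word."""
    return str(len(word))


tools = [
    Tool(
        name="word_length",
        func=word_length,
        description="Useful for counting the characters in a word.",
    )
]

# The LLM acts as the reasoning engine that decides which tool to call and when.
agent = initialize_agent(
    tools,
    OpenAI(temperature=0),
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)
agent.run("How many letters are in the word 'educa'?")
```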
@@ -1,4 +1,25 @@
-"""Beta Feature: base interface for cache."""
+"""
+.. warning::
+  Beta Feature!
+
+**Cache** provides an optional caching layer for LLMs.
+
+Cache is useful for two reasons:
+
+- It can save you money by reducing the number of API calls you make to the LLM
+  provider if you're often requesting the same completion multiple times.
+- It can speed up your application by reducing the number of API calls you make
+  to the LLM provider.
+
+Cache directly competes with Memory. See documentation for Pros and Cons.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    BaseCache
+    ...Cache(BaseCache)
+"""
 from __future__ import annotations

 import hashlib
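A minimal sketch of enabling this caching layer, assuming the module-level `langchain.llm_cache` hook and `InMemoryCache` of this era (the OpenAI model is only an example and needs an API key):

```python
import langchain
from langchain.cache import InMemoryCache
from langchain.llms import OpenAI  # assumes OPENAI_API_KEY is set

langchain.llm_cache = InMemoryCache()

llm = OpenAI(temperature=0)
llm("Tell me a joke")  # first call hits the API
llm("Tell me a joke")  # identical prompt is served from the cache
```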
@@ -1,4 +1,12 @@
-"""Callback handlers that allow listening to events in LangChain."""
+"""**Callback handlers** allow listening to events in LangChain.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    BaseCallbackHandler
+    ...CallbackHandler(BaseCallbackHandler)
+"""

 from langchain.callbacks.aim_callback import AimCallbackHandler
 from langchain.callbacks.argilla_callback import ArgillaCallbackHandler
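A rough sketch of a custom handler in the hierarchy described above, assuming the `on_llm_start`/`on_llm_end` hook names of this era; signatures are kept loose via `**kwargs`, and `FakeListLLM` stands in for a real model:

```python
from langchain.callbacks.base import BaseCallbackHandler
from langchain.llms.fake import FakeListLLM


class PrintingHandler(BaseCallbackHandler):
    """Toy handler that prints when an LLM call starts and ends."""

    def on_llm_start(self, serialized, prompts, **kwargs):
        print(f"LLM starting with {len(prompts)} prompt(s)")

    def on_llm_end(self, response, **kwargs):
        print("LLM finished")


llm = FakeListLLM(responses=["ok"], callbacks=[PrintingHandler()])
llm("ping")
```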
@@ -1,16 +1,22 @@
-"""Chains are easily reusable components which can be linked together.
+"""**Chains** are easily reusable components linked together.

-Chains should be used to encode a sequence of calls to components like
-models, document retrievers, other chains, etc., and provide a simple interface
-to this sequence.
+Chains encode a sequence of calls to components like models, document retrievers,
+other Chains, etc., and provide a simple interface to this sequence.

-The Chain interface makes it easy to create apps that are:
-- Stateful: add Memory to any Chain to give it state,
-- Observable: pass Callbacks to a Chain to execute additional functionality,
-    like logging, outside the main sequence of component calls,
-- Composable: the Chain API is flexible enough that it is easy to combine
-    Chains with other components, including other Chains.
-"""
+The Chain interface makes it easy to create apps that are:
+
+- **Stateful:** add Memory to any Chain to give it state,
+- **Observable:** pass Callbacks to a Chain to execute additional functionality,
+    like logging, outside the main sequence of component calls,
+- **Composable:** combine Chains with other components, including other Chains.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    Chain
+    ...Chain(Chain)
+"""

 from langchain.chains.api.base import APIChain
 from langchain.chains.api.openapi.chain import OpenAPIEndpointChain
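A minimal sketch of the Chain interface described above, using `FakeListLLM` as a stand-in for a real model so it runs offline; the prompt and canned response are illustrative:

```python
from langchain.chains import LLMChain
from langchain.llms.fake import FakeListLLM
from langchain.prompts import PromptTemplate

prompt = PromptTemplate.from_template(
    "Suggest a name for a company that makes {product}."
)
llm = FakeListLLM(responses=["Sock Societe"])

# A Chain bundles the prompt and the model behind one simple call interface.
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run(product="colorful socks"))  # -> "Sock Societe"
```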
@@ -1,3 +1,23 @@
+"""**Chat Models** are a variation on language models.
+
+While Chat Models use language models under the hood, the interface they expose
+is a bit different. Rather than expose a "text in, text out" API, they expose
+an interface where "chat messages" are the inputs and outputs.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    BaseChatModel(BaseLanguageModel, ABC)
+    ...ChatModel(BaseChatModel); ...(BaseChatModel)
+
+**Main helpers:**
+
+.. code-block::
+
+    AIMessage, BaseMessage, HumanMessage
+"""
+
 from langchain.chat_models.anthropic import ChatAnthropic
 from langchain.chat_models.azure_openai import AzureChatOpenAI
 from langchain.chat_models.fake import FakeListChatModel
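A minimal sketch of the messages-in, message-out interface, using `FakeListChatModel` (imported in this diff) so no API key is required; its `responses` field is assumed to replay canned answers like `FakeListLLM`:

```python
from langchain.chat_models.fake import FakeListChatModel
from langchain.schema import HumanMessage

chat = FakeListChatModel(responses=["Hello! How can I help?"])
reply = chat([HumanMessage(content="Hi there")])  # returns an AIMessage
print(reply.content)
```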
@@ -1,4 +1,20 @@
-"""Wrappers on top of docstores."""
+"""**Docstores** are classes to store and load Documents.
+
+The **Docstore** is a simplified version of the Document Loader.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    Docstore
+    ...(Docstore)
+
+**Main helpers:**
+
+.. code-block::
+
+    Document, AddableMixin
+"""
 from langchain.docstore.arbitrary_fn import DocstoreFn
 from langchain.docstore.in_memory import InMemoryDocstore
 from langchain.docstore.wikipedia import Wikipedia
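A minimal sketch of the Docstore interface, assuming `InMemoryDocstore` keyed by string IDs; the key and text are placeholders:

```python
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore

store = InMemoryDocstore({"doc-1": Document(page_content="LangChain docstore example")})

print(store.search("doc-1").page_content)  # -> "LangChain docstore example"
print(store.search("missing"))             # -> an explanatory "ID not found" string
```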
@@ -1,4 +1,20 @@
-"""All different types of document loaders."""
+"""**Document Loaders** are classes to load Documents.
+
+**Document Loaders** are usually used to load a lot of Documents in a single run.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    BaseLoader
+    ...Loader(BaseLoader)
+
+**Main helpers:**
+
+.. code-block::
+
+    Document, ...TextSplitter
+"""

 from langchain.document_loaders.acreom import AcreomLoader
 from langchain.document_loaders.airbyte_json import AirbyteJSONLoader
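A minimal sketch of the loader interface: every loader exposes `.load()` returning a list of Documents. The file path here is a placeholder:

```python
from langchain.document_loaders import TextLoader

docs = TextLoader("notes.txt").load()
print(len(docs), docs[0].metadata)  # one Document, metadata like {"source": "notes.txt"}
```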
@@ -1,3 +1,21 @@
+"""**Document Transformers** are classes to transform Documents.
+
+**Document Transformers** usually used to transform a lot of Documents in a single run.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    BaseDocumentTransformer
+    ...(BaseDocumentTransformer)
+
+**Main helpers:**
+
+.. code-block::
+
+    Document
+"""
+
 from langchain.document_transformers.doctran_text_extract import (
     DoctranPropertyExtractor,
 )
@@ -1,4 +1,17 @@
-"""Wrappers around embedding modules."""
+"""**Embedding models** are wrappers around embedding models
+from different APIs and services.
+
+**Embedding models** can be LLMs or not.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    Embeddings
+    ...Embeddings(Embeddings)
+"""
+
+
 import logging
 from typing import Any

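A minimal sketch of the Embeddings interface, using `FakeEmbeddings` (random vectors) so no API key or model download is needed; the `size` field is assumed from this era's fake embedding class:

```python
from langchain.embeddings.fake import FakeEmbeddings

emb = FakeEmbeddings(size=8)
print(len(emb.embed_query("hello")))         # one 8-dimensional vector
print(len(emb.embed_documents(["a", "b"])))  # two vectors, one per document
```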
@@ -4,7 +4,7 @@ from functools import lru_cache

 @lru_cache(maxsize=1)
 def get_runtime_environment() -> dict:
-    """Get information about the environment."""
+    """Get information about the LangChain runtime environment."""
     # Lazy import to avoid circular imports
     from langchain import __version__

@@ -1,4 +1,4 @@
-"""Evaluation chains for grading LLM and Chain outputs.
+"""**Evaluation** chains for grading LLM and Chain outputs.

 This module contains off-the-shelf evaluation chains for grading the output of
 LangChain primitives such as language models and chains.
@@ -1,4 +1,5 @@
-"""Graph implementations."""
+"""**Graphs** provide a natural language interface to graph databases."""
+
 from langchain.graphs.arangodb_graph import ArangoGraph
 from langchain.graphs.hugegraph import HugeGraph
 from langchain.graphs.kuzu_graph import KuzuGraph
@@ -1,4 +1,4 @@
-"""All index utils."""
+"""**Index** utilities."""
 from langchain.indexes.graph import GraphIndexCreator
 from langchain.indexes.vectorstore import VectorstoreIndexCreator

@@ -1,4 +1,25 @@
-"""Access to the large language model APIs and services."""
+"""
+**LLM** classes provide
+access to the large language model (**LLM**) APIs and services.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    BaseLanguageModel
+    BaseLLM(BaseLanguageModel, ABC)
+    LLM(BaseLLM)
+    ...(LLM)
+
+**Main helpers:**
+
+.. code-block::
+
+    LLMResult, PromptValue,
+    CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
+    CallbackManager, AsyncCallbackManager,
+    AIMessage, BaseMessage
+"""
 from typing import Dict, Type

 from langchain.llms.ai21 import AI21
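A minimal sketch of the text-in, text-out LLM interface, using `FakeListLLM` so the snippet runs without any credentials; the canned response is illustrative:

```python
from langchain.llms.fake import FakeListLLM

llm = FakeListLLM(responses=["Paris"])
print(llm("What is the capital of France?"))  # -> "Paris"
```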
@@ -0,0 +1 @@
+"""Serialization and deserialization."""
@@ -1,4 +1,34 @@
-"""Memory maintains Chain state, incorporating context from past runs."""
+"""**Memory** maintains Chain state, incorporating context from past runs.
+
+**Class hierarchy for Memory:**
+
+.. code-block::
+
+    BaseMemory
+    BaseChatMemory(BaseMemory, ABC)
+    ...Memory(BaseChatMemory)
+
+**Main helpers:**
+
+.. code-block::
+
+    BaseChatMessageHistory
+
+**Chat Message History** stores the chat message history in different stores.
+
+**Class hierarchy for ChatMessageHistory:**
+
+.. code-block::
+
+    BaseChatMessageHistory
+    ...ChatMessageHistory(BaseChatMessageHistory)
+
+**Main helpers:**
+
+.. code-block::
+
+    AIMessage, BaseMessage, HumanMessage
+"""
 from langchain.memory.buffer import (
     ConversationBufferMemory,
     ConversationStringBufferMemory,
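A minimal sketch of the Memory interface described above: write context after each run, read it back later; the sample exchange is illustrative:

```python
from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory()
memory.save_context({"input": "Hi, I am Bob"}, {"output": "Hello Bob!"})
print(memory.load_memory_variables({}))
# -> {'history': 'Human: Hi, I am Bob\nAI: Hello Bob!'}
```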
@@ -1,3 +1,19 @@
+"""**OutputParser** classes parse the output of an LLM call.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    BaseLLMOutputParser
+    BaseOutputParser(BaseLLMOutputParser, ABC, Generic[T])
+    ...OutputParser(BaseOutputParser) or ...OutputParser(BaseOutputParser[Any])
+
+**Main helpers:**
+
+.. code-block::
+
+    Serializable, Generation, PromptValue
+"""
 from langchain.output_parsers.boolean import BooleanOutputParser
 from langchain.output_parsers.combining import CombiningOutputParser
 from langchain.output_parsers.datetime import DatetimeOutputParser
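A minimal sketch of an output parser turning raw LLM text into a Python value, using `BooleanOutputParser` (imported in this diff); the sample strings are illustrative:

```python
from langchain.output_parsers.boolean import BooleanOutputParser

parser = BooleanOutputParser()
print(parser.parse("YES"))  # -> True
print(parser.parse("NO"))   # -> False
```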
@@ -1,4 +1,29 @@
-"""Prompt template classes."""
+"""**Prompt** is the input to the model.
+
+Prompt is often constructed
+from multiple components. Prompt classes and functions make constructing
+and working with prompts easy.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    BasePromptTemplate
+    StringPromptTemplate(BasePromptTemplate, ABC)
+    PromptTemplate(StringPromptTemplate), ...(StringPromptTemplate)
+    BaseChatPromptTemplate(BasePromptTemplate, ABC)
+    ...Prompt(BaseChatPromptTemplate)
+    PipelinePromptTemplate(BasePromptTemplate)
+
+    BaseMessagePromptTemplate(Serializable, ABC)
+    BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC)
+    ChatMessagePromptTemplate, HumanMessagePromptTemplate,
+    AIMessagePromptTemplate, SystemMessagePromptTemplate
+
+    PromptValue
+    StringPromptValue(PromptValue), ChatPromptValue(PromptValue)
+
+"""
 from langchain.prompts.base import StringPromptTemplate
 from langchain.prompts.chat import (
     AIMessagePromptTemplate,
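A minimal sketch of the prompt classes named above, showing a string template and a chat template built from message templates; the template texts are placeholders:

```python
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    PromptTemplate,
    SystemMessagePromptTemplate,
)

prompt = PromptTemplate.from_template("Translate '{text}' into {language}.")
print(prompt.format(text="good morning", language="French"))

chat_prompt = ChatPromptTemplate.from_messages([
    SystemMessagePromptTemplate.from_template("You are a helpful assistant."),
    HumanMessagePromptTemplate.from_template("{question}"),
])
print(chat_prompt.format_prompt(question="What is LangChain?").to_messages())
```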
@@ -1,3 +1,24 @@
+"""**Retriever** class returns Documents given a text **query**.
+
+It is more general than a vector store. A retriever does not need to be able to
+store documents, only to return (or retrieve) it. Vector stores can be used as
+the backbone of a retriever, but there are other types of retrievers as well.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    BaseRetriever
+    ...Retriever(BaseRetriever)
+
+**Main helpers:**
+
+.. code-block::
+
+    Document, Serializable, Callbacks,
+    CallbackManagerForRetrieverRun, AsyncCallbackManagerForRetrieverRun
+"""
+
 from langchain.retrievers.arxiv import ArxivRetriever
 from langchain.retrievers.azure_cognitive_search import AzureCognitiveSearchRetriever
 from langchain.retrievers.bm25 import BM25Retriever
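A minimal sketch of the Retriever interface using `BM25Retriever` (imported in this diff), which needs no embedding model; it assumes the optional `rank_bm25` package is installed, and the texts are placeholders:

```python
from langchain.retrievers import BM25Retriever

retriever = BM25Retriever.from_texts([
    "LangChain helps build LLM applications.",
    "BM25 is a classic lexical ranking function.",
])
docs = retriever.get_relevant_documents("what is BM25?")
print(docs[0].page_content)
```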
@@ -1,3 +1,4 @@
+"""**Schemas** are the LangChain Base Classes and Interfaces."""
 from langchain.schema.agent import AgentAction, AgentFinish
 from langchain.schema.document import BaseDocumentTransformer, Document
 from langchain.schema.memory import BaseChatMessageHistory, BaseMemory
@@ -1,4 +1,4 @@
-"""LangSmith utilities.
+"""**LangSmith** utilities.

 This module provides utilities for connecting to `LangSmith <https://smith.langchain.com/>`_. For more information on LangSmith, see the `LangSmith documentation <https://docs.smith.langchain.com/>`_.

@@ -1,4 +1,26 @@
-"""Functionality for splitting text."""
+"""**Text Splitters** are classes for splitting text.
+
+
+**Class hierarchy:**
+
+.. code-block::
+
+    TextSplitter(BaseDocumentTransformer, ABC)
+    ...TextSplitter(TextSplitter)
+    RecursiveCharacterTextSplitter(TextSplitter)
+    ...TextSplitter(RecursiveCharacterTextSplitter)
+
+Note: **MarkdownHeaderTextSplitter** does not derive from TextSplitter.
+
+
+**Main helpers:**
+
+.. code-block::
+
+    Document, Tokenizer, Language, LineType, HeaderType
+
+"""
+
 from __future__ import annotations

 import copy
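A minimal sketch of the splitting interface; a `TextSplitter` also acts as a Document Transformer, which is what `create_documents` shows. The sample text and chunk sizes are placeholders:

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter

text = "A long document would normally go here. " * 20

splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
chunks = splitter.split_text(text)          # list of strings
docs = splitter.create_documents([text])    # list of Documents
print(len(chunks), len(docs))
```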
@@ -1,4 +1,22 @@
-"""Core toolkit implementations."""
+"""**Tools** are classes that an Agent uses to interact with the world.
+
+Each tool has a **description**. Agent uses the description to choose the right
+tool for the job.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    ToolMetaclass
+    BaseTool(ABC, metaclass=ToolMetaclass)
+    ...Tool(BaseTool) or ...(BaseTool)
+
+**Main helpers:**
+
+.. code-block::
+
+    CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
+"""

 from langchain.tools.arxiv.tool import ArxivQueryRun
 from langchain.tools.azure_cognitive_services import (
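A minimal sketch of a Tool as described above: a name, a function, and the description the Agent reads when choosing it. The `reverse_text` tool and its function are made up for illustration:

```python
from langchain.tools import Tool


def reverse(text: str) -> str:
    """Hypothetical example function: reverse a string."""
    return text[::-1]


reverse_tool = Tool(
    name="reverse_text",
    func=reverse,
    description="Useful for reversing a string. Input should be the raw string.",
)
print(reverse_tool.run("LangChain"))  # -> "niahCgnaL"
```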
@@ -113,8 +131,6 @@ __all__ = [
     "BaseSQLDatabaseTool",
     "BaseSparkSQLTool",
     "BaseTool",
-    "BaseTool",
-    "BaseTool",
     "BingSearchResults",
     "BingSearchRun",
     "BraveSearch",
@@ -1,4 +1,8 @@
-"""Generic integrations with third-part systems and packages."""
+"""**Utilities** are the integrations with third-part systems and packages.
+
+Other LangChain classes use **Utilities** to interact with third-part systems
+and packages.
+"""
 from langchain.utilities.arxiv import ArxivAPIWrapper
 from langchain.utilities.awslambda import LambdaWrapper
 from langchain.utilities.bash import BashProcess
@@ -1,7 +1,7 @@
 """
-Utility functions for langchain.
+**Utility functions** for LangChain.

-These functions do not depend on any other langchain modules.
+These functions do not depend on any other LangChain module.
 """

 from langchain.utils.env import get_from_dict_or_env, get_from_env
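A minimal sketch of `get_from_dict_or_env`, the helper imported above: read a value from a dict, falling back to an environment variable. The dict key and environment variable name are placeholders:

```python
import os

from langchain.utils import get_from_dict_or_env

os.environ["MY_SERVICE_API_KEY"] = "from-env"  # placeholder value

print(get_from_dict_or_env({"api_key": "from-dict"}, "api_key", "MY_SERVICE_API_KEY"))
print(get_from_dict_or_env({}, "api_key", "MY_SERVICE_API_KEY"))  # falls back to env var
```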
@@ -1,4 +1,25 @@
-"""Wrappers on top of vector stores."""
+"""**Vector store** stores embedded data and performs vector search.
+
+One of the most common ways to store and search over unstructured data is to
+embed it and store the resulting embedding vectors, and then query the store
+and retrieve the data that are 'most similar' to the embedded query.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    VectorStore
+    ...(VectorStore)
+
+    VectorStoreRetriever(BaseRetriever)
+    ...Retriever(VectorStoreRetriever)
+
+**Main helpers:**
+
+.. code-block::
+
+    Embeddings, Document
+"""
 from langchain.vectorstores.alibabacloud_opensearch import (
     AlibabaCloudOpenSearch,
     AlibabaCloudOpenSearchSettings,
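A minimal sketch of the embed-store-search flow, assuming the optional `faiss-cpu` package and using `FakeEmbeddings` as a stand-in for a real embedding model (so the "most similar" result is not meaningful here):

```python
from langchain.embeddings.fake import FakeEmbeddings
from langchain.vectorstores import FAISS

store = FAISS.from_texts(
    ["LangChain integrates many vector stores.", "Vectors enable semantic search."],
    FakeEmbeddings(size=32),
)

retriever = store.as_retriever()  # any VectorStore can back a VectorStoreRetriever
print(store.similarity_search("semantic search", k=1))
```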
@@ -14,8 +14,6 @@ _EXPECTED = [
     "BaseSQLDatabaseTool",
     "BaseSparkSQLTool",
     "BaseTool",
-    "BaseTool",
-    "BaseTool",
     "BingSearchResults",
     "BingSearchRun",
     "BraveSearch",
@@ -102,4 +100,4 @@
 def test_public_api() -> None:
     """Test for regressions or changes in the public API."""
     # Check that the public API is as expected
-    assert sorted(public_api) == sorted(_EXPECTED)
+    assert set(public_api) == set(_EXPECTED)