Compare commits


40 Commits

Author SHA1 Message Date
Eugene Yurtsev
3c92e986f6 x 2025-10-06 21:52:18 -04:00
Eugene Yurtsev
40b4c69a5a x 2025-10-06 17:38:01 -04:00
Eugene Yurtsev
076c6f6b41 x 2025-10-06 17:35:27 -04:00
Eugene Yurtsev
db58bfa543 rename to continue 2025-10-06 17:32:04 -04:00
Eugene Yurtsev
ba9ec6d895 x 2025-10-06 16:56:16 -04:00
Eugene Yurtsev
fa533c44b7 x 2025-10-06 16:54:00 -04:00
Eugene Yurtsev
4f53ed3e9a x 2025-10-06 16:51:24 -04:00
Eugene Yurtsev
def2f147ae x 2025-10-06 16:16:19 -04:00
Eugene Yurtsev
65e073e85c x 2025-10-06 16:08:09 -04:00
Eugene Yurtsev
a9ff8e0b67 x 2025-10-06 15:31:16 -04:00
Eugene Yurtsev
0927ae4be1 x 2025-10-06 15:26:43 -04:00
Eugene Yurtsev
06ce94ca06 x 2025-10-06 00:00:02 -04:00
Nuno Campos
a9aa3f232d feat(langchain_v1): Add retry_model_request middleware hook, add ModelFallbackMiddleware (#33275)
- retry_model_request hook lets a middleware decide to retry a failed
model request, with full ability to modify as much or as little of the
request before doing so
- ModelFallbackMiddleware tries each fallback model in order, until one
is successful, or fallback list is exhausted
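
For illustration, a minimal sketch of wiring up the fallback middleware based on
this description; the import path and constructor signature are assumptions, not
confirmed by this commit:

```python
from langchain.agents import create_agent
from langchain.agents.middleware import ModelFallbackMiddleware

# Fallback models are tried in the order given, once the primary
# model's request fails.
fallbacks = ModelFallbackMiddleware(
    "openai:gpt-4o-mini",                  # first fallback
    "anthropic:claude-3-5-sonnet-latest",  # second fallback
)

agent = create_agent(
    model="openai:gpt-4o",  # primary model
    tools=[],
    middleware=[fallbacks],
)
```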

Co-authored-by: Sydney Runkle <54324534+sydney-runkle@users.noreply.github.com>
2025-10-05 20:32:45 +00:00
Sydney Runkle
20514f5d44 fix(langchain_v1): linting fixes for llm tool selector (#33278)
* Including server-side tools by default
* Fixing up typing / linting on `master`
2025-10-05 16:30:27 -04:00
Eugene Yurtsev
df2ecd9448 feat(langchain_v1): add llm selection middleware (#33272)
* Add LLM-based tool selection middleware.
* Note that we might want some form of caching for when the agent is
inside an active tool calling loop, as the tool selection isn't expected
to change during that time.

API:

```python
class LLMToolSelectorMiddleware(AgentMiddleware):
    """Uses an LLM to select relevant tools before calling the main model.

    When an agent has many tools available, this middleware filters them down
    to only the most relevant ones for the user's query. This reduces token usage
    and helps the main model focus on the right tools.

    Examples:
        Limit to 3 tools:
        ```python
        from langchain.agents.middleware import LLMToolSelectorMiddleware

        middleware = LLMToolSelectorMiddleware(max_tools=3)

        agent = create_agent(
            model="openai:gpt-4o",
            tools=[tool1, tool2, tool3, tool4, tool5],
            middleware=[middleware],
        )
        ```

        Use a smaller model for selection:
        ```python
        middleware = LLMToolSelectorMiddleware(model="openai:gpt-4o-mini", max_tools=2)
        ```
    """

    def __init__(
        self,
        *,
        model: str | BaseChatModel | None = None,
        system_prompt: str = DEFAULT_SYSTEM_PROMPT,
        max_tools: int | None = None,
        always_include: list[str] | None = None,
    ) -> None:
        """Initialize the tool selector.

        Args:
            model: Model to use for selection. If not provided, uses the agent's main model.
                Can be a model identifier string or BaseChatModel instance.
            system_prompt: Instructions for the selection model.
            max_tools: Maximum number of tools to select. If the model selects more,
                only the first max_tools will be used. No limit if not specified.
            always_include: Tool names to always include regardless of selection.
                These do not count against the max_tools limit.
        """
```



```python
"""Test script for LLM tool selection middleware."""

from langchain.agents import create_agent
from langchain.agents.middleware import LLMToolSelectorMiddleware
from langchain_core.tools import tool


@tool
def get_weather(location: str) -> str:
    """Get current weather for a location."""
    return f"Weather in {location}: 72°F, sunny"


@tool
def search_web(query: str) -> str:
    """Search the web for information."""
    return f"Search results for: {query}"


@tool
def calculate(expression: str) -> str:
    """Perform mathematical calculations."""
    return f"Result of {expression}: 42"


@tool
def send_email(to: str, subject: str) -> str:
    """Send an email to someone."""
    return f"Email sent to {to} with subject: {subject}"


@tool
def get_stock_price(symbol: str) -> str:
    """Get current stock price for a symbol."""
    return f"Stock price for {symbol}: $150.25"


@tool
def translate_text(text: str, target_language: str) -> str:
    """Translate text to another language."""
    return f"Translated '{text}' to {target_language}"


@tool
def set_reminder(task: str, time: str) -> str:
    """Set a reminder for a task."""
    return f"Reminder set: {task} at {time}"


@tool
def get_news(topic: str) -> str:
    """Get latest news about a topic."""
    return f"Latest news about {topic}"


@tool
def book_flight(destination: str, date: str) -> str:
    """Book a flight to a destination."""
    return f"Flight booked to {destination} on {date}"


@tool
def get_restaurant_recommendations(city: str, cuisine: str) -> str:
    """Get restaurant recommendations."""
    return f"Top {cuisine} restaurants in {city}"


# Create agent with tool selection middleware
middleware = LLMToolSelectorMiddleware(
    model="openai:gpt-4o-mini",
    max_tools=3,
)

agent = create_agent(
    model="openai:gpt-4o",
    tools=[
        get_weather,
        search_web,
        calculate,
        send_email,
        get_stock_price,
        translate_text,
        set_reminder,
        get_news,
        book_flight,
        get_restaurant_recommendations,
    ],
    middleware=[middleware],
)

# Test with a query that should select specific tools
response = agent.invoke(
    {"messages": [{"role": "user", "content": "I need to find restaurants"}]}
)

print(response)
```
2025-10-05 15:55:55 -04:00
Eugene Yurtsev
bdb7dbbf16 feat(langchain_v1): represent server side tools in modifyModelRequest and update tool handling (#33274)
* Add server-side tools to modifyModelRequest (represented as dicts)
* Update the logic that determines which tools are bound to ToolNode
* We still have a constraint on changing the response format dynamically
when using the tool strategy: structured_output_tools are being used in
some of the edges. The code now raises an exception to explain that
this is a limitation of the implementation. (We can add support later.)
2025-10-05 15:55:46 -04:00
Nuno Campos
30f7c87b6f feat(langchain_v1): Implement PIIMiddleware (#33271)
- supports five well-known PII types (email, credit_card, ip, mac_address,
url)
- 4 handling strategies (block, redact, mask, hash)
- supports custom PII types with detector functions or regex
- the built-in types were chosen because they are common, and detection
can be reliably implemented with stdlib
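
For illustration, a hedged sketch of configuring the middleware per the bullets
above; the exact constructor arguments are assumptions:

```python
from langchain.agents import create_agent
from langchain.agents.middleware import PIIMiddleware

agent = create_agent(
    model="openai:gpt-4o",
    tools=[],
    middleware=[
        # Built-in type, using one of the four handling strategies.
        PIIMiddleware("email", strategy="redact"),
        # Custom type detected via regex ("api_key" is a made-up example).
        PIIMiddleware("api_key", detector=r"sk-[A-Za-z0-9]{20,}", strategy="block"),
    ],
)
```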
2025-10-04 22:27:51 -04:00
Eugene Yurtsev
fdf8181f58 fix(langchain_v1): dynamic response format (#33273)
* Preserve the Auto type for the response format. cc @sydney-runkle Creating
an extra type was the nicest devx I could find for this (makes it easy
to do `isinstance(thingy, AutoStrategy)`).

Remaining issue to address:
* Going to sort out why we're including tools in the tool node
2025-10-04 16:58:32 -04:00
Eugene Yurtsev
8a95eb1ef7 chore(langchain_v1): remove union return type in init_embeddings (#33062)
Fix the return type of init_embeddings
2025-10-04 16:40:36 -04:00
Eugene Yurtsev
4d1cfa494a chore(langchain,prompty): rename to langchain-classic (#33256)
* Rename to langchain-classic
* After release of community, we should add the [community] option back
into the pyproject.toml file.
2025-10-04 16:04:43 -04:00
Nuno Campos
2286d0d993 feat(langchain_v1): Add ToolCallLimitMiddleware (#33269)
which implements a tool call budget for either all tools, or a specific tool
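
A hedged usage sketch; the parameter names are assumed from the description:

```python
from langchain.agents import create_agent
from langchain.agents.middleware import ToolCallLimitMiddleware

# One budget across all tools, plus a tighter budget for a single tool.
global_limit = ToolCallLimitMiddleware(thread_limit=20, run_limit=10)
search_limit = ToolCallLimitMiddleware(tool_name="search_web", run_limit=3)

agent = create_agent(
    model="openai:gpt-4o",
    tools=[],  # tools elided; "search_web" is a hypothetical tool name
    middleware=[global_limit, search_limit],
)
```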

---------

Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>
2025-10-04 15:03:45 -04:00
Eugene Yurtsev
46b87e435c chore(langchain_v1): change modifyModelRequest back to tools (#33270)
Seems like a much better devx with fairly little downside (we'll
document that you can't register new tools)
2025-10-04 12:33:54 -04:00
Eugene Yurtsev
905c6d7bad fix(langchain_v1): handle switching response format strategy based on model identity (#33259)
Change response format strategy dynamically based on model.

After this PR there are two remaining issues:

- [ ] Review binding of tools used for output to ToolNode (shouldn't be
required)
- [ ] Update ModelRequest to also support the original schema provided
by the user (to correctly support auto mode)
2025-10-04 11:56:56 -04:00
Sydney Runkle
acd1aa813c feat(langchain_v1): implement nicer devx for dynamic prompt (#33264)
Adding a `dynamic_prompt` decorator to support smoother devx for dynamic
system prompts

```py
from langchain.agents.middleware.types import dynamic_prompt, ModelRequest, AgentState
from langchain.agents.middleware_agent import create_agent
from langgraph.runtime import Runtime
from dataclasses import dataclass
from langchain_core.messages import HumanMessage


@dataclass
class Context:
    user_name: str


@dynamic_prompt
def my_prompt(request: ModelRequest, state: AgentState, runtime: Runtime[Context]) -> str:
    user_name = runtime.context.user_name
    return (
        f"You are a helpful assistant helping {user_name}. Please refer to the user as {user_name}."
    )


agent = create_agent(model="openai:gpt-4o", middleware=[my_prompt]).compile()

result = agent.invoke({"messages": [HumanMessage("Hello")]}, context=Context(user_name="Sydney"))
for msg in result["messages"]:
    msg.pretty_print()

"""
================================ Human Message =================================

Hello
================================== Ai Message ==================================

Hello Sydney! How can I assist you today?
"""

```
2025-10-03 21:06:23 -04:00
Sydney Runkle
2671fee2c6 feat(langchain_v1): description generator for HITL middleware (#33195)
Need to decide - what information should we feed to this description
factory? Right now, feeding:
* state
* runtime
* tool call (so the developer doesn't have to search through the state's
messages for the corresponding tool call)

I can see a case for just passing tool call. But again, this abstraction
is semi-bound to interrupts for tools... though we pretend it's more
abstract than that.

Right now:

```py
# Imports added for completeness; exact paths assumed from this commit's layout.
from langchain.agents.middleware import HumanInTheLoopMiddleware
from langchain.agents.middleware.types import AgentState
from langchain_core.messages import ToolCall
from langgraph.runtime import Runtime


def custom_description(state: AgentState, runtime: Runtime, tool_call: ToolCall) -> str:
    """Generate a custom description."""
    return f"Custom: {tool_call['name']} with args {tool_call['args']}"

middleware = HumanInTheLoopMiddleware(
    interrupt_on={
        "tool_with_callable": {"allow_accept": True, "description": custom_description},
        "tool_with_string": {"allow_accept": True, "description": "Static description"},
    }
)
```
2025-10-04 01:01:44 +00:00
ccurme
010ed5d096 fix(anthropic,openai): fix tests (#33257)
following https://github.com/langchain-ai/langchain/pull/33192
2025-10-03 13:41:37 -04:00
Eugene Yurtsev
7f5be6b65c chore(core,langchain,langchain_v1)!: remove globals from langchain-v1, update globals in langchain-classic, langchain-core (#33251)
* Remove globals.py from langchain_v1
* Adjust langchain-core to not inspect langchain namespace
2025-10-03 12:53:33 -04:00
Eugene Yurtsev
1074ce5fe5 chore(langchain_v1)!: Remove ToolNode from agents (#33250)
Remove ToolNode from agents namespace. It should only be present in tools
2025-10-03 10:57:54 -04:00
Sydney Runkle
3d2f13a2f1 feat(langchain): model call limits (#33178)
This PR adds a model call limit middleware that helps manage:

* number of model calls during a run (helps w/ avoiding tool calling
loops) - implemented w/ `UntrackedValue`
* number of model calls on a thread (helps w/ avoiding lengthy convos) -
standard state

Concern here is w/ other middlewares overwriting the model call count...
we could use a `_` prefixed field?
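
A hedged sketch of how this might be used; the class name and parameters are
assumptions based on the PR text:

```python
from langchain.agents import create_agent
from langchain.agents.middleware import ModelCallLimitMiddleware

limiter = ModelCallLimitMiddleware(
    thread_limit=30,  # cap model calls across the whole thread (standard state)
    run_limit=10,     # cap model calls within a single run (untracked value)
)

agent = create_agent(model="openai:gpt-4o", tools=[], middleware=[limiter])
```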
2025-10-03 08:28:56 -04:00
SN
99361e623a feat(core): add optional include_id param to convert_to_openai_messages function (#33242) 2025-10-03 08:22:43 -04:00
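
A hedged sketch of the new flag above; the keyword name comes from the commit
title, and the exact signature is assumed:

```python
from langchain_core.messages import AIMessage, convert_to_openai_messages

# With include_id=True, message ids are carried into the OpenAI-format dicts.
msgs = convert_to_openai_messages(
    [AIMessage("hi", id="msg-123")],
    include_id=True,
)
```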
Mason Daugherty
5a016de53f chore: delete deprecated items (#33192)
Removed:
- `libs/core/langchain_core/chat_history.py`: `add_user_message` and
`add_ai_message` in favor of `add_messages` and `aadd_messages`
- `libs/core/langchain_core/language_models/base.py`: `predict`,
`predict_messages`, and async versions in favor of `invoke`. removed
`_all_required_field_names` since it was a wrapper on
`get_pydantic_field_names`
- `libs/core/langchain_core/language_models/chat_models.py`:
`callback_manager` param in favor of `callbacks`. `__call__` and
`call_as_llm` method in favor of `invoke`
- `libs/core/langchain_core/language_models/llms.py`: `callback_manager`
param in favor of `callbacks`. `__call__`, `predict`, `apredict`, and
`apredict_messages` methods in favor of `invoke`
- `libs/core/langchain_core/prompts/chat.py`: `from_role_strings` and
`from_strings` in favor of `from_messages`
- `libs/core/langchain_core/prompts/pipeline.py`: removed
`PipelinePromptTemplate`
- `libs/core/langchain_core/prompts/prompt.py`: `input_variables` param
on `from_file` as it wasn't used
- `libs/core/langchain_core/tools/base.py`: `callback_manager` param in
favor of `callbacks`
- `libs/core/langchain_core/tracers/context.py`: `tracing_enabled` in
favor of `tracing_enabled_v2`
- `libs/core/langchain_core/tracers/langchain_v1.py`: entire module
- `libs/core/langchain_core/utils/loading.py`: entire module,
`try_load_from_hub`
- `libs/core/langchain_core/vectorstores/in_memory.py`: `upsert` in
favor of `add_documents`
- `libs/standard-tests/langchain_tests/integration_tests/chat_models.py`
and `libs/standard-tests/langchain_tests/unit_tests/chat_models.py`:
`tool_choice_value` as models should accept `tool_choice="any"`
- `langchain` consequently no longer exposes these items where it
previously did (see the migration sketch below)
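
For two of the removals above, a hedged migration sketch (ChatOpenAI is used as
a stand-in model):

```python
from langchain_openai import ChatOpenAI

# was: ChatOpenAI(..., callback_manager=manager)
model = ChatOpenAI(model="gpt-4o-mini", callbacks=[])

# was: model.predict("Hello") / model("Hello") / model.call_as_llm("Hello")
result = model.invoke("Hello")
print(result.content)
```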

---------

Co-authored-by: Mohammad Mohtashim <45242107+keenborder786@users.noreply.github.com>
Co-authored-by: Caspar Broekhuizen <caspar@langchain.dev>
Co-authored-by: ccurme <chester.curme@gmail.com>
Co-authored-by: Christophe Bornet <cbornet@hotmail.com>
Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>
Co-authored-by: Sadra Barikbin <sadraqazvin1@yahoo.com>
Co-authored-by: Vadym Barda <vadim.barda@gmail.com>
2025-10-03 03:33:24 +00:00
Mason Daugherty
b541a56c66 chore(langchain): uncomment some optional deps (#33243)
remaining:
- azure-ai
- cohere
- huggingface
- community
2025-10-02 23:29:14 -04:00
Mason Daugherty
4a6890a4e5 chore(langchain_v1): uncomment some optional deps (#33244)
remaining:
- azure-ai
- cohere
- huggingface
- community
2025-10-02 23:18:06 -04:00
Mason Daugherty
e2e0327c90 ci: add workflow for manually building API ref for v0.3 (#33241) 2025-10-02 20:33:12 -04:00
Mason Daugherty
bba37bd6be chore: add libs/ note (#33238) 2025-10-02 19:57:50 -04:00
Mason Daugherty
b051ff4a84 chore(infra): remove formatting and linting hook for root (#33237) 2025-10-02 19:43:09 -04:00
Mason Daugherty
13812f0df8 release(qdrant): 1.0.0a1 (#33236) 2025-10-02 19:37:00 -04:00
Mason Daugherty
420dcf5c4a release(prompty): 1.0.0a1 (#33235) 2025-10-02 19:29:55 -04:00
Mason Daugherty
9f75e20d4f release(perplexity): 1.0.0a1 (#33234) 2025-10-02 19:23:22 -04:00
Mason Daugherty
743e9b2ad1 release(nomic): 1.0.0a1 (#33233) 2025-10-02 19:23:06 -04:00
1604 changed files with 12448 additions and 6357 deletions


@@ -0,0 +1,15 @@
{
"permissions": {
"allow": [
"Bash(uv run:*)",
"Bash(make:*)",
"WebSearch",
"WebFetch(domain:ai.pydantic.dev)",
"WebFetch(domain:openai.github.io)",
"Bash(uv run:*)",
"Bash(python3:*)"
],
"deny": [],
"ask": []
}
}


@@ -50,10 +50,6 @@ IGNORED_PARTNERS = [
"prompty",
]
PY_312_MAX_PACKAGES = [
"libs/partners/chroma", # https://github.com/chroma-core/chroma/issues/4382
]
def all_package_dirs() -> Set[str]:
return {
@@ -139,9 +135,6 @@ def _get_configs_for_single_dir(job: str, dir_: str) -> List[Dict[str, str]]:
py_versions = ["3.10", "3.11", "3.12", "3.13"]
# custom logic for specific directories
elif dir_ in PY_312_MAX_PACKAGES:
py_versions = ["3.10", "3.12"]
elif dir_ == "libs/langchain" and job == "extended-tests":
py_versions = ["3.10", "3.13"]
elif dir_ == "libs/langchain_v1":
@@ -310,6 +303,11 @@ if __name__ == "__main__":
dirs_to_run["codspeed"].add(f"libs/partners/{partner_dir}")
# Skip if the directory was deleted or is just a tombstone readme
elif file.startswith("libs/"):
# Check if this is a root-level file in libs/ (e.g., libs/README.md)
file_parts = file.split("/")
if len(file_parts) == 2:
# Root-level file in libs/, skip it (no tests needed)
continue
raise ValueError(
f"Unknown lib: {file}. check_diff.py likely needs "
"an update for this new library!"

.github/workflows/api_doc_build.yml (new file, 152 lines)

@@ -0,0 +1,152 @@
# Build the API reference documentation for v0.3 branch.
#
# Manual trigger only.
#
# Built HTML pushed to langchain-ai/langchain-api-docs-html.
#
# Looks for langchain-ai org repos in packages.yml and checks them out.
# Calls prep_api_docs_build.py.
name: "📚 API Docs (v0.3)"
run-name: "Build & Deploy API Reference (v0.3)"
on:
workflow_dispatch:
env:
PYTHON_VERSION: "3.11"
jobs:
build:
if: github.repository == 'langchain-ai/langchain' || github.event_name != 'schedule'
runs-on: ubuntu-latest
permissions:
contents: read
steps:
- uses: actions/checkout@v5
with:
ref: v0.3
path: langchain
- uses: actions/checkout@v5
with:
repository: langchain-ai/langchain-api-docs-html
path: langchain-api-docs-html
token: ${{ secrets.TOKEN_GITHUB_API_DOCS_HTML }}
- name: "📋 Extract Repository List with yq"
id: get-unsorted-repos
uses: mikefarah/yq@master
with:
cmd: |
# Extract repos from packages.yml that are in the langchain-ai org
# (excluding 'langchain' itself)
yq '
.packages[]
| select(
(
(.repo | test("^langchain-ai/"))
and
(.repo != "langchain-ai/langchain")
)
or
(.include_in_api_ref // false)
)
| .repo
' langchain/libs/packages.yml
- name: "📋 Parse YAML & Checkout Repositories"
env:
REPOS_UNSORTED: ${{ steps.get-unsorted-repos.outputs.result }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# Get unique repositories
REPOS=$(echo "$REPOS_UNSORTED" | sort -u)
# Checkout each unique repository
for repo in $REPOS; do
# Validate repository format (allow any org with proper format)
if [[ ! "$repo" =~ ^[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+$ ]]; then
echo "Error: Invalid repository format: $repo"
exit 1
fi
REPO_NAME=$(echo $repo | cut -d'/' -f2)
# Additional validation for repo name
if [[ ! "$REPO_NAME" =~ ^[a-zA-Z0-9_.-]+$ ]]; then
echo "Error: Invalid repository name: $REPO_NAME"
exit 1
fi
echo "Checking out $repo to $REPO_NAME"
git clone --depth 1 https://github.com/$repo.git $REPO_NAME
done
- name: "🐍 Setup Python ${{ env.PYTHON_VERSION }}"
uses: actions/setup-python@v6
id: setup-python
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "📦 Install Initial Python Dependencies using uv"
working-directory: langchain
run: |
python -m pip install -U uv
python -m uv pip install --upgrade --no-cache-dir pip setuptools pyyaml
- name: "📦 Organize Library Directories"
# Places cloned partner packages into libs/partners structure
run: python langchain/.github/scripts/prep_api_docs_build.py
- name: "🧹 Clear Prior Build"
run:
# Remove artifacts from prior docs build
rm -rf langchain-api-docs-html/api_reference_build/html
- name: "📦 Install Documentation Dependencies using uv"
working-directory: langchain
run: |
# Install all partner packages in editable mode with overrides
python -m uv pip install $(ls ./libs/partners | xargs -I {} echo "./libs/partners/{}") --overrides ./docs/vercel_overrides.txt
# Install core langchain and other main packages
python -m uv pip install libs/core libs/langchain libs/text-splitters libs/community libs/experimental libs/standard-tests
# Install Sphinx and related packages for building docs
python -m uv pip install -r docs/api_reference/requirements.txt
- name: "🔧 Configure Git Settings"
working-directory: langchain
run: |
git config --local user.email "actions@github.com"
git config --local user.name "Github Actions"
- name: "📚 Build API Documentation"
working-directory: langchain
run: |
# Generate the API reference RST files
python docs/api_reference/create_api_rst.py
# Build the HTML documentation using Sphinx
# -T: show full traceback on exception
# -E: don't use cached environment (force rebuild, ignore cached doctrees)
# -b html: build HTML docs (vs PDF, etc.)
# -d: path for the cached environment (parsed document trees / doctrees)
# - Separate from output dir for faster incremental builds
# -c: path to conf.py
# -j auto: parallel build using all available CPU cores
python -m sphinx -T -E -b html -d ../langchain-api-docs-html/_build/doctrees -c docs/api_reference docs/api_reference ../langchain-api-docs-html/api_reference_build/html -j auto
# Post-process the generated HTML
python docs/api_reference/scripts/custom_formatter.py ../langchain-api-docs-html/api_reference_build/html
# Default index page is blank so we copy in the actual home page.
cp ../langchain-api-docs-html/api_reference_build/html/{reference,index}.html
# Removes Sphinx's intermediate build artifacts after the build is complete.
rm -rf ../langchain-api-docs-html/_build/
# Commit and push changes to langchain-api-docs-html repo
- uses: EndBug/add-and-commit@v9
with:
cwd: langchain-api-docs-html
message: "Update API docs build from v0.3 branch"


@@ -97,9 +97,3 @@ repos:
entry: make -C libs/partners/qdrant format lint
files: ^libs/partners/qdrant/
pass_filenames: false
- id: root
name: format and lint docs, cookbook
language: system
entry: make format lint
files: ^(docs|cookbook)/
pass_filenames: false

libs/README.md (new file, 2 lines)

@@ -0,0 +1,2 @@
> [!IMPORTANT]
> [**View all LangChain integrations packages**](https://docs.langchain.com/oss/python/integrations/providers)


@@ -1,5 +1,8 @@
[
["langchain._api.deprecated", "langchain_core._api.deprecated"],
[
"langchain._api.deprecated",
"langchain_core._api.deprecated"
],
[
"langchain._api.LangChainDeprecationWarning",
"langchain_core._api.LangChainDeprecationWarning"
@@ -12,7 +15,10 @@
"langchain._api.surface_langchain_deprecation_warnings",
"langchain_core._api.surface_langchain_deprecation_warnings"
],
["langchain._api.warn_deprecated", "langchain_core._api.warn_deprecated"],
[
"langchain._api.warn_deprecated",
"langchain_core._api.warn_deprecated"
],
[
"langchain._api.deprecation.LangChainDeprecationWarning",
"langchain_core._api.LangChainDeprecationWarning"
@@ -21,7 +27,10 @@
"langchain._api.deprecation.LangChainPendingDeprecationWarning",
"langchain_core._api.deprecation.LangChainPendingDeprecationWarning"
],
["langchain._api.deprecation.deprecated", "langchain_core._api.deprecated"],
[
"langchain._api.deprecation.deprecated",
"langchain_core._api.deprecated"
],
[
"langchain._api.deprecation.suppress_langchain_deprecation_warning",
"langchain_core._api.suppress_langchain_deprecation_warning"
@@ -38,12 +47,30 @@
"langchain._api.path.get_relative_path",
"langchain_core._api.get_relative_path"
],
["langchain._api.path.as_import_path", "langchain_core._api.as_import_path"],
["langchain.agents.Tool", "langchain_core.tools.Tool"],
["langchain.agents.tool", "langchain_core.tools.tool"],
["langchain.agents.tools.BaseTool", "langchain_core.tools.BaseTool"],
["langchain.agents.tools.tool", "langchain_core.tools.tool"],
["langchain.agents.tools.Tool", "langchain_core.tools.Tool"],
[
"langchain._api.path.as_import_path",
"langchain_core._api.as_import_path"
],
[
"langchain.agents.Tool",
"langchain_core.tools.Tool"
],
[
"langchain.agents.tool",
"langchain_core.tools.tool"
],
[
"langchain.agents.tools.BaseTool",
"langchain_core.tools.BaseTool"
],
[
"langchain.agents.tools.tool",
"langchain_core.tools.tool"
],
[
"langchain.agents.tools.Tool",
"langchain_core.tools.Tool"
],
[
"langchain.base_language.BaseLanguageModel",
"langchain_core.language_models.BaseLanguageModel"
@@ -60,10 +87,6 @@
"langchain.callbacks.LangChainTracer",
"langchain_core.tracers.LangChainTracer"
],
[
"langchain.callbacks.tracing_enabled",
"langchain_core.tracers.context.tracing_enabled"
],
[
"langchain.callbacks.tracing_v2_enabled",
"langchain_core.tracers.context.tracing_v2_enabled"
@@ -300,7 +323,10 @@
"langchain.callbacks.tracers.schemas.LLMRun",
"langchain_core.tracers.schemas.LLMRun"
],
["langchain.callbacks.tracers.schemas.Run", "langchain_core.tracers.Run"],
[
"langchain.callbacks.tracers.schemas.Run",
"langchain_core.tracers.Run"
],
[
"langchain.callbacks.tracers.schemas.RunTypeEnum",
"langchain_core.tracers.schemas.RunTypeEnum"
@@ -361,8 +387,14 @@
"langchain.chat_models.base.agenerate_from_stream",
"langchain_core.language_models.chat_models.agenerate_from_stream"
],
["langchain.docstore.document.Document", "langchain_core.documents.Document"],
["langchain.document_loaders.Blob", "langchain_core.document_loaders.Blob"],
[
"langchain.docstore.document.Document",
"langchain_core.documents.Document"
],
[
"langchain.document_loaders.Blob",
"langchain_core.document_loaders.Blob"
],
[
"langchain.document_loaders.BlobLoader",
"langchain_core.document_loaders.BlobLoader"
@@ -399,29 +431,74 @@
"langchain.formatting.StrictFormatter",
"langchain_core.utils.StrictFormatter"
],
["langchain.input.get_bolded_text", "langchain_core.utils.get_bolded_text"],
[
"langchain.input.get_bolded_text",
"langchain_core.utils.get_bolded_text"
],
[
"langchain.input.get_color_mapping",
"langchain_core.utils.get_color_mapping"
],
["langchain.input.get_colored_text", "langchain_core.utils.get_colored_text"],
["langchain.input.print_text", "langchain_core.utils.print_text"],
[
"langchain.input.get_colored_text",
"langchain_core.utils.get_colored_text"
],
[
"langchain.input.print_text",
"langchain_core.utils.print_text"
],
[
"langchain.llms.base.BaseLanguageModel",
"langchain_core.language_models.BaseLanguageModel"
],
["langchain.llms.base.BaseLLM", "langchain_core.language_models.BaseLLM"],
["langchain.llms.base.LLM", "langchain_core.language_models.LLM"],
["langchain.load.dumpd", "langchain_core.load.dumpd"],
["langchain.load.dumps", "langchain_core.load.dumps"],
["langchain.load.load", "langchain_core.load.load"],
["langchain.load.loads", "langchain_core.load.loads"],
["langchain.load.dump.default", "langchain_core.load.dump.default"],
["langchain.load.dump.dumps", "langchain_core.load.dumps"],
["langchain.load.dump.dumpd", "langchain_core.load.dumpd"],
["langchain.load.load.Reviver", "langchain_core.load.load.Reviver"],
["langchain.load.load.loads", "langchain_core.load.loads"],
["langchain.load.load.load", "langchain_core.load.load"],
[
"langchain.llms.base.BaseLLM",
"langchain_core.language_models.BaseLLM"
],
[
"langchain.llms.base.LLM",
"langchain_core.language_models.LLM"
],
[
"langchain.load.dumpd",
"langchain_core.load.dumpd"
],
[
"langchain.load.dumps",
"langchain_core.load.dumps"
],
[
"langchain.load.load",
"langchain_core.load.load"
],
[
"langchain.load.loads",
"langchain_core.load.loads"
],
[
"langchain.load.dump.default",
"langchain_core.load.dump.default"
],
[
"langchain.load.dump.dumps",
"langchain_core.load.dumps"
],
[
"langchain.load.dump.dumpd",
"langchain_core.load.dumpd"
],
[
"langchain.load.load.Reviver",
"langchain_core.load.load.Reviver"
],
[
"langchain.load.load.loads",
"langchain_core.load.loads"
],
[
"langchain.load.load.load",
"langchain_core.load.load"
],
[
"langchain.load.serializable.BaseSerialized",
"langchain_core.load.serializable.BaseSerialized"
@@ -602,7 +679,10 @@
"langchain.prompts.PipelinePromptTemplate",
"langchain_core.prompts.PipelinePromptTemplate"
],
["langchain.prompts.PromptTemplate", "langchain_core.prompts.PromptTemplate"],
[
"langchain.prompts.PromptTemplate",
"langchain_core.prompts.PromptTemplate"
],
[
"langchain.prompts.SemanticSimilarityExampleSelector",
"langchain_core.example_selectors.SemanticSimilarityExampleSelector"
@@ -615,12 +695,18 @@
"langchain.prompts.SystemMessagePromptTemplate",
"langchain_core.prompts.SystemMessagePromptTemplate"
],
["langchain.prompts.load_prompt", "langchain_core.prompts.load_prompt"],
[
"langchain.prompts.load_prompt",
"langchain_core.prompts.load_prompt"
],
[
"langchain.prompts.FewShotChatMessagePromptTemplate",
"langchain_core.prompts.FewShotChatMessagePromptTemplate"
],
["langchain.prompts.Prompt", "langchain_core.prompts.PromptTemplate"],
[
"langchain.prompts.Prompt",
"langchain_core.prompts.PromptTemplate"
],
[
"langchain.prompts.base.jinja2_formatter",
"langchain_core.prompts.jinja2_formatter"
@@ -801,13 +887,34 @@
"langchain.prompts.prompt.PromptTemplate",
"langchain_core.prompts.PromptTemplate"
],
["langchain.prompts.prompt.Prompt", "langchain_core.prompts.PromptTemplate"],
["langchain.schema.BaseCache", "langchain_core.caches.BaseCache"],
["langchain.schema.BaseMemory", "langchain_core.memory.BaseMemory"],
["langchain.schema.BaseStore", "langchain_core.stores.BaseStore"],
["langchain.schema.AgentFinish", "langchain_core.agents.AgentFinish"],
["langchain.schema.AgentAction", "langchain_core.agents.AgentAction"],
["langchain.schema.Document", "langchain_core.documents.Document"],
[
"langchain.prompts.prompt.Prompt",
"langchain_core.prompts.PromptTemplate"
],
[
"langchain.schema.BaseCache",
"langchain_core.caches.BaseCache"
],
[
"langchain.schema.BaseMemory",
"langchain_core.memory.BaseMemory"
],
[
"langchain.schema.BaseStore",
"langchain_core.stores.BaseStore"
],
[
"langchain.schema.AgentFinish",
"langchain_core.agents.AgentFinish"
],
[
"langchain.schema.AgentAction",
"langchain_core.agents.AgentAction"
],
[
"langchain.schema.Document",
"langchain_core.documents.Document"
],
[
"langchain.schema.BaseChatMessageHistory",
"langchain_core.chat_history.BaseChatMessageHistory"
@@ -816,15 +923,30 @@
"langchain.schema.BaseDocumentTransformer",
"langchain_core.documents.BaseDocumentTransformer"
],
["langchain.schema.BaseMessage", "langchain_core.messages.BaseMessage"],
["langchain.schema.ChatMessage", "langchain_core.messages.ChatMessage"],
[
"langchain.schema.BaseMessage",
"langchain_core.messages.BaseMessage"
],
[
"langchain.schema.ChatMessage",
"langchain_core.messages.ChatMessage"
],
[
"langchain.schema.FunctionMessage",
"langchain_core.messages.FunctionMessage"
],
["langchain.schema.HumanMessage", "langchain_core.messages.HumanMessage"],
["langchain.schema.AIMessage", "langchain_core.messages.AIMessage"],
["langchain.schema.SystemMessage", "langchain_core.messages.SystemMessage"],
[
"langchain.schema.HumanMessage",
"langchain_core.messages.HumanMessage"
],
[
"langchain.schema.AIMessage",
"langchain_core.messages.AIMessage"
],
[
"langchain.schema.SystemMessage",
"langchain_core.messages.SystemMessage"
],
[
"langchain.schema.messages_from_dict",
"langchain_core.messages.messages_from_dict"
@@ -849,18 +971,42 @@
"langchain.schema.get_buffer_string",
"langchain_core.messages.get_buffer_string"
],
["langchain.schema.RunInfo", "langchain_core.outputs.RunInfo"],
["langchain.schema.LLMResult", "langchain_core.outputs.LLMResult"],
["langchain.schema.ChatResult", "langchain_core.outputs.ChatResult"],
["langchain.schema.ChatGeneration", "langchain_core.outputs.ChatGeneration"],
["langchain.schema.Generation", "langchain_core.outputs.Generation"],
["langchain.schema.PromptValue", "langchain_core.prompt_values.PromptValue"],
[
"langchain.schema.RunInfo",
"langchain_core.outputs.RunInfo"
],
[
"langchain.schema.LLMResult",
"langchain_core.outputs.LLMResult"
],
[
"langchain.schema.ChatResult",
"langchain_core.outputs.ChatResult"
],
[
"langchain.schema.ChatGeneration",
"langchain_core.outputs.ChatGeneration"
],
[
"langchain.schema.Generation",
"langchain_core.outputs.Generation"
],
[
"langchain.schema.PromptValue",
"langchain_core.prompt_values.PromptValue"
],
[
"langchain.schema.LangChainException",
"langchain_core.exceptions.LangChainException"
],
["langchain.schema.BaseRetriever", "langchain_core.retrievers.BaseRetriever"],
["langchain.schema.Memory", "langchain_core.memory.BaseMemory"],
[
"langchain.schema.BaseRetriever",
"langchain_core.retrievers.BaseRetriever"
],
[
"langchain.schema.Memory",
"langchain_core.memory.BaseMemory"
],
[
"langchain.schema.OutputParserException",
"langchain_core.exceptions.OutputParserException"
@@ -885,13 +1031,22 @@
"langchain.schema.format_document",
"langchain_core.prompts.format_document"
],
["langchain.schema.agent.AgentAction", "langchain_core.agents.AgentAction"],
[
"langchain.schema.agent.AgentAction",
"langchain_core.agents.AgentAction"
],
[
"langchain.schema.agent.AgentActionMessageLog",
"langchain_core.agents.AgentActionMessageLog"
],
["langchain.schema.agent.AgentFinish", "langchain_core.agents.AgentFinish"],
["langchain.schema.cache.BaseCache", "langchain_core.caches.BaseCache"],
[
"langchain.schema.agent.AgentFinish",
"langchain_core.agents.AgentFinish"
],
[
"langchain.schema.cache.BaseCache",
"langchain_core.caches.BaseCache"
],
[
"langchain.schema.callbacks.base.RetrieverManagerMixin",
"langchain_core.callbacks.RetrieverManagerMixin"
@@ -1168,7 +1323,10 @@
"langchain.schema.chat_history.BaseChatMessageHistory",
"langchain_core.chat_history.BaseChatMessageHistory"
],
["langchain.schema.document.Document", "langchain_core.documents.Document"],
[
"langchain.schema.document.Document",
"langchain_core.documents.Document"
],
[
"langchain.schema.document.BaseDocumentTransformer",
"langchain_core.documents.BaseDocumentTransformer"
@@ -1189,7 +1347,10 @@
"langchain.schema.language_model._get_token_ids_default_method",
"langchain_core.language_models.base._get_token_ids_default_method"
],
["langchain.schema.memory.BaseMemory", "langchain_core.memory.BaseMemory"],
[
"langchain.schema.memory.BaseMemory",
"langchain_core.memory.BaseMemory"
],
[
"langchain.schema.messages.get_buffer_string",
"langchain_core.messages.get_buffer_string"
@@ -1214,7 +1375,10 @@
"langchain.schema.messages.HumanMessageChunk",
"langchain_core.messages.HumanMessageChunk"
],
["langchain.schema.messages.AIMessage", "langchain_core.messages.AIMessage"],
[
"langchain.schema.messages.AIMessage",
"langchain_core.messages.AIMessage"
],
[
"langchain.schema.messages.AIMessageChunk",
"langchain_core.messages.AIMessageChunk"
@@ -1271,7 +1435,10 @@
"langchain.schema.messages.message_to_dict",
"langchain_core.messages.message_to_dict"
],
["langchain.schema.output.Generation", "langchain_core.outputs.Generation"],
[
"langchain.schema.output.Generation",
"langchain_core.outputs.Generation"
],
[
"langchain.schema.output.GenerationChunk",
"langchain_core.outputs.GenerationChunk"
@@ -1284,9 +1451,18 @@
"langchain.schema.output.ChatGenerationChunk",
"langchain_core.outputs.ChatGenerationChunk"
],
["langchain.schema.output.RunInfo", "langchain_core.outputs.RunInfo"],
["langchain.schema.output.ChatResult", "langchain_core.outputs.ChatResult"],
["langchain.schema.output.LLMResult", "langchain_core.outputs.LLMResult"],
[
"langchain.schema.output.RunInfo",
"langchain_core.outputs.RunInfo"
],
[
"langchain.schema.output.ChatResult",
"langchain_core.outputs.ChatResult"
],
[
"langchain.schema.output.LLMResult",
"langchain_core.outputs.LLMResult"
],
[
"langchain.schema.output_parser.BaseLLMOutputParser",
"langchain_core.output_parsers.BaseLLMOutputParser"
@@ -1359,7 +1535,10 @@
"langchain.schema.runnable.RouterRunnable",
"langchain_core.runnables.RouterRunnable"
],
["langchain.schema.runnable.Runnable", "langchain_core.runnables.Runnable"],
[
"langchain.schema.runnable.Runnable",
"langchain_core.runnables.Runnable"
],
[
"langchain.schema.runnable.RunnableSerializable",
"langchain_core.runnables.RunnableSerializable"
@@ -1596,7 +1775,10 @@
"langchain.schema.runnable.utils.SupportsAdd",
"langchain_core.runnables.utils.SupportsAdd"
],
["langchain.schema.runnable.utils.add", "langchain_core.runnables.add"],
[
"langchain.schema.runnable.utils.add",
"langchain_core.runnables.add"
],
[
"langchain.schema.runnable.utils.ConfigurableField",
"langchain_core.runnables.ConfigurableField"
@@ -1617,7 +1799,10 @@
"langchain.schema.runnable.utils.get_unique_config_specs",
"langchain_core.runnables.utils.get_unique_config_specs"
],
["langchain.schema.runnable.utils.aadd", "langchain_core.runnables.aadd"],
[
"langchain.schema.runnable.utils.aadd",
"langchain_core.runnables.aadd"
],
[
"langchain.schema.runnable.utils.gated_coro",
"langchain_core.runnables.utils.gated_coro"
@@ -1626,7 +1811,10 @@
"langchain.schema.runnable.utils.gather_with_concurrency",
"langchain_core.runnables.utils.gather_with_concurrency"
],
["langchain.schema.storage.BaseStore", "langchain_core.stores.BaseStore"],
[
"langchain.schema.storage.BaseStore",
"langchain_core.stores.BaseStore"
],
[
"langchain.schema.vectorstore.VectorStore",
"langchain_core.vectorstores.VectorStore"
@@ -1635,14 +1823,26 @@
"langchain.schema.vectorstore.VectorStoreRetriever",
"langchain_core.vectorstores.VectorStoreRetriever"
],
["langchain.tools.BaseTool", "langchain_core.tools.BaseTool"],
["langchain.tools.StructuredTool", "langchain_core.tools.StructuredTool"],
["langchain.tools.Tool", "langchain_core.tools.Tool"],
[
"langchain.tools.BaseTool",
"langchain_core.tools.BaseTool"
],
[
"langchain.tools.StructuredTool",
"langchain_core.tools.StructuredTool"
],
[
"langchain.tools.Tool",
"langchain_core.tools.Tool"
],
[
"langchain.tools.format_tool_to_openai_function",
"langchain_core.utils.function_calling.format_tool_to_openai_function"
],
["langchain.tools.tool", "langchain_core.tools.tool"],
[
"langchain.tools.tool",
"langchain_core.tools.tool"
],
[
"langchain.tools.base.SchemaAnnotationError",
"langchain_core.tools.SchemaAnnotationError"
@@ -1651,14 +1851,26 @@
"langchain.tools.base.create_schema_from_function",
"langchain_core.tools.create_schema_from_function"
],
["langchain.tools.base.ToolException", "langchain_core.tools.ToolException"],
["langchain.tools.base.BaseTool", "langchain_core.tools.BaseTool"],
["langchain.tools.base.Tool", "langchain_core.tools.Tool"],
[
"langchain.tools.base.ToolException",
"langchain_core.tools.ToolException"
],
[
"langchain.tools.base.BaseTool",
"langchain_core.tools.BaseTool"
],
[
"langchain.tools.base.Tool",
"langchain_core.tools.Tool"
],
[
"langchain.tools.base.StructuredTool",
"langchain_core.tools.StructuredTool"
],
["langchain.tools.base.tool", "langchain_core.tools.tool"],
[
"langchain.tools.base.tool",
"langchain_core.tools.tool"
],
[
"langchain.tools.convert_to_openai.format_tool_to_openai_function",
"langchain_core.utils.function_calling.format_tool_to_openai_function"
@@ -1675,49 +1887,94 @@
"langchain.utilities.loading.try_load_from_hub",
"langchain_core.utils.try_load_from_hub"
],
["langchain.utils.StrictFormatter", "langchain_core.utils.StrictFormatter"],
[
"langchain.utils.StrictFormatter",
"langchain_core.utils.StrictFormatter"
],
[
"langchain.utils.check_package_version",
"langchain_core.utils.check_package_version"
],
["langchain.utils.comma_list", "langchain_core.utils.comma_list"],
[
"langchain.utils.comma_list",
"langchain_core.utils.comma_list"
],
[
"langchain.utils.convert_to_secret_str",
"langchain_core.utils.convert_to_secret_str"
],
["langchain.utils.get_bolded_text", "langchain_core.utils.get_bolded_text"],
[
"langchain.utils.get_bolded_text",
"langchain_core.utils.get_bolded_text"
],
[
"langchain.utils.get_color_mapping",
"langchain_core.utils.get_color_mapping"
],
["langchain.utils.get_colored_text", "langchain_core.utils.get_colored_text"],
[
"langchain.utils.get_colored_text",
"langchain_core.utils.get_colored_text"
],
[
"langchain.utils.get_from_dict_or_env",
"langchain_core.utils.get_from_dict_or_env"
],
["langchain.utils.get_from_env", "langchain_core.utils.get_from_env"],
[
"langchain.utils.get_from_env",
"langchain_core.utils.get_from_env"
],
[
"langchain.utils.get_pydantic_field_names",
"langchain_core.utils.get_pydantic_field_names"
],
["langchain.utils.guard_import", "langchain_core.utils.guard_import"],
["langchain.utils.mock_now", "langchain_core.utils.mock_now"],
["langchain.utils.print_text", "langchain_core.utils.print_text"],
[
"langchain.utils.guard_import",
"langchain_core.utils.guard_import"
],
[
"langchain.utils.mock_now",
"langchain_core.utils.mock_now"
],
[
"langchain.utils.print_text",
"langchain_core.utils.print_text"
],
[
"langchain.utils.raise_for_status_with_text",
"langchain_core.utils.raise_for_status_with_text"
],
["langchain.utils.stringify_dict", "langchain_core.utils.stringify_dict"],
["langchain.utils.stringify_value", "langchain_core.utils.stringify_value"],
["langchain.utils.xor_args", "langchain_core.utils.xor_args"],
["langchain.utils.aiter.py_anext", "langchain_core.utils.aiter.py_anext"],
["langchain.utils.aiter.NoLock", "langchain_core.utils.aiter.NoLock"],
["langchain.utils.aiter.Tee", "langchain_core.utils.aiter.Tee"],
[
"langchain.utils.stringify_dict",
"langchain_core.utils.stringify_dict"
],
[
"langchain.utils.stringify_value",
"langchain_core.utils.stringify_value"
],
[
"langchain.utils.xor_args",
"langchain_core.utils.xor_args"
],
[
"langchain.utils.aiter.py_anext",
"langchain_core.utils.aiter.py_anext"
],
[
"langchain.utils.aiter.NoLock",
"langchain_core.utils.aiter.NoLock"
],
[
"langchain.utils.aiter.Tee",
"langchain_core.utils.aiter.Tee"
],
[
"langchain.utils.env.get_from_dict_or_env",
"langchain_core.utils.get_from_dict_or_env"
],
["langchain.utils.env.get_from_env", "langchain_core.utils.get_from_env"],
[
"langchain.utils.env.get_from_env",
"langchain_core.utils.get_from_env"
],
[
"langchain.utils.formatting.StrictFormatter",
"langchain_core.utils.StrictFormatter"
@@ -1742,10 +1999,22 @@
"langchain.utils.input.get_bolded_text",
"langchain_core.utils.get_bolded_text"
],
["langchain.utils.input.print_text", "langchain_core.utils.print_text"],
["langchain.utils.iter.NoLock", "langchain_core.utils.iter.NoLock"],
["langchain.utils.iter.tee_peer", "langchain_core.utils.iter.tee_peer"],
["langchain.utils.iter.Tee", "langchain_core.utils.iter.Tee"],
[
"langchain.utils.input.print_text",
"langchain_core.utils.print_text"
],
[
"langchain.utils.iter.NoLock",
"langchain_core.utils.iter.NoLock"
],
[
"langchain.utils.iter.tee_peer",
"langchain_core.utils.iter.tee_peer"
],
[
"langchain.utils.iter.Tee",
"langchain_core.utils.iter.Tee"
],
[
"langchain.utils.iter.batch_iterate",
"langchain_core.utils.iter.batch_iterate"
@@ -1798,14 +2067,26 @@
"langchain.utils.strings.stringify_dict",
"langchain_core.utils.stringify_dict"
],
["langchain.utils.strings.comma_list", "langchain_core.utils.comma_list"],
["langchain.utils.utils.xor_args", "langchain_core.utils.xor_args"],
[
"langchain.utils.strings.comma_list",
"langchain_core.utils.comma_list"
],
[
"langchain.utils.utils.xor_args",
"langchain_core.utils.xor_args"
],
[
"langchain.utils.utils.raise_for_status_with_text",
"langchain_core.utils.raise_for_status_with_text"
],
["langchain.utils.utils.mock_now", "langchain_core.utils.mock_now"],
["langchain.utils.utils.guard_import", "langchain_core.utils.guard_import"],
[
"langchain.utils.utils.mock_now",
"langchain_core.utils.mock_now"
],
[
"langchain.utils.utils.guard_import",
"langchain_core.utils.guard_import"
],
[
"langchain.utils.utils.check_package_version",
"langchain_core.utils.check_package_version"


@@ -17,14 +17,12 @@
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Union
from typing import TYPE_CHECKING
from pydantic import BaseModel, Field
from langchain_core.messages import (
AIMessage,
BaseMessage,
HumanMessage,
get_buffer_string,
)
from langchain_core.runnables.config import run_in_executor
@@ -126,40 +124,6 @@ class BaseChatMessageHistory(ABC):
"""
return await run_in_executor(None, lambda: self.messages)
def add_user_message(self, message: Union[HumanMessage, str]) -> None:
"""Convenience method for adding a human message string to the store.
!!! note
This is a convenience method. Code should favor the bulk ``add_messages``
interface instead to save on round-trips to the persistence layer.
This method may be deprecated in a future release.
Args:
message: The human message to add to the store.
"""
if isinstance(message, HumanMessage):
self.add_message(message)
else:
self.add_message(HumanMessage(content=message))
def add_ai_message(self, message: Union[AIMessage, str]) -> None:
"""Convenience method for adding an AI message string to the store.
!!! note
This is a convenience method. Code should favor the bulk ``add_messages``
interface instead to save on round-trips to the persistence layer.
This method may be deprecated in a future release.
Args:
message: The AI message to add.
"""
if isinstance(message, AIMessage):
self.add_message(message)
else:
self.add_message(AIMessage(content=message))
def add_message(self, message: BaseMessage) -> None:
"""Add a Message object to the store.

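A hedged sketch of replacing the removed convenience methods with the bulk
`add_messages` interface recommended in the docstrings above:

```python
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.messages import AIMessage, HumanMessage

history = InMemoryChatMessageHistory()
# was: history.add_user_message("Hi"); history.add_ai_message("Hello!")
history.add_messages([HumanMessage("Hi"), AIMessage("Hello!")])
```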

@@ -1,18 +1,10 @@
"""Global values and configuration that apply to all of LangChain."""
import warnings
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from langchain_core.caches import BaseCache
try:
import langchain # type: ignore[import-not-found]
_HAS_LANGCHAIN = True
except ImportError:
_HAS_LANGCHAIN = False
# DO NOT USE THESE VALUES DIRECTLY!
# Use them only via `get_<X>()` and `set_<X>()` below,
@@ -29,26 +21,6 @@ def set_verbose(value: bool) -> None: # noqa: FBT001
Args:
value: The new value for the `verbose` global setting.
"""
if _HAS_LANGCHAIN:
# We're about to run some deprecated code, don't report warnings from it.
# The user called the correct (non-deprecated) code path and shouldn't get
# warnings.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=(
"Importing verbose from langchain root module "
"is no longer supported"
),
)
# N.B.: This is a workaround for an unfortunate quirk of Python's
# module-level `__getattr__()` implementation:
# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
#
# Remove it once `langchain.verbose` is no longer supported, and once all
# users have migrated to using `set_verbose()` here.
langchain.verbose = value
global _verbose # noqa: PLW0603
_verbose = value
@@ -59,35 +31,7 @@ def get_verbose() -> bool:
Returns:
The value of the `verbose` global setting.
"""
if _HAS_LANGCHAIN:
# We're about to run some deprecated code, don't report warnings from it.
# The user called the correct (non-deprecated) code path and shouldn't get
# warnings.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=(
".*Importing verbose from langchain root module "
"is no longer supported"
),
)
# N.B.: This is a workaround for an unfortunate quirk of Python's
# module-level `__getattr__()` implementation:
# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
#
# Remove it once `langchain.verbose` is no longer supported, and once all
# users have migrated to using `set_verbose()` here.
#
# In the meantime, the `verbose` setting is considered True if either the
# old or the new value are True. This accommodates users who haven't
# migrated to using `set_verbose()` yet. Those users are getting
# deprecation warnings directing them to use `set_verbose()` when they
# import `langchain.verbose`.
old_verbose = langchain.verbose
else:
old_verbose = False
return _verbose or old_verbose
return _verbose
def set_debug(value: bool) -> None: # noqa: FBT001
@@ -96,24 +40,6 @@ def set_debug(value: bool) -> None: # noqa: FBT001
Args:
value: The new value for the `debug` global setting.
"""
if _HAS_LANGCHAIN:
# We're about to run some deprecated code, don't report warnings from it.
# The user called the correct (non-deprecated) code path and shouldn't get
# warnings.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="Importing debug from langchain root module "
"is no longer supported",
)
# N.B.: This is a workaround for an unfortunate quirk of Python's
# module-level `__getattr__()` implementation:
# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
#
# Remove it once `langchain.debug` is no longer supported, and once all
# users have migrated to using `set_debug()` here.
langchain.debug = value
global _debug # noqa: PLW0603
_debug = value
@@ -124,32 +50,7 @@ def get_debug() -> bool:
Returns:
The value of the `debug` global setting.
"""
if _HAS_LANGCHAIN:
# We're about to run some deprecated code, don't report warnings from it.
# The user called the correct (non-deprecated) code path and shouldn't get
# warnings.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="Importing debug from langchain root module "
"is no longer supported",
)
# N.B.: This is a workaround for an unfortunate quirk of Python's
# module-level `__getattr__()` implementation:
# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
#
# Remove it once `langchain.debug` is no longer supported, and once all
# users have migrated to using `set_debug()` here.
#
# In the meantime, the `debug` setting is considered True if either the old
# or the new value are True. This accommodates users who haven't migrated
# to using `set_debug()` yet. Those users are getting deprecation warnings
# directing them to use `set_debug()` when they import `langchain.debug`.
old_debug = langchain.debug
else:
old_debug = False
return _debug or old_debug
return _debug
def set_llm_cache(value: Optional["BaseCache"]) -> None:
@@ -158,26 +59,6 @@ def set_llm_cache(value: Optional["BaseCache"]) -> None:
Args:
value: The new LLM cache to use. If `None`, the LLM cache is disabled.
"""
if _HAS_LANGCHAIN:
# We're about to run some deprecated code, don't report warnings from it.
# The user called the correct (non-deprecated) code path and shouldn't get
# warnings.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=(
"Importing llm_cache from langchain root module "
"is no longer supported"
),
)
# N.B.: This is a workaround for an unfortunate quirk of Python's
# module-level `__getattr__()` implementation:
# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
#
# Remove it once `langchain.llm_cache` is no longer supported, and
# once all users have migrated to using `set_llm_cache()` here.
langchain.llm_cache = value
global _llm_cache # noqa: PLW0603
_llm_cache = value
@@ -188,33 +69,4 @@ def get_llm_cache() -> Optional["BaseCache"]:
Returns:
The value of the `llm_cache` global setting.
"""
if _HAS_LANGCHAIN:
# We're about to run some deprecated code, don't report warnings from it.
# The user called the correct (non-deprecated) code path and shouldn't get
# warnings.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=(
"Importing llm_cache from langchain root module "
"is no longer supported"
),
)
# N.B.: This is a workaround for an unfortunate quirk of Python's
# module-level `__getattr__()` implementation:
# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
#
# Remove it once `langchain.llm_cache` is no longer supported, and
# once all users have migrated to using `set_llm_cache()` here.
#
# In the meantime, the `llm_cache` setting returns whichever of
# its two backing sources is truthy (not `None` and non-empty),
# or the old value if both are falsy. This accommodates users
# who haven't migrated to using `set_llm_cache()` yet.
# Those users are getting deprecation warnings directing them
# to use `set_llm_cache()` when they import `langchain.llm_cache`.
old_llm_cache = langchain.llm_cache
else:
old_llm_cache = None
return _llm_cache or old_llm_cache
return _llm_cache
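
With the langchain-root fallbacks removed, the getters and setters reduce to
plain module-level flags; a minimal usage sketch:

```python
from langchain_core.globals import get_debug, set_debug, set_verbose

set_debug(True)     # sets the module-level _debug flag directly
set_verbose(False)  # likewise for _verbose
assert get_debug() is True
```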


@@ -20,7 +20,6 @@ from typing import (
from pydantic import BaseModel, ConfigDict, Field, field_validator
from typing_extensions import TypedDict, override
from langchain_core._api import deprecated
from langchain_core.caches import BaseCache
from langchain_core.callbacks import Callbacks
from langchain_core.globals import get_verbose
@@ -37,7 +36,6 @@ from langchain_core.prompt_values import (
StringPromptValue,
)
from langchain_core.runnables import Runnable, RunnableSerializable
from langchain_core.utils import get_pydantic_field_names
if TYPE_CHECKING:
from langchain_core.outputs import LLMResult
@@ -259,102 +257,6 @@ class BaseLanguageModel(
# generate responses that match a given schema.
raise NotImplementedError
@deprecated("0.1.7", alternative="invoke", removal="1.0")
@abstractmethod
def predict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
"""Pass a single string input to the model and return a string.
Use this method when passing in raw text. If you want to pass in specific types
of chat messages, use predict_messages.
Args:
text: String input to pass to the model.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
Top model prediction as a string.
"""
@deprecated("0.1.7", alternative="invoke", removal="1.0")
@abstractmethod
def predict_messages(
self,
messages: list[BaseMessage],
*,
stop: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> BaseMessage:
"""Pass a message sequence to the model and return a message.
Use this method when passing in chat messages. If you want to pass in raw text,
use predict.
Args:
messages: A sequence of chat messages corresponding to a single model input.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
Top model prediction as a message.
"""
@deprecated("0.1.7", alternative="ainvoke", removal="1.0")
@abstractmethod
async def apredict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
"""Asynchronously pass a string to the model and return a string.
Use this method when calling pure text generation models and only the top
candidate generation is needed.
Args:
text: String input to pass to the model.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
Top model prediction as a string.
"""
@deprecated("0.1.7", alternative="ainvoke", removal="1.0")
@abstractmethod
async def apredict_messages(
self,
messages: list[BaseMessage],
*,
stop: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> BaseMessage:
"""Asynchronously pass messages to the model and return a message.
Use this method when calling chat models and only the top candidate generation
is needed.
Args:
messages: A sequence of chat messages corresponding to a single model input.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
Top model prediction as a message.
"""
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
@@ -417,12 +319,3 @@ class BaseLanguageModel(
stacklevel=2,
)
return sum(self.get_num_tokens(get_buffer_string([m])) for m in messages)
@classmethod
def _all_required_field_names(cls) -> set:
"""DEPRECATED: Kept for backwards compatibility.
Use ``get_pydantic_field_names``.
"""
return get_pydantic_field_names(cls)


@@ -6,22 +6,19 @@ import asyncio
import inspect
import json
import typing
import warnings
from abc import ABC, abstractmethod
from collections.abc import AsyncIterator, Iterator, Sequence
from functools import cached_property
from operator import itemgetter
from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, Union, cast
from pydantic import BaseModel, ConfigDict, Field, model_validator
from pydantic import BaseModel, ConfigDict, Field
from typing_extensions import override
from langchain_core._api import deprecated
from langchain_core.caches import BaseCache
from langchain_core.callbacks import (
AsyncCallbackManager,
AsyncCallbackManagerForLLMRun,
BaseCallbackManager,
CallbackManager,
CallbackManagerForLLMRun,
Callbacks,
@@ -42,7 +39,6 @@ from langchain_core.messages import (
AIMessageChunk,
AnyMessage,
BaseMessage,
HumanMessage,
convert_to_messages,
is_data_content_block,
message_chunk_to_message,
@@ -319,16 +315,6 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
""" # noqa: E501
callback_manager: Optional[BaseCallbackManager] = deprecated(
name="callback_manager", since="0.1.7", removal="1.0", alternative="callbacks"
)(
Field(
default=None,
exclude=True,
description="Callback manager to add to the run trace.",
)
)
rate_limiter: Optional[BaseRateLimiter] = Field(default=None, exclude=True)
"An optional rate limiter to use for limiting the number of requests."
@@ -373,27 +359,6 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
"""
@model_validator(mode="before")
@classmethod
def raise_deprecation(cls, values: dict) -> Any:
"""Emit deprecation warning if ``callback_manager`` is used.
Args:
values (Dict): Values to validate.
Returns:
Dict: Validated values.
"""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
stacklevel=5,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
@@ -1455,40 +1420,6 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
break
yield item # type: ignore[misc]
@deprecated("0.1.7", alternative="invoke", removal="1.0")
def __call__(
self,
messages: list[BaseMessage],
stop: Optional[list[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> BaseMessage:
"""Call the model.
Args:
messages: List of messages.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
functionality, such as logging or streaming, throughout generation.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Raises:
ValueError: If the generation is not a chat generation.
Returns:
The model output message.
"""
generation = self.generate(
[messages], stop=stop, callbacks=callbacks, **kwargs
).generations[0][0]
if isinstance(generation, ChatGeneration):
return generation.message
msg = "Unexpected generation type"
raise ValueError(msg)
async def _call_async(
self,
messages: list[BaseMessage],
@@ -1505,91 +1436,6 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
msg = "Unexpected generation type"
raise ValueError(msg)
@deprecated("0.1.7", alternative="invoke", removal="1.0")
def call_as_llm(
self, message: str, stop: Optional[list[str]] = None, **kwargs: Any
) -> str:
"""Call the model.
Args:
message: The input message.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
The model output string.
"""
return self.predict(message, stop=stop, **kwargs)
@deprecated("0.1.7", alternative="invoke", removal="1.0")
@override
def predict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
"""Predict the next message.
Args:
text: The input message.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Raises:
ValueError: If the output is not a string.
Returns:
The predicted output string.
"""
stop_ = None if stop is None else list(stop)
result = self([HumanMessage(content=text)], stop=stop_, **kwargs)
if isinstance(result.content, str):
return result.content
msg = "Cannot use predict when output is not a string."
raise ValueError(msg)
@deprecated("0.1.7", alternative="invoke", removal="1.0")
@override
def predict_messages(
self,
messages: list[BaseMessage],
*,
stop: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> BaseMessage:
stop_ = None if stop is None else list(stop)
return self(messages, stop=stop_, **kwargs)
@deprecated("0.1.7", alternative="ainvoke", removal="1.0")
@override
async def apredict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
stop_ = None if stop is None else list(stop)
result = await self._call_async(
[HumanMessage(content=text)], stop=stop_, **kwargs
)
if isinstance(result.content, str):
return result.content
msg = "Cannot use predict when output is not a string."
raise ValueError(msg)
@deprecated("0.1.7", alternative="ainvoke", removal="1.0")
@override
async def apredict_messages(
self,
messages: list[BaseMessage],
*,
stop: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> BaseMessage:
stop_ = None if stop is None else list(stop)
return await self._call_async(messages, stop=stop_, **kwargs)
@property
@abstractmethod
def _llm_type(self) -> str:
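Direct calls to a chat model (the removed `__call__`) take the same message list through `invoke`. A sketch with langchain_core's built-in fake chat model:

```python
from langchain_core.language_models import FakeListChatModel
from langchain_core.messages import HumanMessage, SystemMessage

model = FakeListChatModel(responses=["A framework for LLM apps."])

messages = [
    SystemMessage(content="You are terse."),
    HumanMessage(content="What is LangChain?"),
]

# Before (removed): response = model(messages, stop=["\n"])
response = model.invoke(messages, stop=["\n"])  # returns an AIMessage
print(response.content)
```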

View File

@@ -7,7 +7,6 @@ import functools
import inspect
import json
import logging
import warnings
from abc import ABC, abstractmethod
from collections.abc import AsyncIterator, Iterator, Sequence
from pathlib import Path
@@ -21,7 +20,7 @@ from typing import (
)
import yaml
from pydantic import ConfigDict, Field, model_validator
from pydantic import ConfigDict
from tenacity import (
RetryCallState,
before_sleep_log,
@@ -33,7 +32,6 @@ from tenacity import (
)
from typing_extensions import override
from langchain_core._api import deprecated
from langchain_core.caches import BaseCache
from langchain_core.callbacks import (
AsyncCallbackManager,
@@ -51,10 +49,7 @@ from langchain_core.language_models.base import (
)
from langchain_core.load import dumpd
from langchain_core.messages import (
AIMessage,
BaseMessage,
convert_to_messages,
get_buffer_string,
)
from langchain_core.outputs import Generation, GenerationChunk, LLMResult, RunInfo
from langchain_core.prompt_values import ChatPromptValue, PromptValue, StringPromptValue
@@ -296,26 +291,10 @@ class BaseLLM(BaseLanguageModel[str], ABC):
It should take in a prompt and return a string.
"""
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
"""[DEPRECATED]"""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
@model_validator(mode="before")
@classmethod
def raise_deprecation(cls, values: dict) -> Any:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
stacklevel=5,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
@functools.cached_property
def _serialized(self) -> dict[str, Any]:
return dumpd(self)
@@ -1308,56 +1287,6 @@ class BaseLLM(BaseLanguageModel[str], ABC):
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output, run=run_info)
@deprecated("0.1.7", alternative="invoke", removal="1.0")
def __call__(
self,
prompt: str,
stop: Optional[list[str]] = None,
callbacks: Callbacks = None,
*,
tags: Optional[list[str]] = None,
metadata: Optional[dict[str, Any]] = None,
**kwargs: Any,
) -> str:
"""Check Cache and run the LLM on the given prompt and input.
Args:
prompt: The prompt to generate from.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
functionality, such as logging or streaming, throughout generation.
tags: List of tags to associate with the prompt.
metadata: Metadata to associate with the prompt.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
The generated text.
Raises:
ValueError: If the prompt is not a string.
"""
if not isinstance(prompt, str):
msg = (
"Argument `prompt` is expected to be a string. Instead found "
f"{type(prompt)}. If you want to run the LLM on multiple prompts, use "
"`generate` instead."
)
raise ValueError(msg) # noqa: TRY004
return (
self.generate(
[prompt],
stop=stop,
callbacks=callbacks,
tags=tags,
metadata=metadata,
**kwargs,
)
.generations[0][0]
.text
)
async def _call_async(
self,
prompt: str,
@@ -1379,50 +1308,6 @@ class BaseLLM(BaseLanguageModel[str], ABC):
)
return result.generations[0][0].text
@deprecated("0.1.7", alternative="invoke", removal="1.0")
@override
def predict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
stop_ = None if stop is None else list(stop)
return self(text, stop=stop_, **kwargs)
@deprecated("0.1.7", alternative="invoke", removal="1.0")
@override
def predict_messages(
self,
messages: list[BaseMessage],
*,
stop: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> BaseMessage:
text = get_buffer_string(messages)
stop_ = None if stop is None else list(stop)
content = self(text, stop=stop_, **kwargs)
return AIMessage(content=content)
@deprecated("0.1.7", alternative="ainvoke", removal="1.0")
@override
async def apredict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
stop_ = None if stop is None else list(stop)
return await self._call_async(text, stop=stop_, **kwargs)
@deprecated("0.1.7", alternative="ainvoke", removal="1.0")
@override
async def apredict_messages(
self,
messages: list[BaseMessage],
*,
stop: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> BaseMessage:
text = get_buffer_string(messages)
stop_ = None if stop is None else list(stop)
content = await self._call_async(text, stop=stop_, **kwargs)
return AIMessage(content=content)
def __str__(self) -> str:
"""Return a string representation of the object for printing."""
cls_name = f"\033[1m{self.__class__.__name__}\033[0m"
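For string-completion models the consolidation is the same: the removed `__call__`/`predict` paths become `invoke`, which accepts and returns plain strings on `BaseLLM` subclasses, while `batch` covers the simple multi-prompt case. A sketch with the built-in fake LLM:

```python
from langchain_core.language_models import FakeListLLM

llm = FakeListLLM(responses=["Why did the chicken cross the road?", "To get there."])

# Before (removed): text = llm("Tell me a joke", stop=["\n"])
text = llm.invoke("Tell me a joke", stop=["\n"])

# batch() replaces generate() for the simple multi-prompt case
texts = llm.batch(["Joke one:", "Joke two:"])
```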

View File

@@ -413,11 +413,10 @@ SERIALIZABLE_MAPPING: dict[tuple[str, ...], tuple[str, ...]] = {
"few_shot_with_templates",
"FewShotPromptWithTemplates",
),
("langchain", "prompts", "pipeline", "PipelinePromptTemplate"): (
("langchain", "prompts", "pipeline"): (
"langchain_core",
"prompts",
"pipeline",
"PipelinePromptTemplate",
),
("langchain", "prompts", "base", "StringPromptTemplate"): (
"langchain_core",
@@ -846,11 +845,10 @@ OLD_CORE_NAMESPACES_MAPPING: dict[tuple[str, ...], tuple[str, ...]] = {
"few_shot_with_templates",
"FewShotPromptWithTemplates",
),
("langchain_core", "prompts", "pipeline", "PipelinePromptTemplate"): (
("langchain_core", "prompts", "pipeline"): (
"langchain_core",
"prompts",
"pipeline",
"PipelinePromptTemplate",
),
("langchain_core", "prompts", "string", "StringPromptTemplate"): (
"langchain_core",

View File

@@ -1045,6 +1045,7 @@ def convert_to_openai_messages(
messages: Union[MessageLikeRepresentation, Sequence[MessageLikeRepresentation]],
*,
text_format: Literal["string", "block"] = "string",
include_id: bool = False,
) -> Union[dict, list[dict]]:
"""Convert LangChain messages into OpenAI message dicts.
@@ -1062,6 +1063,8 @@ def convert_to_openai_messages(
If a message has a string content, this is turned into a list
with a single content block of type ``'text'``. If a message has
content blocks these are left as is.
include_id: Whether to include message ids in the OpenAI messages, if they
are present in the source messages.
Raises:
ValueError: if an unrecognized ``text_format`` is specified, or if a message
@@ -1150,6 +1153,8 @@ def convert_to_openai_messages(
oai_msg["refusal"] = message.additional_kwargs["refusal"]
if isinstance(message, ToolMessage):
oai_msg["tool_call_id"] = message.tool_call_id
if include_id and message.id:
oai_msg["id"] = message.id
if not message.content:
content = "" if text_format == "string" else []
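A quick sketch of the new flag, mirroring the unit test further down in this diff:

```python
from langchain_core.messages import AIMessage, convert_to_openai_messages

msg = AIMessage(content="Hello", id="resp_123")

convert_to_openai_messages(msg)
# {'role': 'assistant', 'content': 'Hello'}

convert_to_openai_messages(msg, include_id=True)
# {'role': 'assistant', 'content': 'Hello', 'id': 'resp_123'}
```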

View File

@@ -8,8 +8,7 @@ from multiple components and prompt values. Prompt classes and functions make co
.. code-block::
BasePromptTemplate --> PipelinePromptTemplate
StringPromptTemplate --> PromptTemplate
BasePromptTemplate --> StringPromptTemplate --> PromptTemplate
FewShotPromptTemplate
FewShotPromptWithTemplates
BaseChatPromptTemplate --> AutoGPTPrompt
@@ -53,7 +52,6 @@ if TYPE_CHECKING:
FewShotPromptWithTemplates,
)
from langchain_core.prompts.loading import load_prompt
from langchain_core.prompts.pipeline import PipelinePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.prompts.string import (
StringPromptTemplate,
@@ -75,7 +73,6 @@ __all__ = (
"FewShotPromptWithTemplates",
"HumanMessagePromptTemplate",
"MessagesPlaceholder",
"PipelinePromptTemplate",
"PromptTemplate",
"StringPromptTemplate",
"SystemMessagePromptTemplate",
@@ -104,7 +101,6 @@ _dynamic_imports = {
"FewShotPromptTemplate": "few_shot",
"FewShotPromptWithTemplates": "few_shot_with_templates",
"load_prompt": "loading",
"PipelinePromptTemplate": "pipeline",
"PromptTemplate": "prompt",
"StringPromptTemplate": "string",
"check_valid_template": "string",

View File

@@ -24,7 +24,6 @@ from pydantic import (
)
from typing_extensions import Self, override
from langchain_core._api import deprecated
from langchain_core.messages import (
AIMessage,
AnyMessage,
@@ -262,14 +261,12 @@ class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
def from_template_file(
cls,
template_file: Union[str, Path],
input_variables: list[str], # noqa: ARG003 # Deprecated
**kwargs: Any,
) -> Self:
"""Create a class from a template file.
Args:
template_file: path to a template file. String or Path.
input_variables: list of input variables.
**kwargs: keyword arguments to pass to the constructor.
Returns:
@@ -1105,41 +1102,6 @@ class ChatPromptTemplate(BaseChatPromptTemplate):
message = HumanMessagePromptTemplate(prompt=prompt_template)
return cls.from_messages([message])
@classmethod
@deprecated("0.0.1", alternative="from_messages", pending=True)
def from_role_strings(
cls, string_messages: list[tuple[str, str]]
) -> ChatPromptTemplate:
"""Create a chat prompt template from a list of (role, template) tuples.
Args:
string_messages: list of (role, template) tuples.
Returns:
a chat prompt template.
"""
return cls(
messages=[
ChatMessagePromptTemplate.from_template(template, role=role)
for role, template in string_messages
]
)
@classmethod
@deprecated("0.0.1", alternative="from_messages", pending=True)
def from_strings(
cls, string_messages: list[tuple[type[BaseMessagePromptTemplate], str]]
) -> ChatPromptTemplate:
"""Create a chat prompt template from a list of (role class, template) tuples.
Args:
string_messages: list of (role class, template) tuples.
Returns:
a chat prompt template.
"""
return cls.from_messages(string_messages)
@classmethod
def from_messages(
cls,
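The removed `from_role_strings`/`from_strings` constructors map onto `from_messages`, which already accepts `(role, template)` tuples; nonstandard roles can still be expressed by passing `ChatMessagePromptTemplate.from_template(template, role=role)` entries instead of tuples. A sketch:

```python
from langchain_core.prompts import ChatPromptTemplate

# Before (removed): ChatPromptTemplate.from_role_strings([...])
template = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a bot."),
        ("human", "{question}"),
    ]
)
messages = template.format_messages(question="How are you?")
```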

View File

@@ -1,138 +0,0 @@
"""[DEPRECATED] Pipeline prompt template."""
from typing import Any
from pydantic import model_validator
from langchain_core._api.deprecation import deprecated
from langchain_core.prompt_values import PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import BaseChatPromptTemplate
def _get_inputs(inputs: dict, input_variables: list[str]) -> dict:
return {k: inputs[k] for k in input_variables}
@deprecated(
since="0.3.22",
removal="1.0",
message=(
"This class is deprecated in favor of chaining individual prompts together."
),
)
class PipelinePromptTemplate(BasePromptTemplate):
"""Pipeline prompt template.
This has been deprecated in favor of chaining individual prompts together in your
code; e.g. using a for loop, you could do:
.. code-block:: python
my_input = {"key": "value"}
for name, prompt in pipeline_prompts:
my_input[name] = prompt.invoke(my_input).to_string()
my_output = final_prompt.invoke(my_input)
Prompt template for composing multiple prompt templates together.
This can be useful when you want to reuse parts of prompts.
A PipelinePrompt consists of two main parts:
- final_prompt: This is the final prompt that is returned
- pipeline_prompts: This is a list of tuples, consisting
of a string (``name``) and a Prompt Template.
Each PromptTemplate will be formatted and then passed
to future prompt templates as a variable with
the same name as ``name``
"""
final_prompt: BasePromptTemplate
"""The final prompt that is returned."""
pipeline_prompts: list[tuple[str, BasePromptTemplate]]
"""A list of tuples, consisting of a string (``name``) and a Prompt Template."""
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Returns:
``["langchain", "prompts", "pipeline"]``
"""
return ["langchain", "prompts", "pipeline"]
@model_validator(mode="before")
@classmethod
def get_input_variables(cls, values: dict) -> Any:
"""Get input variables."""
created_variables = set()
all_variables = set()
for k, prompt in values["pipeline_prompts"]:
created_variables.add(k)
all_variables.update(prompt.input_variables)
values["input_variables"] = list(all_variables.difference(created_variables))
return values
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
for k, prompt in self.pipeline_prompts:
inputs = _get_inputs(kwargs, prompt.input_variables)
if isinstance(prompt, BaseChatPromptTemplate):
kwargs[k] = prompt.format_messages(**inputs)
else:
kwargs[k] = prompt.format(**inputs)
inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
return self.final_prompt.format_prompt(**inputs)
async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
for k, prompt in self.pipeline_prompts:
inputs = _get_inputs(kwargs, prompt.input_variables)
if isinstance(prompt, BaseChatPromptTemplate):
kwargs[k] = await prompt.aformat_messages(**inputs)
else:
kwargs[k] = await prompt.aformat(**inputs)
inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
return await self.final_prompt.aformat_prompt(**inputs)
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return self.format_prompt(**kwargs).to_string()
async def aformat(self, **kwargs: Any) -> str:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return (await self.aformat_prompt(**kwargs)).to_string()
@property
def _prompt_type(self) -> str:
raise ValueError
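A self-contained sketch of the replacement pattern from the deprecation docstring above, keeping the per-prompt input filtering that `_get_inputs` performed (the template strings are illustrative):

```python
from langchain_core.prompts import PromptTemplate

intro = PromptTemplate.from_template("You are impersonating {person}.")
final = PromptTemplate.from_template("{intro}\nQ: {question}\nA:")

values = {"person": "Ada Lovelace", "question": "What is an engine?"}
for name, prompt in [("intro", intro)]:
    # same filtering _get_inputs did: pass each prompt only its own variables
    inputs = {k: values[k] for k in prompt.input_variables}
    values[name] = prompt.invoke(inputs).to_string()

inputs = {k: values[k] for k in final.input_variables}
print(final.invoke(inputs).to_string())
```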

View File

@@ -2,7 +2,6 @@
from __future__ import annotations
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, Union
@@ -235,7 +234,6 @@ class PromptTemplate(StringPromptTemplate):
def from_file(
cls,
template_file: Union[str, Path],
input_variables: Optional[list[str]] = None,
encoding: Optional[str] = None,
**kwargs: Any,
) -> PromptTemplate:
@@ -243,23 +241,13 @@ class PromptTemplate(StringPromptTemplate):
Args:
template_file: The path to the file containing the prompt template.
input_variables: [DEPRECATED] A list of variable names the final prompt
template will expect. Defaults to None.
encoding: The encoding system for opening the template file.
If not provided, will use the OS default.
input_variables is ignored as from_file now delegates to from_template().
Returns:
The prompt loaded from the file.
"""
template = Path(template_file).read_text(encoding=encoding)
if input_variables:
warnings.warn(
"`input_variables` is deprecated and ignored.",
DeprecationWarning,
stacklevel=2,
)
return cls.from_template(template=template, **kwargs)
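With the argument gone, the variables are inferred from the template text by `from_template`. A sketch, writing an illustrative template file first:

```python
from pathlib import Path

from langchain_core.prompts import PromptTemplate

Path("prompt.txt").write_text("Q: {question}\nA:", encoding="utf-8")

# Before (deprecated): PromptTemplate.from_file("prompt.txt", ["question"])
prompt = PromptTemplate.from_file("prompt.txt")
assert prompt.input_variables == ["question"]
```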
@classmethod

View File

@@ -31,7 +31,6 @@ from pydantic import (
PydanticDeprecationWarning,
SkipValidation,
ValidationError,
model_validator,
validate_arguments,
)
from pydantic.v1 import BaseModel as BaseModelV1
@@ -39,10 +38,8 @@ from pydantic.v1 import ValidationError as ValidationErrorV1
from pydantic.v1 import validate_arguments as validate_arguments_v1
from typing_extensions import override
from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManager,
BaseCallbackManager,
CallbackManager,
Callbacks,
)
@@ -464,15 +461,6 @@ class ChildTool(BaseTool):
callbacks: Callbacks = Field(default=None, exclude=True)
"""Callbacks to be called during tool execution."""
callback_manager: Optional[BaseCallbackManager] = deprecated(
name="callback_manager", since="0.1.7", removal="1.0", alternative="callbacks"
)(
Field(
default=None,
exclude=True,
description="Callback manager to add to the run trace.",
)
)
tags: Optional[list[str]] = None
"""Optional list of tags associated with the tool. Defaults to None.
These tags will be associated with each call to this tool,
@@ -700,26 +688,6 @@ class ChildTool(BaseTool):
}
return tool_input
@model_validator(mode="before")
@classmethod
def raise_deprecation(cls, values: dict) -> Any:
"""Raise deprecation warning if callback_manager is used.
Args:
values: The values to validate.
Returns:
The validated values.
"""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
stacklevel=6,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
@abstractmethod
def _run(self, *args: Any, **kwargs: Any) -> Any:
"""Use the tool.
@@ -1011,19 +979,6 @@ class ChildTool(BaseTool):
await run_manager.on_tool_end(output, color=color, name=self.name, **kwargs)
return output
@deprecated("0.1.47", alternative="invoke", removal="1.0")
def __call__(self, tool_input: str, callbacks: Callbacks = None) -> str:
"""Make tool callable (deprecated).
Args:
tool_input: The input to the tool.
callbacks: Callbacks to use during execution.
Returns:
The tool's output.
"""
return self.run(tool_input, callbacks=callbacks)
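Tools lose their direct-call shim as well; `invoke` is the replacement. A sketch with a tool built via the `@tool` decorator:

```python
from langchain_core.tools import tool

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

# Before (removed): multiply("...") via BaseTool.__call__
result = multiply.invoke({"a": 2, "b": 3})  # 6
```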
def _is_tool_call(x: Any) -> bool:
"""Check if the input is a tool call dictionary.

View File

@@ -27,7 +27,6 @@ if TYPE_CHECKING:
from langchain_core.callbacks.base import BaseCallbackHandler, Callbacks
from langchain_core.callbacks.manager import AsyncCallbackManager, CallbackManager
from langchain_core.tracers.schemas import TracerSessionV1
# for backwards partial compatibility if this is imported by users but unused
tracing_callback_var: Any = None
@@ -39,21 +38,6 @@ run_collector_var: ContextVar[Optional[RunCollectorCallbackHandler]] = ContextVa
)
@contextmanager
def tracing_enabled(
session_name: str = "default", # noqa: ARG001
) -> Generator[TracerSessionV1, None, None]:
"""Throw an error because this has been replaced by ``tracing_v2_enabled``.
Raises:
RuntimeError: Always, because this function is deprecated.
"""
msg = (
"tracing_enabled is no longer supported. Please use tracing_v2_enabled instead."
)
raise RuntimeError(msg)
@contextmanager
def tracing_v2_enabled(
project_name: Optional[str] = None,
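A minimal usage sketch of the v2 context manager that remains; the project name is illustrative, `chain` stands for any runnable, and tracing assumes LangSmith credentials in the environment:

```python
from langchain_core.tracers.context import tracing_v2_enabled

# `chain` is any runnable; runs inside the block are traced to the project.
with tracing_v2_enabled(project_name="my-project"):
    chain.invoke({"question": "..."})
```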

View File

@@ -1,31 +0,0 @@
"""This module is deprecated and will be removed in a future release.
Please use LangChainTracer instead.
"""
from typing import Any
def get_headers(*args: Any, **kwargs: Any) -> Any: # noqa: ARG001
"""Throw an error because ``get_headers`` is no longer supported.
Raises:
RuntimeError: Always, because this function is deprecated.
"""
msg = (
"get_headers for LangChainTracerV1 is no longer supported. "
"Please use LangChainTracer instead."
)
raise RuntimeError(msg)
def LangChainTracerV1(*args: Any, **kwargs: Any) -> Any: # noqa: N802,ARG001
"""Throw an error because this has been replaced by ``LangChainTracer``.
Raises:
RuntimeError: Always, because this class is deprecated.
"""
msg = (
"LangChainTracerV1 is no longer supported. Please use LangChainTracer instead."
)
raise RuntimeError(msg)

View File

@@ -21,7 +21,6 @@ if TYPE_CHECKING:
print_text,
)
from langchain_core.utils.iter import batch_iterate
from langchain_core.utils.loading import try_load_from_hub
from langchain_core.utils.pydantic import pre_init
from langchain_core.utils.strings import (
comma_list,
@@ -68,7 +67,6 @@ __all__ = (
"secret_from_env",
"stringify_dict",
"stringify_value",
"try_load_from_hub",
"xor_args",
)
@@ -84,7 +82,6 @@ _dynamic_imports = {
"get_colored_text": "input",
"print_text": "input",
"batch_iterate": "iter",
"try_load_from_hub": "loading",
"pre_init": "pydantic",
"comma_list": "strings",
"sanitize_for_postgres": "strings",

View File

@@ -1,35 +0,0 @@
"""Utilities for loading configurations from langchain_core-hub."""
import warnings
from typing import Any
from langchain_core._api.deprecation import deprecated
@deprecated(
since="0.1.30",
removal="1.0",
message=(
"Using the hwchase17/langchain-hub "
"repo for prompts is deprecated. Please use "
"<https://smith.langchain.com/hub> instead."
),
)
def try_load_from_hub(
*args: Any, # noqa: ARG001
**kwargs: Any, # noqa: ARG001
) -> Any:
"""[DEPRECATED] Try to load from the old Hub.
Returns:
None always, indicating that we shouldn't load from the old hub.
"""
warnings.warn(
"Loading from the deprecated github-based Hub is no longer supported. "
"Please use the new LangChain Hub at https://smith.langchain.com/hub instead.",
DeprecationWarning,
stacklevel=2,
)
# return None, which indicates that we shouldn't load from old hub
# and might just be a filepath for e.g. load_chain
return None

View File

@@ -14,7 +14,6 @@ from typing import (
from typing_extensions import override
from langchain_core._api import deprecated
from langchain_core.documents import Document
from langchain_core.load import dumpd, load
from langchain_core.vectorstores import VectorStore
@@ -25,7 +24,6 @@ if TYPE_CHECKING:
from collections.abc import Iterator, Sequence
from langchain_core.embeddings import Embeddings
from langchain_core.indexing import UpsertResponse
try:
import numpy as np
@@ -281,76 +279,6 @@ class InMemoryVectorStore(VectorStore):
)
return documents
@deprecated(
alternative="VectorStore.add_documents",
message=(
"This was a beta API that was added in 0.2.11. It'll be removed in 0.3.0."
),
since="0.2.29",
removal="1.0",
)
def upsert(self, items: Sequence[Document], /, **_kwargs: Any) -> UpsertResponse:
"""[DEPRECATED] Upsert documents into the store.
Args:
items: The documents to upsert.
Returns:
The upsert response.
"""
vectors = self.embedding.embed_documents([item.page_content for item in items])
ids = []
for item, vector in zip(items, vectors):
doc_id = item.id or str(uuid.uuid4())
ids.append(doc_id)
self.store[doc_id] = {
"id": doc_id,
"vector": vector,
"text": item.page_content,
"metadata": item.metadata,
}
return {
"succeeded": ids,
"failed": [],
}
@deprecated(
alternative="VectorStore.aadd_documents",
message=(
"This was a beta API that was added in 0.2.11. It'll be removed in 0.3.0."
),
since="0.2.29",
removal="1.0",
)
async def aupsert(
self, items: Sequence[Document], /, **_kwargs: Any
) -> UpsertResponse:
"""[DEPRECATED] Upsert documents into the store.
Args:
items: The documents to upsert.
Returns:
The upsert response.
"""
vectors = await self.embedding.aembed_documents(
[item.page_content for item in items]
)
ids = []
for item, vector in zip(items, vectors):
doc_id = item.id or str(uuid.uuid4())
ids.append(doc_id)
self.store[doc_id] = {
"id": doc_id,
"vector": vector,
"text": item.page_content,
"metadata": item.metadata,
}
return {
"succeeded": ids,
"failed": [],
}
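The removed beta `upsert`/`aupsert` calls map onto the standard `add_documents` API; in the in-memory store, pre-set document ids are reused, assuming the store keeps that behavior. A sketch with the deterministic fake embeddings from langchain_core:

```python
from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore

store = InMemoryVectorStore(embedding=DeterministicFakeEmbedding(size=8))

docs = [Document(page_content="hello", id="doc-1")]

# Before (removed beta API): store.upsert(docs)
ids = store.add_documents(docs)  # pre-set ids are kept: ["doc-1"]
```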
@override
async def aget_by_ids(self, ids: Sequence[str], /) -> list[Document]:
"""Async get documents by their ids.

View File

@@ -882,10 +882,21 @@ def test_convert_to_openai_messages_string() -> None:
def test_convert_to_openai_messages_single_message() -> None:
message = HumanMessage(content="Hello")
message: BaseMessage = HumanMessage(content="Hello")
result = convert_to_openai_messages(message)
assert result == {"role": "user", "content": "Hello"}
# Test IDs
result = convert_to_openai_messages(message, include_id=True)
assert result == {"role": "user", "content": "Hello"} # no ID
message = AIMessage(content="Hello", id="resp_123")
result = convert_to_openai_messages(message)
assert result == {"role": "assistant", "content": "Hello"}
result = convert_to_openai_messages(message, include_id=True)
assert result == {"role": "assistant", "content": "Hello", "id": "resp_123"}
def test_convert_to_openai_messages_multiple_messages() -> None:
messages = [

View File

@@ -8,14 +8,10 @@ from packaging import version
from pydantic import ValidationError
from syrupy.assertion import SnapshotAssertion
from langchain_core._api.deprecation import (
LangChainPendingDeprecationWarning,
)
from langchain_core.load import dumpd, load
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
ToolMessage,
@@ -191,7 +187,6 @@ def test_message_prompt_template_from_template_file() -> None:
)
actual = ChatMessagePromptTemplate.from_template_file(
Path(__file__).parent.parent / "data" / "prompt_file.txt",
["question"],
role="human",
)
assert expected == actual
@@ -459,32 +454,6 @@ def test_chat_valid_infer_variables() -> None:
assert prompt.partial_variables == {"formatins": "some structure"}
async def test_chat_from_role_strings() -> None:
"""Test instantiation of chat template from role strings."""
with pytest.warns(LangChainPendingDeprecationWarning):
template = ChatPromptTemplate.from_role_strings(
[
("system", "You are a bot."),
("assistant", "hello!"),
("human", "{question}"),
("other", "{quack}"),
]
)
expected = [
ChatMessage(content="You are a bot.", role="system"),
ChatMessage(content="hello!", role="assistant"),
ChatMessage(content="How are you?", role="human"),
ChatMessage(content="duck", role="other"),
]
messages = template.format_messages(question="How are you?", quack="duck")
assert messages == expected
messages = await template.aformat_messages(question="How are you?", quack="duck")
assert messages == expected
@pytest.mark.parametrize(
("args", "expected"),
[

View File

@@ -14,7 +14,6 @@ EXPECTED_ALL = [
"aformat_document",
"HumanMessagePromptTemplate",
"MessagesPlaceholder",
"PipelinePromptTemplate",
"PromptTemplate",
"StringPromptTemplate",
"SystemMessagePromptTemplate",

View File

@@ -1,68 +0,0 @@
import pytest
from langchain_core.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.prompts.pipeline import PipelinePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
# Suppress deprecation warnings for PipelinePromptTemplate since we're testing the
# deprecated functionality intentionally to ensure it still works correctly
@pytest.mark.filterwarnings(
"ignore:This class is deprecated"
":langchain_core._api.deprecation.LangChainDeprecationWarning"
)
def test_get_input_variables() -> None:
prompt_a = PromptTemplate.from_template("{foo}")
prompt_b = PromptTemplate.from_template("{bar}")
pipeline_prompt = PipelinePromptTemplate( # type: ignore[call-arg]
final_prompt=prompt_b, pipeline_prompts=[("bar", prompt_a)]
)
assert pipeline_prompt.input_variables == ["foo"]
@pytest.mark.filterwarnings(
"ignore:This class is deprecated"
":langchain_core._api.deprecation.LangChainDeprecationWarning"
)
def test_simple_pipeline() -> None:
prompt_a = PromptTemplate.from_template("{foo}")
prompt_b = PromptTemplate.from_template("{bar}")
pipeline_prompt = PipelinePromptTemplate( # type: ignore[call-arg]
final_prompt=prompt_b, pipeline_prompts=[("bar", prompt_a)]
)
output = pipeline_prompt.format(foo="jim")
assert output == "jim"
@pytest.mark.filterwarnings(
"ignore:This class is deprecated"
":langchain_core._api.deprecation.LangChainDeprecationWarning"
)
def test_multi_variable_pipeline() -> None:
prompt_a = PromptTemplate.from_template("{foo}")
prompt_b = PromptTemplate.from_template("okay {bar} {baz}")
pipeline_prompt = PipelinePromptTemplate( # type: ignore[call-arg]
final_prompt=prompt_b, pipeline_prompts=[("bar", prompt_a)]
)
output = pipeline_prompt.format(foo="jim", baz="deep")
assert output == "okay jim deep"
@pytest.mark.filterwarnings(
"ignore:This class is deprecated"
":langchain_core._api.deprecation.LangChainDeprecationWarning"
)
async def test_partial_with_chat_prompts() -> None:
prompt_a = ChatPromptTemplate(
input_variables=["foo"], messages=[MessagesPlaceholder(variable_name="foo")]
)
prompt_b = ChatPromptTemplate.from_template("jim {bar}")
pipeline_prompt = PipelinePromptTemplate( # type: ignore[call-arg]
final_prompt=prompt_a, pipeline_prompts=[("foo", prompt_b)]
)
assert pipeline_prompt.input_variables == ["bar"]
output = pipeline_prompt.format_prompt(bar="okay")
assert output.to_messages()[0].content == "jim okay"
output = await pipeline_prompt.aformat_prompt(bar="okay")
assert output.to_messages()[0].content == "jim okay"

View File

@@ -32,10 +32,9 @@ from tests.unit_tests.pydantic_utils import _schema
def test_interfaces() -> None:
history = InMemoryChatMessageHistory()
history.add_message(SystemMessage(content="system"))
history.add_user_message("human 1")
history.add_ai_message("ai")
history.add_message(HumanMessage(content="human 2"))
assert str(history) == "System: system\nHuman: human 1\nAI: ai\nHuman: human 2"
history.add_message(HumanMessage(content="human 1"))
history.add_message(AIMessage(content="ai"))
assert str(history) == "System: system\nHuman: human 1\nAI: ai"
def _get_get_session_history(

View File

@@ -16,7 +16,6 @@ EXPECTED_ALL = [
"print_text",
"raise_for_status_with_text",
"xor_args",
"try_load_from_hub",
"image",
"build_extra_kwargs",
"get_from_dict_or_env",

View File

@@ -36,7 +36,7 @@ test_watch_extended:
integration_tests:
uv run --group test --group test_integration pytest tests/integration_tests
check_imports: $(shell find langchain -name '*.py')
check_imports: $(shell find langchain_classic -name '*.py')
uv run python ./scripts/check_imports.py $^
######################
@@ -48,7 +48,7 @@ PYTHON_FILES=.
MYPY_CACHE=.mypy_cache
lint format: PYTHON_FILES=.
lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=libs/langchain --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$')
lint_package: PYTHON_FILES=langchain
lint_package: PYTHON_FILES=langchain_classic
lint_tests: PYTHON_FILES=tests
lint_tests: MYPY_CACHE=.mypy_cache_test

View File

@@ -13,7 +13,7 @@ To help you ship LangChain apps to production faster, check out [LangSmith](http
## Quick Install
`pip install langchain`
`pip install langchain-classic`
## 🤔 What is this?

View File

@@ -1,3 +0,0 @@
from langchain.tools.retriever import create_retriever_tool
__all__ = ["create_retriever_tool"]

View File

@@ -1,3 +0,0 @@
from langchain.agents.openai_assistant.base import OpenAIAssistantRunnable
__all__ = ["OpenAIAssistantRunnable"]

View File

@@ -1,4 +0,0 @@
from langchain.agents.output_parsers.self_ask import SelfAskOutputParser
# For backwards compatibility
__all__ = ["SelfAskOutputParser"]

View File

@@ -1,27 +0,0 @@
from typing import Union
from langchain.agents.agent import BaseSingleActionAgent
from langchain.agents.agent_types import AgentType
from langchain.agents.chat.base import ChatAgent
from langchain.agents.conversational.base import ConversationalAgent
from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.agents.openai_functions_multi_agent.base import OpenAIMultiFunctionsAgent
from langchain.agents.react.base import ReActDocstoreAgent
from langchain.agents.self_ask_with_search.base import SelfAskWithSearchAgent
from langchain.agents.structured_chat.base import StructuredChatAgent
AGENT_TYPE = Union[type[BaseSingleActionAgent], type[OpenAIMultiFunctionsAgent]]
AGENT_TO_CLASS: dict[AgentType, AGENT_TYPE] = {
AgentType.ZERO_SHOT_REACT_DESCRIPTION: ZeroShotAgent,
AgentType.REACT_DOCSTORE: ReActDocstoreAgent,
AgentType.SELF_ASK_WITH_SEARCH: SelfAskWithSearchAgent,
AgentType.CONVERSATIONAL_REACT_DESCRIPTION: ConversationalAgent,
AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION: ChatAgent,
AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION: ConversationalChatAgent,
AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION: StructuredChatAgent,
AgentType.OPENAI_FUNCTIONS: OpenAIFunctionsAgent,
AgentType.OPENAI_MULTI_FUNCTIONS: OpenAIMultiFunctionsAgent,
}

View File

@@ -1,3 +0,0 @@
from langchain_core.tracers.langchain_v1 import LangChainTracerV1
__all__ = ["LangChainTracerV1"]

View File

@@ -1,96 +0,0 @@
"""**Chains** are easily reusable components linked together.
Chains encode a sequence of calls to components like models, document retrievers,
other Chains, etc., and provide a simple interface to this sequence.
The Chain interface makes it easy to create apps that are:
- **Stateful:** add Memory to any Chain to give it state,
- **Observable:** pass Callbacks to a Chain to execute additional functionality,
like logging, outside the main sequence of component calls,
- **Composable:** combine Chains with other components, including other Chains.
**Class hierarchy:**
.. code-block::
Chain --> <name>Chain # Examples: LLMChain, MapReduceChain, RouterChain
"""
from typing import Any
from langchain._api import create_importer
_module_lookup = {
"APIChain": "langchain.chains.api.base",
"OpenAPIEndpointChain": "langchain_community.chains.openapi.chain",
"AnalyzeDocumentChain": "langchain.chains.combine_documents.base",
"MapReduceDocumentsChain": "langchain.chains.combine_documents.map_reduce",
"MapRerankDocumentsChain": "langchain.chains.combine_documents.map_rerank",
"ReduceDocumentsChain": "langchain.chains.combine_documents.reduce",
"RefineDocumentsChain": "langchain.chains.combine_documents.refine",
"StuffDocumentsChain": "langchain.chains.combine_documents.stuff",
"ConstitutionalChain": "langchain.chains.constitutional_ai.base",
"ConversationChain": "langchain.chains.conversation.base",
"ChatVectorDBChain": "langchain.chains.conversational_retrieval.base",
"ConversationalRetrievalChain": "langchain.chains.conversational_retrieval.base",
"generate_example": "langchain.chains.example_generator",
"FlareChain": "langchain.chains.flare.base",
"ArangoGraphQAChain": "langchain_community.chains.graph_qa.arangodb",
"GraphQAChain": "langchain_community.chains.graph_qa.base",
"GraphCypherQAChain": "langchain_community.chains.graph_qa.cypher",
"FalkorDBQAChain": "langchain_community.chains.graph_qa.falkordb",
"HugeGraphQAChain": "langchain_community.chains.graph_qa.hugegraph",
"KuzuQAChain": "langchain_community.chains.graph_qa.kuzu",
"NebulaGraphQAChain": "langchain_community.chains.graph_qa.nebulagraph",
"NeptuneOpenCypherQAChain": "langchain_community.chains.graph_qa.neptune_cypher",
"NeptuneSparqlQAChain": "langchain_community.chains.graph_qa.neptune_sparql",
"OntotextGraphDBQAChain": "langchain_community.chains.graph_qa.ontotext_graphdb",
"GraphSparqlQAChain": "langchain_community.chains.graph_qa.sparql",
"create_history_aware_retriever": "langchain.chains.history_aware_retriever",
"HypotheticalDocumentEmbedder": "langchain.chains.hyde.base",
"LLMChain": "langchain.chains.llm",
"LLMCheckerChain": "langchain.chains.llm_checker.base",
"LLMMathChain": "langchain.chains.llm_math.base",
"LLMRequestsChain": "langchain_community.chains.llm_requests",
"LLMSummarizationCheckerChain": "langchain.chains.llm_summarization_checker.base",
"load_chain": "langchain.chains.loading",
"MapReduceChain": "langchain.chains.mapreduce",
"OpenAIModerationChain": "langchain.chains.moderation",
"NatBotChain": "langchain.chains.natbot.base",
"create_citation_fuzzy_match_chain": "langchain.chains.openai_functions",
"create_citation_fuzzy_match_runnable": "langchain.chains.openai_functions",
"create_extraction_chain": "langchain.chains.openai_functions",
"create_extraction_chain_pydantic": "langchain.chains.openai_functions",
"create_qa_with_sources_chain": "langchain.chains.openai_functions",
"create_qa_with_structure_chain": "langchain.chains.openai_functions",
"create_tagging_chain": "langchain.chains.openai_functions",
"create_tagging_chain_pydantic": "langchain.chains.openai_functions",
"QAGenerationChain": "langchain.chains.qa_generation.base",
"QAWithSourcesChain": "langchain.chains.qa_with_sources.base",
"RetrievalQAWithSourcesChain": "langchain.chains.qa_with_sources.retrieval",
"VectorDBQAWithSourcesChain": "langchain.chains.qa_with_sources.vector_db",
"create_retrieval_chain": "langchain.chains.retrieval",
"RetrievalQA": "langchain.chains.retrieval_qa.base",
"VectorDBQA": "langchain.chains.retrieval_qa.base",
"LLMRouterChain": "langchain.chains.router",
"MultiPromptChain": "langchain.chains.router",
"MultiRetrievalQAChain": "langchain.chains.router",
"MultiRouteChain": "langchain.chains.router",
"RouterChain": "langchain.chains.router",
"SequentialChain": "langchain.chains.sequential",
"SimpleSequentialChain": "langchain.chains.sequential",
"create_sql_query_chain": "langchain.chains.sql_database.query",
"create_structured_output_runnable": "langchain.chains.structured_output",
"load_summarize_chain": "langchain.chains.summarize",
"TransformChain": "langchain.chains.transform",
}
importer = create_importer(__package__, module_lookup=_module_lookup)
def __getattr__(name: str) -> Any:
return importer(name)
__all__ = list(_module_lookup.keys())

View File

@@ -1,3 +0,0 @@
from langchain.chains.elasticsearch_database.base import ElasticsearchDatabaseChain
__all__ = ["ElasticsearchDatabaseChain"]

View File

@@ -1,3 +0,0 @@
from langchain.chains.openai_tools.extraction import create_extraction_chain_pydantic
__all__ = ["create_extraction_chain_pydantic"]

View File

@@ -1,3 +0,0 @@
from langchain.chains.query_constructor.base import load_query_constructor_runnable
__all__ = ["load_query_constructor_runnable"]

View File

@@ -1,6 +0,0 @@
from langchain.chains.question_answering.chain import LoadingCallable, load_qa_chain
__all__ = [
"LoadingCallable",
"load_qa_chain",
]

View File

@@ -1,12 +0,0 @@
from langchain.chains.router.base import MultiRouteChain, RouterChain
from langchain.chains.router.llm_router import LLMRouterChain
from langchain.chains.router.multi_prompt import MultiPromptChain
from langchain.chains.router.multi_retrieval_qa import MultiRetrievalQAChain
__all__ = [
"LLMRouterChain",
"MultiPromptChain",
"MultiRetrievalQAChain",
"MultiRouteChain",
"RouterChain",
]

View File

@@ -1,5 +0,0 @@
"""Chains for evaluating ReAct style agents."""
from langchain.evaluation.agents.trajectory_eval_chain import TrajectoryEvalChain
__all__ = ["TrajectoryEvalChain"]

View File

@@ -1,180 +0,0 @@
"""Global values and configuration that apply to all of LangChain."""
import warnings
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from langchain_core.caches import BaseCache
# DO NOT USE THESE VALUES DIRECTLY!
# Use them only via `get_<X>()` and `set_<X>()` below,
# or else your code may behave unexpectedly with other uses of these global settings:
# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
_verbose: bool = False
_debug: bool = False
_llm_cache: Optional["BaseCache"] = None
def set_verbose(
value: bool, # noqa: FBT001
) -> None:
"""Set a new value for the `verbose` global setting."""
import langchain
# We're about to run some deprecated code, don't report warnings from it.
# The user called the correct (non-deprecated) code path and shouldn't get warnings.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=(
"Importing verbose from langchain root module is no longer supported"
),
)
# N.B.: This is a workaround for an unfortunate quirk of Python's
# module-level `__getattr__()` implementation:
# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
#
# Remove it once `langchain.verbose` is no longer supported, and once all users
# have migrated to using `set_verbose()` here.
langchain.verbose = value
global _verbose # noqa: PLW0603
_verbose = value
def get_verbose() -> bool:
"""Get the value of the `verbose` global setting."""
import langchain
# We're about to run some deprecated code, don't report warnings from it.
# The user called the correct (non-deprecated) code path and shouldn't get warnings.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=(
"Importing verbose from langchain root module is no longer supported"
),
)
# N.B.: This is a workaround for an unfortunate quirk of Python's
# module-level `__getattr__()` implementation:
# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
#
# Remove it once `langchain.verbose` is no longer supported, and once all users
# have migrated to using `set_verbose()` here.
#
# In the meantime, the `verbose` setting is considered True if either the old
# or the new value are True. This accommodates users who haven't migrated
# to using `set_verbose()` yet. Those users are getting deprecation warnings
# directing them to use `set_verbose()` when they import `langchain.verbose`.
old_verbose = langchain.verbose
return _verbose or old_verbose
def set_debug(
value: bool, # noqa: FBT001
) -> None:
"""Set a new value for the `debug` global setting."""
import langchain
# We're about to run some deprecated code, don't report warnings from it.
# The user called the correct (non-deprecated) code path and shouldn't get warnings.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="Importing debug from langchain root module is no longer supported",
)
# N.B.: This is a workaround for an unfortunate quirk of Python's
# module-level `__getattr__()` implementation:
# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
#
# Remove it once `langchain.debug` is no longer supported, and once all users
# have migrated to using `set_debug()` here.
langchain.debug = value
global _debug # noqa: PLW0603
_debug = value
def get_debug() -> bool:
"""Get the value of the `debug` global setting."""
import langchain
# We're about to run some deprecated code, don't report warnings from it.
# The user called the correct (non-deprecated) code path and shouldn't get warnings.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="Importing debug from langchain root module is no longer supported",
)
# N.B.: This is a workaround for an unfortunate quirk of Python's
# module-level `__getattr__()` implementation:
# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
#
# Remove it once `langchain.debug` is no longer supported, and once all users
# have migrated to using `set_debug()` here.
#
# In the meantime, the `debug` setting is considered True if either the old
# or the new value are True. This accommodates users who haven't migrated
# to using `set_debug()` yet. Those users are getting deprecation warnings
# directing them to use `set_debug()` when they import `langchain.debug`.
old_debug = langchain.debug
return _debug or old_debug
def set_llm_cache(value: Optional["BaseCache"]) -> None:
"""Set a new LLM cache, overwriting the previous value, if any."""
import langchain
# We're about to run some deprecated code, don't report warnings from it.
# The user called the correct (non-deprecated) code path and shouldn't get warnings.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=(
"Importing llm_cache from langchain root module is no longer supported"
),
)
# N.B.: This is a workaround for an unfortunate quirk of Python's
# module-level `__getattr__()` implementation:
# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
#
# Remove it once `langchain.llm_cache` is no longer supported, and
# once all users have migrated to using `set_llm_cache()` here.
langchain.llm_cache = value
global _llm_cache # noqa: PLW0603
_llm_cache = value
def get_llm_cache() -> "BaseCache":
"""Get the value of the `llm_cache` global setting."""
import langchain
# We're about to run some deprecated code, don't report warnings from it.
# The user called the correct (non-deprecated) code path and shouldn't get warnings.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=(
"Importing llm_cache from langchain root module is no longer supported"
),
)
# N.B.: This is a workaround for an unfortunate quirk of Python's
# module-level `__getattr__()` implementation:
# https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004
#
# Remove it once `langchain.llm_cache` is no longer supported, and
# once all users have migrated to using `set_llm_cache()` here.
#
# In the meantime, the `llm_cache` setting returns whichever of
# its two backing sources is truthy (not `None` and non-empty),
# or the old value if both are falsy. This accommodates users
# who haven't migrated to using `set_llm_cache()` yet.
# Those users are getting deprecation warnings directing them
# to use `set_llm_cache()` when they import `langchain.llm_cache`.
old_llm_cache = langchain.llm_cache
return _llm_cache or old_llm_cache
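The accessors removed here have long had equivalents in `langchain_core.globals`, so callers can switch imports, assuming that remains the supported path:

```python
from langchain_core.caches import InMemoryCache
from langchain_core.globals import get_debug, set_debug, set_llm_cache

set_debug(True)
assert get_debug()

set_llm_cache(InMemoryCache())
```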

View File

@@ -1,3 +0,0 @@
from langchain_core.prompts.pipeline import PipelinePromptTemplate, _get_inputs
__all__ = ["PipelinePromptTemplate", "_get_inputs"]

View File

@@ -1,3 +0,0 @@
from langchain_core.tracers.langchain_v1 import LangChainTracerV1, get_headers
__all__ = ["LangChainTracerV1", "get_headers"]

View File

@@ -1,4 +0,0 @@
from langchain_core.utils.loading import try_load_from_hub
# For backwards compatibility
__all__ = ["try_load_from_hub"]

View File

@@ -1,3 +0,0 @@
from langchain_core.utils.loading import try_load_from_hub
__all__ = ["try_load_from_hub"]

View File

@@ -16,7 +16,7 @@ del metadata # optional, avoids polluting the results of dir(__package__)
def _warn_on_import(name: str, replacement: Optional[str] = None) -> None:
"""Warn on import of deprecated module."""
from langchain._api.interactive_env import is_interactive_env
from langchain_classic._api.interactive_env import is_interactive_env
if is_interactive_env():
# No warnings for interactive environments.
@@ -38,33 +38,35 @@ def _warn_on_import(name: str, replacement: Optional[str] = None) -> None:
)
# Surfaces Deprecation and Pending Deprecation warnings from langchain.
# Surfaces Deprecation and Pending Deprecation warnings from langchain_classic.
surface_langchain_deprecation_warnings()
def __getattr__(name: str) -> Any:
if name == "MRKLChain":
from langchain.agents import MRKLChain
from langchain_classic.agents import MRKLChain
_warn_on_import(name, replacement="langchain.agents.MRKLChain")
_warn_on_import(name, replacement="langchain_classic.agents.MRKLChain")
return MRKLChain
if name == "ReActChain":
from langchain.agents import ReActChain
from langchain_classic.agents import ReActChain
_warn_on_import(name, replacement="langchain.agents.ReActChain")
_warn_on_import(name, replacement="langchain_classic.agents.ReActChain")
return ReActChain
if name == "SelfAskWithSearchChain":
from langchain.agents import SelfAskWithSearchChain
from langchain_classic.agents import SelfAskWithSearchChain
_warn_on_import(name, replacement="langchain.agents.SelfAskWithSearchChain")
_warn_on_import(
name, replacement="langchain_classic.agents.SelfAskWithSearchChain"
)
return SelfAskWithSearchChain
if name == "ConversationChain":
from langchain.chains import ConversationChain
from langchain_classic.chains import ConversationChain
_warn_on_import(name, replacement="langchain.chains.ConversationChain")
_warn_on_import(name, replacement="langchain_classic.chains.ConversationChain")
return ConversationChain
if name == "LLMBashChain":
@@ -79,51 +81,53 @@ def __getattr__(name: str) -> Any:
raise ImportError(msg)
if name == "LLMChain":
from langchain.chains import LLMChain
from langchain_classic.chains import LLMChain
_warn_on_import(name, replacement="langchain.chains.LLMChain")
_warn_on_import(name, replacement="langchain_classic.chains.LLMChain")
return LLMChain
if name == "LLMCheckerChain":
from langchain.chains import LLMCheckerChain
from langchain_classic.chains import LLMCheckerChain
_warn_on_import(name, replacement="langchain.chains.LLMCheckerChain")
_warn_on_import(name, replacement="langchain_classic.chains.LLMCheckerChain")
return LLMCheckerChain
if name == "LLMMathChain":
from langchain.chains import LLMMathChain
from langchain_classic.chains import LLMMathChain
_warn_on_import(name, replacement="langchain.chains.LLMMathChain")
_warn_on_import(name, replacement="langchain_classic.chains.LLMMathChain")
return LLMMathChain
if name == "QAWithSourcesChain":
from langchain.chains import QAWithSourcesChain
from langchain_classic.chains import QAWithSourcesChain
_warn_on_import(name, replacement="langchain.chains.QAWithSourcesChain")
_warn_on_import(name, replacement="langchain_classic.chains.QAWithSourcesChain")
return QAWithSourcesChain
if name == "VectorDBQA":
from langchain.chains import VectorDBQA
from langchain_classic.chains import VectorDBQA
_warn_on_import(name, replacement="langchain.chains.VectorDBQA")
_warn_on_import(name, replacement="langchain_classic.chains.VectorDBQA")
return VectorDBQA
if name == "VectorDBQAWithSourcesChain":
from langchain.chains import VectorDBQAWithSourcesChain
from langchain_classic.chains import VectorDBQAWithSourcesChain
_warn_on_import(name, replacement="langchain.chains.VectorDBQAWithSourcesChain")
_warn_on_import(
name, replacement="langchain_classic.chains.VectorDBQAWithSourcesChain"
)
return VectorDBQAWithSourcesChain
if name == "InMemoryDocstore":
from langchain_community.docstore import InMemoryDocstore
_warn_on_import(name, replacement="langchain.docstore.InMemoryDocstore")
_warn_on_import(name, replacement="langchain_classic.docstore.InMemoryDocstore")
return InMemoryDocstore
if name == "Wikipedia":
from langchain_community.docstore import Wikipedia
_warn_on_import(name, replacement="langchain.docstore.Wikipedia")
_warn_on_import(name, replacement="langchain_classic.docstore.Wikipedia")
return Wikipedia
if name == "Anthropic":
@@ -366,39 +370,6 @@ def __getattr__(name: str) -> Any:
)
return SerpAPIWrapper
if name == "verbose":
from langchain.globals import _verbose
_warn_on_import(
name,
replacement=(
"langchain.globals.set_verbose() / langchain.globals.get_verbose()"
),
)
return _verbose
if name == "debug":
from langchain.globals import _debug
_warn_on_import(
name,
replacement=(
"langchain.globals.set_debug() / langchain.globals.get_debug()"
),
)
return _debug
if name == "llm_cache":
from langchain.globals import _llm_cache
_warn_on_import(
name,
replacement=(
"langchain.globals.set_llm_cache() / langchain.globals.get_llm_cache()"
),
)
return _llm_cache
msg = f"Could not find: {name}"
raise AttributeError(msg)
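The rename means downstream imports of these legacy symbols move in lockstep, e.g.:

```python
# Before the rename:
# from langchain.chains import LLMChain

# After, the legacy implementations live under langchain_classic:
from langchain_classic.chains import LLMChain
```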

View File

@@ -3,12 +3,12 @@ from typing import Any, Callable, Optional
from langchain_core._api import internal, warn_deprecated
from langchain._api.interactive_env import is_interactive_env
from langchain_classic._api.interactive_env import is_interactive_env
ALLOWED_TOP_LEVEL_PKGS = {
"langchain_community",
"langchain_core",
"langchain",
"langchain_classic",
}

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.adapters.openai import (

View File

@@ -35,8 +35,8 @@ from langchain_core._api.path import as_import_path
from langchain_core.tools import Tool
from langchain_core.tools.convert import tool
-from langchain._api import create_importer
-from langchain.agents.agent import (
+from langchain_classic._api import create_importer
+from langchain_classic.agents.agent import (
    Agent,
    AgentExecutor,
    AgentOutputParser,
@@ -44,36 +44,38 @@ from langchain.agents.agent import (
    BaseSingleActionAgent,
    LLMSingleActionAgent,
)
-from langchain.agents.agent_iterator import AgentExecutorIterator
-from langchain.agents.agent_toolkits.vectorstore.base import (
+from langchain_classic.agents.agent_iterator import AgentExecutorIterator
+from langchain_classic.agents.agent_toolkits.vectorstore.base import (
    create_vectorstore_agent,
    create_vectorstore_router_agent,
)
-from langchain.agents.agent_types import AgentType
-from langchain.agents.conversational.base import ConversationalAgent
-from langchain.agents.conversational_chat.base import ConversationalChatAgent
-from langchain.agents.initialize import initialize_agent
-from langchain.agents.json_chat.base import create_json_chat_agent
-from langchain.agents.loading import load_agent
-from langchain.agents.mrkl.base import MRKLChain, ZeroShotAgent
-from langchain.agents.openai_functions_agent.base import (
+from langchain_classic.agents.agent_types import AgentType
+from langchain_classic.agents.conversational.base import ConversationalAgent
+from langchain_classic.agents.conversational_chat.base import ConversationalChatAgent
+from langchain_classic.agents.initialize import initialize_agent
+from langchain_classic.agents.json_chat.base import create_json_chat_agent
+from langchain_classic.agents.loading import load_agent
+from langchain_classic.agents.mrkl.base import MRKLChain, ZeroShotAgent
+from langchain_classic.agents.openai_functions_agent.base import (
    OpenAIFunctionsAgent,
    create_openai_functions_agent,
)
-from langchain.agents.openai_functions_multi_agent.base import OpenAIMultiFunctionsAgent
-from langchain.agents.openai_tools.base import create_openai_tools_agent
-from langchain.agents.react.agent import create_react_agent
-from langchain.agents.react.base import ReActChain, ReActTextWorldAgent
-from langchain.agents.self_ask_with_search.base import (
+from langchain_classic.agents.openai_functions_multi_agent.base import (
+    OpenAIMultiFunctionsAgent,
+)
+from langchain_classic.agents.openai_tools.base import create_openai_tools_agent
+from langchain_classic.agents.react.agent import create_react_agent
+from langchain_classic.agents.react.base import ReActChain, ReActTextWorldAgent
+from langchain_classic.agents.self_ask_with_search.base import (
    SelfAskWithSearchChain,
    create_self_ask_with_search_agent,
)
-from langchain.agents.structured_chat.base import (
+from langchain_classic.agents.structured_chat.base import (
    StructuredChatAgent,
    create_structured_chat_agent,
)
-from langchain.agents.tool_calling_agent.base import create_tool_calling_agent
-from langchain.agents.xml.base import XMLAgent, create_xml_agent
+from langchain_classic.agents.tool_calling_agent.base import create_tool_calling_agent
+from langchain_classic.agents.xml.base import XMLAgent, create_xml_agent
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.json.base import create_json_agent
@@ -125,7 +127,7 @@ def __getattr__(name: str) -> Any:
            suffix=name,
            relative_to=here,
        )
-        old_path = "langchain." + relative_path
+        old_path = "langchain_classic." + relative_path
        new_path = "langchain_experimental." + relative_path
        msg = (
            f"{name} has been moved to langchain experimental. "

View File

@@ -44,13 +44,13 @@ from langchain_core.utils.input import get_color_mapping
from pydantic import BaseModel, ConfigDict, model_validator
from typing_extensions import Self, override
-from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
-from langchain.agents.agent_iterator import AgentExecutorIterator
-from langchain.agents.agent_types import AgentType
-from langchain.agents.tools import InvalidTool
-from langchain.chains.base import Chain
-from langchain.chains.llm import LLMChain
-from langchain.utilities.asyncio import asyncio_timeout
+from langchain_classic._api.deprecation import AGENT_DEPRECATION_WARNING
+from langchain_classic.agents.agent_iterator import AgentExecutorIterator
+from langchain_classic.agents.agent_types import AgentType
+from langchain_classic.agents.tools import InvalidTool
+from langchain_classic.chains.base import Chain
+from langchain_classic.chains.llm import LLMChain
+from langchain_classic.utilities.asyncio import asyncio_timeout
logger = logging.getLogger(__name__)

View File

@@ -30,11 +30,11 @@ from langchain_core.runnables.utils import AddableDict
from langchain_core.tools import BaseTool
from langchain_core.utils.input import get_color_mapping
-from langchain.schema import RUN_KEY
-from langchain.utilities.asyncio import asyncio_timeout
+from langchain_classic.schema import RUN_KEY
+from langchain_classic.utilities.asyncio import asyncio_timeout
if TYPE_CHECKING:
-    from langchain.agents.agent import AgentExecutor, NextStepOutput
+    from langchain_classic.agents.agent import AgentExecutor, NextStepOutput
logger = logging.getLogger(__name__)

View File

@@ -20,15 +20,15 @@ from typing import TYPE_CHECKING, Any
from langchain_core._api.path import as_import_path
from langchain_core.tools.retriever import create_retriever_tool
-from langchain._api import create_importer
-from langchain.agents.agent_toolkits.conversational_retrieval.openai_functions import (
+from langchain_classic._api import create_importer
+from langchain_classic.agents.agent_toolkits.conversational_retrieval.openai_functions import (  # noqa: E501
    create_conversational_retrieval_agent,
)
-from langchain.agents.agent_toolkits.vectorstore.base import (
+from langchain_classic.agents.agent_toolkits.vectorstore.base import (
    create_vectorstore_agent,
    create_vectorstore_router_agent,
)
-from langchain.agents.agent_toolkits.vectorstore.toolkit import (
+from langchain_classic.agents.agent_toolkits.vectorstore.toolkit import (
    VectorStoreInfo,
    VectorStoreRouterToolkit,
    VectorStoreToolkit,
@@ -119,7 +119,7 @@ def __getattr__(name: str) -> Any:
    """Get attr name."""
    if name in DEPRECATED_AGENTS:
        relative_path = as_import_path(Path(__file__).parent, suffix=name)
-        old_path = "langchain." + relative_path
+        old_path = "langchain_classic." + relative_path
        new_path = "langchain_experimental." + relative_path
        msg = (
            f"{name} has been moved to langchain experimental. "

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.ainetwork.toolkit import AINetworkToolkit

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.azure_cognitive_services import (

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit

View File

@@ -6,12 +6,12 @@ from langchain_core.messages import SystemMessage
from langchain_core.prompts.chat import MessagesPlaceholder
from langchain_core.tools import BaseTool
-from langchain.agents.agent import AgentExecutor
-from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
+from langchain_classic.agents.agent import AgentExecutor
+from langchain_classic.agents.openai_functions_agent.agent_token_buffer_memory import (
    AgentTokenBufferMemory,
)
-from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
-from langchain.memory.token_buffer import ConversationTokenBufferMemory
+from langchain_classic.agents.openai_functions_agent.base import OpenAIFunctionsAgent
+from langchain_classic.memory.token_buffer import ConversationTokenBufferMemory
def _get_default_system_message() -> SystemMessage:

View File

@@ -0,0 +1,3 @@
+from langchain_classic.tools.retriever import create_retriever_tool
+
+__all__ = ["create_retriever_tool"]

View File

@@ -12,7 +12,7 @@ def __getattr__(name: str) -> Any:
        "and https://github.com/langchain-ai/langchain/discussions/11680"
        "To keep using this code as is, install langchain experimental and "
        "update your import statement from:\n "
-        f"`langchain.agents.agent_toolkits.csv.{name}` to "
+        f"`langchain_classic.agents.agent_toolkits.csv.{name}` to "
        f"`langchain_experimental.agents.agent_toolkits.{name}`."
    )
    raise ImportError(msg)

View File

@@ -2,7 +2,7 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.file_management.toolkit import (

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.file_management.toolkit import (

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.github.toolkit import (

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.gitlab.toolkit import GitLabToolkit

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.gmail.toolkit import GmailToolkit

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.json.base import create_json_agent

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.json.toolkit import JsonToolkit

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.multion.toolkit import MultionToolkit

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.nasa.toolkit import NasaToolkit

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.nla.tool import NLATool

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.nla.toolkit import NLAToolkit

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.office365.toolkit import O365Toolkit

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.openapi.base import create_openapi_agent

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.openapi.planner import (

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.openapi.planner_prompt import (

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any
-from langchain._api import create_importer
+from langchain_classic._api import create_importer
if TYPE_CHECKING:
    from langchain_community.agent_toolkits.openapi.prompt import (

Some files were not shown because too many files have changed in this diff.