mirror of
https://github.com/hwchase17/langchain.git
synced 2026-02-07 09:40:07 +00:00
Compare commits
1 Commits
sr/agent-i
...
mdrxy/cods
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
264423f487 |
1
.github/workflows/check_diffs.yml
vendored
1
.github/workflows/check_diffs.yml
vendored
@@ -54,6 +54,7 @@ jobs:
|
||||
dependencies: ${{ steps.set-matrix.outputs.dependencies }}
|
||||
test-doc-imports: ${{ steps.set-matrix.outputs.test-doc-imports }}
|
||||
test-pydantic: ${{ steps.set-matrix.outputs.test-pydantic }}
|
||||
codspeed: ${{ steps.set-matrix.outputs.codspeed }}
|
||||
# Run linting only on packages that have changed files
|
||||
lint:
|
||||
needs: [ build ]
|
||||
|
||||
53
.github/workflows/codspeed.yml
vendored
53
.github/workflows/codspeed.yml
vendored
@@ -17,22 +17,37 @@ env:
|
||||
FIREWORKS_API_KEY: foo
|
||||
|
||||
jobs:
|
||||
codspeed:
|
||||
name: 'Benchmark'
|
||||
# This job analyzes which files changed to determine which packages need codspeed runs
|
||||
build:
|
||||
name: 'Detect Changes for Codspeed'
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'codspeed-ignore') }}
|
||||
steps:
|
||||
- name: '📋 Checkout Code'
|
||||
uses: actions/checkout@v4
|
||||
- name: '🐍 Setup Python 3.11'
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
- name: '📂 Get Changed Files'
|
||||
id: files
|
||||
uses: Ana06/get-changed-files@v2.3.0
|
||||
- name: '🔍 Analyze Changed Files & Generate Codspeed Matrix'
|
||||
id: set-matrix
|
||||
run: |
|
||||
python -m pip install packaging requests
|
||||
python .github/scripts/check_diff.py ${{ steps.files.outputs.all }} >> $GITHUB_OUTPUT
|
||||
outputs:
|
||||
codspeed: ${{ steps.set-matrix.outputs.codspeed }}
|
||||
|
||||
codspeed:
|
||||
name: 'Benchmark'
|
||||
needs: [ build ]
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ needs.build.outputs.codspeed != '[]' }}
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- working-directory: libs/core
|
||||
mode: walltime
|
||||
- working-directory: libs/partners/openai
|
||||
- working-directory: libs/partners/anthropic
|
||||
- working-directory: libs/partners/deepseek
|
||||
- working-directory: libs/partners/fireworks
|
||||
- working-directory: libs/partners/xai
|
||||
- working-directory: libs/partners/mistralai
|
||||
- working-directory: libs/partners/groq
|
||||
job-configs: ${{ fromJson(needs.build.outputs.codspeed) }}
|
||||
fail-fast: false
|
||||
|
||||
steps:
|
||||
@@ -42,25 +57,25 @@ jobs:
|
||||
- name: '📦 Install UV Package Manager'
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
python-version: "3.12"
|
||||
python-version: ${{ matrix.job-configs.python-version }}
|
||||
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.12"
|
||||
python-version: ${{ matrix.job-configs.python-version }}
|
||||
|
||||
- name: '📦 Install Test Dependencies'
|
||||
run: uv sync --group test
|
||||
working-directory: ${{ matrix.working-directory }}
|
||||
working-directory: ${{ matrix.job-configs.working-directory }}
|
||||
|
||||
- name: '⚡ Run Benchmarks: ${{ matrix.working-directory }}'
|
||||
- name: '⚡ Run Benchmarks: ${{ matrix.job-configs.working-directory }}'
|
||||
uses: CodSpeedHQ/action@v3
|
||||
with:
|
||||
token: ${{ secrets.CODSPEED_TOKEN }}
|
||||
run: |
|
||||
cd ${{ matrix.working-directory }}
|
||||
if [ "${{ matrix.working-directory }}" = "libs/core" ]; then
|
||||
cd ${{ matrix.job-configs.working-directory }}
|
||||
if [ "${{ matrix.job-configs.working-directory }}" = "libs/core" ]; then
|
||||
uv run --no-sync pytest ./tests/benchmarks --codspeed
|
||||
else
|
||||
uv run --no-sync pytest ./tests/ --codspeed
|
||||
fi
|
||||
mode: ${{ matrix.mode || 'instrumentation' }}
|
||||
mode: ${{ matrix.job-configs.working-directory == 'libs/core' && 'walltime' || 'instrumentation' }}
|
||||
|
||||
@@ -43,7 +43,7 @@ interface for models, embeddings, vector stores, and more.
|
||||
Use LangChain for:
|
||||
|
||||
- **Real-time data augmentation**. Easily connect LLMs to diverse data sources and
|
||||
external/internal systems, drawing from LangChain’s vast library of integrations with
|
||||
external / internal systems, drawing from LangChain’s vast library of integrations with
|
||||
model providers, tools, vector stores, retrievers, and more.
|
||||
- **Model interoperability**. Swap models in and out as your engineering team
|
||||
experiments to find the best choice for your application’s needs. As the industry
|
||||
@@ -58,7 +58,7 @@ applications.
|
||||
|
||||
To improve your LLM application development, pair LangChain with:
|
||||
|
||||
- [LangSmith](https://www.langchain.com/langsmith) - Helpful for agent evals and
|
||||
- [LangSmith](http://www.langchain.com/langsmith) - Helpful for agent evals and
|
||||
observability. Debug poor-performing LLM app runs, evaluate agent trajectories, gain
|
||||
visibility in production, and improve performance over time.
|
||||
- [LangGraph](https://langchain-ai.github.io/langgraph/) - Build agents that can
|
||||
@@ -67,7 +67,8 @@ framework. LangGraph offers customizable architecture, long-term memory, and
|
||||
human-in-the-loop workflows — and is trusted in production by companies like LinkedIn,
|
||||
Uber, Klarna, and GitLab.
|
||||
- [LangGraph Platform](https://docs.langchain.com/langgraph-platform) - Deploy
|
||||
and scale agents effortlessly with a purpose-built deployment platform for long-running, stateful workflows. Discover, reuse, configure, and share agents across
|
||||
and scale agents effortlessly with a purpose-built deployment platform for long
|
||||
running, stateful workflows. Discover, reuse, configure, and share agents across
|
||||
teams — and iterate quickly with visual prototyping in
|
||||
[LangGraph Studio](https://langchain-ai.github.io/langgraph/concepts/langgraph_studio/).
|
||||
|
||||
@@ -82,4 +83,4 @@ concepts behind the LangChain framework.
|
||||
- [LangChain Forum](https://forum.langchain.com/): Connect with the community and share all of your technical questions, ideas, and feedback.
|
||||
- [API Reference](https://python.langchain.com/api_reference/): Detailed reference on
|
||||
navigating base packages and integrations for LangChain.
|
||||
- [Chat LangChain](https://chat.langchain.com/): Ask questions & chat with our documentation.
|
||||
- [Chat LangChain](https://chat.langchain.com/): Ask questions & chat with our documentation
|
||||
|
||||
@@ -4,9 +4,9 @@ LangChain has a large ecosystem of integrations with various external resources
|
||||
|
||||
## Best practices
|
||||
|
||||
When building such applications, developers should remember to follow good security practices:
|
||||
When building such applications developers should remember to follow good security practices:
|
||||
|
||||
* [**Limit Permissions**](https://en.wikipedia.org/wiki/Principle_of_least_privilege): Scope permissions specifically to the application's need. Granting broad or excessive permissions can introduce significant security vulnerabilities. To avoid such vulnerabilities, consider using read-only credentials, disallowing access to sensitive resources, using sandboxing techniques (such as running inside a container), specifying proxy configurations to control external requests, etc., as appropriate for your application.
|
||||
* [**Limit Permissions**](https://en.wikipedia.org/wiki/Principle_of_least_privilege): Scope permissions specifically to the application's need. Granting broad or excessive permissions can introduce significant security vulnerabilities. To avoid such vulnerabilities, consider using read-only credentials, disallowing access to sensitive resources, using sandboxing techniques (such as running inside a container), specifying proxy configurations to control external requests, etc. as appropriate for your application.
|
||||
* **Anticipate Potential Misuse**: Just as humans can err, so can Large Language Models (LLMs). Always assume that any system access or credentials may be used in any way allowed by the permissions they are assigned. For example, if a pair of database credentials allows deleting data, it's safest to assume that any LLM able to use those credentials may in fact delete data.
|
||||
* [**Defense in Depth**](https://en.wikipedia.org/wiki/Defense_in_depth_(computing)): No security technique is perfect. Fine-tuning and good chain design can reduce, but not eliminate, the odds that a Large Language Model (LLM) may make a mistake. It's best to combine multiple layered security approaches rather than relying on any single layer of defense to ensure security. For example: use both read-only permissions and sandboxing to ensure that LLMs are only able to access data that is explicitly meant for them to use.
|
||||
|
||||
@@ -67,7 +67,8 @@ All out of scope targets defined by huntr as well as:
|
||||
for more details, but generally tools interact with the real world. Developers are
|
||||
expected to understand the security implications of their code and are responsible
|
||||
for the security of their tools.
|
||||
* Code documented with security notices. This will be decided on a case-by-case basis, but likely will not be eligible for a bounty as the code is already
|
||||
* Code documented with security notices. This will be decided on a case by
|
||||
case basis, but likely will not be eligible for a bounty as the code is already
|
||||
documented with guidelines for developers that should be followed for making their
|
||||
application secure.
|
||||
* Any LangSmith related repositories or APIs (see [Reporting LangSmith Vulnerabilities](#reporting-langsmith-vulnerabilities)).
|
||||
|
||||
@@ -97,7 +97,7 @@ def skip_private_members(app, what, name, obj, skip, options):
|
||||
if hasattr(obj, "__doc__") and obj.__doc__ and ":private:" in obj.__doc__:
|
||||
return True
|
||||
if name == "__init__" and obj.__objclass__ is object:
|
||||
# don't document default init
|
||||
# dont document default init
|
||||
return True
|
||||
return None
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"source": [
|
||||
"# Azure AI Data\n",
|
||||
"\n",
|
||||
">[Azure AI Foundry (formerly Azure AI Studio)](https://ai.azure.com/) provides the capability to upload data assets to cloud storage and register existing data assets from the following sources:\n",
|
||||
">[Azure AI Studio](https://ai.azure.com/) provides the capability to upload data assets to cloud storage and register existing data assets from the following sources:\n",
|
||||
">\n",
|
||||
">- `Microsoft OneLake`\n",
|
||||
">- `Azure Blob Storage`\n",
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"! docker run -d -p 8123:8123 -p 9000:9000 --name langchain-clickhouse-server --ulimit nofile=262144:262144 -e CLICKHOUSE_SKIP_USER_SETUP=1 clickhouse/clickhouse-server:25.7"
|
||||
"! docker run -d -p 8123:8123 -p9000:9000 --name langchain-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server:24.7.6.8"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -310,8 +310,7 @@
|
||||
" where_str=f\"{meta}.source = 'tweet'\",\n",
|
||||
")\n",
|
||||
"for res in results:\n",
|
||||
" page_content, metadata = res\n",
|
||||
" print(f\"* {page_content} [{metadata}]\")"
|
||||
" print(f\"* {res.page_content} [{res.metadata}]\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -88,7 +88,7 @@
|
||||
"The following may help resolve this error:\n",
|
||||
"\n",
|
||||
"- Ensure that all inputs to chat models are an array of LangChain message classes or a supported message-like.\n",
|
||||
" - Check that there is no stringification or other unexpected transformation occurring.\n",
|
||||
" - Check that there is no stringification or other unexpected transformation occuring.\n",
|
||||
"- Check the error's stack trace and add log or debugger statements."
|
||||
]
|
||||
},
|
||||
|
||||
@@ -78,11 +78,6 @@ def _generate_response_from_error(error: BaseException) -> list[ChatGeneration]:
|
||||
if hasattr(error, "response"):
|
||||
response = error.response
|
||||
metadata: dict = {}
|
||||
if hasattr(response, "json"):
|
||||
try:
|
||||
metadata["body"] = response.json()
|
||||
except Exception:
|
||||
metadata["body"] = getattr(response, "text", None)
|
||||
if hasattr(response, "headers"):
|
||||
try:
|
||||
metadata["headers"] = dict(response.headers)
|
||||
|
||||
@@ -246,8 +246,6 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser):
|
||||
_ = tool_call.pop("id")
|
||||
else:
|
||||
try:
|
||||
# This exists purely for backward compatibility / cached messages
|
||||
# All new messages should use `message.tool_calls`
|
||||
raw_tool_calls = copy.deepcopy(message.additional_kwargs["tool_calls"])
|
||||
except KeyError:
|
||||
if self.first_tool_only:
|
||||
|
||||
@@ -999,7 +999,7 @@ async def _astream_events_implementation_v2(
|
||||
continue
|
||||
|
||||
# If it's the end event corresponding to the root runnable
|
||||
# we don't include the input in the event since it's guaranteed
|
||||
# we dont include the input in the event since it's guaranteed
|
||||
# to be included in the first event.
|
||||
if (
|
||||
event["run_id"] == first_event_run_id
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
.PHONY: all clean docs_build docs_clean docs_linkcheck api_docs_build api_docs_clean api_docs_linkcheck format lint test tests test_watch integration_tests help extended_tests start_services stop_services
|
||||
.PHONY: all clean docs_build docs_clean docs_linkcheck api_docs_build api_docs_clean api_docs_linkcheck format lint test tests test_watch integration_tests help extended_tests
|
||||
|
||||
# Default target executed when no arguments are given to make.
|
||||
all: help
|
||||
@@ -7,12 +7,6 @@ all: help
|
||||
# TESTING AND COVERAGE
|
||||
######################
|
||||
|
||||
start_services:
|
||||
docker compose -f tests/unit_tests/agents/compose-postgres.yml -f tests/unit_tests/agents/compose-redis.yml up -V --force-recreate --wait --remove-orphans
|
||||
|
||||
stop_services:
|
||||
docker compose -f tests/unit_tests/agents/compose-postgres.yml -f tests/unit_tests/agents/compose-redis.yml down -v
|
||||
|
||||
# Define a variable for the test file path.
|
||||
TEST_FILE ?= tests/unit_tests/
|
||||
|
||||
@@ -27,32 +21,17 @@ coverage:
|
||||
--cov-report term-missing:skip-covered \
|
||||
$(TEST_FILE)
|
||||
|
||||
test:
|
||||
make start_services && LANGGRAPH_TEST_FAST=0 uv run --group test pytest -n auto --disable-socket --allow-unix-socket $(TEST_FILE) --cov-report term-missing:skip-covered; \
|
||||
EXIT_CODE=$$?; \
|
||||
make stop_services; \
|
||||
exit $$EXIT_CODE
|
||||
|
||||
test_fast:
|
||||
LANGGRAPH_TEST_FAST=1 uv run --group test pytest -n auto --disable-socket --allow-unix-socket $(TEST_FILE)
|
||||
test tests:
|
||||
uv run --group test pytest -n auto --disable-socket --allow-unix-socket $(TEST_FILE)
|
||||
|
||||
extended_tests:
|
||||
make start_services && LANGGRAPH_TEST_FAST=0 uv run --group test pytest --disable-socket --allow-unix-socket --only-extended tests/unit_tests; \
|
||||
EXIT_CODE=$$?; \
|
||||
make stop_services; \
|
||||
exit $$EXIT_CODE
|
||||
uv run --group test pytest --disable-socket --allow-unix-socket --only-extended tests/unit_tests
|
||||
|
||||
test_watch:
|
||||
make start_services && LANGGRAPH_TEST_FAST=0 uv run --group test ptw --snapshot-update --now . -- -x --disable-socket --allow-unix-socket --disable-warnings tests/unit_tests; \
|
||||
EXIT_CODE=$$?; \
|
||||
make stop_services; \
|
||||
exit $$EXIT_CODE
|
||||
uv run --group test ptw --snapshot-update --now . -- -x --disable-socket --allow-unix-socket --disable-warnings tests/unit_tests
|
||||
|
||||
test_watch_extended:
|
||||
make start_services && LANGGRAPH_TEST_FAST=0 uv run --group test ptw --snapshot-update --now . -- -x --disable-socket --allow-unix-socket --only-extended tests/unit_tests; \
|
||||
EXIT_CODE=$$?; \
|
||||
make stop_services; \
|
||||
exit $$EXIT_CODE
|
||||
uv run --group test ptw --snapshot-update --now . -- -x --disable-socket --allow-unix-socket --only-extended tests/unit_tests
|
||||
|
||||
integration_tests:
|
||||
uv run --group test --group test_integration pytest tests/integration_tests
|
||||
@@ -108,8 +87,7 @@ help:
|
||||
@echo 'spell_fix - run codespell on the project and fix the errors'
|
||||
@echo '-- TESTS --'
|
||||
@echo 'coverage - run unit tests and generate coverage report'
|
||||
@echo 'test - run unit tests with all services'
|
||||
@echo 'test_fast - run unit tests with in-memory services only'
|
||||
@echo 'test - run unit tests'
|
||||
@echo 'tests - run unit tests (alias for "make test")'
|
||||
@echo 'test TEST_FILE=<test_file> - run all tests in file'
|
||||
@echo 'extended_tests - run only extended unit tests'
|
||||
|
||||
@@ -32,4 +32,9 @@ def format_document_xml(doc: Document) -> str:
|
||||
if doc.metadata:
|
||||
metadata_items = [f"{k}: {v!s}" for k, v in doc.metadata.items()]
|
||||
metadata_str = f"<metadata>{', '.join(metadata_items)}</metadata>"
|
||||
return f"<document>{id_str}<content>{doc.page_content}</content>{metadata_str}</document>"
|
||||
return (
|
||||
f"<document>{id_str}"
|
||||
f"<content>{doc.page_content}</content>"
|
||||
f"{metadata_str}"
|
||||
f"</document>"
|
||||
)
|
||||
|
||||
@@ -92,7 +92,9 @@ async def aresolve_prompt(
|
||||
str,
|
||||
None,
|
||||
Callable[[StateT, Runtime[ContextT]], list[MessageLikeRepresentation]],
|
||||
Callable[[StateT, Runtime[ContextT]], Awaitable[list[MessageLikeRepresentation]]],
|
||||
Callable[
|
||||
[StateT, Runtime[ContextT]], Awaitable[list[MessageLikeRepresentation]]
|
||||
],
|
||||
],
|
||||
state: StateT,
|
||||
runtime: Runtime[ContextT],
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
"""Agents and abstractions."""
|
||||
|
||||
from langchain.agents.react_agent import AgentState, create_react_agent
|
||||
from langchain.agents.tool_node import ToolNode
|
||||
|
||||
__all__ = ["AgentState", "ToolNode", "create_react_agent"]
|
||||
@@ -1 +0,0 @@
|
||||
"""Internal utilities for agents."""
|
||||
@@ -1,11 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Awaitable
|
||||
from typing import Callable, TypeVar, Union
|
||||
|
||||
from typing_extensions import ParamSpec
|
||||
|
||||
P = ParamSpec("P")
|
||||
R = TypeVar("R")
|
||||
|
||||
SyncOrAsync = Callable[P, Union[R, Awaitable[R]]]
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,404 +0,0 @@
|
||||
"""Types for setting agent response formats."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
import uuid
|
||||
from dataclasses import dataclass, is_dataclass
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Callable,
|
||||
Generic,
|
||||
Literal,
|
||||
TypeVar,
|
||||
Union,
|
||||
get_args,
|
||||
get_origin,
|
||||
)
|
||||
|
||||
from langchain_core.tools import BaseTool, StructuredTool
|
||||
from pydantic import BaseModel, TypeAdapter
|
||||
from typing_extensions import Self, is_typeddict
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Iterable
|
||||
|
||||
from langchain_core.messages import AIMessage
|
||||
|
||||
# Supported schema types: Pydantic models, dataclasses, TypedDict, JSON schema dicts
|
||||
SchemaT = TypeVar("SchemaT")
|
||||
|
||||
|
||||
if sys.version_info >= (3, 10):
|
||||
from types import UnionType
|
||||
else:
|
||||
UnionType = Union
|
||||
|
||||
SchemaKind = Literal["pydantic", "dataclass", "typeddict", "json_schema"]
|
||||
|
||||
|
||||
class StructuredOutputError(Exception):
|
||||
"""Base class for structured output errors."""
|
||||
|
||||
|
||||
class MultipleStructuredOutputsError(StructuredOutputError):
|
||||
"""Raised when model returns multiple structured output tool calls when only one is expected."""
|
||||
|
||||
def __init__(self, tool_names: list[str]) -> None:
|
||||
"""Initialize MultipleStructuredOutputsError."""
|
||||
self.tool_names = tool_names
|
||||
super().__init__(
|
||||
f"Model incorrectly returned multiple structured responses ({', '.join(tool_names)}) when only one is expected."
|
||||
)
|
||||
|
||||
|
||||
class StructuredOutputParsingError(StructuredOutputError):
|
||||
"""Raised when structured output tool call arguments fail to parse according to the schema."""
|
||||
|
||||
def __init__(self, tool_name: str, parse_error: Exception) -> None:
|
||||
"""Initialize StructuredOutputParsingError."""
|
||||
self.tool_name = tool_name
|
||||
self.parse_error = parse_error
|
||||
super().__init__(
|
||||
f"Failed to parse structured output for tool '{tool_name}': {parse_error}."
|
||||
)
|
||||
|
||||
|
||||
def _parse_with_schema(
|
||||
schema: Union[type[SchemaT], dict], schema_kind: SchemaKind, data: dict[str, Any]
|
||||
) -> Any:
|
||||
"""Parse data using for any supported schema type.
|
||||
|
||||
Args:
|
||||
schema: The schema type (Pydantic model, dataclass, or TypedDict)
|
||||
schema_kind: The type of the schema (pydantic, dataclass, typeddict, or json_schema)
|
||||
data: The data to parse
|
||||
|
||||
Returns:
|
||||
The parsed instance according to the schema type
|
||||
|
||||
Raises:
|
||||
ValueError: If parsing fails
|
||||
"""
|
||||
if schema_kind == "json_schema":
|
||||
return data
|
||||
try:
|
||||
adapter: TypeAdapter[SchemaT] = TypeAdapter(schema)
|
||||
return adapter.validate_python(data)
|
||||
except Exception as e:
|
||||
schema_name = getattr(schema, "__name__", str(schema))
|
||||
msg = f"Failed to parse data to {schema_name}: {e}"
|
||||
raise ValueError(msg) from e
|
||||
|
||||
|
||||
@dataclass(init=False)
|
||||
class _SchemaSpec(Generic[SchemaT]):
|
||||
"""Describes a structured output schema."""
|
||||
|
||||
schema: type[SchemaT]
|
||||
"""The schema for the response, can be a Pydantic model, dataclass, TypedDict, or JSON schema dict."""
|
||||
|
||||
name: str
|
||||
"""Name of the schema, used for tool calling.
|
||||
|
||||
If not provided, the name will be the model name or "response_format" if it's a JSON schema.
|
||||
"""
|
||||
|
||||
description: str
|
||||
"""Custom description of the schema.
|
||||
|
||||
If not provided, provided will use the model's docstring.
|
||||
"""
|
||||
|
||||
schema_kind: SchemaKind
|
||||
"""The kind of schema."""
|
||||
|
||||
json_schema: dict[str, Any]
|
||||
"""JSON schema associated with the schema."""
|
||||
|
||||
strict: bool = False
|
||||
"""Whether to enforce strict validation of the schema."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
schema: type[SchemaT],
|
||||
*,
|
||||
name: str | None = None,
|
||||
description: str | None = None,
|
||||
strict: bool = False,
|
||||
) -> None:
|
||||
"""Initialize SchemaSpec with schema and optional parameters."""
|
||||
self.schema = schema
|
||||
|
||||
if name:
|
||||
self.name = name
|
||||
elif isinstance(schema, dict):
|
||||
self.name = str(schema.get("title", f"response_format_{str(uuid.uuid4())[:4]}"))
|
||||
else:
|
||||
self.name = str(getattr(schema, "__name__", f"response_format_{str(uuid.uuid4())[:4]}"))
|
||||
|
||||
self.description = description or (
|
||||
schema.get("description", "")
|
||||
if isinstance(schema, dict)
|
||||
else getattr(schema, "__doc__", None) or ""
|
||||
)
|
||||
|
||||
self.strict = strict
|
||||
|
||||
if isinstance(schema, dict):
|
||||
self.schema_kind = "json_schema"
|
||||
self.json_schema = schema
|
||||
elif isinstance(schema, type) and issubclass(schema, BaseModel):
|
||||
self.schema_kind = "pydantic"
|
||||
self.json_schema = schema.model_json_schema()
|
||||
elif is_dataclass(schema):
|
||||
self.schema_kind = "dataclass"
|
||||
self.json_schema = TypeAdapter(schema).json_schema()
|
||||
elif is_typeddict(schema):
|
||||
self.schema_kind = "typeddict"
|
||||
self.json_schema = TypeAdapter(schema).json_schema()
|
||||
else:
|
||||
msg = (
|
||||
f"Unsupported schema type: {type(schema)}. "
|
||||
f"Supported types: Pydantic models, dataclasses, TypedDicts, and JSON schema dicts."
|
||||
)
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
@dataclass(init=False)
|
||||
class ToolOutput(Generic[SchemaT]):
|
||||
"""Use a tool calling strategy for model responses."""
|
||||
|
||||
schema: type[SchemaT]
|
||||
"""Schema for the tool calls."""
|
||||
|
||||
schema_specs: list[_SchemaSpec[SchemaT]]
|
||||
"""Schema specs for the tool calls."""
|
||||
|
||||
tool_message_content: str | None
|
||||
"""The content of the tool message to be returned when the model calls an artificial structured output tool."""
|
||||
|
||||
handle_errors: Union[
|
||||
bool,
|
||||
str,
|
||||
type[Exception],
|
||||
tuple[type[Exception], ...],
|
||||
Callable[[Exception], str],
|
||||
]
|
||||
"""Error handling strategy for structured output via ToolOutput. Default is True.
|
||||
|
||||
- True: Catch all errors with default error template
|
||||
- str: Catch all errors with this custom message
|
||||
- type[Exception]: Only catch this exception type with default message
|
||||
- tuple[type[Exception], ...]: Only catch these exception types with default message
|
||||
- Callable[[Exception], str]: Custom function that returns error message
|
||||
- False: No retry, let exceptions propagate
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
schema: type[SchemaT],
|
||||
tool_message_content: str | None = None,
|
||||
handle_errors: bool
|
||||
| str
|
||||
| type[Exception]
|
||||
| tuple[type[Exception], ...]
|
||||
| Callable[[Exception], str]
|
||||
| None = None,
|
||||
) -> None:
|
||||
"""Initialize ToolOutput with schemas, tool message content, and error handling strategy."""
|
||||
self.schema = schema
|
||||
self.tool_message_content = tool_message_content
|
||||
if handle_errors is None:
|
||||
self.handle_errors = True
|
||||
else:
|
||||
self.handle_errors = handle_errors
|
||||
|
||||
def _iter_variants(schema: Any) -> Iterable[Any]:
|
||||
"""Yield leaf variants from Union and JSON Schema oneOf."""
|
||||
if get_origin(schema) in (UnionType, Union):
|
||||
for arg in get_args(schema):
|
||||
yield from _iter_variants(arg)
|
||||
return
|
||||
|
||||
if isinstance(schema, dict) and "oneOf" in schema:
|
||||
for sub in schema.get("oneOf", []):
|
||||
yield from _iter_variants(sub)
|
||||
return
|
||||
|
||||
yield schema
|
||||
|
||||
self.schema_specs = [_SchemaSpec(s) for s in _iter_variants(schema)]
|
||||
|
||||
|
||||
@dataclass(init=False)
|
||||
class NativeOutput(Generic[SchemaT]):
|
||||
"""Use the model provider's native structured output method."""
|
||||
|
||||
schema: type[SchemaT]
|
||||
"""Schema for native mode."""
|
||||
|
||||
schema_spec: _SchemaSpec[SchemaT]
|
||||
"""Schema spec for native mode."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
schema: type[SchemaT],
|
||||
) -> None:
|
||||
"""Initialize NativeOutput with schema."""
|
||||
self.schema = schema
|
||||
self.schema_spec = _SchemaSpec(schema)
|
||||
|
||||
def to_model_kwargs(self) -> dict[str, Any]:
|
||||
"""Convert the schema to the appropriate format for the model provider."""
|
||||
# OpenAI:
|
||||
# - see https://platform.openai.com/docs/guides/structured-outputs
|
||||
response_format = {
|
||||
"type": "json_schema",
|
||||
"json_schema": {
|
||||
"name": self.schema_spec.name,
|
||||
"schema": self.schema_spec.json_schema,
|
||||
},
|
||||
}
|
||||
return {"response_format": response_format}
|
||||
|
||||
|
||||
@dataclass
|
||||
class OutputToolBinding(Generic[SchemaT]):
|
||||
"""Information for tracking structured output tool metadata.
|
||||
|
||||
This contains all necessary information to handle structured responses
|
||||
generated via tool calls, including the original schema, its type classification,
|
||||
and the corresponding tool implementation used by the tools strategy.
|
||||
"""
|
||||
|
||||
schema: type[SchemaT]
|
||||
"""The original schema provided for structured output (Pydantic model, dataclass, TypedDict, or JSON schema dict)."""
|
||||
|
||||
schema_kind: SchemaKind
|
||||
"""Classification of the schema type for proper response construction."""
|
||||
|
||||
tool: BaseTool
|
||||
"""LangChain tool instance created from the schema for model binding."""
|
||||
|
||||
@classmethod
|
||||
def from_schema_spec(cls, schema_spec: _SchemaSpec[SchemaT]) -> Self:
|
||||
"""Create an OutputToolBinding instance from a SchemaSpec.
|
||||
|
||||
Args:
|
||||
schema_spec: The SchemaSpec to convert
|
||||
|
||||
Returns:
|
||||
An OutputToolBinding instance with the appropriate tool created
|
||||
"""
|
||||
return cls(
|
||||
schema=schema_spec.schema,
|
||||
schema_kind=schema_spec.schema_kind,
|
||||
tool=StructuredTool(
|
||||
args_schema=schema_spec.json_schema,
|
||||
name=schema_spec.name,
|
||||
description=schema_spec.description,
|
||||
),
|
||||
)
|
||||
|
||||
def parse(self, tool_args: dict[str, Any]) -> SchemaT:
|
||||
"""Parse tool arguments according to the schema.
|
||||
|
||||
Args:
|
||||
tool_args: The arguments from the tool call
|
||||
|
||||
Returns:
|
||||
The parsed response according to the schema type
|
||||
|
||||
Raises:
|
||||
ValueError: If parsing fails
|
||||
"""
|
||||
return _parse_with_schema(self.schema, self.schema_kind, tool_args)
|
||||
|
||||
|
||||
@dataclass
|
||||
class NativeOutputBinding(Generic[SchemaT]):
|
||||
"""Information for tracking native structured output metadata.
|
||||
|
||||
This contains all necessary information to handle structured responses
|
||||
generated via native provider output, including the original schema,
|
||||
its type classification, and parsing logic for provider-enforced JSON.
|
||||
"""
|
||||
|
||||
schema: type[SchemaT]
|
||||
"""The original schema provided for structured output (Pydantic model, dataclass, TypedDict, or JSON schema dict)."""
|
||||
|
||||
schema_kind: SchemaKind
|
||||
"""Classification of the schema type for proper response construction."""
|
||||
|
||||
@classmethod
|
||||
def from_schema_spec(cls, schema_spec: _SchemaSpec[SchemaT]) -> Self:
|
||||
"""Create a NativeOutputBinding instance from a SchemaSpec.
|
||||
|
||||
Args:
|
||||
schema_spec: The SchemaSpec to convert
|
||||
|
||||
Returns:
|
||||
A NativeOutputBinding instance for parsing native structured output
|
||||
"""
|
||||
return cls(
|
||||
schema=schema_spec.schema,
|
||||
schema_kind=schema_spec.schema_kind,
|
||||
)
|
||||
|
||||
def parse(self, response: AIMessage) -> SchemaT:
|
||||
"""Parse AIMessage content according to the schema.
|
||||
|
||||
Args:
|
||||
response: The AI message containing the structured output
|
||||
|
||||
Returns:
|
||||
The parsed response according to the schema
|
||||
|
||||
Raises:
|
||||
ValueError: If text extraction, JSON parsing or schema validation fails
|
||||
"""
|
||||
# Extract text content from AIMessage and parse as JSON
|
||||
raw_text = self._extract_text_content_from_message(response)
|
||||
|
||||
import json
|
||||
|
||||
try:
|
||||
data = json.loads(raw_text)
|
||||
except Exception as e:
|
||||
schema_name = getattr(self.schema, "__name__", "response_format")
|
||||
msg = f"Native structured output expected valid JSON for {schema_name}, but parsing failed: {e}."
|
||||
raise ValueError(msg) from e
|
||||
|
||||
# Parse according to schema
|
||||
return _parse_with_schema(self.schema, self.schema_kind, data)
|
||||
|
||||
def _extract_text_content_from_message(self, message: AIMessage) -> str:
|
||||
"""Extract text content from an AIMessage.
|
||||
|
||||
Args:
|
||||
message: The AI message to extract text from
|
||||
|
||||
Returns:
|
||||
The extracted text content
|
||||
"""
|
||||
content = message.content
|
||||
if isinstance(content, str):
|
||||
return content
|
||||
if isinstance(content, list):
|
||||
parts: list[str] = []
|
||||
for c in content:
|
||||
if isinstance(c, dict):
|
||||
if c.get("type") == "text" and "text" in c:
|
||||
parts.append(str(c["text"]))
|
||||
elif "content" in c and isinstance(c["content"], str):
|
||||
parts.append(c["content"])
|
||||
else:
|
||||
parts.append(str(c))
|
||||
return "".join(parts)
|
||||
return str(content)
|
||||
|
||||
|
||||
ResponseFormat = Union[ToolOutput[SchemaT], NativeOutput[SchemaT]]
|
||||
File diff suppressed because it is too large
Load Diff
@@ -190,7 +190,9 @@ class _MapReduceExtractor(Generic[ContextT]):
|
||||
if isinstance(model, str):
|
||||
model = init_chat_model(model)
|
||||
|
||||
self.model = model.with_structured_output(response_format) if response_format else model
|
||||
self.model = (
|
||||
model.with_structured_output(response_format) if response_format else model
|
||||
)
|
||||
self.map_prompt = map_prompt
|
||||
self.reduce_prompt = reduce_prompt
|
||||
self.reduce = reduce
|
||||
@@ -340,7 +342,9 @@ class _MapReduceExtractor(Generic[ContextT]):
|
||||
config: RunnableConfig,
|
||||
) -> dict[str, list[ExtractionResult]]:
|
||||
prompt = await self._aget_map_prompt(state, runtime)
|
||||
response = cast("AIMessage", await self.model.ainvoke(prompt, config=config))
|
||||
response = cast(
|
||||
"AIMessage", await self.model.ainvoke(prompt, config=config)
|
||||
)
|
||||
result = response if self.response_format else response.text()
|
||||
extraction_result: ExtractionResult = {
|
||||
"indexes": state["indexes"],
|
||||
@@ -371,7 +375,9 @@ class _MapReduceExtractor(Generic[ContextT]):
|
||||
config: RunnableConfig,
|
||||
) -> MapReduceNodeUpdate:
|
||||
prompt = await self._aget_reduce_prompt(state, runtime)
|
||||
response = cast("AIMessage", await self.model.ainvoke(prompt, config=config))
|
||||
response = cast(
|
||||
"AIMessage", await self.model.ainvoke(prompt, config=config)
|
||||
)
|
||||
result = response if self.response_format else response.text()
|
||||
return {"result": result}
|
||||
|
||||
|
||||
@@ -173,7 +173,9 @@ class _Extractor(Generic[ContextT]):
|
||||
if isinstance(model, str):
|
||||
model = init_chat_model(model)
|
||||
|
||||
self.model = model.with_structured_output(response_format) if response_format else model
|
||||
self.model = (
|
||||
model.with_structured_output(response_format) if response_format else model
|
||||
)
|
||||
self.initial_prompt = prompt
|
||||
self.refine_prompt = refine_prompt
|
||||
self.context_schema = context_schema
|
||||
@@ -186,7 +188,9 @@ class _Extractor(Generic[ContextT]):
|
||||
|
||||
# Choose default prompt based on structured output format
|
||||
default_prompt = (
|
||||
DEFAULT_STRUCTURED_INIT_PROMPT if self.response_format else DEFAULT_INIT_PROMPT
|
||||
DEFAULT_STRUCTURED_INIT_PROMPT
|
||||
if self.response_format
|
||||
else DEFAULT_INIT_PROMPT
|
||||
)
|
||||
|
||||
return resolve_prompt(
|
||||
@@ -205,7 +209,9 @@ class _Extractor(Generic[ContextT]):
|
||||
|
||||
# Choose default prompt based on structured output format
|
||||
default_prompt = (
|
||||
DEFAULT_STRUCTURED_INIT_PROMPT if self.response_format else DEFAULT_INIT_PROMPT
|
||||
DEFAULT_STRUCTURED_INIT_PROMPT
|
||||
if self.response_format
|
||||
else DEFAULT_INIT_PROMPT
|
||||
)
|
||||
|
||||
return await aresolve_prompt(
|
||||
@@ -240,7 +246,9 @@ class _Extractor(Generic[ContextT]):
|
||||
|
||||
# Choose default prompt based on structured output format
|
||||
default_prompt = (
|
||||
DEFAULT_STRUCTURED_REFINE_PROMPT if self.response_format else DEFAULT_REFINE_PROMPT
|
||||
DEFAULT_STRUCTURED_REFINE_PROMPT
|
||||
if self.response_format
|
||||
else DEFAULT_REFINE_PROMPT
|
||||
)
|
||||
|
||||
return resolve_prompt(
|
||||
@@ -275,7 +283,9 @@ class _Extractor(Generic[ContextT]):
|
||||
|
||||
# Choose default prompt based on structured output format
|
||||
default_prompt = (
|
||||
DEFAULT_STRUCTURED_REFINE_PROMPT if self.response_format else DEFAULT_REFINE_PROMPT
|
||||
DEFAULT_STRUCTURED_REFINE_PROMPT
|
||||
if self.response_format
|
||||
else DEFAULT_REFINE_PROMPT
|
||||
)
|
||||
|
||||
return await aresolve_prompt(
|
||||
@@ -330,12 +340,16 @@ class _Extractor(Generic[ContextT]):
|
||||
if "result" not in state or state["result"] == "":
|
||||
# Initial processing
|
||||
prompt = await self._aget_initial_prompt(state, runtime)
|
||||
response = cast("AIMessage", await self.model.ainvoke(prompt, config=config))
|
||||
response = cast(
|
||||
"AIMessage", await self.model.ainvoke(prompt, config=config)
|
||||
)
|
||||
result = response if self.response_format else response.text()
|
||||
return {"result": result}
|
||||
# Refinement
|
||||
prompt = await self._aget_refine_prompt(state, runtime)
|
||||
response = cast("AIMessage", await self.model.ainvoke(prompt, config=config))
|
||||
response = cast(
|
||||
"AIMessage", await self.model.ainvoke(prompt, config=config)
|
||||
)
|
||||
result = response if self.response_format else response.text()
|
||||
return {"result": result}
|
||||
|
||||
|
||||
@@ -67,7 +67,9 @@ def init_chat_model(
|
||||
model: Optional[str] = None,
|
||||
*,
|
||||
model_provider: Optional[str] = None,
|
||||
configurable_fields: Optional[Union[Literal["any"], list[str], tuple[str, ...]]] = None,
|
||||
configurable_fields: Optional[
|
||||
Union[Literal["any"], list[str], tuple[str, ...]]
|
||||
] = None,
|
||||
config_prefix: Optional[str] = None,
|
||||
**kwargs: Any,
|
||||
) -> Union[BaseChatModel, _ConfigurableModel]:
|
||||
@@ -444,7 +446,9 @@ def _init_chat_model_helper(
|
||||
|
||||
return ChatPerplexity(model=model, **kwargs)
|
||||
supported = ", ".join(_SUPPORTED_PROVIDERS)
|
||||
msg = f"Unsupported {model_provider=}.\n\nSupported model providers are: {supported}"
|
||||
msg = (
|
||||
f"Unsupported {model_provider=}.\n\nSupported model providers are: {supported}"
|
||||
)
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
@@ -497,13 +501,18 @@ def _attempt_infer_model_provider(model_name: str) -> Optional[str]:
|
||||
|
||||
|
||||
def _parse_model(model: str, model_provider: Optional[str]) -> tuple[str, str]:
|
||||
if not model_provider and ":" in model and model.split(":")[0] in _SUPPORTED_PROVIDERS:
|
||||
if (
|
||||
not model_provider
|
||||
and ":" in model
|
||||
and model.split(":")[0] in _SUPPORTED_PROVIDERS
|
||||
):
|
||||
model_provider = model.split(":")[0]
|
||||
model = ":".join(model.split(":")[1:])
|
||||
model_provider = model_provider or _attempt_infer_model_provider(model)
|
||||
if not model_provider:
|
||||
msg = (
|
||||
f"Unable to infer model provider for {model=}, please specify model_provider directly."
|
||||
f"Unable to infer model provider for {model=}, please specify "
|
||||
f"model_provider directly."
|
||||
)
|
||||
raise ValueError(msg)
|
||||
model_provider = model_provider.replace("-", "_").lower()
|
||||
@@ -513,7 +522,9 @@ def _parse_model(model: str, model_provider: Optional[str]) -> tuple[str, str]:
|
||||
def _check_pkg(pkg: str, *, pkg_kebab: Optional[str] = None) -> None:
|
||||
if not util.find_spec(pkg):
|
||||
pkg_kebab = pkg_kebab if pkg_kebab is not None else pkg.replace("_", "-")
|
||||
msg = f"Unable to import {pkg}. Please install with `pip install -U {pkg_kebab}`"
|
||||
msg = (
|
||||
f"Unable to import {pkg}. Please install with `pip install -U {pkg_kebab}`"
|
||||
)
|
||||
raise ImportError(msg)
|
||||
|
||||
|
||||
@@ -535,7 +546,9 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
|
||||
) -> None:
|
||||
self._default_config: dict = default_config or {}
|
||||
self._configurable_fields: Union[Literal["any"], list[str]] = (
|
||||
configurable_fields if configurable_fields == "any" else list(configurable_fields)
|
||||
configurable_fields
|
||||
if configurable_fields == "any"
|
||||
else list(configurable_fields)
|
||||
)
|
||||
self._config_prefix = (
|
||||
config_prefix + "_"
|
||||
@@ -591,7 +604,9 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
|
||||
if k.startswith(self._config_prefix)
|
||||
}
|
||||
if self._configurable_fields != "any":
|
||||
model_params = {k: v for k, v in model_params.items() if k in self._configurable_fields}
|
||||
model_params = {
|
||||
k: v for k, v in model_params.items() if k in self._configurable_fields
|
||||
}
|
||||
return model_params
|
||||
|
||||
def with_config(
|
||||
|
||||
@@ -19,7 +19,9 @@ _SUPPORTED_PROVIDERS = {
|
||||
|
||||
def _get_provider_list() -> str:
|
||||
"""Get formatted list of providers and their packages."""
|
||||
return "\n".join(f" - {p}: {pkg.replace('_', '-')}" for p, pkg in _SUPPORTED_PROVIDERS.items())
|
||||
return "\n".join(
|
||||
f" - {p}: {pkg.replace('_', '-')}" for p, pkg in _SUPPORTED_PROVIDERS.items()
|
||||
)
|
||||
|
||||
|
||||
def _parse_model_string(model_name: str) -> tuple[str, str]:
|
||||
@@ -115,7 +117,10 @@ def _infer_model_and_provider(
|
||||
def _check_pkg(pkg: str) -> None:
|
||||
"""Check if a package is installed."""
|
||||
if not util.find_spec(pkg):
|
||||
msg = f"Could not import {pkg} python package. Please install it with `pip install {pkg}`"
|
||||
msg = (
|
||||
f"Could not import {pkg} python package. "
|
||||
f"Please install it with `pip install {pkg}`"
|
||||
)
|
||||
raise ImportError(msg)
|
||||
|
||||
|
||||
@@ -177,7 +182,9 @@ def init_embeddings(
|
||||
"""
|
||||
if not model:
|
||||
providers = _SUPPORTED_PROVIDERS.keys()
|
||||
msg = f"Must specify model name. Supported providers are: {', '.join(providers)}"
|
||||
msg = (
|
||||
f"Must specify model name. Supported providers are: {', '.join(providers)}"
|
||||
)
|
||||
raise ValueError(msg)
|
||||
|
||||
provider, model_name = _infer_model_and_provider(model, provider=provider)
|
||||
|
||||
@@ -181,7 +181,9 @@ class CacheBackedEmbeddings(Embeddings):
|
||||
vectors: list[Union[list[float], None]] = self.document_embedding_store.mget(
|
||||
texts,
|
||||
)
|
||||
all_missing_indices: list[int] = [i for i, vector in enumerate(vectors) if vector is None]
|
||||
all_missing_indices: list[int] = [
|
||||
i for i, vector in enumerate(vectors) if vector is None
|
||||
]
|
||||
|
||||
for missing_indices in batch_iterate(self.batch_size, all_missing_indices):
|
||||
missing_texts = [texts[i] for i in missing_indices]
|
||||
@@ -210,8 +212,12 @@ class CacheBackedEmbeddings(Embeddings):
|
||||
Returns:
|
||||
A list of embeddings for the given texts.
|
||||
"""
|
||||
vectors: list[Union[list[float], None]] = await self.document_embedding_store.amget(texts)
|
||||
all_missing_indices: list[int] = [i for i, vector in enumerate(vectors) if vector is None]
|
||||
vectors: list[
|
||||
Union[list[float], None]
|
||||
] = await self.document_embedding_store.amget(texts)
|
||||
all_missing_indices: list[int] = [
|
||||
i for i, vector in enumerate(vectors) if vector is None
|
||||
]
|
||||
|
||||
# batch_iterate supports None batch_size which returns all elements at once
|
||||
# as a single batch.
|
||||
|
||||
@@ -66,25 +66,33 @@ class EncoderBackedStore(BaseStore[K, V]):
|
||||
"""Get the values associated with the given keys."""
|
||||
encoded_keys: list[str] = [self.key_encoder(key) for key in keys]
|
||||
values = self.store.mget(encoded_keys)
|
||||
return [self.value_deserializer(value) if value is not None else value for value in values]
|
||||
return [
|
||||
self.value_deserializer(value) if value is not None else value
|
||||
for value in values
|
||||
]
|
||||
|
||||
async def amget(self, keys: Sequence[K]) -> list[Optional[V]]:
|
||||
"""Get the values associated with the given keys."""
|
||||
encoded_keys: list[str] = [self.key_encoder(key) for key in keys]
|
||||
values = await self.store.amget(encoded_keys)
|
||||
return [self.value_deserializer(value) if value is not None else value for value in values]
|
||||
return [
|
||||
self.value_deserializer(value) if value is not None else value
|
||||
for value in values
|
||||
]
|
||||
|
||||
def mset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None:
|
||||
"""Set the values for the given keys."""
|
||||
encoded_pairs = [
|
||||
(self.key_encoder(key), self.value_serializer(value)) for key, value in key_value_pairs
|
||||
(self.key_encoder(key), self.value_serializer(value))
|
||||
for key, value in key_value_pairs
|
||||
]
|
||||
self.store.mset(encoded_pairs)
|
||||
|
||||
async def amset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None:
|
||||
"""Set the values for the given keys."""
|
||||
encoded_pairs = [
|
||||
(self.key_encoder(key), self.value_serializer(value)) for key, value in key_value_pairs
|
||||
(self.key_encoder(key), self.value_serializer(value))
|
||||
for key, value in key_value_pairs
|
||||
]
|
||||
await self.store.amset(encoded_pairs)
|
||||
|
||||
|
||||
@@ -50,7 +50,6 @@ test = [
|
||||
"pytest-watcher<1.0.0,>=0.2.6",
|
||||
"pytest-asyncio<1.0.0,>=0.23.2",
|
||||
"pytest-socket<1.0.0,>=0.6.0",
|
||||
"pytest-mock<4.0.0,>=3.12.0",
|
||||
"syrupy<5.0.0,>=4.0.2",
|
||||
"pytest-xdist<4.0.0,>=3.6.1",
|
||||
"blockbuster<1.6,>=1.5.18",
|
||||
@@ -89,7 +88,6 @@ langchain-openai = { path = "../partners/openai", editable = true }
|
||||
[tool.ruff]
|
||||
target-version = "py39"
|
||||
exclude = ["tests/integration_tests/examples/non-utf8-encoding.py"]
|
||||
line-length = 100
|
||||
|
||||
[tool.mypy]
|
||||
strict = "True"
|
||||
@@ -134,13 +132,9 @@ pyupgrade.keep-runtime-typing = true
|
||||
flake8-annotations.allow-star-arg-any = true
|
||||
|
||||
[tool.ruff.lint.per-file-ignores]
|
||||
"tests/**/*" = ["ALL"]
|
||||
"langchain/agents/*" = [
|
||||
"E501", # line too long
|
||||
"ANN401", # we use Any
|
||||
"A001", # input is shadowing builtin
|
||||
"A002", # input is shadowing builtin
|
||||
"PLR2004", # magic values are fine for this case
|
||||
"tests/*" = [
|
||||
"D", # Documentation rules
|
||||
"PLC0415", # Imports should be at the top. Not always desirable for tests
|
||||
]
|
||||
|
||||
[tool.ruff.lint.extend-per-file-ignores]
|
||||
|
||||
@@ -31,7 +31,9 @@ async def test_init_chat_model_chain() -> None:
|
||||
chain = prompt | model_with_config
|
||||
output = chain.invoke({"input": "bar"})
|
||||
assert isinstance(output, AIMessage)
|
||||
events = [event async for event in chain.astream_events({"input": "bar"}, version="v2")]
|
||||
events = [
|
||||
event async for event in chain.astream_events({"input": "bar"}, version="v2")
|
||||
]
|
||||
assert events
|
||||
|
||||
|
||||
|
||||
@@ -1,83 +0,0 @@
|
||||
# serializer version: 1
|
||||
# name: test_react_agent_graph_structure[None-None-tools0]
|
||||
'''
|
||||
graph TD;
|
||||
__start__ --> model;
|
||||
model --> __end__;
|
||||
|
||||
'''
|
||||
# ---
|
||||
# name: test_react_agent_graph_structure[None-None-tools1]
|
||||
'''
|
||||
graph TD;
|
||||
__start__ --> model;
|
||||
model -.-> __end__;
|
||||
model -.-> tools;
|
||||
tools --> model;
|
||||
|
||||
'''
|
||||
# ---
|
||||
# name: test_react_agent_graph_structure[None-pre_model_hook-tools0]
|
||||
'''
|
||||
graph TD;
|
||||
__start__ --> pre_model_hook;
|
||||
pre_model_hook --> model;
|
||||
model --> __end__;
|
||||
|
||||
'''
|
||||
# ---
|
||||
# name: test_react_agent_graph_structure[None-pre_model_hook-tools1]
|
||||
'''
|
||||
graph TD;
|
||||
__start__ --> pre_model_hook;
|
||||
model -.-> __end__;
|
||||
model -.-> tools;
|
||||
pre_model_hook --> model;
|
||||
tools --> pre_model_hook;
|
||||
|
||||
'''
|
||||
# ---
|
||||
# name: test_react_agent_graph_structure[post_model_hook-None-tools0]
|
||||
'''
|
||||
graph TD;
|
||||
__start__ --> model;
|
||||
model --> post_model_hook;
|
||||
post_model_hook --> __end__;
|
||||
|
||||
'''
|
||||
# ---
|
||||
# name: test_react_agent_graph_structure[post_model_hook-None-tools1]
|
||||
'''
|
||||
graph TD;
|
||||
__start__ --> model;
|
||||
model --> post_model_hook;
|
||||
post_model_hook -.-> __end__;
|
||||
post_model_hook -.-> model;
|
||||
post_model_hook -.-> tools;
|
||||
tools --> model;
|
||||
|
||||
'''
|
||||
# ---
|
||||
# name: test_react_agent_graph_structure[post_model_hook-pre_model_hook-tools0]
|
||||
'''
|
||||
graph TD;
|
||||
__start__ --> pre_model_hook;
|
||||
model --> post_model_hook;
|
||||
pre_model_hook --> model;
|
||||
post_model_hook --> __end__;
|
||||
|
||||
'''
|
||||
# ---
|
||||
# name: test_react_agent_graph_structure[post_model_hook-pre_model_hook-tools1]
|
||||
'''
|
||||
graph TD;
|
||||
__start__ --> pre_model_hook;
|
||||
model --> post_model_hook;
|
||||
post_model_hook -.-> __end__;
|
||||
post_model_hook -.-> pre_model_hook;
|
||||
post_model_hook -.-> tools;
|
||||
pre_model_hook --> model;
|
||||
tools --> pre_model_hook;
|
||||
|
||||
'''
|
||||
# ---
|
||||
@@ -1,18 +0,0 @@
|
||||
import re
|
||||
from typing import Union
|
||||
|
||||
|
||||
class AnyStr(str):
|
||||
def __init__(self, prefix: Union[str, re.Pattern] = "") -> None:
|
||||
super().__init__()
|
||||
self.prefix = prefix
|
||||
|
||||
def __eq__(self, other: object) -> bool:
|
||||
return isinstance(other, str) and (
|
||||
other.startswith(self.prefix)
|
||||
if isinstance(self.prefix, str)
|
||||
else self.prefix.match(other)
|
||||
)
|
||||
|
||||
def __hash__(self) -> int:
|
||||
return hash((str(self), self.prefix))
|
||||
@@ -1,17 +0,0 @@
|
||||
name: langgraph-tests
|
||||
services:
|
||||
postgres-test:
|
||||
image: postgres:16
|
||||
ports:
|
||||
- "5442:5432"
|
||||
environment:
|
||||
POSTGRES_DB: postgres
|
||||
POSTGRES_USER: postgres
|
||||
POSTGRES_PASSWORD: postgres
|
||||
healthcheck:
|
||||
test: pg_isready -U postgres
|
||||
start_period: 10s
|
||||
timeout: 1s
|
||||
retries: 5
|
||||
interval: 60s
|
||||
start_interval: 1s
|
||||
@@ -1,16 +0,0 @@
|
||||
name: langgraph-tests-redis
|
||||
services:
|
||||
redis-test:
|
||||
image: redis:7-alpine
|
||||
ports:
|
||||
- "6379:6379"
|
||||
command: redis-server --maxmemory 256mb --maxmemory-policy allkeys-lru
|
||||
healthcheck:
|
||||
test: redis-cli ping
|
||||
start_period: 10s
|
||||
timeout: 1s
|
||||
retries: 5
|
||||
interval: 5s
|
||||
start_interval: 1s
|
||||
tmpfs:
|
||||
- /data # Use tmpfs for faster testing
|
||||
@@ -1,194 +0,0 @@
|
||||
import os
|
||||
from collections.abc import AsyncIterator, Iterator
|
||||
from uuid import UUID
|
||||
|
||||
import pytest
|
||||
from langgraph.checkpoint.base import BaseCheckpointSaver
|
||||
from langgraph.store.base import BaseStore
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from .conftest_checkpointer import (
|
||||
_checkpointer_memory,
|
||||
_checkpointer_postgres,
|
||||
_checkpointer_postgres_aio,
|
||||
_checkpointer_postgres_aio_pipe,
|
||||
_checkpointer_postgres_aio_pool,
|
||||
_checkpointer_postgres_pipe,
|
||||
_checkpointer_postgres_pool,
|
||||
_checkpointer_sqlite,
|
||||
_checkpointer_sqlite_aio,
|
||||
)
|
||||
from .conftest_store import (
|
||||
_store_memory,
|
||||
_store_postgres,
|
||||
_store_postgres_aio,
|
||||
_store_postgres_aio_pipe,
|
||||
_store_postgres_aio_pool,
|
||||
_store_postgres_pipe,
|
||||
_store_postgres_pool,
|
||||
)
|
||||
|
||||
# Global variables for checkpointer and store configurations
|
||||
FAST_MODE = os.getenv("LANGGRAPH_TEST_FAST", "true").lower() in ("true", "1", "yes")
|
||||
|
||||
SYNC_CHECKPOINTER_PARAMS = (
|
||||
["memory"]
|
||||
if FAST_MODE
|
||||
else [
|
||||
"memory",
|
||||
"sqlite",
|
||||
"postgres",
|
||||
"postgres_pipe",
|
||||
"postgres_pool",
|
||||
]
|
||||
)
|
||||
|
||||
ASYNC_CHECKPOINTER_PARAMS = (
|
||||
["memory"]
|
||||
if FAST_MODE
|
||||
else [
|
||||
"memory",
|
||||
"sqlite_aio",
|
||||
"postgres_aio",
|
||||
"postgres_aio_pipe",
|
||||
"postgres_aio_pool",
|
||||
]
|
||||
)
|
||||
|
||||
SYNC_STORE_PARAMS = (
|
||||
["in_memory"]
|
||||
if FAST_MODE
|
||||
else [
|
||||
"in_memory",
|
||||
"postgres",
|
||||
"postgres_pipe",
|
||||
"postgres_pool",
|
||||
]
|
||||
)
|
||||
|
||||
ASYNC_STORE_PARAMS = (
|
||||
["in_memory"]
|
||||
if FAST_MODE
|
||||
else [
|
||||
"in_memory",
|
||||
"postgres_aio",
|
||||
"postgres_aio_pipe",
|
||||
"postgres_aio_pool",
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def anyio_backend() -> str:
|
||||
return "asyncio"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def deterministic_uuids(mocker: MockerFixture) -> MockerFixture:
|
||||
side_effect = (UUID(f"00000000-0000-4000-8000-{i:012}", version=4) for i in range(10000))
|
||||
return mocker.patch("uuid.uuid4", side_effect=side_effect)
|
||||
|
||||
|
||||
# checkpointer fixtures
|
||||
|
||||
|
||||
@pytest.fixture(
|
||||
params=SYNC_STORE_PARAMS,
|
||||
)
|
||||
def sync_store(request: pytest.FixtureRequest) -> Iterator[BaseStore]:
|
||||
store_name = request.param
|
||||
if store_name is None:
|
||||
yield None
|
||||
elif store_name == "in_memory":
|
||||
with _store_memory() as store:
|
||||
yield store
|
||||
elif store_name == "postgres":
|
||||
with _store_postgres() as store:
|
||||
yield store
|
||||
elif store_name == "postgres_pipe":
|
||||
with _store_postgres_pipe() as store:
|
||||
yield store
|
||||
elif store_name == "postgres_pool":
|
||||
with _store_postgres_pool() as store:
|
||||
yield store
|
||||
else:
|
||||
msg = f"Unknown store {store_name}"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
|
||||
@pytest.fixture(
|
||||
params=ASYNC_STORE_PARAMS,
|
||||
)
|
||||
async def async_store(request: pytest.FixtureRequest) -> AsyncIterator[BaseStore]:
|
||||
store_name = request.param
|
||||
if store_name is None:
|
||||
yield None
|
||||
elif store_name == "in_memory":
|
||||
with _store_memory() as store:
|
||||
yield store
|
||||
elif store_name == "postgres_aio":
|
||||
async with _store_postgres_aio() as store:
|
||||
yield store
|
||||
elif store_name == "postgres_aio_pipe":
|
||||
async with _store_postgres_aio_pipe() as store:
|
||||
yield store
|
||||
elif store_name == "postgres_aio_pool":
|
||||
async with _store_postgres_aio_pool() as store:
|
||||
yield store
|
||||
else:
|
||||
msg = f"Unknown store {store_name}"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
|
||||
@pytest.fixture(
|
||||
params=SYNC_CHECKPOINTER_PARAMS,
|
||||
)
|
||||
def sync_checkpointer(
|
||||
request: pytest.FixtureRequest,
|
||||
) -> Iterator[BaseCheckpointSaver]:
|
||||
checkpointer_name = request.param
|
||||
if checkpointer_name == "memory":
|
||||
with _checkpointer_memory() as checkpointer:
|
||||
yield checkpointer
|
||||
elif checkpointer_name == "sqlite":
|
||||
with _checkpointer_sqlite() as checkpointer:
|
||||
yield checkpointer
|
||||
elif checkpointer_name == "postgres":
|
||||
with _checkpointer_postgres() as checkpointer:
|
||||
yield checkpointer
|
||||
elif checkpointer_name == "postgres_pipe":
|
||||
with _checkpointer_postgres_pipe() as checkpointer:
|
||||
yield checkpointer
|
||||
elif checkpointer_name == "postgres_pool":
|
||||
with _checkpointer_postgres_pool() as checkpointer:
|
||||
yield checkpointer
|
||||
else:
|
||||
msg = f"Unknown checkpointer: {checkpointer_name}"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
|
||||
@pytest.fixture(
|
||||
params=ASYNC_CHECKPOINTER_PARAMS,
|
||||
)
|
||||
async def async_checkpointer(
|
||||
request: pytest.FixtureRequest,
|
||||
) -> AsyncIterator[BaseCheckpointSaver]:
|
||||
checkpointer_name = request.param
|
||||
if checkpointer_name == "memory":
|
||||
with _checkpointer_memory() as checkpointer:
|
||||
yield checkpointer
|
||||
elif checkpointer_name == "sqlite_aio":
|
||||
async with _checkpointer_sqlite_aio() as checkpointer:
|
||||
yield checkpointer
|
||||
elif checkpointer_name == "postgres_aio":
|
||||
async with _checkpointer_postgres_aio() as checkpointer:
|
||||
yield checkpointer
|
||||
elif checkpointer_name == "postgres_aio_pipe":
|
||||
async with _checkpointer_postgres_aio_pipe() as checkpointer:
|
||||
yield checkpointer
|
||||
elif checkpointer_name == "postgres_aio_pool":
|
||||
async with _checkpointer_postgres_aio_pool() as checkpointer:
|
||||
yield checkpointer
|
||||
else:
|
||||
msg = f"Unknown checkpointer: {checkpointer_name}"
|
||||
raise NotImplementedError(msg)
|
||||
@@ -1,64 +0,0 @@
|
||||
from contextlib import asynccontextmanager, contextmanager
|
||||
|
||||
from .memory_assert import (
|
||||
MemorySaverAssertImmutable,
|
||||
)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _checkpointer_memory():
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _checkpointer_memory_aio():
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
# Placeholder functions for other checkpointer types that aren't available
|
||||
@contextmanager
|
||||
def _checkpointer_sqlite():
|
||||
# Fallback to memory for now
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _checkpointer_postgres():
|
||||
# Fallback to memory for now
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _checkpointer_postgres_pipe():
|
||||
# Fallback to memory for now
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _checkpointer_postgres_pool():
|
||||
# Fallback to memory for now
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _checkpointer_sqlite_aio():
|
||||
# Fallback to memory for now
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _checkpointer_postgres_aio():
|
||||
# Fallback to memory for now
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _checkpointer_postgres_aio_pipe():
|
||||
# Fallback to memory for now
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _checkpointer_postgres_aio_pool():
|
||||
# Fallback to memory for now
|
||||
yield MemorySaverAssertImmutable()
|
||||
@@ -1,58 +0,0 @@
|
||||
from contextlib import asynccontextmanager, contextmanager
|
||||
|
||||
from langgraph.store.memory import InMemoryStore
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _store_memory():
|
||||
store = InMemoryStore()
|
||||
yield store
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _store_memory_aio():
|
||||
store = InMemoryStore()
|
||||
yield store
|
||||
|
||||
|
||||
# Placeholder functions for other store types that aren't available
|
||||
@contextmanager
|
||||
def _store_postgres():
|
||||
# Fallback to memory for now
|
||||
store = InMemoryStore()
|
||||
yield store
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _store_postgres_pipe():
|
||||
# Fallback to memory for now
|
||||
store = InMemoryStore()
|
||||
yield store
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _store_postgres_pool():
|
||||
# Fallback to memory for now
|
||||
store = InMemoryStore()
|
||||
yield store
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _store_postgres_aio():
|
||||
# Fallback to memory for now
|
||||
store = InMemoryStore()
|
||||
yield store
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _store_postgres_aio_pipe():
|
||||
# Fallback to memory for now
|
||||
store = InMemoryStore()
|
||||
yield store
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _store_postgres_aio_pool():
|
||||
# Fallback to memory for now
|
||||
store = InMemoryStore()
|
||||
yield store
|
||||
@@ -1,57 +0,0 @@
|
||||
import os
|
||||
import tempfile
|
||||
from collections import defaultdict
|
||||
from functools import partial
|
||||
from typing import Optional
|
||||
|
||||
from langgraph.checkpoint.base import (
|
||||
ChannelVersions,
|
||||
Checkpoint,
|
||||
CheckpointMetadata,
|
||||
SerializerProtocol,
|
||||
)
|
||||
from langgraph.checkpoint.memory import InMemorySaver, PersistentDict
|
||||
from langgraph.pregel._checkpoint import copy_checkpoint
|
||||
|
||||
|
||||
class MemorySaverAssertImmutable(InMemorySaver):
|
||||
storage_for_copies: defaultdict[str, dict[str, dict[str, Checkpoint]]]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
serde: Optional[SerializerProtocol] = None,
|
||||
put_sleep: Optional[float] = None,
|
||||
) -> None:
|
||||
_, filename = tempfile.mkstemp()
|
||||
super().__init__(serde=serde, factory=partial(PersistentDict, filename=filename))
|
||||
self.storage_for_copies = defaultdict(lambda: defaultdict(dict))
|
||||
self.put_sleep = put_sleep
|
||||
self.stack.callback(os.remove, filename)
|
||||
|
||||
def put(
|
||||
self,
|
||||
config: dict,
|
||||
checkpoint: Checkpoint,
|
||||
metadata: CheckpointMetadata,
|
||||
new_versions: ChannelVersions,
|
||||
) -> None:
|
||||
if self.put_sleep:
|
||||
import time
|
||||
|
||||
time.sleep(self.put_sleep)
|
||||
# assert checkpoint hasn't been modified since last written
|
||||
thread_id = config["configurable"]["thread_id"]
|
||||
checkpoint_ns = config["configurable"]["checkpoint_ns"]
|
||||
if saved := super().get(config):
|
||||
assert (
|
||||
self.serde.loads_typed(
|
||||
self.storage_for_copies[thread_id][checkpoint_ns][saved["id"]]
|
||||
)
|
||||
== saved
|
||||
)
|
||||
self.storage_for_copies[thread_id][checkpoint_ns][checkpoint["id"]] = (
|
||||
self.serde.dumps_typed(copy_checkpoint(checkpoint))
|
||||
)
|
||||
# call super to write checkpoint
|
||||
return super().put(config, checkpoint, metadata, new_versions)
|
||||
@@ -1,28 +0,0 @@
|
||||
"""Redefined messages as a work-around for pydantic issue with AnyStr.
|
||||
|
||||
The code below creates version of pydantic models
|
||||
that will work in unit tests with AnyStr as id field
|
||||
Please note that the `id` field is assigned AFTER the model is created
|
||||
to workaround an issue with pydantic ignoring the __eq__ method on
|
||||
subclassed strings.
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
from langchain_core.messages import HumanMessage, ToolMessage
|
||||
|
||||
from .any_str import AnyStr
|
||||
|
||||
|
||||
def _AnyIdHumanMessage(**kwargs: Any) -> HumanMessage:
|
||||
"""Create a human message with an any id field."""
|
||||
message = HumanMessage(**kwargs)
|
||||
message.id = AnyStr()
|
||||
return message
|
||||
|
||||
|
||||
def _AnyIdToolMessage(**kwargs: Any) -> ToolMessage:
|
||||
"""Create a tool message with an any id field."""
|
||||
message = ToolMessage(**kwargs)
|
||||
message.id = AnyStr()
|
||||
return message
|
||||
@@ -1,113 +0,0 @@
|
||||
import json
|
||||
from collections.abc import Sequence
|
||||
from dataclasses import asdict, is_dataclass
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Generic,
|
||||
Literal,
|
||||
Optional,
|
||||
TypeVar,
|
||||
Union,
|
||||
)
|
||||
|
||||
from langchain_core.callbacks import CallbackManagerForLLMRun
|
||||
from langchain_core.language_models import BaseChatModel, LanguageModelInput
|
||||
from langchain_core.messages import (
|
||||
AIMessage,
|
||||
BaseMessage,
|
||||
ToolCall,
|
||||
)
|
||||
from langchain_core.outputs import ChatGeneration, ChatResult
|
||||
from langchain_core.runnables import Runnable
|
||||
from langchain_core.tools import BaseTool
|
||||
from pydantic import BaseModel
|
||||
|
||||
StructuredResponseT = TypeVar("StructuredResponseT")
|
||||
|
||||
|
||||
class FakeToolCallingModel(BaseChatModel, Generic[StructuredResponseT]):
    """Fake chat model that replays scripted tool calls for agent tests.

    Each invocation emits the next entry of ``tool_calls``; when native
    structured output is requested (``response_format={"type": "json_schema", ...}``)
    and the script is exhausted, ``structured_response`` is serialized to JSON
    as the final AI message content.
    """

    # Scripted tool calls: one list of calls per model invocation.
    tool_calls: Optional[Union[list[list[ToolCall]], list[list[dict]]]] = None
    # Value emitted as the native structured-output message; must be a
    # BaseModel, dataclass instance, or dict when used.
    structured_response: Optional[StructuredResponseT] = None
    # Invocation counter; doubles as the message id and the script cursor.
    index: int = 0
    # Which provider-style tool spec bind_tools should produce.
    tool_style: Literal["openai", "anthropic"] = "openai"

    def _generate(
        self,
        messages: list[BaseMessage],
        stop: Optional[list[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Top Level call.

        Returns a ChatResult whose AIMessage carries either the next scripted
        tool calls or, in native structured-output mode with no calls left,
        the JSON-serialized ``structured_response``.
        """
        rf = kwargs.get("response_format")
        is_native = isinstance(rf, dict) and rf.get("type") == "json_schema"

        if self.tool_calls:
            if is_native:
                # Native mode: once the script is exhausted, stop issuing calls
                # so the structured response can be emitted.
                tool_calls = (
                    self.tool_calls[self.index] if self.index < len(self.tool_calls) else []
                )
            else:
                # Tool mode: cycle through the scripted calls indefinitely.
                tool_calls = self.tool_calls[self.index % len(self.tool_calls)]
        else:
            tool_calls = []

        if is_native and not tool_calls:
            if isinstance(self.structured_response, BaseModel):
                content_obj = self.structured_response.model_dump()
            elif is_dataclass(self.structured_response):
                content_obj = asdict(self.structured_response)
            elif isinstance(self.structured_response, dict):
                content_obj = self.structured_response
            else:
                # Bug fix: previously an unsupported type (including the
                # default None) fell through and raised a confusing
                # UnboundLocalError on `content_obj`. Fail loudly instead.
                msg = (
                    "structured_response must be a BaseModel, dataclass "
                    "instance, or dict for native structured output; got "
                    f"{type(self.structured_response).__name__}"
                )
                raise TypeError(msg)
            message = AIMessage(content=json.dumps(content_obj), id=str(self.index))
        else:
            messages_string = "-".join([m.content for m in messages])
            message = AIMessage(
                content=messages_string,
                id=str(self.index),
                tool_calls=tool_calls.copy(),
            )
        self.index += 1
        return ChatResult(generations=[ChatGeneration(message=message)])

    @property
    def _llm_type(self) -> str:
        return "fake-tool-call-model"

    def bind_tools(
        self,
        tools: Sequence[Union[dict[str, Any], type[BaseModel], Callable, BaseTool]],
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind a simplified (name-only) tool spec in the configured provider style.

        Raises:
            ValueError: if ``tools`` is empty.
            TypeError: if a tool is neither a dict nor a BaseTool.
        """
        if len(tools) == 0:
            msg = "Must provide at least one tool"
            raise ValueError(msg)

        tool_dicts = []
        for tool in tools:
            if isinstance(tool, dict):
                tool_dicts.append(tool)
                continue
            if not isinstance(tool, BaseTool):
                msg = "Only BaseTool and dict is supported by FakeToolCallingModel.bind_tools"
                raise TypeError(msg)

            # NOTE: this is a simplified tool spec for testing purposes only
            if self.tool_style == "openai":
                tool_dicts.append(
                    {
                        "type": "function",
                        "function": {
                            "name": tool.name,
                        },
                    }
                )
            elif self.tool_style == "anthropic":
                tool_dicts.append(
                    {
                        "name": tool.name,
                    }
                )

        return self.bind(tools=tool_dicts)
|
||||
@@ -1,87 +0,0 @@
|
||||
[
|
||||
{
|
||||
"name": "updated structured response",
|
||||
"responseFormat": [
|
||||
{
|
||||
"title": "role_schema_structured_output",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": { "type": "string" },
|
||||
"role": { "type": "string" }
|
||||
},
|
||||
"required": ["name", "role"]
|
||||
},
|
||||
{
|
||||
"title": "department_schema_structured_output",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": { "type": "string" },
|
||||
"department": { "type": "string" }
|
||||
},
|
||||
"required": ["name", "department"]
|
||||
}
|
||||
],
|
||||
"assertionsByInvocation": [
|
||||
{
|
||||
"prompt": "What is the role of Sabine?",
|
||||
"toolsWithExpectedCalls": {
|
||||
"getEmployeeRole": 1,
|
||||
"getEmployeeDepartment": 0
|
||||
},
|
||||
"expectedLastMessage": "Returning structured response: {'name': 'Sabine', 'role': 'Developer'}",
|
||||
"expectedStructuredResponse": { "name": "Sabine", "role": "Developer" },
|
||||
"llmRequestCount": 2
|
||||
},
|
||||
{
|
||||
"prompt": "In which department does Henrik work?",
|
||||
"toolsWithExpectedCalls": {
|
||||
"getEmployeeRole": 1,
|
||||
"getEmployeeDepartment": 1
|
||||
},
|
||||
"expectedLastMessage": "Returning structured response: {'name': 'Henrik', 'department': 'IT'}",
|
||||
"expectedStructuredResponse": { "name": "Henrik", "department": "IT" },
|
||||
"llmRequestCount": 4
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "asking for information that does not fit into the response format",
|
||||
"responseFormat": [
|
||||
{
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": { "type": "string" },
|
||||
"role": { "type": "string" }
|
||||
},
|
||||
"required": ["name", "role"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": { "type": "string" },
|
||||
"department": { "type": "string" }
|
||||
},
|
||||
"required": ["name", "department"]
|
||||
}
|
||||
}
|
||||
],
|
||||
"assertionsByInvocation": [
|
||||
{
|
||||
"prompt": "How much does Saskia earn?",
|
||||
"toolsWithExpectedCalls": {
|
||||
"getEmployeeRole": 1,
|
||||
"getEmployeeDepartment": 0
|
||||
},
|
||||
"expectedLastMessage": "Returning structured response: {'name': 'Saskia', 'role': 'Software Engineer'}",
|
||||
"expectedStructuredResponse": {
|
||||
"name": "Saskia",
|
||||
"role": "Software Engineer"
|
||||
},
|
||||
"llmRequestCount": 2
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
@@ -1,48 +0,0 @@
|
||||
[
|
||||
{
|
||||
"name": "Scenario: NO return_direct, NO response_format",
|
||||
"returnDirect": false,
|
||||
"responseFormat": null,
|
||||
"expectedToolCalls": 10,
|
||||
"expectedLastMessage": "Attempts: 10",
|
||||
"expectedStructuredResponse": null
|
||||
},
|
||||
{
|
||||
"name": "Scenario: NO return_direct, YES response_format",
|
||||
"returnDirect": false,
|
||||
"responseFormat": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"attempts": { "type": "number" },
|
||||
"succeeded": { "type": "boolean" }
|
||||
},
|
||||
"required": ["attempts", "succeeded"]
|
||||
},
|
||||
"expectedToolCalls": 10,
|
||||
"expectedLastMessage": "Returning structured response: {'attempts': 10, 'succeeded': True}",
|
||||
"expectedStructuredResponse": { "attempts": 10, "succeeded": true }
|
||||
},
|
||||
{
|
||||
"name": "Scenario: YES return_direct, NO response_format",
|
||||
"returnDirect": true,
|
||||
"responseFormat": null,
|
||||
"expectedToolCalls": 1,
|
||||
"expectedLastMessage": "{\"status\": \"pending\", \"attempts\": 1}",
|
||||
"expectedStructuredResponse": null
|
||||
},
|
||||
{
|
||||
"name": "Scenario: YES return_direct, YES response_format",
|
||||
"returnDirect": true,
|
||||
"responseFormat": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"attempts": { "type": "number" },
|
||||
"succeeded": { "type": "boolean" }
|
||||
},
|
||||
"required": ["attempts", "succeeded"]
|
||||
},
|
||||
"expectedToolCalls": 1,
|
||||
"expectedLastMessage": "{\"status\": \"pending\", \"attempts\": 1}",
|
||||
"expectedStructuredResponse": null
|
||||
}
|
||||
]
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,57 +0,0 @@
|
||||
from typing import Callable, Union
|
||||
|
||||
import pytest
|
||||
from pydantic import BaseModel
|
||||
from syrupy.assertion import SnapshotAssertion
|
||||
|
||||
from langchain.agents import create_react_agent
|
||||
|
||||
from .model import FakeToolCallingModel
|
||||
|
||||
model = FakeToolCallingModel()
|
||||
|
||||
|
||||
def tool() -> None:
    """Testing tool; intentionally a no-op used only for graph-shape tests."""
|
||||
|
||||
|
||||
def pre_model_hook() -> None:
    """Pre-model hook; intentionally a no-op used only for graph-shape tests."""
|
||||
|
||||
|
||||
def post_model_hook() -> None:
    """Post-model hook; intentionally a no-op used only for graph-shape tests."""
|
||||
|
||||
|
||||
class ResponseFormat(BaseModel):
    """Response format for the agent."""

    # Single free-form result string returned by the agent.
    result: str
|
||||
|
||||
|
||||
@pytest.mark.parametrize("tools", [[], [tool]])
|
||||
@pytest.mark.parametrize("pre_model_hook", [None, pre_model_hook])
|
||||
@pytest.mark.parametrize("post_model_hook", [None, post_model_hook])
|
||||
def test_react_agent_graph_structure(
    snapshot: SnapshotAssertion,
    tools: list[Callable],
    pre_model_hook: Union[Callable, None],
    post_model_hook: Union[Callable, None],
) -> None:
    """Snapshot-test the agent graph shape for each tool/hook permutation.

    Raises ValueError (chained to the assertion error) naming the failing
    configuration when the Mermaid rendering no longer matches the snapshot.
    """
    agent = create_react_agent(
        model,
        tools=tools,
        pre_model_hook=pre_model_hook,
        post_model_hook=post_model_hook,
    )
    try:
        assert agent.get_graph().draw_mermaid(with_styles=False) == snapshot
    except Exception as e:
        # Bug fix: the adjacent string literals were concatenated with no
        # separator, yielding "...snapshot.Configuration used:"; add a newline.
        msg = (
            "The graph structure has changed. Please update the snapshot.\n"
            "Configuration used:\n"
            f"tools: {tools}, "
            f"pre_model_hook: {pre_model_hook}, "
            f"post_model_hook: {post_model_hook}, "
        )
        raise ValueError(msg) from e
|
||||
@@ -1,771 +0,0 @@
|
||||
"""Test suite for create_react_agent with structured output response_format permutations."""
|
||||
|
||||
import pytest
|
||||
|
||||
# Skip this test since langgraph.prebuilt.responses is not available
|
||||
pytest.skip("langgraph.prebuilt.responses not available", allow_module_level=True)
|
||||
|
||||
# from dataclasses import dataclass
|
||||
# from typing import Union
|
||||
|
||||
# from langchain_core.messages import HumanMessage
|
||||
# from langchain.agents import create_react_agent
|
||||
# from langchain.agents.responses import (
|
||||
# MultipleStructuredOutputsError,
|
||||
# NativeOutput,
|
||||
# StructuredOutputParsingError,
|
||||
# ToolOutput,
|
||||
# )
|
||||
# from pydantic import BaseModel, Field
|
||||
# from typing_extensions import TypedDict
|
||||
|
||||
# from tests.model import FakeToolCallingModel
|
||||
|
||||
# Gate the OpenAI integration tests on the optional langchain_openai package.
try:
    from langchain_openai import ChatOpenAI
except ImportError:
    skip_openai_integration_tests = True
else:
    skip_openai_integration_tests = False
|
||||
|
||||
|
||||
# Test data models
|
||||
# NOTE(review): this module calls pytest.skip(..., allow_module_level=True)
# above, and BaseModel/Field imports are commented out — this code never runs.
class WeatherBaseModel(BaseModel):
    """Weather response."""

    temperature: float = Field(description="The temperature in fahrenheit")
    condition: str = Field(description="Weather condition")
|
||||
|
||||
|
||||
# Dataclass variant of the weather schema, mirroring WeatherBaseModel.
@dataclass
class WeatherDataclass:
    """Weather response."""

    temperature: float
    condition: str
|
||||
|
||||
|
||||
# TypedDict variant of the weather schema, mirroring WeatherBaseModel.
class WeatherTypedDict(TypedDict):
    """Weather response."""

    temperature: float
    condition: str
|
||||
|
||||
|
||||
# Raw JSON-schema variant of the weather schema; the "title" becomes the
# structured-output tool name ("weather_schema") in the tests below.
weather_json_schema = {
    "type": "object",
    "properties": {
        "temperature": {"type": "number", "description": "Temperature in fahrenheit"},
        "condition": {"type": "string", "description": "Weather condition"},
    },
    "title": "weather_schema",
    "required": ["temperature", "condition"],
}
|
||||
|
||||
|
||||
class LocationResponse(BaseModel):
    """Location response schema used for union-of-schemas tests."""

    city: str = Field(description="The city name")
    country: str = Field(description="The country name")
|
||||
|
||||
|
||||
class LocationTypedDict(TypedDict):
    """TypedDict variant of LocationResponse."""

    city: str
    country: str
|
||||
|
||||
|
||||
# Raw JSON-schema variant of the location schema; "title" is the tool name.
location_json_schema = {
    "type": "object",
    "properties": {
        "city": {"type": "string", "description": "The city name"},
        "country": {"type": "string", "description": "The country name"},
    },
    "title": "location_schema",
    "required": ["city", "country"],
}
|
||||
|
||||
|
||||
def get_weather() -> str:
    """Get the weather."""
    weather_report = "The weather is sunny and 75°F."
    return weather_report
|
||||
|
||||
|
||||
def get_location() -> str:
    """Get the current location."""
    current_location = "You are in New York, USA."
    return current_location
|
||||
|
||||
|
||||
# Standardized test data
WEATHER_DATA = {"temperature": 75.0, "condition": "sunny"}
LOCATION_DATA = {"city": "New York", "country": "USA"}

# Standardized expected responses — one per schema flavor, all describing the
# same WEATHER_DATA / LOCATION_DATA values.
EXPECTED_WEATHER_PYDANTIC = WeatherBaseModel(**WEATHER_DATA)
EXPECTED_WEATHER_DATACLASS = WeatherDataclass(**WEATHER_DATA)
EXPECTED_WEATHER_DICT: WeatherTypedDict = {"temperature": 75.0, "condition": "sunny"}
EXPECTED_LOCATION = LocationResponse(**LOCATION_DATA)
EXPECTED_LOCATION_DICT: LocationTypedDict = {"city": "New York", "country": "USA"}
|
||||
|
||||
|
||||
class TestResponseFormatAsModel:
    """Passing a schema directly as response_format, one test per flavor.

    Expected message count 5: human, ai (tool call), tool, ai (structured
    call), tool (artificial structured-output tool message).
    """

    def test_pydantic_model(self) -> None:
        """Test response_format as Pydantic model."""
        # Script: call get_weather, then emit the structured-output tool call.
        tool_calls = [
            [{"args": {}, "id": "1", "name": "get_weather"}],
            [
                {
                    "name": "WeatherBaseModel",
                    "id": "2",
                    "args": WEATHER_DATA,
                }
            ],
        ]

        model = FakeToolCallingModel(tool_calls=tool_calls)

        agent = create_react_agent(model, [get_weather], response_format=WeatherBaseModel)
        response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

        assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
        assert len(response["messages"]) == 5

    def test_dataclass(self) -> None:
        """Test response_format as dataclass."""
        tool_calls = [
            [{"args": {}, "id": "1", "name": "get_weather"}],
            [
                {
                    "name": "WeatherDataclass",
                    "id": "2",
                    "args": WEATHER_DATA,
                }
            ],
        ]

        model = FakeToolCallingModel(tool_calls=tool_calls)

        agent = create_react_agent(model, [get_weather], response_format=WeatherDataclass)
        response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

        assert response["structured_response"] == EXPECTED_WEATHER_DATACLASS
        assert len(response["messages"]) == 5

    def test_typed_dict(self) -> None:
        """Test response_format as TypedDict."""
        tool_calls = [
            [{"args": {}, "id": "1", "name": "get_weather"}],
            [
                {
                    "name": "WeatherTypedDict",
                    "id": "2",
                    "args": WEATHER_DATA,
                }
            ],
        ]

        model = FakeToolCallingModel(tool_calls=tool_calls)

        agent = create_react_agent(model, [get_weather], response_format=WeatherTypedDict)
        response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

        assert response["structured_response"] == EXPECTED_WEATHER_DICT
        assert len(response["messages"]) == 5

    def test_json_schema(self) -> None:
        """Test response_format as JSON schema."""
        # The tool name "weather_schema" must match the schema's "title".
        tool_calls = [
            [{"args": {}, "id": "1", "name": "get_weather"}],
            [
                {
                    "name": "weather_schema",
                    "id": "2",
                    "args": WEATHER_DATA,
                }
            ],
        ]

        model = FakeToolCallingModel(tool_calls=tool_calls)

        agent = create_react_agent(model, [get_weather], response_format=weather_json_schema)
        response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

        assert response["structured_response"] == EXPECTED_WEATHER_DICT
        assert len(response["messages"]) == 5
|
||||
|
||||
|
||||
class TestResponseFormatAsToolOutput:
    """Explicit ToolOutput(...) response_format: schema flavors, unions,
    and error handling / retry behavior for structured-output tool calls."""

    def test_pydantic_model(self) -> None:
        """Test response_format as ToolOutput with Pydantic model."""
        tool_calls = [
            [{"args": {}, "id": "1", "name": "get_weather"}],
            [
                {
                    "name": "WeatherBaseModel",
                    "id": "2",
                    "args": WEATHER_DATA,
                }
            ],
        ]

        model = FakeToolCallingModel(tool_calls=tool_calls)

        agent = create_react_agent(
            model, [get_weather], response_format=ToolOutput(WeatherBaseModel)
        )
        response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

        assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
        assert len(response["messages"]) == 5

    def test_dataclass(self) -> None:
        """Test response_format as ToolOutput with dataclass."""
        tool_calls = [
            [{"args": {}, "id": "1", "name": "get_weather"}],
            [
                {
                    "name": "WeatherDataclass",
                    "id": "2",
                    "args": WEATHER_DATA,
                }
            ],
        ]

        model = FakeToolCallingModel(tool_calls=tool_calls)

        agent = create_react_agent(
            model, [get_weather], response_format=ToolOutput(WeatherDataclass)
        )
        response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

        assert response["structured_response"] == EXPECTED_WEATHER_DATACLASS
        assert len(response["messages"]) == 5

    def test_typed_dict(self) -> None:
        """Test response_format as ToolOutput with TypedDict."""
        tool_calls = [
            [{"args": {}, "id": "1", "name": "get_weather"}],
            [
                {
                    "name": "WeatherTypedDict",
                    "id": "2",
                    "args": WEATHER_DATA,
                }
            ],
        ]

        model = FakeToolCallingModel(tool_calls=tool_calls)

        agent = create_react_agent(
            model, [get_weather], response_format=ToolOutput(WeatherTypedDict)
        )
        response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

        assert response["structured_response"] == EXPECTED_WEATHER_DICT
        assert len(response["messages"]) == 5

    def test_json_schema(self) -> None:
        """Test response_format as ToolOutput with JSON schema."""
        tool_calls = [
            [{"args": {}, "id": "1", "name": "get_weather"}],
            [
                {
                    "name": "weather_schema",
                    "id": "2",
                    "args": WEATHER_DATA,
                }
            ],
        ]

        model = FakeToolCallingModel(tool_calls=tool_calls)

        agent = create_react_agent(
            model, [get_weather], response_format=ToolOutput(weather_json_schema)
        )
        response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

        assert response["structured_response"] == EXPECTED_WEATHER_DICT
        assert len(response["messages"]) == 5

    def test_union_of_json_schemas(self) -> None:
        """Test response_format as ToolOutput with union of JSON schemas."""
        tool_calls = [
            [{"args": {}, "id": "1", "name": "get_weather"}],
            [
                {
                    "name": "weather_schema",
                    "id": "2",
                    "args": WEATHER_DATA,
                }
            ],
        ]

        model = FakeToolCallingModel(tool_calls=tool_calls)

        agent = create_react_agent(
            model,
            [get_weather, get_location],
            response_format=ToolOutput({"oneOf": [weather_json_schema, location_json_schema]}),
        )
        response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

        assert response["structured_response"] == EXPECTED_WEATHER_DICT
        assert len(response["messages"]) == 5

        # Test with LocationResponse
        tool_calls_location = [
            [{"args": {}, "id": "1", "name": "get_location"}],
            [
                {
                    "name": "location_schema",
                    "id": "2",
                    "args": LOCATION_DATA,
                }
            ],
        ]

        model_location = FakeToolCallingModel(tool_calls=tool_calls_location)

        agent_location = create_react_agent(
            model_location,
            [get_weather, get_location],
            response_format=ToolOutput({"oneOf": [weather_json_schema, location_json_schema]}),
        )
        response_location = agent_location.invoke({"messages": [HumanMessage("Where am I?")]})

        assert response_location["structured_response"] == EXPECTED_LOCATION_DICT
        assert len(response_location["messages"]) == 5

    def test_union_of_types(self) -> None:
        """Test response_format as ToolOutput with Union of various types."""
        # Test with WeatherBaseModel
        tool_calls = [
            [{"args": {}, "id": "1", "name": "get_weather"}],
            [
                {
                    "name": "WeatherBaseModel",
                    "id": "2",
                    "args": WEATHER_DATA,
                }
            ],
        ]

        model = FakeToolCallingModel[Union[WeatherBaseModel, LocationResponse]](
            tool_calls=tool_calls
        )

        agent = create_react_agent(
            model,
            [get_weather, get_location],
            response_format=ToolOutput(Union[WeatherBaseModel, LocationResponse]),
        )
        response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

        assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
        assert len(response["messages"]) == 5

        # Test with LocationResponse
        tool_calls_location = [
            [{"args": {}, "id": "1", "name": "get_location"}],
            [
                {
                    "name": "LocationResponse",
                    "id": "2",
                    "args": LOCATION_DATA,
                }
            ],
        ]

        model_location = FakeToolCallingModel(tool_calls=tool_calls_location)

        agent_location = create_react_agent(
            model_location,
            [get_weather, get_location],
            response_format=ToolOutput(Union[WeatherBaseModel, LocationResponse]),
        )
        response_location = agent_location.invoke({"messages": [HumanMessage("Where am I?")]})

        assert response_location["structured_response"] == EXPECTED_LOCATION
        assert len(response_location["messages"]) == 5

    def test_multiple_structured_outputs_error_without_retry(self) -> None:
        """Test that MultipleStructuredOutputsError is raised when model returns multiple structured tool calls without retry."""
        # Single invocation emitting two structured-output calls at once.
        tool_calls = [
            [
                {
                    "name": "WeatherBaseModel",
                    "id": "1",
                    "args": WEATHER_DATA,
                },
                {
                    "name": "LocationResponse",
                    "id": "2",
                    "args": LOCATION_DATA,
                },
            ],
        ]

        model = FakeToolCallingModel(tool_calls=tool_calls)

        agent = create_react_agent(
            model,
            [],
            response_format=ToolOutput(
                Union[WeatherBaseModel, LocationResponse],
                handle_errors=False,
            ),
        )

        with pytest.raises(
            MultipleStructuredOutputsError,
            match=".*WeatherBaseModel.*LocationResponse.*",
        ):
            agent.invoke({"messages": [HumanMessage("Give me weather and location")]})

    def test_multiple_structured_outputs_with_retry(self) -> None:
        """Test that retry handles multiple structured output tool calls."""
        # First turn: two conflicting structured calls; second turn: one valid.
        tool_calls = [
            [
                {
                    "name": "WeatherBaseModel",
                    "id": "1",
                    "args": WEATHER_DATA,
                },
                {
                    "name": "LocationResponse",
                    "id": "2",
                    "args": LOCATION_DATA,
                },
            ],
            [
                {
                    "name": "WeatherBaseModel",
                    "id": "3",
                    "args": WEATHER_DATA,
                },
            ],
        ]

        model = FakeToolCallingModel(tool_calls=tool_calls)

        agent = create_react_agent(
            model,
            [],
            response_format=ToolOutput(
                Union[WeatherBaseModel, LocationResponse],
                handle_errors=True,
            ),
        )

        response = agent.invoke({"messages": [HumanMessage("Give me weather")]})

        # HumanMessage, AIMessage, ToolMessage, ToolMessage, AI, ToolMessage
        assert len(response["messages"]) == 6
        assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC

    def test_structured_output_parsing_error_without_retry(self) -> None:
        """Test that StructuredOutputParsingError is raised when tool args fail to parse without retry."""
        tool_calls = [
            [
                {
                    "name": "WeatherBaseModel",
                    "id": "1",
                    "args": {"invalid": "data"},
                },
            ],
        ]

        model = FakeToolCallingModel(tool_calls=tool_calls)

        agent = create_react_agent(
            model,
            [],
            response_format=ToolOutput(
                WeatherBaseModel,
                handle_errors=False,
            ),
        )

        with pytest.raises(
            StructuredOutputParsingError,
            match=".*WeatherBaseModel.*",
        ):
            agent.invoke({"messages": [HumanMessage("What's the weather?")]})

    def test_structured_output_parsing_error_with_retry(self) -> None:
        """Test that retry handles parsing errors for structured output."""
        tool_calls = [
            [
                {
                    "name": "WeatherBaseModel",
                    "id": "1",
                    "args": {"invalid": "data"},
                },
            ],
            [
                {
                    "name": "WeatherBaseModel",
                    "id": "2",
                    "args": WEATHER_DATA,
                },
            ],
        ]

        model = FakeToolCallingModel(tool_calls=tool_calls)

        agent = create_react_agent(
            model,
            [],
            response_format=ToolOutput(
                WeatherBaseModel,
                # Retry only this specific error class.
                handle_errors=(StructuredOutputParsingError,),
            ),
        )

        response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

        # HumanMessage, AIMessage, ToolMessage, AIMessage, ToolMessage
        assert len(response["messages"]) == 5
        assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC

    def test_retry_with_custom_function(self) -> None:
        """Test retry with custom message generation."""
        tool_calls = [
            [
                {
                    "name": "WeatherBaseModel",
                    "id": "1",
                    "args": WEATHER_DATA,
                },
                {
                    "name": "LocationResponse",
                    "id": "2",
                    "args": LOCATION_DATA,
                },
            ],
            [
                {
                    "name": "WeatherBaseModel",
                    "id": "3",
                    "args": WEATHER_DATA,
                },
            ],
        ]

        model = FakeToolCallingModel(tool_calls=tool_calls)

        def custom_message(exception: Exception) -> str:
            # Translate the error into the ToolMessage content fed back to the model.
            if isinstance(exception, MultipleStructuredOutputsError):
                return "Custom error: Multiple outputs not allowed"
            return "Custom error"

        agent = create_react_agent(
            model,
            [],
            response_format=ToolOutput(
                Union[WeatherBaseModel, LocationResponse],
                handle_errors=custom_message,
            ),
        )

        response = agent.invoke({"messages": [HumanMessage("Give me weather")]})

        # HumanMessage, AIMessage, ToolMessage, ToolMessage, AI, ToolMessage
        assert len(response["messages"]) == 6
        assert response["messages"][2].content == "Custom error: Multiple outputs not allowed"
        assert response["messages"][3].content == "Custom error: Multiple outputs not allowed"
        assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC

    def test_retry_with_custom_string_message(self) -> None:
        """Test retry with custom static string message."""
        tool_calls = [
            [
                {
                    "name": "WeatherBaseModel",
                    "id": "1",
                    "args": {"invalid": "data"},
                },
            ],
            [
                {
                    "name": "WeatherBaseModel",
                    "id": "2",
                    "args": WEATHER_DATA,
                },
            ],
        ]

        model = FakeToolCallingModel(tool_calls=tool_calls)

        agent = create_react_agent(
            model,
            [],
            response_format=ToolOutput(
                WeatherBaseModel,
                handle_errors="Please provide valid weather data with temperature and condition.",
            ),
        )

        response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

        assert len(response["messages"]) == 5
        assert (
            response["messages"][2].content
            == "Please provide valid weather data with temperature and condition."
        )
        assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
|
||||
|
||||
|
||||
class TestResponseFormatAsNativeOutput:
    """NativeOutput(...) response_format: the fake model serializes its
    `structured_response` as the final AI message, so only 4 messages appear
    (no artificial structured-output ToolMessage)."""

    def test_pydantic_model(self) -> None:
        """Test response_format as NativeOutput with Pydantic model."""
        tool_calls = [
            [{"args": {}, "id": "1", "name": "get_weather"}],
        ]

        model = FakeToolCallingModel[WeatherBaseModel](
            tool_calls=tool_calls, structured_response=EXPECTED_WEATHER_PYDANTIC
        )

        agent = create_react_agent(
            model, [get_weather], response_format=NativeOutput(WeatherBaseModel)
        )
        response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

        assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
        assert len(response["messages"]) == 4

    def test_dataclass(self) -> None:
        """Test response_format as NativeOutput with dataclass."""
        tool_calls = [
            [{"args": {}, "id": "1", "name": "get_weather"}],
        ]

        model = FakeToolCallingModel[WeatherDataclass](
            tool_calls=tool_calls, structured_response=EXPECTED_WEATHER_DATACLASS
        )

        agent = create_react_agent(
            model, [get_weather], response_format=NativeOutput(WeatherDataclass)
        )
        response = agent.invoke(
            {"messages": [HumanMessage("What's the weather?")]},
        )

        assert response["structured_response"] == EXPECTED_WEATHER_DATACLASS
        assert len(response["messages"]) == 4

    def test_typed_dict(self) -> None:
        """Test response_format as NativeOutput with TypedDict."""
        tool_calls = [
            [{"args": {}, "id": "1", "name": "get_weather"}],
        ]

        model = FakeToolCallingModel[WeatherTypedDict](
            tool_calls=tool_calls, structured_response=EXPECTED_WEATHER_DICT
        )

        agent = create_react_agent(
            model, [get_weather], response_format=NativeOutput(WeatherTypedDict)
        )
        response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

        assert response["structured_response"] == EXPECTED_WEATHER_DICT
        assert len(response["messages"]) == 4

    def test_json_schema(self) -> None:
        """Test response_format as NativeOutput with JSON schema."""
        tool_calls = [
            [{"args": {}, "id": "1", "name": "get_weather"}],
        ]

        model = FakeToolCallingModel[dict](
            tool_calls=tool_calls, structured_response=EXPECTED_WEATHER_DICT
        )

        agent = create_react_agent(
            model, [get_weather], response_format=NativeOutput(weather_json_schema)
        )
        response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

        assert response["structured_response"] == EXPECTED_WEATHER_DICT
        assert len(response["messages"]) == 4
|
||||
|
||||
|
||||
def test_union_of_types() -> None:
    """Test response_format as NativeOutput with Union (if supported).

    NOTE(review): the docstring says NativeOutput, but the body passes
    ``ToolOutput(...)`` and asserts the 5-message tool-output shape —
    one of the two looks wrong; confirm intent before relying on this.
    """
    tool_calls = [
        [{"args": {}, "id": "1", "name": "get_weather"}],
        [
            {
                "name": "WeatherBaseModel",
                "id": "2",
                "args": WEATHER_DATA,
            }
        ],
    ]

    model = FakeToolCallingModel[Union[WeatherBaseModel, LocationResponse]](
        tool_calls=tool_calls, structured_response=EXPECTED_WEATHER_PYDANTIC
    )

    agent = create_react_agent(
        model,
        [get_weather, get_location],
        response_format=ToolOutput(Union[WeatherBaseModel, LocationResponse]),
    )
    response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

    assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
    assert len(response["messages"]) == 5
|
||||
|
||||
|
||||
@pytest.mark.skipif(skip_openai_integration_tests, reason="OpenAI integration tests are disabled.")
def test_inference_to_native_output() -> None:
    """Test that native output is inferred when a model supports it.

    NOTE(review): hits a live OpenAI model; the exact temperature/condition
    assertions depend on the model echoing the tool result faithfully.
    """
    model = ChatOpenAI(model="gpt-5")
    agent = create_react_agent(
        model,
        prompt="You are a helpful weather assistant. Please call the get_weather tool, then use the WeatherReport tool to generate the final response.",
        tools=[get_weather],
        response_format=WeatherBaseModel,
    )
    response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

    assert isinstance(response["structured_response"], WeatherBaseModel)
    assert response["structured_response"].temperature == 75.0
    assert response["structured_response"].condition.lower() == "sunny"
    assert len(response["messages"]) == 4

    assert [m.type for m in response["messages"]] == [
        "human",  # "What's the weather?"
        "ai",  # "What's the weather?"
        "tool",  # "The weather is sunny and 75°F."
        "ai",  # structured response
    ]
|
||||
|
||||
|
||||
@pytest.mark.skipif(skip_openai_integration_tests, reason="OpenAI integration tests are disabled.")
def test_inference_to_tool_output() -> None:
    """Test that tool output is inferred when a model supports it.

    NOTE(review): hits a live OpenAI model; tool-output strategy adds a final
    artificial ToolMessage, hence 5 messages (vs 4 for native output).
    """
    model = ChatOpenAI(model="gpt-4")
    agent = create_react_agent(
        model,
        prompt="You are a helpful weather assistant. Please call the get_weather tool, then use the WeatherReport tool to generate the final response.",
        tools=[get_weather],
        response_format=ToolOutput(WeatherBaseModel),
    )
    response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})

    assert isinstance(response["structured_response"], WeatherBaseModel)
    assert response["structured_response"].temperature == 75.0
    assert response["structured_response"].condition.lower() == "sunny"
    assert len(response["messages"]) == 5

    assert [m.type for m in response["messages"]] == [
        "human",  # "What's the weather?"
        "ai",  # "What's the weather?"
        "tool",  # "The weather is sunny and 75°F."
        "ai",  # structured response
        "tool",  # artificial tool message
    ]
|
||||
@@ -1,140 +0,0 @@
|
||||
"""Unit tests for langgraph.prebuilt.responses module."""
|
||||
|
||||
import pytest
|
||||
|
||||
# Skip this test since langgraph.prebuilt.responses is not available
|
||||
pytest.skip("langgraph.prebuilt.responses not available", allow_module_level=True)
|
||||
|
||||
|
||||
class _TestModel(BaseModel):
|
||||
"""A test model for structured output."""
|
||||
|
||||
name: str
|
||||
age: int
|
||||
email: str = "default@example.com"
|
||||
|
||||
|
||||
class CustomModel(BaseModel):
|
||||
"""Custom model with a custom docstring."""
|
||||
|
||||
value: float
|
||||
description: str
|
||||
|
||||
|
||||
class EmptyDocModel(BaseModel):
|
||||
# No custom docstring, should have no description in tool
|
||||
data: str
|
||||
|
||||
|
||||
class TestUsingToolStrategy:
|
||||
"""Test UsingToolStrategy dataclass."""
|
||||
|
||||
def test_basic_creation(self) -> None:
|
||||
"""Test basic UsingToolStrategy creation."""
|
||||
strategy = ToolOutput(schema=_TestModel)
|
||||
assert strategy.schema == _TestModel
|
||||
assert strategy.tool_message_content is None
|
||||
assert len(strategy.schema_specs) == 1
|
||||
|
||||
def test_multiple_schemas(self) -> None:
|
||||
"""Test UsingToolStrategy with multiple schemas."""
|
||||
strategy = ToolOutput(schema=Union[_TestModel, CustomModel])
|
||||
assert len(strategy.schema_specs) == 2
|
||||
assert strategy.schema_specs[0].schema == _TestModel
|
||||
assert strategy.schema_specs[1].schema == CustomModel
|
||||
|
||||
def test_schema_with_tool_message_content(self) -> None:
|
||||
"""Test UsingToolStrategy with tool message content."""
|
||||
strategy = ToolOutput(schema=_TestModel, tool_message_content="custom message")
|
||||
assert strategy.schema == _TestModel
|
||||
assert strategy.tool_message_content == "custom message"
|
||||
assert len(strategy.schema_specs) == 1
|
||||
|
||||
|
||||
class TestOutputToolBinding:
|
||||
"""Test OutputToolBinding dataclass and its methods."""
|
||||
|
||||
def test_from_schema_spec_basic(self) -> None:
|
||||
"""Test basic OutputToolBinding creation from SchemaSpec."""
|
||||
schema_spec = _SchemaSpec(schema=_TestModel)
|
||||
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
|
||||
|
||||
assert tool_binding.schema == _TestModel
|
||||
assert tool_binding.schema_kind == "pydantic"
|
||||
assert tool_binding.tool is not None
|
||||
assert tool_binding.tool.name == "_TestModel"
|
||||
|
||||
def test_from_schema_spec_with_custom_name(self) -> None:
|
||||
"""Test OutputToolBinding creation with custom name."""
|
||||
schema_spec = _SchemaSpec(schema=_TestModel, name="custom_tool_name")
|
||||
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
|
||||
assert tool_binding.tool.name == "custom_tool_name"
|
||||
|
||||
def test_from_schema_spec_with_custom_description(self) -> None:
|
||||
"""Test OutputToolBinding creation with custom description."""
|
||||
schema_spec = _SchemaSpec(schema=_TestModel, description="Custom tool description")
|
||||
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
|
||||
|
||||
assert tool_binding.tool.description == "Custom tool description"
|
||||
|
||||
def test_from_schema_spec_with_model_docstring(self) -> None:
|
||||
"""Test OutputToolBinding creation using model docstring as description."""
|
||||
schema_spec = _SchemaSpec(schema=CustomModel)
|
||||
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
|
||||
|
||||
assert tool_binding.tool.description == "Custom model with a custom docstring."
|
||||
|
||||
@pytest.mark.skip(reason="Need to fix bug in langchain-core for inheritance of doc-strings.")
|
||||
def test_from_schema_spec_empty_docstring(self) -> None:
|
||||
"""Test OutputToolBinding creation with model that has default docstring."""
|
||||
|
||||
# Create a model with the same docstring as BaseModel
|
||||
class DefaultDocModel(BaseModel):
|
||||
# This should have the same docstring as BaseModel
|
||||
pass
|
||||
|
||||
schema_spec = _SchemaSpec(schema=DefaultDocModel)
|
||||
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
|
||||
|
||||
# Should use empty description when model has default BaseModel docstring
|
||||
assert tool_binding.tool.description == ""
|
||||
|
||||
def test_parse_payload_pydantic_success(self) -> None:
|
||||
"""Test successful parsing for Pydantic model."""
|
||||
schema_spec = _SchemaSpec(schema=_TestModel)
|
||||
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
|
||||
|
||||
tool_args = {"name": "John", "age": 30}
|
||||
result = tool_binding.parse(tool_args)
|
||||
|
||||
assert isinstance(result, _TestModel)
|
||||
assert result.name == "John"
|
||||
assert result.age == 30
|
||||
assert result.email == "default@example.com" # default value
|
||||
|
||||
def test_parse_payload_pydantic_validation_error(self) -> None:
|
||||
"""Test parsing failure for invalid Pydantic data."""
|
||||
schema_spec = _SchemaSpec(schema=_TestModel)
|
||||
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
|
||||
|
||||
# Missing required field 'name'
|
||||
tool_args = {"age": 30}
|
||||
|
||||
with pytest.raises(ValueError, match="Failed to parse data to _TestModel"):
|
||||
tool_binding.parse(tool_args)
|
||||
|
||||
|
||||
class TestEdgeCases:
|
||||
"""Test edge cases and error conditions."""
|
||||
|
||||
def test_empty_schemas_list(self) -> None:
|
||||
"""Test UsingToolStrategy with empty schemas list."""
|
||||
strategy = ToolOutput(EmptyDocModel)
|
||||
assert len(strategy.schema_specs) == 1
|
||||
|
||||
@pytest.mark.skip(reason="Need to fix bug in langchain-core for inheritance of doc-strings.")
|
||||
def test_base_model_doc_constant(self) -> None:
|
||||
"""Test that BASE_MODEL_DOC constant is set correctly."""
|
||||
binding = OutputToolBinding.from_schema_spec(_SchemaSpec(EmptyDocModel))
|
||||
assert binding.tool.name == "EmptyDocModel"
|
||||
assert binding.tool.description[:5] == "" # Should be empty for default docstring
|
||||
@@ -1,147 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import pytest
|
||||
|
||||
# Skip this test since langgraph.prebuilt.responses is not available
|
||||
pytest.skip("langgraph.prebuilt.responses not available", allow_module_level=True)
|
||||
|
||||
try:
|
||||
from langchain_openai import ChatOpenAI
|
||||
except ImportError:
|
||||
skip_openai_integration_tests = True
|
||||
else:
|
||||
skip_openai_integration_tests = False
|
||||
|
||||
AGENT_PROMPT = "You are an HR assistant."
|
||||
|
||||
|
||||
class ToolCalls(BaseSchema):
|
||||
get_employee_role: int
|
||||
get_employee_department: int
|
||||
|
||||
|
||||
class AssertionByInvocation(BaseSchema):
|
||||
prompt: str
|
||||
tools_with_expected_calls: ToolCalls
|
||||
expected_last_message: str
|
||||
expected_structured_response: Optional[Dict[str, Any]]
|
||||
llm_request_count: int
|
||||
|
||||
|
||||
class TestCase(BaseSchema):
|
||||
name: str
|
||||
response_format: Union[Dict[str, Any], List[Dict[str, Any]]]
|
||||
assertions_by_invocation: List[AssertionByInvocation]
|
||||
|
||||
|
||||
class Employee(BaseModel):
|
||||
name: str
|
||||
role: str
|
||||
department: str
|
||||
|
||||
|
||||
EMPLOYEES: list[Employee] = [
|
||||
Employee(name="Sabine", role="Developer", department="IT"),
|
||||
Employee(name="Henrik", role="Product Manager", department="IT"),
|
||||
Employee(name="Jessica", role="HR", department="People"),
|
||||
]
|
||||
|
||||
TEST_CASES = load_spec("responses", as_model=TestCase)
|
||||
|
||||
|
||||
def _make_tool(fn, *, name: str, description: str):
|
||||
mock = MagicMock(side_effect=lambda *, name: fn(name=name))
|
||||
InputModel = create_model(f"{name}_input", name=(str, ...))
|
||||
|
||||
@tool(name, description=description, args_schema=InputModel)
|
||||
def _wrapped(name: str):
|
||||
return mock(name=name)
|
||||
|
||||
return {"tool": _wrapped, "mock": mock}
|
||||
|
||||
|
||||
@pytest.mark.skipif(skip_openai_integration_tests, reason="OpenAI integration tests are disabled.")
|
||||
@pytest.mark.parametrize("case", TEST_CASES, ids=[c.name for c in TEST_CASES])
|
||||
def test_responses_integration_matrix(case: TestCase) -> None:
|
||||
if case.name == "asking for information that does not fit into the response format":
|
||||
pytest.xfail(
|
||||
"currently failing due to undefined behavior when model cannot conform to any of the structured response formats."
|
||||
)
|
||||
|
||||
def get_employee_role(*, name: str) -> Optional[str]:
|
||||
for e in EMPLOYEES:
|
||||
if e.name == name:
|
||||
return e.role
|
||||
return None
|
||||
|
||||
def get_employee_department(*, name: str) -> Optional[str]:
|
||||
for e in EMPLOYEES:
|
||||
if e.name == name:
|
||||
return e.department
|
||||
return None
|
||||
|
||||
role_tool = _make_tool(
|
||||
get_employee_role,
|
||||
name="get_employee_role",
|
||||
description="Get the employee role by name",
|
||||
)
|
||||
dept_tool = _make_tool(
|
||||
get_employee_department,
|
||||
name="get_employee_department",
|
||||
description="Get the employee department by name",
|
||||
)
|
||||
|
||||
response_format_spec = case.response_format
|
||||
if isinstance(response_format_spec, dict):
|
||||
response_format_spec = [response_format_spec]
|
||||
# Unwrap nested schema objects
|
||||
response_format_spec = [item.get("schema", item) for item in response_format_spec]
|
||||
if len(response_format_spec) == 1:
|
||||
tool_output = ToolOutput(response_format_spec[0])
|
||||
else:
|
||||
tool_output = ToolOutput({"oneOf": response_format_spec})
|
||||
|
||||
llm_request_count = 0
|
||||
|
||||
for assertion in case.assertions_by_invocation:
|
||||
|
||||
def on_request(request: httpx.Request) -> None:
|
||||
nonlocal llm_request_count
|
||||
llm_request_count += 1
|
||||
|
||||
http_client = httpx.Client(
|
||||
event_hooks={"request": [on_request]},
|
||||
)
|
||||
|
||||
model = ChatOpenAI(
|
||||
model="gpt-4o",
|
||||
temperature=0,
|
||||
http_client=http_client,
|
||||
)
|
||||
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
tools=[role_tool["tool"], dept_tool["tool"]],
|
||||
prompt=AGENT_PROMPT,
|
||||
response_format=tool_output,
|
||||
)
|
||||
|
||||
result = agent.invoke({"messages": [HumanMessage(assertion.prompt)]})
|
||||
|
||||
# Count tool calls
|
||||
assert role_tool["mock"].call_count == assertion.tools_with_expected_calls.get_employee_role
|
||||
assert (
|
||||
dept_tool["mock"].call_count
|
||||
== assertion.tools_with_expected_calls.get_employee_department
|
||||
)
|
||||
|
||||
# Count LLM calls
|
||||
assert llm_request_count == assertion.llm_request_count
|
||||
|
||||
# Check last message content
|
||||
last_message = result["messages"][-1]
|
||||
assert last_message.content == assertion.expected_last_message
|
||||
|
||||
# Check structured response
|
||||
structured_response_json = result["structured_response"]
|
||||
assert structured_response_json == assertion.expected_structured_response
|
||||
@@ -1,107 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import pytest
|
||||
|
||||
# Skip this test since langgraph.prebuilt.responses is not available
|
||||
pytest.skip("langgraph.prebuilt.responses not available", allow_module_level=True)
|
||||
|
||||
try:
|
||||
from langchain_openai import ChatOpenAI
|
||||
except ImportError:
|
||||
skip_openai_integration_tests = True
|
||||
else:
|
||||
skip_openai_integration_tests = False
|
||||
|
||||
AGENT_PROMPT = """
|
||||
You are a strict polling bot.
|
||||
|
||||
- Only use the "poll_job" tool until it returns { status: "succeeded" }.
|
||||
- If status is "pending", call the tool again. Do not produce a final answer.
|
||||
- When it is "succeeded", return exactly: "Attempts: <number>" with no extra text.
|
||||
"""
|
||||
|
||||
|
||||
class TestCase(BaseSchema):
|
||||
name: str
|
||||
return_direct: bool
|
||||
response_format: Optional[Dict[str, Any]]
|
||||
expected_tool_calls: int
|
||||
expected_last_message: str
|
||||
expected_structured_response: Optional[Dict[str, Any]]
|
||||
|
||||
|
||||
TEST_CASES = load_spec("return_direct", as_model=TestCase)
|
||||
|
||||
|
||||
def _make_tool(return_direct: bool):
|
||||
attempts = 0
|
||||
|
||||
def _side_effect():
|
||||
nonlocal attempts
|
||||
attempts += 1
|
||||
return {
|
||||
"status": "succeeded" if attempts >= 10 else "pending",
|
||||
"attempts": attempts,
|
||||
}
|
||||
|
||||
mock = MagicMock(side_effect=_side_effect)
|
||||
|
||||
@tool(
|
||||
"pollJob",
|
||||
description=(
|
||||
"Check the status of a long-running job. "
|
||||
"Returns { status: 'pending' | 'succeeded', attempts: number }."
|
||||
),
|
||||
return_direct=return_direct,
|
||||
)
|
||||
def _wrapped():
|
||||
return mock()
|
||||
|
||||
return {"tool": _wrapped, "mock": mock}
|
||||
|
||||
|
||||
@pytest.mark.skipif(skip_openai_integration_tests, reason="OpenAI integration tests are disabled.")
|
||||
@pytest.mark.parametrize("case", TEST_CASES, ids=[c.name for c in TEST_CASES])
|
||||
def test_return_direct_integration_matrix(case: TestCase) -> None:
|
||||
poll_tool = _make_tool(case.return_direct)
|
||||
|
||||
model = ChatOpenAI(
|
||||
model="gpt-4o",
|
||||
temperature=0,
|
||||
)
|
||||
|
||||
if case.response_format:
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
tools=[poll_tool["tool"]],
|
||||
prompt=AGENT_PROMPT,
|
||||
response_format=ToolOutput(case.response_format),
|
||||
)
|
||||
else:
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
tools=[poll_tool["tool"]],
|
||||
prompt=AGENT_PROMPT,
|
||||
)
|
||||
|
||||
result = agent.invoke(
|
||||
{
|
||||
"messages": [
|
||||
HumanMessage("Poll the job until it's done and tell me how many attempts it took.")
|
||||
]
|
||||
}
|
||||
)
|
||||
|
||||
# Count tool calls
|
||||
assert poll_tool["mock"].call_count == case.expected_tool_calls
|
||||
|
||||
# Check last message content
|
||||
last_message = result["messages"][-1]
|
||||
assert last_message.content == case.expected_last_message
|
||||
|
||||
# Check structured response
|
||||
if case.expected_structured_response is not None:
|
||||
structured_response_json = result["structured_response"]
|
||||
assert structured_response_json == case.expected_structured_response
|
||||
else:
|
||||
assert "structured_response" not in result
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,21 +0,0 @@
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
from pydantic import BaseModel, ConfigDict
|
||||
from pydantic.alias_generators import to_camel
|
||||
|
||||
|
||||
class BaseSchema(BaseModel):
|
||||
model_config = ConfigDict(
|
||||
alias_generator=to_camel,
|
||||
populate_by_name=True,
|
||||
from_attributes=True,
|
||||
)
|
||||
|
||||
|
||||
def load_spec(spec_name: str, as_model: type[BaseModel]) -> list[BaseModel]:
|
||||
with (Path(__file__).parent / "specifications" / f"{spec_name}.json").open(
|
||||
"r", encoding="utf-8"
|
||||
) as f:
|
||||
data = json.load(f)
|
||||
return [as_model(**item) for item in data]
|
||||
@@ -53,7 +53,9 @@ def pytest_addoption(parser: pytest.Parser) -> None:
|
||||
)
|
||||
|
||||
|
||||
def pytest_collection_modifyitems(config: pytest.Config, items: Sequence[pytest.Function]) -> None:
|
||||
def pytest_collection_modifyitems(
|
||||
config: pytest.Config, items: Sequence[pytest.Function]
|
||||
) -> None:
|
||||
"""Add implementations for handling custom markers.
|
||||
|
||||
At the moment, this adds support for a custom `requires` marker.
|
||||
|
||||
@@ -113,7 +113,9 @@ async def test_aembed_documents(cache_embeddings: CacheBackedEmbeddings) -> None
|
||||
vectors = await cache_embeddings.aembed_documents(texts)
|
||||
expected_vectors: list[list[float]] = [[1, 2.0], [2.0, 3.0], [1.0, 2.0], [3.0, 4.0]]
|
||||
assert vectors == expected_vectors
|
||||
keys = [key async for key in cache_embeddings.document_embedding_store.ayield_keys()]
|
||||
keys = [
|
||||
key async for key in cache_embeddings.document_embedding_store.ayield_keys()
|
||||
]
|
||||
assert len(keys) == 4
|
||||
# UUID is expected to be the same for the same text
|
||||
assert keys[0] == "test_namespace812b86c1-8ebf-5483-95c6-c95cf2b52d12"
|
||||
@@ -126,7 +128,10 @@ async def test_aembed_documents_batch(
|
||||
texts = ["1", "22", "a", "333", "RAISE_EXCEPTION"]
|
||||
with contextlib.suppress(ValueError):
|
||||
await cache_embeddings_batch.aembed_documents(texts)
|
||||
keys = [key async for key in cache_embeddings_batch.document_embedding_store.ayield_keys()]
|
||||
keys = [
|
||||
key
|
||||
async for key in cache_embeddings_batch.document_embedding_store.ayield_keys()
|
||||
]
|
||||
# only the first batch of three embeddings should exist
|
||||
assert len(keys) == 3
|
||||
# UUID is expected to be the same for the same text
|
||||
|
||||
@@ -13,7 +13,9 @@ def test_import_all() -> None:
|
||||
library_code = PKG_ROOT / "langchain"
|
||||
for path in library_code.rglob("*.py"):
|
||||
# Calculate the relative path to the module
|
||||
module_name = path.relative_to(PKG_ROOT).with_suffix("").as_posix().replace("/", ".")
|
||||
module_name = (
|
||||
path.relative_to(PKG_ROOT).with_suffix("").as_posix().replace("/", ".")
|
||||
)
|
||||
if module_name.endswith("__init__"):
|
||||
# Without init
|
||||
module_name = module_name.rsplit(".", 1)[0]
|
||||
@@ -37,7 +39,9 @@ def test_import_all_using_dir() -> None:
|
||||
library_code = PKG_ROOT / "langchain"
|
||||
for path in library_code.rglob("*.py"):
|
||||
# Calculate the relative path to the module
|
||||
module_name = path.relative_to(PKG_ROOT).with_suffix("").as_posix().replace("/", ".")
|
||||
module_name = (
|
||||
path.relative_to(PKG_ROOT).with_suffix("").as_posix().replace("/", ".")
|
||||
)
|
||||
if module_name.endswith("__init__"):
|
||||
# Without init
|
||||
module_name = module_name.rsplit(".", 1)[0]
|
||||
|
||||
54
libs/langchain_v1/uv.lock
generated
54
libs/langchain_v1/uv.lock
generated
@@ -1628,7 +1628,6 @@ test = [
|
||||
{ name = "pytest" },
|
||||
{ name = "pytest-asyncio" },
|
||||
{ name = "pytest-cov" },
|
||||
{ name = "pytest-mock" },
|
||||
{ name = "pytest-socket" },
|
||||
{ name = "pytest-watcher" },
|
||||
{ name = "pytest-xdist" },
|
||||
@@ -1688,7 +1687,6 @@ test = [
|
||||
{ name = "pytest", specifier = ">=8,<9" },
|
||||
{ name = "pytest-asyncio", specifier = ">=0.23.2,<1.0.0" },
|
||||
{ name = "pytest-cov", specifier = ">=4.0.0,<5.0.0" },
|
||||
{ name = "pytest-mock", specifier = ">=3.12.0,<4.0.0" },
|
||||
{ name = "pytest-socket", specifier = ">=0.6.0,<1.0.0" },
|
||||
{ name = "pytest-watcher", specifier = ">=0.2.6,<1.0.0" },
|
||||
{ name = "pytest-xdist", specifier = ">=3.6.1,<4.0.0" },
|
||||
@@ -1759,7 +1757,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "langchain-core"
|
||||
version = "0.3.74"
|
||||
version = "0.3.72"
|
||||
source = { editable = "../core" }
|
||||
dependencies = [
|
||||
{ name = "jsonpatch" },
|
||||
@@ -1810,7 +1808,7 @@ test = [
|
||||
test-integration = []
|
||||
typing = [
|
||||
{ name = "langchain-text-splitters", directory = "../text-splitters" },
|
||||
{ name = "mypy", specifier = ">=1.17.1,<1.18" },
|
||||
{ name = "mypy", specifier = ">=1.15,<1.16" },
|
||||
{ name = "types-pyyaml", specifier = ">=6.0.12.2,<7.0.0.0" },
|
||||
{ name = "types-requests", specifier = ">=2.28.11.5,<3.0.0.0" },
|
||||
]
|
||||
@@ -1939,7 +1937,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "langchain-openai"
|
||||
version = "0.3.31"
|
||||
version = "0.3.28"
|
||||
source = { editable = "../partners/openai" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
@@ -1950,14 +1948,14 @@ dependencies = [
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "langchain-core", editable = "../core" },
|
||||
{ name = "openai", specifier = ">=1.99.9,<2.0.0" },
|
||||
{ name = "openai", specifier = ">=1.86.0,<2.0.0" },
|
||||
{ name = "tiktoken", specifier = ">=0.7,<1" },
|
||||
]
|
||||
|
||||
[package.metadata.requires-dev]
|
||||
codespell = [{ name = "codespell", specifier = ">=2.2.0,<3.0.0" }]
|
||||
dev = [{ name = "langchain-core", editable = "../core" }]
|
||||
lint = [{ name = "ruff", specifier = ">=0.12.8,<0.13" }]
|
||||
lint = [{ name = "ruff", specifier = ">=0.12.2,<0.13" }]
|
||||
test = [
|
||||
{ name = "freezegun", specifier = ">=1.2.2,<2.0.0" },
|
||||
{ name = "langchain-core", editable = "../core" },
|
||||
@@ -1983,7 +1981,7 @@ test-integration = [
|
||||
]
|
||||
typing = [
|
||||
{ name = "langchain-core", editable = "../core" },
|
||||
{ name = "mypy", specifier = ">=1.17.1,<2.0" },
|
||||
{ name = "mypy", specifier = ">=1.10,<2.0" },
|
||||
{ name = "types-tqdm", specifier = ">=4.66.0.5,<5.0.0.0" },
|
||||
]
|
||||
|
||||
@@ -2037,7 +2035,7 @@ requires-dist = [
|
||||
|
||||
[package.metadata.requires-dev]
|
||||
codespell = [{ name = "codespell", specifier = ">=2.2.0,<3.0.0" }]
|
||||
lint = [{ name = "ruff", specifier = ">=0.12.8,<0.13" }]
|
||||
lint = [{ name = "ruff", specifier = ">=0.12.2,<0.13" }]
|
||||
test = [{ name = "langchain-core", editable = "../core" }]
|
||||
test-integration = []
|
||||
typing = [
|
||||
@@ -2051,14 +2049,10 @@ version = "0.3.9"
|
||||
source = { editable = "../text-splitters" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
{ name = "pip" },
|
||||
]
|
||||
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "langchain-core", editable = "../core" },
|
||||
{ name = "pip", specifier = ">=25.2" },
|
||||
]
|
||||
requires-dist = [{ name = "langchain-core", editable = "../core" }]
|
||||
|
||||
[package.metadata.requires-dev]
|
||||
dev = [
|
||||
@@ -2067,7 +2061,7 @@ dev = [
|
||||
]
|
||||
lint = [
|
||||
{ name = "langchain-core", editable = "../core" },
|
||||
{ name = "ruff", specifier = ">=0.12.8,<0.13" },
|
||||
{ name = "ruff", specifier = ">=0.12.2,<0.13" },
|
||||
]
|
||||
test = [
|
||||
{ name = "freezegun", specifier = ">=1.2.2,<2.0.0" },
|
||||
@@ -2084,12 +2078,11 @@ test-integration = [
|
||||
{ name = "sentence-transformers", specifier = ">=3.0.1" },
|
||||
{ name = "spacy", specifier = ">=3.8.7,<4.0.0" },
|
||||
{ name = "thinc", specifier = ">=8.3.6,<9.0.0" },
|
||||
{ name = "tiktoken", specifier = ">=0.8.0,<1.0.0" },
|
||||
{ name = "transformers", specifier = ">=4.51.3,<5.0.0" },
|
||||
]
|
||||
typing = [
|
||||
{ name = "lxml-stubs", specifier = ">=0.5.1,<1.0.0" },
|
||||
{ name = "mypy", specifier = ">=1.17.1,<1.18" },
|
||||
{ name = "mypy", specifier = ">=1.15,<2.0" },
|
||||
{ name = "tiktoken", specifier = ">=0.8.0,<1.0.0" },
|
||||
{ name = "types-requests", specifier = ">=2.31.0.20240218,<3.0.0.0" },
|
||||
]
|
||||
@@ -2664,7 +2657,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "openai"
|
||||
version = "1.101.0"
|
||||
version = "1.97.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anyio" },
|
||||
@@ -2676,9 +2669,9 @@ dependencies = [
|
||||
{ name = "tqdm" },
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/00/7c/eaf06b62281f5ca4f774c4cff066e6ddfd6a027e0ac791be16acec3a95e3/openai-1.101.0.tar.gz", hash = "sha256:29f56df2236069686e64aca0e13c24a4ec310545afb25ef7da2ab1a18523f22d", size = 518415, upload-time = "2025-08-21T21:11:01.645Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a6/57/1c471f6b3efb879d26686d31582997615e969f3bb4458111c9705e56332e/openai-1.97.1.tar.gz", hash = "sha256:a744b27ae624e3d4135225da9b1c89c107a2a7e5bc4c93e5b7b5214772ce7a4e", size = 494267, upload-time = "2025-07-22T13:10:12.607Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c8/a6/0e39baa335bbd1c66c7e0a41dbbec10c5a15ab95c1344e7f7beb28eee65a/openai-1.101.0-py3-none-any.whl", hash = "sha256:6539a446cce154f8d9fb42757acdfd3ed9357ab0d34fcac11096c461da87133b", size = 810772, upload-time = "2025-08-21T21:10:59.215Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ee/35/412a0e9c3f0d37c94ed764b8ac7adae2d834dbd20e69f6aca582118e0f55/openai-1.97.1-py3-none-any.whl", hash = "sha256:4e96bbdf672ec3d44968c9ea39d2c375891db1acc1794668d8149d5fa6000606", size = 764380, upload-time = "2025-07-22T13:10:10.689Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2943,15 +2936,6 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/34/e7/ae39f538fd6844e982063c3a5e4598b8ced43b9633baa3a85ef33af8c05c/pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8", size = 6984598, upload-time = "2025-07-01T09:16:27.732Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pip"
|
||||
version = "25.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/20/16/650289cd3f43d5a2fadfd98c68bd1e1e7f2550a1a5326768cddfbcedb2c5/pip-25.2.tar.gz", hash = "sha256:578283f006390f85bb6282dffb876454593d637f5d1be494b5202ce4877e71f2", size = 1840021, upload-time = "2025-07-30T21:50:15.401Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/b7/3f/945ef7ab14dc4f9d7f40288d2df998d1837ee0888ec3659c813487572faa/pip-25.2-py3-none-any.whl", hash = "sha256:6d67a2b4e7f14d8b31b8b52648866fa717f45a1eb70e83002f4331d07e953717", size = 1752557, upload-time = "2025-07-30T21:50:13.323Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pluggy"
|
||||
version = "1.6.0"
|
||||
@@ -3411,18 +3395,6 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/4b/8b78d126e275efa2379b1c2e09dc52cf70df16fc3b90613ef82531499d73/pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a", size = 21949, upload-time = "2023-05-24T18:44:54.079Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pytest-mock"
|
||||
version = "3.14.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pytest" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/71/28/67172c96ba684058a4d24ffe144d64783d2a270d0af0d9e792737bddc75c/pytest_mock-3.14.1.tar.gz", hash = "sha256:159e9edac4c451ce77a5cdb9fc5d1100708d2dd4ba3c3df572f14097351af80e", size = 33241, upload-time = "2025-05-26T13:58:45.167Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/b2/05/77b60e520511c53d1c1ca75f1930c7dd8e971d0c4379b7f4b3f9644685ba/pytest_mock-3.14.1-py3-none-any.whl", hash = "sha256:178aefcd11307d874b4cd3100344e7e2d888d9791a6a1d9bfe90fbc1b74fd1d0", size = 9923, upload-time = "2025-05-26T13:58:43.487Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pytest-recording"
|
||||
version = "0.13.4"
|
||||
|
||||
@@ -8,7 +8,8 @@ UV_FROZEN = true
|
||||
|
||||
# Define a variable for the test file path.
|
||||
TEST_FILE ?= tests/unit_tests/
|
||||
integration_test: TEST_FILE = tests/integration_tests/
|
||||
integration_test integration_tests: TEST_FILE = tests/integration_tests/
|
||||
# note: leaving out integration_tests (with s) command to skip release testing for now
|
||||
# TODO(erick) configure ollama server to run in CI, in separate repo
|
||||
|
||||
# Define variables for test model configuration
|
||||
@@ -25,10 +26,9 @@ test_watch:
|
||||
|
||||
|
||||
# integration tests are run without the --disable-socket flag to allow network calls
|
||||
integration_test:
|
||||
integration_test integration_tests:
|
||||
OLLAMA_TEST_MODEL=$(OLLAMA_TEST_MODEL) OLLAMA_REASONING_TEST_MODEL=$(OLLAMA_REASONING_TEST_MODEL) uv run --group test --group test_integration pytest $(TEST_FILE)
|
||||
|
||||
# CI integration tests - disabled until ollama service is configured in CI
|
||||
# note: leaving out integration_tests (with s) command to skip release testing for now
|
||||
|
||||
######################
|
||||
# LINTING AND FORMATTING
|
||||
@@ -52,6 +52,12 @@ format format_diff:
|
||||
[ "$(PYTHON_FILES)" = "" ] || uv run --all-groups ruff format $(PYTHON_FILES)
|
||||
[ "$(PYTHON_FILES)" = "" ] || uv run --all-groups ruff check --fix $(PYTHON_FILES)
|
||||
|
||||
spell_check:
|
||||
uv run --all-groups codespell --toml pyproject.toml
|
||||
|
||||
spell_fix:
|
||||
uv run --all-groups codespell --toml pyproject.toml -w
|
||||
|
||||
check_imports: $(shell find langchain_ollama -name '*.py')
|
||||
uv run --all-groups python ./scripts/check_imports.py $^
|
||||
|
||||
|
||||
@@ -7,11 +7,11 @@ authors = []
|
||||
license = { text = "MIT" }
|
||||
requires-python = ">=3.9"
|
||||
dependencies = [
|
||||
"ollama>=0.5.3,<1.0.0",
|
||||
"langchain-core<1.0.0,>=0.3.74",
|
||||
"ollama>=0.5.1,<1.0.0",
|
||||
"langchain-core<1.0.0,>=0.3.70",
|
||||
]
|
||||
name = "langchain-ollama"
|
||||
version = "0.3.7"
|
||||
version = "0.3.6"
|
||||
description = "An integration package connecting Ollama and LangChain"
|
||||
readme = "README.md"
|
||||
|
||||
@@ -22,18 +22,19 @@ repository = "https://github.com/langchain-ai/langchain"
|
||||
|
||||
[dependency-groups]
|
||||
test = [
|
||||
"pytest<9.0.0,>=8.4.1",
|
||||
"pytest-asyncio<1.0.0,>=0.26.0",
|
||||
"syrupy<5.0.0,>=4.9.1",
|
||||
"pytest<8.0.0,>=7.4.3",
|
||||
"pytest-asyncio<1.0.0,>=0.23.2",
|
||||
"syrupy<5.0.0,>=4.0.2",
|
||||
"pytest-socket<1.0.0,>=0.7.0",
|
||||
"pytest-watcher<1.0.0,>=0.4.3",
|
||||
"pytest-watcher<1.0.0,>=0.3.4",
|
||||
"langchain-core",
|
||||
"langchain-tests",
|
||||
]
|
||||
codespell = ["codespell<3.0.0,>=2.2.6"]
|
||||
test_integration = []
|
||||
lint = ["ruff<0.13,>=0.12.10"]
|
||||
lint = ["ruff<0.13,>=0.12.2"]
|
||||
dev = ["langchain-core"]
|
||||
typing = ["mypy<2.0.0,>=1.17.1", "langchain-core"]
|
||||
typing = ["mypy<2.0.0,>=1.7.1", "langchain-core"]
|
||||
|
||||
[tool.uv.sources]
|
||||
langchain-core = { path = "../../core", editable = true }
|
||||
|
||||
176
libs/partners/ollama/uv.lock
generated
176
libs/partners/ollama/uv.lock
generated
@@ -188,6 +188,15 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767, upload-time = "2024-12-24T18:12:32.852Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "codespell"
|
||||
version = "2.4.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/15/e0/709453393c0ea77d007d907dd436b3ee262e28b30995ea1aa36c6ffbccaf/codespell-2.4.1.tar.gz", hash = "sha256:299fcdcb09d23e81e35a671bbe746d5ad7e8385972e65dbb833a2eaac33c01e5", size = 344740, upload-time = "2025-01-28T18:52:39.411Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/20/01/b394922252051e97aab231d416c86da3d8a6d781eeadcdca1082867de64e/codespell-2.4.1-py3-none-any.whl", hash = "sha256:3dadafa67df7e4a3dbf51e0d7315061b80d265f9552ebd699b3dd6834b47e425", size = 344501, upload-time = "2025-01-28T18:52:37.057Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "colorama"
|
||||
version = "0.4.6"
|
||||
@@ -354,7 +363,7 @@ typing = [
|
||||
|
||||
[[package]]
|
||||
name = "langchain-ollama"
|
||||
version = "0.3.7"
|
||||
version = "0.3.6"
|
||||
source = { editable = "." }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
@@ -362,6 +371,9 @@ dependencies = [
|
||||
]
|
||||
|
||||
[package.dev-dependencies]
|
||||
codespell = [
|
||||
{ name = "codespell" },
|
||||
]
|
||||
dev = [
|
||||
{ name = "langchain-core" },
|
||||
]
|
||||
@@ -385,25 +397,26 @@ typing = [
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "langchain-core", editable = "../../core" },
|
||||
{ name = "ollama", specifier = ">=0.5.3,<1.0.0" },
|
||||
{ name = "ollama", specifier = ">=0.5.1,<1.0.0" },
|
||||
]
|
||||
|
||||
[package.metadata.requires-dev]
|
||||
codespell = [{ name = "codespell", specifier = ">=2.2.6,<3.0.0" }]
|
||||
dev = [{ name = "langchain-core", editable = "../../core" }]
|
||||
lint = [{ name = "ruff", specifier = ">=0.12.10,<0.13" }]
|
||||
lint = [{ name = "ruff", specifier = ">=0.12.2,<0.13" }]
|
||||
test = [
|
||||
{ name = "langchain-core", editable = "../../core" },
|
||||
{ name = "langchain-tests", editable = "../../standard-tests" },
|
||||
{ name = "pytest", specifier = ">=8.4.1,<9.0.0" },
|
||||
{ name = "pytest-asyncio", specifier = ">=0.26.0,<1.0.0" },
|
||||
{ name = "pytest", specifier = ">=7.4.3,<8.0.0" },
|
||||
{ name = "pytest-asyncio", specifier = ">=0.23.2,<1.0.0" },
|
||||
{ name = "pytest-socket", specifier = ">=0.7.0,<1.0.0" },
|
||||
{ name = "pytest-watcher", specifier = ">=0.4.3,<1.0.0" },
|
||||
{ name = "syrupy", specifier = ">=4.9.1,<5.0.0" },
|
||||
{ name = "pytest-watcher", specifier = ">=0.3.4,<1.0.0" },
|
||||
{ name = "syrupy", specifier = ">=4.0.2,<5.0.0" },
|
||||
]
|
||||
test-integration = []
|
||||
typing = [
|
||||
{ name = "langchain-core", editable = "../../core" },
|
||||
{ name = "mypy", specifier = ">=1.17.1,<2.0.0" },
|
||||
{ name = "mypy", specifier = ">=1.7.1,<2.0.0" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -612,53 +625,46 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "mypy"
|
||||
version = "1.17.1"
|
||||
version = "1.15.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "mypy-extensions" },
|
||||
{ name = "pathspec" },
|
||||
{ name = "tomli", marker = "python_full_version < '3.11'" },
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/8e/22/ea637422dedf0bf36f3ef238eab4e455e2a0dcc3082b5cc067615347ab8e/mypy-1.17.1.tar.gz", hash = "sha256:25e01ec741ab5bb3eec8ba9cdb0f769230368a22c959c4937360efb89b7e9f01", size = 3352570, upload-time = "2025-07-31T07:54:19.204Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ce/43/d5e49a86afa64bd3839ea0d5b9c7103487007d728e1293f52525d6d5486a/mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43", size = 3239717, upload-time = "2025-02-05T03:50:34.655Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/77/a9/3d7aa83955617cdf02f94e50aab5c830d205cfa4320cf124ff64acce3a8e/mypy-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3fbe6d5555bf608c47203baa3e72dbc6ec9965b3d7c318aa9a4ca76f465bd972", size = 11003299, upload-time = "2025-07-31T07:54:06.425Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/83/e8/72e62ff837dd5caaac2b4a5c07ce769c8e808a00a65e5d8f94ea9c6f20ab/mypy-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80ef5c058b7bce08c83cac668158cb7edea692e458d21098c7d3bce35a5d43e7", size = 10125451, upload-time = "2025-07-31T07:53:52.974Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7d/10/f3f3543f6448db11881776f26a0ed079865926b0c841818ee22de2c6bbab/mypy-1.17.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4a580f8a70c69e4a75587bd925d298434057fe2a428faaf927ffe6e4b9a98df", size = 11916211, upload-time = "2025-07-31T07:53:18.879Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/06/bf/63e83ed551282d67bb3f7fea2cd5561b08d2bb6eb287c096539feb5ddbc5/mypy-1.17.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dd86bb649299f09d987a2eebb4d52d10603224500792e1bee18303bbcc1ce390", size = 12652687, upload-time = "2025-07-31T07:53:30.544Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/69/66/68f2eeef11facf597143e85b694a161868b3b006a5fbad50e09ea117ef24/mypy-1.17.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a76906f26bd8d51ea9504966a9c25419f2e668f012e0bdf3da4ea1526c534d94", size = 12896322, upload-time = "2025-07-31T07:53:50.74Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a3/87/8e3e9c2c8bd0d7e071a89c71be28ad088aaecbadf0454f46a540bda7bca6/mypy-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:e79311f2d904ccb59787477b7bd5d26f3347789c06fcd7656fa500875290264b", size = 9507962, upload-time = "2025-07-31T07:53:08.431Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/46/cf/eadc80c4e0a70db1c08921dcc220357ba8ab2faecb4392e3cebeb10edbfa/mypy-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad37544be07c5d7fba814eb370e006df58fed8ad1ef33ed1649cb1889ba6ff58", size = 10921009, upload-time = "2025-07-31T07:53:23.037Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/c1/c869d8c067829ad30d9bdae051046561552516cfb3a14f7f0347b7d973ee/mypy-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:064e2ff508e5464b4bd807a7c1625bc5047c5022b85c70f030680e18f37273a5", size = 10047482, upload-time = "2025-07-31T07:53:26.151Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/98/b9/803672bab3fe03cee2e14786ca056efda4bb511ea02dadcedde6176d06d0/mypy-1.17.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:70401bbabd2fa1aa7c43bb358f54037baf0586f41e83b0ae67dd0534fc64edfd", size = 11832883, upload-time = "2025-07-31T07:53:47.948Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/88/fb/fcdac695beca66800918c18697b48833a9a6701de288452b6715a98cfee1/mypy-1.17.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e92bdc656b7757c438660f775f872a669b8ff374edc4d18277d86b63edba6b8b", size = 12566215, upload-time = "2025-07-31T07:54:04.031Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7f/37/a932da3d3dace99ee8eb2043b6ab03b6768c36eb29a02f98f46c18c0da0e/mypy-1.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c1fdf4abb29ed1cb091cf432979e162c208a5ac676ce35010373ff29247bcad5", size = 12751956, upload-time = "2025-07-31T07:53:36.263Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8c/cf/6438a429e0f2f5cab8bc83e53dbebfa666476f40ee322e13cac5e64b79e7/mypy-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:ff2933428516ab63f961644bc49bc4cbe42bbffb2cd3b71cc7277c07d16b1a8b", size = 9507307, upload-time = "2025-07-31T07:53:59.734Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/17/a2/7034d0d61af8098ec47902108553122baa0f438df8a713be860f7407c9e6/mypy-1.17.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:69e83ea6553a3ba79c08c6e15dbd9bfa912ec1e493bf75489ef93beb65209aeb", size = 11086295, upload-time = "2025-07-31T07:53:28.124Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/14/1f/19e7e44b594d4b12f6ba8064dbe136505cec813549ca3e5191e40b1d3cc2/mypy-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b16708a66d38abb1e6b5702f5c2c87e133289da36f6a1d15f6a5221085c6403", size = 10112355, upload-time = "2025-07-31T07:53:21.121Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5b/69/baa33927e29e6b4c55d798a9d44db5d394072eef2bdc18c3e2048c9ed1e9/mypy-1.17.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:89e972c0035e9e05823907ad5398c5a73b9f47a002b22359b177d40bdaee7056", size = 11875285, upload-time = "2025-07-31T07:53:55.293Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/90/13/f3a89c76b0a41e19490b01e7069713a30949d9a6c147289ee1521bcea245/mypy-1.17.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:03b6d0ed2b188e35ee6d5c36b5580cffd6da23319991c49ab5556c023ccf1341", size = 12737895, upload-time = "2025-07-31T07:53:43.623Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/23/a1/c4ee79ac484241301564072e6476c5a5be2590bc2e7bfd28220033d2ef8f/mypy-1.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c837b896b37cd103570d776bda106eabb8737aa6dd4f248451aecf53030cdbeb", size = 12931025, upload-time = "2025-07-31T07:54:17.125Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/89/b8/7409477be7919a0608900e6320b155c72caab4fef46427c5cc75f85edadd/mypy-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:665afab0963a4b39dff7c1fa563cc8b11ecff7910206db4b2e64dd1ba25aed19", size = 9584664, upload-time = "2025-07-31T07:54:12.842Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5b/82/aec2fc9b9b149f372850291827537a508d6c4d3664b1750a324b91f71355/mypy-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93378d3203a5c0800c6b6d850ad2f19f7a3cdf1a3701d3416dbf128805c6a6a7", size = 11075338, upload-time = "2025-07-31T07:53:38.873Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/07/ac/ee93fbde9d2242657128af8c86f5d917cd2887584cf948a8e3663d0cd737/mypy-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15d54056f7fe7a826d897789f53dd6377ec2ea8ba6f776dc83c2902b899fee81", size = 10113066, upload-time = "2025-07-31T07:54:14.707Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/68/946a1e0be93f17f7caa56c45844ec691ca153ee8b62f21eddda336a2d203/mypy-1.17.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:209a58fed9987eccc20f2ca94afe7257a8f46eb5df1fb69958650973230f91e6", size = 11875473, upload-time = "2025-07-31T07:53:14.504Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9f/0f/478b4dce1cb4f43cf0f0d00fba3030b21ca04a01b74d1cd272a528cf446f/mypy-1.17.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:099b9a5da47de9e2cb5165e581f158e854d9e19d2e96b6698c0d64de911dd849", size = 12744296, upload-time = "2025-07-31T07:53:03.896Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ca/70/afa5850176379d1b303f992a828de95fc14487429a7139a4e0bdd17a8279/mypy-1.17.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ffadfbe6994d724c5a1bb6123a7d27dd68fc9c059561cd33b664a79578e14", size = 12914657, upload-time = "2025-07-31T07:54:08.576Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/53/f9/4a83e1c856a3d9c8f6edaa4749a4864ee98486e9b9dbfbc93842891029c2/mypy-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:9a2b7d9180aed171f033c9f2fc6c204c1245cf60b0cb61cf2e7acc24eea78e0a", size = 9593320, upload-time = "2025-07-31T07:53:01.341Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/38/56/79c2fac86da57c7d8c48622a05873eaab40b905096c33597462713f5af90/mypy-1.17.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:15a83369400454c41ed3a118e0cc58bd8123921a602f385cb6d6ea5df050c733", size = 11040037, upload-time = "2025-07-31T07:54:10.942Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4d/c3/adabe6ff53638e3cad19e3547268482408323b1e68bf082c9119000cd049/mypy-1.17.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:55b918670f692fc9fba55c3298d8a3beae295c5cded0a55dccdc5bbead814acd", size = 10131550, upload-time = "2025-07-31T07:53:41.307Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b8/c5/2e234c22c3bdeb23a7817af57a58865a39753bde52c74e2c661ee0cfc640/mypy-1.17.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:62761474061feef6f720149d7ba876122007ddc64adff5ba6f374fda35a018a0", size = 11872963, upload-time = "2025-07-31T07:53:16.878Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ab/26/c13c130f35ca8caa5f2ceab68a247775648fdcd6c9a18f158825f2bc2410/mypy-1.17.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c49562d3d908fd49ed0938e5423daed8d407774a479b595b143a3d7f87cdae6a", size = 12710189, upload-time = "2025-07-31T07:54:01.962Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/82/df/c7d79d09f6de8383fe800521d066d877e54d30b4fb94281c262be2df84ef/mypy-1.17.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:397fba5d7616a5bc60b45c7ed204717eaddc38f826e3645402c426057ead9a91", size = 12900322, upload-time = "2025-07-31T07:53:10.551Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b8/98/3d5a48978b4f708c55ae832619addc66d677f6dc59f3ebad71bae8285ca6/mypy-1.17.1-cp314-cp314-win_amd64.whl", hash = "sha256:9d6b20b97d373f41617bd0708fd46aa656059af57f2ef72aa8c7d6a2b73b74ed", size = 9751879, upload-time = "2025-07-31T07:52:56.683Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/29/cb/673e3d34e5d8de60b3a61f44f80150a738bff568cd6b7efb55742a605e98/mypy-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5d1092694f166a7e56c805caaf794e0585cabdbf1df36911c414e4e9abb62ae9", size = 10992466, upload-time = "2025-07-31T07:53:57.574Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0c/d0/fe1895836eea3a33ab801561987a10569df92f2d3d4715abf2cfeaa29cb2/mypy-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:79d44f9bfb004941ebb0abe8eff6504223a9c1ac51ef967d1263c6572bbebc99", size = 10117638, upload-time = "2025-07-31T07:53:34.256Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/97/f3/514aa5532303aafb95b9ca400a31054a2bd9489de166558c2baaeea9c522/mypy-1.17.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b01586eed696ec905e61bd2568f48740f7ac4a45b3a468e6423a03d3788a51a8", size = 11915673, upload-time = "2025-07-31T07:52:59.361Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ab/c3/c0805f0edec96fe8e2c048b03769a6291523d509be8ee7f56ae922fa3882/mypy-1.17.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43808d9476c36b927fbcd0b0255ce75efe1b68a080154a38ae68a7e62de8f0f8", size = 12649022, upload-time = "2025-07-31T07:53:45.92Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/45/3e/d646b5a298ada21a8512fa7e5531f664535a495efa672601702398cea2b4/mypy-1.17.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:feb8cc32d319edd5859da2cc084493b3e2ce5e49a946377663cc90f6c15fb259", size = 12895536, upload-time = "2025-07-31T07:53:06.17Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/14/55/e13d0dcd276975927d1f4e9e2ec4fd409e199f01bdc671717e673cc63a22/mypy-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d7598cf74c3e16539d4e2f0b8d8c318e00041553d83d4861f87c7a72e95ac24d", size = 9512564, upload-time = "2025-07-31T07:53:12.346Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1d/f3/8fcd2af0f5b806f6cf463efaffd3c9548a28f84220493ecd38d127b6b66d/mypy-1.17.1-py3-none-any.whl", hash = "sha256:a9f52c0351c21fe24c21d8c0eb1f62967b262d6729393397b6f443c3b773c3b9", size = 2283411, upload-time = "2025-07-31T07:53:24.664Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/68/f8/65a7ce8d0e09b6329ad0c8d40330d100ea343bd4dd04c4f8ae26462d0a17/mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13", size = 10738433, upload-time = "2025-02-05T03:49:29.145Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b4/95/9c0ecb8eacfe048583706249439ff52105b3f552ea9c4024166c03224270/mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559", size = 9861472, upload-time = "2025-02-05T03:49:16.986Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/84/09/9ec95e982e282e20c0d5407bc65031dfd0f0f8ecc66b69538296e06fcbee/mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b", size = 11611424, upload-time = "2025-02-05T03:49:46.908Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/78/13/f7d14e55865036a1e6a0a69580c240f43bc1f37407fe9235c0d4ef25ffb0/mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3", size = 12365450, upload-time = "2025-02-05T03:50:05.89Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/48/e1/301a73852d40c241e915ac6d7bcd7fedd47d519246db2d7b86b9d7e7a0cb/mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b", size = 12551765, upload-time = "2025-02-05T03:49:33.56Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/77/ba/c37bc323ae5fe7f3f15a28e06ab012cd0b7552886118943e90b15af31195/mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828", size = 9274701, upload-time = "2025-02-05T03:49:38.981Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/03/bc/f6339726c627bd7ca1ce0fa56c9ae2d0144604a319e0e339bdadafbbb599/mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f", size = 10662338, upload-time = "2025-02-05T03:50:17.287Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e2/90/8dcf506ca1a09b0d17555cc00cd69aee402c203911410136cd716559efe7/mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5", size = 9787540, upload-time = "2025-02-05T03:49:51.21Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/05/05/a10f9479681e5da09ef2f9426f650d7b550d4bafbef683b69aad1ba87457/mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e", size = 11538051, upload-time = "2025-02-05T03:50:20.885Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e9/9a/1f7d18b30edd57441a6411fcbc0c6869448d1a4bacbaee60656ac0fc29c8/mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c", size = 12286751, upload-time = "2025-02-05T03:49:42.408Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/72/af/19ff499b6f1dafcaf56f9881f7a965ac2f474f69f6f618b5175b044299f5/mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f", size = 12421783, upload-time = "2025-02-05T03:49:07.707Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/96/39/11b57431a1f686c1aed54bf794870efe0f6aeca11aca281a0bd87a5ad42c/mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f", size = 9265618, upload-time = "2025-02-05T03:49:54.581Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/98/3a/03c74331c5eb8bd025734e04c9840532226775c47a2c39b56a0c8d4f128d/mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd", size = 10793981, upload-time = "2025-02-05T03:50:28.25Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f0/1a/41759b18f2cfd568848a37c89030aeb03534411eef981df621d8fad08a1d/mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f", size = 9749175, upload-time = "2025-02-05T03:50:13.411Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/12/7e/873481abf1ef112c582db832740f4c11b2bfa510e829d6da29b0ab8c3f9c/mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464", size = 11455675, upload-time = "2025-02-05T03:50:31.421Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b3/d0/92ae4cde706923a2d3f2d6c39629134063ff64b9dedca9c1388363da072d/mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee", size = 12410020, upload-time = "2025-02-05T03:48:48.705Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/46/8b/df49974b337cce35f828ba6fda228152d6db45fed4c86ba56ffe442434fd/mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e", size = 12498582, upload-time = "2025-02-05T03:49:03.628Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/13/50/da5203fcf6c53044a0b699939f31075c45ae8a4cadf538a9069b165c1050/mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22", size = 9366614, upload-time = "2025-02-05T03:50:00.313Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6a/9b/fd2e05d6ffff24d912f150b87db9e364fa8282045c875654ce7e32fffa66/mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445", size = 10788592, upload-time = "2025-02-05T03:48:55.789Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/74/37/b246d711c28a03ead1fd906bbc7106659aed7c089d55fe40dd58db812628/mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d", size = 9753611, upload-time = "2025-02-05T03:48:44.581Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a6/ac/395808a92e10cfdac8003c3de9a2ab6dc7cde6c0d2a4df3df1b815ffd067/mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5", size = 11438443, upload-time = "2025-02-05T03:49:25.514Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541, upload-time = "2025-02-05T03:49:57.623Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348, upload-time = "2025-02-05T03:48:52.361Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648, upload-time = "2025-02-05T03:49:11.395Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/fa/79cf41a55b682794abe71372151dbbf856e3008f6767057229e6649d294a/mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078", size = 10737129, upload-time = "2025-02-05T03:50:24.509Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d3/33/dd8feb2597d648de29e3da0a8bf4e1afbda472964d2a4a0052203a6f3594/mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba", size = 9856335, upload-time = "2025-02-05T03:49:36.398Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e4/b5/74508959c1b06b96674b364ffeb7ae5802646b32929b7701fc6b18447592/mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5", size = 11611935, upload-time = "2025-02-05T03:49:14.154Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/53/da61b9d9973efcd6507183fdad96606996191657fe79701b2c818714d573/mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b", size = 12365827, upload-time = "2025-02-05T03:48:59.458Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c1/72/965bd9ee89540c79a25778cc080c7e6ef40aa1eeac4d52cec7eae6eb5228/mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2", size = 12541924, upload-time = "2025-02-05T03:50:03.12Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/46/d0/f41645c2eb263e6c77ada7d76f894c580c9ddb20d77f0c24d34273a4dab2/mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980", size = 9271176, upload-time = "2025-02-05T03:50:10.86Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777, upload-time = "2025-02-05T03:50:08.348Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -789,15 +795,15 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "ollama"
|
||||
version = "0.5.3"
|
||||
version = "0.5.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "httpx" },
|
||||
{ name = "pydantic" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/91/6d/ae96027416dcc2e98c944c050c492789502d7d7c0b95a740f0bb39268632/ollama-0.5.3.tar.gz", hash = "sha256:40b6dff729df3b24e56d4042fd9d37e231cee8e528677e0d085413a1d6692394", size = 43331, upload-time = "2025-08-07T21:44:10.422Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/8d/96/c7fe0d2d1b3053be614822a7b722c7465161b3672ce90df71515137580a0/ollama-0.5.1.tar.gz", hash = "sha256:5a799e4dc4e7af638b11e3ae588ab17623ee019e496caaf4323efbaa8feeff93", size = 41112, upload-time = "2025-05-30T21:32:48.679Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/be/f6/2091e50b8b6c3e6901f6eab283d5efd66fb71c86ddb1b4d68766c3eeba0f/ollama-0.5.3-py3-none-any.whl", hash = "sha256:a8303b413d99a9043dbf77ebf11ced672396b59bec27e6d5db67c88f01b279d2", size = 13490, upload-time = "2025-08-07T21:44:09.353Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/76/3f96c8cdbf3955d7a73ee94ce3e0db0755d6de1e0098a70275940d1aff2f/ollama-0.5.1-py3-none-any.whl", hash = "sha256:4c8839f35bc173c7057b1eb2cbe7f498c1a7e134eafc9192824c8aecb3617506", size = 13369, upload-time = "2025-05-30T21:32:47.429Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -882,15 +888,6 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451, upload-time = "2024-11-08T09:47:44.722Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pathspec"
|
||||
version = "0.12.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pluggy"
|
||||
version = "1.5.0"
|
||||
@@ -1158,7 +1155,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "pytest"
|
||||
version = "8.4.1"
|
||||
version = "7.4.4"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "colorama", marker = "sys_platform == 'win32'" },
|
||||
@@ -1166,25 +1163,23 @@ dependencies = [
|
||||
{ name = "iniconfig" },
|
||||
{ name = "packaging" },
|
||||
{ name = "pluggy" },
|
||||
{ name = "pygments" },
|
||||
{ name = "tomli", marker = "python_full_version < '3.11'" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/08/ba/45911d754e8eba3d5a841a5ce61a65a685ff1798421ac054f85aa8747dfb/pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c", size = 1517714, upload-time = "2025-06-18T05:48:06.109Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/80/1f/9d8e98e4133ffb16c90f3b405c43e38d3abb715bb5d7a63a5a684f7e46a3/pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280", size = 1357116, upload-time = "2023-12-31T12:00:18.035Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/29/16/c8a903f4c4dffe7a12843191437d7cd8e32751d5de349d45d3fe69544e87/pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7", size = 365474, upload-time = "2025-06-18T05:48:03.955Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/51/ff/f6e8b8f39e08547faece4bd80f89d5a8de68a38b2d179cc1c4490ffa3286/pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8", size = 325287, upload-time = "2023-12-31T12:00:13.963Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pytest-asyncio"
|
||||
version = "0.26.0"
|
||||
version = "0.23.8"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pytest" },
|
||||
{ name = "typing-extensions", marker = "python_full_version < '3.10'" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/8e/c4/453c52c659521066969523e87d85d54139bbd17b78f09532fb8eb8cdb58e/pytest_asyncio-0.26.0.tar.gz", hash = "sha256:c4df2a697648241ff39e7f0e4a73050b03f123f760673956cf0d72a4990e312f", size = 54156, upload-time = "2025-03-25T06:22:28.883Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/de/b4/0b378b7bf26a8ae161c3890c0b48a91a04106c5713ce81b4b080ea2f4f18/pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3", size = 46920, upload-time = "2024-07-17T17:39:34.617Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/20/7f/338843f449ace853647ace35870874f69a764d251872ed1b4de9f234822c/pytest_asyncio-0.26.0-py3-none-any.whl", hash = "sha256:7b51ed894f4fbea1340262bdae5135797ebbe21d8638978e35d31c6d19f72fb0", size = 19694, upload-time = "2025-03-25T06:22:27.807Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ee/82/62e2d63639ecb0fbe8a7ee59ef0bc69a4669ec50f6d3459f74ad4e4189a2/pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2", size = 17663, upload-time = "2024-07-17T17:39:32.478Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1360,28 +1355,27 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "ruff"
|
||||
version = "0.12.10"
|
||||
version = "0.12.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/3b/eb/8c073deb376e46ae767f4961390d17545e8535921d2f65101720ed8bd434/ruff-0.12.10.tar.gz", hash = "sha256:189ab65149d11ea69a2d775343adf5f49bb2426fc4780f65ee33b423ad2e47f9", size = 5310076, upload-time = "2025-08-21T18:23:22.595Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/6c/3d/d9a195676f25d00dbfcf3cf95fdd4c685c497fcfa7e862a44ac5e4e96480/ruff-0.12.2.tar.gz", hash = "sha256:d7b4f55cd6f325cb7621244f19c873c565a08aff5a4ba9c69aa7355f3f7afd3e", size = 4432239, upload-time = "2025-07-03T16:40:19.566Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/24/e7/560d049d15585d6c201f9eeacd2fd130def3741323e5ccf123786e0e3c95/ruff-0.12.10-py3-none-linux_armv6l.whl", hash = "sha256:8b593cb0fb55cc8692dac7b06deb29afda78c721c7ccfed22db941201b7b8f7b", size = 11935161, upload-time = "2025-08-21T18:22:26.965Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d1/b0/ad2464922a1113c365d12b8f80ed70fcfb39764288ac77c995156080488d/ruff-0.12.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ebb7333a45d56efc7c110a46a69a1b32365d5c5161e7244aaf3aa20ce62399c1", size = 12660884, upload-time = "2025-08-21T18:22:30.925Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d7/f1/97f509b4108d7bae16c48389f54f005b62ce86712120fd8b2d8e88a7cb49/ruff-0.12.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d59e58586829f8e4a9920788f6efba97a13d1fa320b047814e8afede381c6839", size = 11872754, upload-time = "2025-08-21T18:22:34.035Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/12/ad/44f606d243f744a75adc432275217296095101f83f966842063d78eee2d3/ruff-0.12.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:822d9677b560f1fdeab69b89d1f444bf5459da4aa04e06e766cf0121771ab844", size = 12092276, upload-time = "2025-08-21T18:22:36.764Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/06/1f/ed6c265e199568010197909b25c896d66e4ef2c5e1c3808caf461f6f3579/ruff-0.12.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37b4a64f4062a50c75019c61c7017ff598cb444984b638511f48539d3a1c98db", size = 11734700, upload-time = "2025-08-21T18:22:39.822Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/63/c5/b21cde720f54a1d1db71538c0bc9b73dee4b563a7dd7d2e404914904d7f5/ruff-0.12.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c6f4064c69d2542029b2a61d39920c85240c39837599d7f2e32e80d36401d6e", size = 13468783, upload-time = "2025-08-21T18:22:42.559Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/02/9e/39369e6ac7f2a1848f22fb0b00b690492f20811a1ac5c1fd1d2798329263/ruff-0.12.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:059e863ea3a9ade41407ad71c1de2badfbe01539117f38f763ba42a1206f7559", size = 14436642, upload-time = "2025-08-21T18:22:45.612Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e3/03/5da8cad4b0d5242a936eb203b58318016db44f5c5d351b07e3f5e211bb89/ruff-0.12.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1bef6161e297c68908b7218fa6e0e93e99a286e5ed9653d4be71e687dff101cf", size = 13859107, upload-time = "2025-08-21T18:22:48.886Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/19/19/dd7273b69bf7f93a070c9cec9494a94048325ad18fdcf50114f07e6bf417/ruff-0.12.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4f1345fbf8fb0531cd722285b5f15af49b2932742fc96b633e883da8d841896b", size = 12886521, upload-time = "2025-08-21T18:22:51.567Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c0/1d/b4207ec35e7babaee62c462769e77457e26eb853fbdc877af29417033333/ruff-0.12.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f68433c4fbc63efbfa3ba5db31727db229fa4e61000f452c540474b03de52a9", size = 13097528, upload-time = "2025-08-21T18:22:54.609Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/00/58f7b873b21114456e880b75176af3490d7a2836033779ca42f50de3b47a/ruff-0.12.10-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:141ce3d88803c625257b8a6debf4a0473eb6eed9643a6189b68838b43e78165a", size = 13080443, upload-time = "2025-08-21T18:22:57.413Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/12/8c/9e6660007fb10189ccb78a02b41691288038e51e4788bf49b0a60f740604/ruff-0.12.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f3fc21178cd44c98142ae7590f42ddcb587b8e09a3b849cbc84edb62ee95de60", size = 11896759, upload-time = "2025-08-21T18:23:00.473Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/67/4c/6d092bb99ea9ea6ebda817a0e7ad886f42a58b4501a7e27cd97371d0ba54/ruff-0.12.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:7d1a4e0bdfafcd2e3e235ecf50bf0176f74dd37902f241588ae1f6c827a36c56", size = 11701463, upload-time = "2025-08-21T18:23:03.211Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/59/80/d982c55e91df981f3ab62559371380616c57ffd0172d96850280c2b04fa8/ruff-0.12.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:e67d96827854f50b9e3e8327b031647e7bcc090dbe7bb11101a81a3a2cbf1cc9", size = 12691603, upload-time = "2025-08-21T18:23:06.935Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ad/37/63a9c788bbe0b0850611669ec6b8589838faf2f4f959647f2d3e320383ae/ruff-0.12.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ae479e1a18b439c59138f066ae79cc0f3ee250712a873d00dbafadaad9481e5b", size = 13164356, upload-time = "2025-08-21T18:23:10.225Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/47/d4/1aaa7fb201a74181989970ebccd12f88c0fc074777027e2a21de5a90657e/ruff-0.12.10-py3-none-win32.whl", hash = "sha256:9de785e95dc2f09846c5e6e1d3a3d32ecd0b283a979898ad427a9be7be22b266", size = 11896089, upload-time = "2025-08-21T18:23:14.232Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ad/14/2ad38fd4037daab9e023456a4a40ed0154e9971f8d6aed41bdea390aabd9/ruff-0.12.10-py3-none-win_amd64.whl", hash = "sha256:7837eca8787f076f67aba2ca559cefd9c5cbc3a9852fd66186f4201b87c1563e", size = 13004616, upload-time = "2025-08-21T18:23:17.422Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/24/3c/21cf283d67af33a8e6ed242396863af195a8a6134ec581524fd22b9811b6/ruff-0.12.10-py3-none-win_arm64.whl", hash = "sha256:cc138cc06ed9d4bfa9d667a65af7172b47840e1a98b02ce7011c391e54635ffc", size = 12074225, upload-time = "2025-08-21T18:23:20.137Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/74/b6/2098d0126d2d3318fd5bec3ad40d06c25d377d95749f7a0c5af17129b3b1/ruff-0.12.2-py3-none-linux_armv6l.whl", hash = "sha256:093ea2b221df1d2b8e7ad92fc6ffdca40a2cb10d8564477a987b44fd4008a7be", size = 10369761, upload-time = "2025-07-03T16:39:38.847Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b1/4b/5da0142033dbe155dc598cfb99262d8ee2449d76920ea92c4eeb9547c208/ruff-0.12.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:09e4cf27cc10f96b1708100fa851e0daf21767e9709e1649175355280e0d950e", size = 11155659, upload-time = "2025-07-03T16:39:42.294Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3e/21/967b82550a503d7c5c5c127d11c935344b35e8c521f52915fc858fb3e473/ruff-0.12.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:8ae64755b22f4ff85e9c52d1f82644abd0b6b6b6deedceb74bd71f35c24044cc", size = 10537769, upload-time = "2025-07-03T16:39:44.75Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/33/91/00cff7102e2ec71a4890fb7ba1803f2cdb122d82787c7d7cf8041fe8cbc1/ruff-0.12.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3eb3a6b2db4d6e2c77e682f0b988d4d61aff06860158fdb413118ca133d57922", size = 10717602, upload-time = "2025-07-03T16:39:47.652Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9b/eb/928814daec4e1ba9115858adcda44a637fb9010618721937491e4e2283b8/ruff-0.12.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:73448de992d05517170fc37169cbca857dfeaeaa8c2b9be494d7bcb0d36c8f4b", size = 10198772, upload-time = "2025-07-03T16:39:49.641Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/50/fa/f15089bc20c40f4f72334f9145dde55ab2b680e51afb3b55422effbf2fb6/ruff-0.12.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b8b94317cbc2ae4a2771af641739f933934b03555e51515e6e021c64441532d", size = 11845173, upload-time = "2025-07-03T16:39:52.069Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/43/9f/1f6f98f39f2b9302acc161a4a2187b1e3a97634fe918a8e731e591841cf4/ruff-0.12.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:45fc42c3bf1d30d2008023a0a9a0cfb06bf9835b147f11fe0679f21ae86d34b1", size = 12553002, upload-time = "2025-07-03T16:39:54.551Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d8/70/08991ac46e38ddd231c8f4fd05ef189b1b94be8883e8c0c146a025c20a19/ruff-0.12.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce48f675c394c37e958bf229fb5c1e843e20945a6d962cf3ea20b7a107dcd9f4", size = 12171330, upload-time = "2025-07-03T16:39:57.55Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/88/a9/5a55266fec474acfd0a1c73285f19dd22461d95a538f29bba02edd07a5d9/ruff-0.12.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:793d8859445ea47591272021a81391350205a4af65a9392401f418a95dfb75c9", size = 11774717, upload-time = "2025-07-03T16:39:59.78Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/87/e5/0c270e458fc73c46c0d0f7cf970bb14786e5fdb88c87b5e423a4bd65232b/ruff-0.12.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6932323db80484dda89153da3d8e58164d01d6da86857c79f1961934354992da", size = 11646659, upload-time = "2025-07-03T16:40:01.934Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b7/b6/45ab96070c9752af37f0be364d849ed70e9ccede07675b0ec4e3ef76b63b/ruff-0.12.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:6aa7e623a3a11538108f61e859ebf016c4f14a7e6e4eba1980190cacb57714ce", size = 10604012, upload-time = "2025-07-03T16:40:04.363Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/86/91/26a6e6a424eb147cc7627eebae095cfa0b4b337a7c1c413c447c9ebb72fd/ruff-0.12.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2a4a20aeed74671b2def096bdf2eac610c7d8ffcbf4fb0e627c06947a1d7078d", size = 10176799, upload-time = "2025-07-03T16:40:06.514Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f5/0c/9f344583465a61c8918a7cda604226e77b2c548daf8ef7c2bfccf2b37200/ruff-0.12.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:71a4c550195612f486c9d1f2b045a600aeba851b298c667807ae933478fcef04", size = 11241507, upload-time = "2025-07-03T16:40:08.708Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1c/b7/99c34ded8fb5f86c0280278fa89a0066c3760edc326e935ce0b1550d315d/ruff-0.12.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:4987b8f4ceadf597c927beee65a5eaf994c6e2b631df963f86d8ad1bdea99342", size = 11717609, upload-time = "2025-07-03T16:40:10.836Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/51/de/8589fa724590faa057e5a6d171e7f2f6cffe3287406ef40e49c682c07d89/ruff-0.12.2-py3-none-win32.whl", hash = "sha256:369ffb69b70cd55b6c3fc453b9492d98aed98062db9fec828cdfd069555f5f1a", size = 10523823, upload-time = "2025-07-03T16:40:13.203Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/94/47/8abf129102ae4c90cba0c2199a1a9b0fa896f6f806238d6f8c14448cc748/ruff-0.12.2-py3-none-win_amd64.whl", hash = "sha256:dca8a3b6d6dc9810ed8f328d406516bf4d660c00caeaef36eb831cf4871b0639", size = 11629831, upload-time = "2025-07-03T16:40:15.478Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e2/1f/72d2946e3cc7456bb837e88000eb3437e55f80db339c840c04015a11115d/ruff-0.12.2-py3-none-win_arm64.whl", hash = "sha256:48d6c6bfb4761df68bc05ae630e24f506755e702d4fb08f08460be778c7ccb12", size = 10735334, upload-time = "2025-07-03T16:40:17.677Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1395,14 +1389,14 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "syrupy"
|
||||
version = "4.9.1"
|
||||
version = "4.8.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pytest" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/8c/f8/022d8704a3314f3e96dbd6bbd16ebe119ce30e35f41aabfa92345652fceb/syrupy-4.9.1.tar.gz", hash = "sha256:b7d0fcadad80a7d2f6c4c71917918e8ebe2483e8c703dfc8d49cdbb01081f9a4", size = 52492, upload-time = "2025-03-24T01:36:37.225Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/c4/32/8b56491ed50ae103c2db14885c29fe765170bdf044fe5868548113da35ef/syrupy-4.8.1.tar.gz", hash = "sha256:8da8c0311e6d92de0b15767768c6ab98982b7b4a4c67083c08fbac3fbad4d44c", size = 50192, upload-time = "2025-01-13T12:09:31.445Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ec/9d/aef9ec5fd5a4ee2f6a96032c4eda5888c5c7cec65cef6b28c4fc37671d88/syrupy-4.9.1-py3-none-any.whl", hash = "sha256:b94cc12ed0e5e75b448255430af642516842a2374a46936dd2650cfb6dd20eda", size = 52214, upload-time = "2025-03-24T01:36:35.278Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/80/47/5e8f44ec0f287b08e8c1f3fc63fe1fbe182f07bf606eec903d7827b95e51/syrupy-4.8.1-py3-none-any.whl", hash = "sha256:274f97cbaf44175f5e478a2f3a53559d31f41c66c6bf28131695f94ac893ea00", size = 50326, upload-time = "2025-01-13T12:09:29.96Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
@@ -14,7 +14,7 @@ pip install langchain-openai
|
||||
|
||||
## Chat model
|
||||
|
||||
See a [usage example](https://python.langchain.com/docs/integrations/chat/openai).
|
||||
See a [usage example](http://python.langchain.com/docs/integrations/chat/openai).
|
||||
|
||||
```python
|
||||
from langchain_openai import ChatOpenAI
|
||||
@@ -26,11 +26,11 @@ If you are using a model hosted on `Azure`, you should use different wrapper for
|
||||
from langchain_openai import AzureChatOpenAI
|
||||
```
|
||||
|
||||
For a more detailed walkthrough of the `Azure` wrapper, see [AzureChatOpenAI](https://python.langchain.com/docs/integrations/chat/azure_chat_openai)
|
||||
For a more detailed walkthrough of the `Azure` wrapper, see [AzureChatOpenAI](http://python.langchain.com/docs/integrations/chat/azure_chat_openai)
|
||||
|
||||
## Text Embedding Model
|
||||
|
||||
See a [usage example](https://python.langchain.com/docs/integrations/text_embedding/openai)
|
||||
See a [usage example](http://python.langchain.com/docs/integrations/text_embedding/openai)
|
||||
|
||||
```python
|
||||
from langchain_openai import OpenAIEmbeddings
|
||||
@@ -46,7 +46,7 @@ For a more detailed walkthrough of the `Azure` wrapper, see [AzureOpenAIEmbeddin
|
||||
|
||||
## LLM (Legacy)
|
||||
|
||||
LLM refers to the legacy text-completion models that preceded chat models. See a [usage example](https://python.langchain.com/docs/integrations/llms/openai).
|
||||
LLM refers to the legacy text-completion models that preceded chat models. See a [usage example](http://python.langchain.com/docs/integrations/llms/openai).
|
||||
|
||||
```python
|
||||
from langchain_openai import OpenAI
|
||||
@@ -58,4 +58,4 @@ If you are using a model hosted on `Azure`, you should use different wrapper for
|
||||
from langchain_openai import AzureOpenAI
|
||||
```
|
||||
|
||||
For a more detailed walkthrough of the `Azure` wrapper, see [Azure OpenAI](https://python.langchain.com/docs/integrations/llms/azure_openai)
|
||||
For a more detailed walkthrough of the `Azure` wrapper, see [Azure OpenAI](http://python.langchain.com/docs/integrations/llms/azure_openai)
|
||||
|
||||
@@ -31,7 +31,7 @@ class DocumentIndexerTestSuite(ABC):
|
||||
"""Get the index."""
|
||||
|
||||
def test_upsert_documents_has_no_ids(self, index: DocumentIndex) -> None:
|
||||
"""Verify that there is no parameter called ids in upsert."""
|
||||
"""Verify that there is not parameter called ids in upsert."""
|
||||
signature = inspect.signature(index.upsert)
|
||||
assert "ids" not in signature.parameters
|
||||
|
||||
@@ -67,7 +67,7 @@ class DocumentIndexerTestSuite(ABC):
|
||||
)
|
||||
|
||||
def test_upsert_some_ids(self, index: DocumentIndex) -> None:
|
||||
"""Test an upsert where some docs have ids and some don't."""
|
||||
"""Test an upsert where some docs have ids and some dont."""
|
||||
foo_uuid = str(uuid.UUID(int=7))
|
||||
documents = [
|
||||
Document(id=foo_uuid, page_content="foo", metadata={"id": 1}),
|
||||
@@ -257,7 +257,7 @@ class AsyncDocumentIndexTestSuite(ABC):
|
||||
)
|
||||
|
||||
async def test_upsert_some_ids(self, index: DocumentIndex) -> None:
|
||||
"""Test an upsert where some docs have ids and some don't."""
|
||||
"""Test an upsert where some docs have ids and some dont."""
|
||||
foo_uuid = str(uuid.UUID(int=7))
|
||||
documents = [
|
||||
Document(id=foo_uuid, page_content="foo", metadata={"id": 1}),
|
||||
|
||||
@@ -414,7 +414,7 @@ class ExperimentalMarkdownSyntaxTextSplitter:
|
||||
|
||||
self._complete_chunk_doc()
|
||||
# I don't see why `return_each_line` is a necessary feature of this splitter.
|
||||
# It's easy enough to do outside of the class and the caller can have more
|
||||
# It's easy enough to to do outside of the class and the caller can have more
|
||||
# control over it.
|
||||
if self.return_each_line:
|
||||
return [
|
||||
|
||||
Reference in New Issue
Block a user