mirror of
https://github.com/hwchase17/langchain.git
synced 2026-01-21 21:56:38 +00:00
Compare commits
5 Commits
async_impl
...
sr/agent-i
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
64e5e83a73 | ||
|
|
1a491b6199 | ||
|
|
a292c45d53 | ||
|
|
ddb00006fc | ||
|
|
812446210b |
@@ -1,4 +1,4 @@
|
||||
.PHONY: all clean docs_build docs_clean docs_linkcheck api_docs_build api_docs_clean api_docs_linkcheck format lint test tests test_watch integration_tests help extended_tests
|
||||
.PHONY: all clean docs_build docs_clean docs_linkcheck api_docs_build api_docs_clean api_docs_linkcheck format lint test tests test_watch integration_tests help extended_tests start_services stop_services
|
||||
|
||||
# Default target executed when no arguments are given to make.
|
||||
all: help
|
||||
@@ -7,6 +7,12 @@ all: help
|
||||
# TESTING AND COVERAGE
|
||||
######################
|
||||
|
||||
start_services:
|
||||
docker compose -f tests/unit_tests/agents/compose-postgres.yml -f tests/unit_tests/agents/compose-redis.yml up -V --force-recreate --wait --remove-orphans
|
||||
|
||||
stop_services:
|
||||
docker compose -f tests/unit_tests/agents/compose-postgres.yml -f tests/unit_tests/agents/compose-redis.yml down -v
|
||||
|
||||
# Define a variable for the test file path.
|
||||
TEST_FILE ?= tests/unit_tests/
|
||||
|
||||
@@ -21,17 +27,32 @@ coverage:
|
||||
--cov-report term-missing:skip-covered \
|
||||
$(TEST_FILE)
|
||||
|
||||
test tests:
|
||||
uv run --group test pytest -n auto --disable-socket --allow-unix-socket $(TEST_FILE)
|
||||
test:
|
||||
make start_services && LANGGRAPH_TEST_FAST=0 uv run --group test pytest -n auto --disable-socket --allow-unix-socket $(TEST_FILE) --cov-report term-missing:skip-covered; \
|
||||
EXIT_CODE=$$?; \
|
||||
make stop_services; \
|
||||
exit $$EXIT_CODE
|
||||
|
||||
test_fast:
|
||||
LANGGRAPH_TEST_FAST=1 uv run --group test pytest -n auto --disable-socket --allow-unix-socket $(TEST_FILE)
|
||||
|
||||
extended_tests:
|
||||
uv run --group test pytest --disable-socket --allow-unix-socket --only-extended tests/unit_tests
|
||||
make start_services && LANGGRAPH_TEST_FAST=0 uv run --group test pytest --disable-socket --allow-unix-socket --only-extended tests/unit_tests; \
|
||||
EXIT_CODE=$$?; \
|
||||
make stop_services; \
|
||||
exit $$EXIT_CODE
|
||||
|
||||
test_watch:
|
||||
uv run --group test ptw --snapshot-update --now . -- -x --disable-socket --allow-unix-socket --disable-warnings tests/unit_tests
|
||||
make start_services && LANGGRAPH_TEST_FAST=0 uv run --group test ptw --snapshot-update --now . -- -x --disable-socket --allow-unix-socket --disable-warnings tests/unit_tests; \
|
||||
EXIT_CODE=$$?; \
|
||||
make stop_services; \
|
||||
exit $$EXIT_CODE
|
||||
|
||||
test_watch_extended:
|
||||
uv run --group test ptw --snapshot-update --now . -- -x --disable-socket --allow-unix-socket --only-extended tests/unit_tests
|
||||
make start_services && LANGGRAPH_TEST_FAST=0 uv run --group test ptw --snapshot-update --now . -- -x --disable-socket --allow-unix-socket --only-extended tests/unit_tests; \
|
||||
EXIT_CODE=$$?; \
|
||||
make stop_services; \
|
||||
exit $$EXIT_CODE
|
||||
|
||||
integration_tests:
|
||||
uv run --group test --group test_integration pytest tests/integration_tests
|
||||
@@ -87,7 +108,8 @@ help:
|
||||
@echo 'spell_fix - run codespell on the project and fix the errors'
|
||||
@echo '-- TESTS --'
|
||||
@echo 'coverage - run unit tests and generate coverage report'
|
||||
@echo 'test - run unit tests'
|
||||
@echo 'test - run unit tests with all services'
|
||||
@echo 'test_fast - run unit tests with in-memory services only'
|
||||
@echo 'tests - run unit tests (alias for "make test")'
|
||||
@echo 'test TEST_FILE=<test_file> - run all tests in file'
|
||||
@echo 'extended_tests - run only extended unit tests'
|
||||
|
||||
@@ -32,9 +32,4 @@ def format_document_xml(doc: Document) -> str:
|
||||
if doc.metadata:
|
||||
metadata_items = [f"{k}: {v!s}" for k, v in doc.metadata.items()]
|
||||
metadata_str = f"<metadata>{', '.join(metadata_items)}</metadata>"
|
||||
return (
|
||||
f"<document>{id_str}"
|
||||
f"<content>{doc.page_content}</content>"
|
||||
f"{metadata_str}"
|
||||
f"</document>"
|
||||
)
|
||||
return f"<document>{id_str}<content>{doc.page_content}</content>{metadata_str}</document>"
|
||||
|
||||
@@ -92,9 +92,7 @@ async def aresolve_prompt(
|
||||
str,
|
||||
None,
|
||||
Callable[[StateT, Runtime[ContextT]], list[MessageLikeRepresentation]],
|
||||
Callable[
|
||||
[StateT, Runtime[ContextT]], Awaitable[list[MessageLikeRepresentation]]
|
||||
],
|
||||
Callable[[StateT, Runtime[ContextT]], Awaitable[list[MessageLikeRepresentation]]],
|
||||
],
|
||||
state: StateT,
|
||||
runtime: Runtime[ContextT],
|
||||
|
||||
6
libs/langchain_v1/langchain/agents/__init__.py
Normal file
6
libs/langchain_v1/langchain/agents/__init__.py
Normal file
@@ -0,0 +1,6 @@
|
||||
"""Agents and abstractions."""
|
||||
|
||||
from langchain.agents.react_agent import AgentState, create_react_agent
|
||||
from langchain.agents.tool_node import ToolNode
|
||||
|
||||
__all__ = ["AgentState", "ToolNode", "create_react_agent"]
|
||||
1
libs/langchain_v1/langchain/agents/_internal/__init__.py
Normal file
1
libs/langchain_v1/langchain/agents/_internal/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Internal utilities for agents."""
|
||||
11
libs/langchain_v1/langchain/agents/_internal/_typing.py
Normal file
11
libs/langchain_v1/langchain/agents/_internal/_typing.py
Normal file
@@ -0,0 +1,11 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Awaitable
|
||||
from typing import Callable, TypeVar, Union
|
||||
|
||||
from typing_extensions import ParamSpec
|
||||
|
||||
P = ParamSpec("P")
|
||||
R = TypeVar("R")
|
||||
|
||||
SyncOrAsync = Callable[P, Union[R, Awaitable[R]]]
|
||||
0
libs/langchain_v1/langchain/agents/py.typed
Normal file
0
libs/langchain_v1/langchain/agents/py.typed
Normal file
1085
libs/langchain_v1/langchain/agents/react_agent.py
Normal file
1085
libs/langchain_v1/langchain/agents/react_agent.py
Normal file
File diff suppressed because it is too large
Load Diff
404
libs/langchain_v1/langchain/agents/structured_output.py
Normal file
404
libs/langchain_v1/langchain/agents/structured_output.py
Normal file
@@ -0,0 +1,404 @@
|
||||
"""Types for setting agent response formats."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
import uuid
|
||||
from dataclasses import dataclass, is_dataclass
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Callable,
|
||||
Generic,
|
||||
Literal,
|
||||
TypeVar,
|
||||
Union,
|
||||
get_args,
|
||||
get_origin,
|
||||
)
|
||||
|
||||
from langchain_core.tools import BaseTool, StructuredTool
|
||||
from pydantic import BaseModel, TypeAdapter
|
||||
from typing_extensions import Self, is_typeddict
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Iterable
|
||||
|
||||
from langchain_core.messages import AIMessage
|
||||
|
||||
# Supported schema types: Pydantic models, dataclasses, TypedDict, JSON schema dicts
|
||||
SchemaT = TypeVar("SchemaT")
|
||||
|
||||
|
||||
if sys.version_info >= (3, 10):
|
||||
from types import UnionType
|
||||
else:
|
||||
UnionType = Union
|
||||
|
||||
SchemaKind = Literal["pydantic", "dataclass", "typeddict", "json_schema"]
|
||||
|
||||
|
||||
class StructuredOutputError(Exception):
|
||||
"""Base class for structured output errors."""
|
||||
|
||||
|
||||
class MultipleStructuredOutputsError(StructuredOutputError):
|
||||
"""Raised when model returns multiple structured output tool calls when only one is expected."""
|
||||
|
||||
def __init__(self, tool_names: list[str]) -> None:
|
||||
"""Initialize MultipleStructuredOutputsError."""
|
||||
self.tool_names = tool_names
|
||||
super().__init__(
|
||||
f"Model incorrectly returned multiple structured responses ({', '.join(tool_names)}) when only one is expected."
|
||||
)
|
||||
|
||||
|
||||
class StructuredOutputParsingError(StructuredOutputError):
|
||||
"""Raised when structured output tool call arguments fail to parse according to the schema."""
|
||||
|
||||
def __init__(self, tool_name: str, parse_error: Exception) -> None:
|
||||
"""Initialize StructuredOutputParsingError."""
|
||||
self.tool_name = tool_name
|
||||
self.parse_error = parse_error
|
||||
super().__init__(
|
||||
f"Failed to parse structured output for tool '{tool_name}': {parse_error}."
|
||||
)
|
||||
|
||||
|
||||
def _parse_with_schema(
|
||||
schema: Union[type[SchemaT], dict], schema_kind: SchemaKind, data: dict[str, Any]
|
||||
) -> Any:
|
||||
"""Parse data using for any supported schema type.
|
||||
|
||||
Args:
|
||||
schema: The schema type (Pydantic model, dataclass, or TypedDict)
|
||||
schema_kind: The type of the schema (pydantic, dataclass, typeddict, or json_schema)
|
||||
data: The data to parse
|
||||
|
||||
Returns:
|
||||
The parsed instance according to the schema type
|
||||
|
||||
Raises:
|
||||
ValueError: If parsing fails
|
||||
"""
|
||||
if schema_kind == "json_schema":
|
||||
return data
|
||||
try:
|
||||
adapter: TypeAdapter[SchemaT] = TypeAdapter(schema)
|
||||
return adapter.validate_python(data)
|
||||
except Exception as e:
|
||||
schema_name = getattr(schema, "__name__", str(schema))
|
||||
msg = f"Failed to parse data to {schema_name}: {e}"
|
||||
raise ValueError(msg) from e
|
||||
|
||||
|
||||
@dataclass(init=False)
|
||||
class _SchemaSpec(Generic[SchemaT]):
|
||||
"""Describes a structured output schema."""
|
||||
|
||||
schema: type[SchemaT]
|
||||
"""The schema for the response, can be a Pydantic model, dataclass, TypedDict, or JSON schema dict."""
|
||||
|
||||
name: str
|
||||
"""Name of the schema, used for tool calling.
|
||||
|
||||
If not provided, the name will be the model name or "response_format" if it's a JSON schema.
|
||||
"""
|
||||
|
||||
description: str
|
||||
"""Custom description of the schema.
|
||||
|
||||
If not provided, provided will use the model's docstring.
|
||||
"""
|
||||
|
||||
schema_kind: SchemaKind
|
||||
"""The kind of schema."""
|
||||
|
||||
json_schema: dict[str, Any]
|
||||
"""JSON schema associated with the schema."""
|
||||
|
||||
strict: bool = False
|
||||
"""Whether to enforce strict validation of the schema."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
schema: type[SchemaT],
|
||||
*,
|
||||
name: str | None = None,
|
||||
description: str | None = None,
|
||||
strict: bool = False,
|
||||
) -> None:
|
||||
"""Initialize SchemaSpec with schema and optional parameters."""
|
||||
self.schema = schema
|
||||
|
||||
if name:
|
||||
self.name = name
|
||||
elif isinstance(schema, dict):
|
||||
self.name = str(schema.get("title", f"response_format_{str(uuid.uuid4())[:4]}"))
|
||||
else:
|
||||
self.name = str(getattr(schema, "__name__", f"response_format_{str(uuid.uuid4())[:4]}"))
|
||||
|
||||
self.description = description or (
|
||||
schema.get("description", "")
|
||||
if isinstance(schema, dict)
|
||||
else getattr(schema, "__doc__", None) or ""
|
||||
)
|
||||
|
||||
self.strict = strict
|
||||
|
||||
if isinstance(schema, dict):
|
||||
self.schema_kind = "json_schema"
|
||||
self.json_schema = schema
|
||||
elif isinstance(schema, type) and issubclass(schema, BaseModel):
|
||||
self.schema_kind = "pydantic"
|
||||
self.json_schema = schema.model_json_schema()
|
||||
elif is_dataclass(schema):
|
||||
self.schema_kind = "dataclass"
|
||||
self.json_schema = TypeAdapter(schema).json_schema()
|
||||
elif is_typeddict(schema):
|
||||
self.schema_kind = "typeddict"
|
||||
self.json_schema = TypeAdapter(schema).json_schema()
|
||||
else:
|
||||
msg = (
|
||||
f"Unsupported schema type: {type(schema)}. "
|
||||
f"Supported types: Pydantic models, dataclasses, TypedDicts, and JSON schema dicts."
|
||||
)
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
@dataclass(init=False)
|
||||
class ToolOutput(Generic[SchemaT]):
|
||||
"""Use a tool calling strategy for model responses."""
|
||||
|
||||
schema: type[SchemaT]
|
||||
"""Schema for the tool calls."""
|
||||
|
||||
schema_specs: list[_SchemaSpec[SchemaT]]
|
||||
"""Schema specs for the tool calls."""
|
||||
|
||||
tool_message_content: str | None
|
||||
"""The content of the tool message to be returned when the model calls an artificial structured output tool."""
|
||||
|
||||
handle_errors: Union[
|
||||
bool,
|
||||
str,
|
||||
type[Exception],
|
||||
tuple[type[Exception], ...],
|
||||
Callable[[Exception], str],
|
||||
]
|
||||
"""Error handling strategy for structured output via ToolOutput. Default is True.
|
||||
|
||||
- True: Catch all errors with default error template
|
||||
- str: Catch all errors with this custom message
|
||||
- type[Exception]: Only catch this exception type with default message
|
||||
- tuple[type[Exception], ...]: Only catch these exception types with default message
|
||||
- Callable[[Exception], str]: Custom function that returns error message
|
||||
- False: No retry, let exceptions propagate
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
schema: type[SchemaT],
|
||||
tool_message_content: str | None = None,
|
||||
handle_errors: bool
|
||||
| str
|
||||
| type[Exception]
|
||||
| tuple[type[Exception], ...]
|
||||
| Callable[[Exception], str]
|
||||
| None = None,
|
||||
) -> None:
|
||||
"""Initialize ToolOutput with schemas, tool message content, and error handling strategy."""
|
||||
self.schema = schema
|
||||
self.tool_message_content = tool_message_content
|
||||
if handle_errors is None:
|
||||
self.handle_errors = True
|
||||
else:
|
||||
self.handle_errors = handle_errors
|
||||
|
||||
def _iter_variants(schema: Any) -> Iterable[Any]:
|
||||
"""Yield leaf variants from Union and JSON Schema oneOf."""
|
||||
if get_origin(schema) in (UnionType, Union):
|
||||
for arg in get_args(schema):
|
||||
yield from _iter_variants(arg)
|
||||
return
|
||||
|
||||
if isinstance(schema, dict) and "oneOf" in schema:
|
||||
for sub in schema.get("oneOf", []):
|
||||
yield from _iter_variants(sub)
|
||||
return
|
||||
|
||||
yield schema
|
||||
|
||||
self.schema_specs = [_SchemaSpec(s) for s in _iter_variants(schema)]
|
||||
|
||||
|
||||
@dataclass(init=False)
|
||||
class NativeOutput(Generic[SchemaT]):
|
||||
"""Use the model provider's native structured output method."""
|
||||
|
||||
schema: type[SchemaT]
|
||||
"""Schema for native mode."""
|
||||
|
||||
schema_spec: _SchemaSpec[SchemaT]
|
||||
"""Schema spec for native mode."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
schema: type[SchemaT],
|
||||
) -> None:
|
||||
"""Initialize NativeOutput with schema."""
|
||||
self.schema = schema
|
||||
self.schema_spec = _SchemaSpec(schema)
|
||||
|
||||
def to_model_kwargs(self) -> dict[str, Any]:
|
||||
"""Convert the schema to the appropriate format for the model provider."""
|
||||
# OpenAI:
|
||||
# - see https://platform.openai.com/docs/guides/structured-outputs
|
||||
response_format = {
|
||||
"type": "json_schema",
|
||||
"json_schema": {
|
||||
"name": self.schema_spec.name,
|
||||
"schema": self.schema_spec.json_schema,
|
||||
},
|
||||
}
|
||||
return {"response_format": response_format}
|
||||
|
||||
|
||||
@dataclass
|
||||
class OutputToolBinding(Generic[SchemaT]):
|
||||
"""Information for tracking structured output tool metadata.
|
||||
|
||||
This contains all necessary information to handle structured responses
|
||||
generated via tool calls, including the original schema, its type classification,
|
||||
and the corresponding tool implementation used by the tools strategy.
|
||||
"""
|
||||
|
||||
schema: type[SchemaT]
|
||||
"""The original schema provided for structured output (Pydantic model, dataclass, TypedDict, or JSON schema dict)."""
|
||||
|
||||
schema_kind: SchemaKind
|
||||
"""Classification of the schema type for proper response construction."""
|
||||
|
||||
tool: BaseTool
|
||||
"""LangChain tool instance created from the schema for model binding."""
|
||||
|
||||
@classmethod
|
||||
def from_schema_spec(cls, schema_spec: _SchemaSpec[SchemaT]) -> Self:
|
||||
"""Create an OutputToolBinding instance from a SchemaSpec.
|
||||
|
||||
Args:
|
||||
schema_spec: The SchemaSpec to convert
|
||||
|
||||
Returns:
|
||||
An OutputToolBinding instance with the appropriate tool created
|
||||
"""
|
||||
return cls(
|
||||
schema=schema_spec.schema,
|
||||
schema_kind=schema_spec.schema_kind,
|
||||
tool=StructuredTool(
|
||||
args_schema=schema_spec.json_schema,
|
||||
name=schema_spec.name,
|
||||
description=schema_spec.description,
|
||||
),
|
||||
)
|
||||
|
||||
def parse(self, tool_args: dict[str, Any]) -> SchemaT:
|
||||
"""Parse tool arguments according to the schema.
|
||||
|
||||
Args:
|
||||
tool_args: The arguments from the tool call
|
||||
|
||||
Returns:
|
||||
The parsed response according to the schema type
|
||||
|
||||
Raises:
|
||||
ValueError: If parsing fails
|
||||
"""
|
||||
return _parse_with_schema(self.schema, self.schema_kind, tool_args)
|
||||
|
||||
|
||||
@dataclass
|
||||
class NativeOutputBinding(Generic[SchemaT]):
|
||||
"""Information for tracking native structured output metadata.
|
||||
|
||||
This contains all necessary information to handle structured responses
|
||||
generated via native provider output, including the original schema,
|
||||
its type classification, and parsing logic for provider-enforced JSON.
|
||||
"""
|
||||
|
||||
schema: type[SchemaT]
|
||||
"""The original schema provided for structured output (Pydantic model, dataclass, TypedDict, or JSON schema dict)."""
|
||||
|
||||
schema_kind: SchemaKind
|
||||
"""Classification of the schema type for proper response construction."""
|
||||
|
||||
@classmethod
|
||||
def from_schema_spec(cls, schema_spec: _SchemaSpec[SchemaT]) -> Self:
|
||||
"""Create a NativeOutputBinding instance from a SchemaSpec.
|
||||
|
||||
Args:
|
||||
schema_spec: The SchemaSpec to convert
|
||||
|
||||
Returns:
|
||||
A NativeOutputBinding instance for parsing native structured output
|
||||
"""
|
||||
return cls(
|
||||
schema=schema_spec.schema,
|
||||
schema_kind=schema_spec.schema_kind,
|
||||
)
|
||||
|
||||
def parse(self, response: AIMessage) -> SchemaT:
|
||||
"""Parse AIMessage content according to the schema.
|
||||
|
||||
Args:
|
||||
response: The AI message containing the structured output
|
||||
|
||||
Returns:
|
||||
The parsed response according to the schema
|
||||
|
||||
Raises:
|
||||
ValueError: If text extraction, JSON parsing or schema validation fails
|
||||
"""
|
||||
# Extract text content from AIMessage and parse as JSON
|
||||
raw_text = self._extract_text_content_from_message(response)
|
||||
|
||||
import json
|
||||
|
||||
try:
|
||||
data = json.loads(raw_text)
|
||||
except Exception as e:
|
||||
schema_name = getattr(self.schema, "__name__", "response_format")
|
||||
msg = f"Native structured output expected valid JSON for {schema_name}, but parsing failed: {e}."
|
||||
raise ValueError(msg) from e
|
||||
|
||||
# Parse according to schema
|
||||
return _parse_with_schema(self.schema, self.schema_kind, data)
|
||||
|
||||
def _extract_text_content_from_message(self, message: AIMessage) -> str:
|
||||
"""Extract text content from an AIMessage.
|
||||
|
||||
Args:
|
||||
message: The AI message to extract text from
|
||||
|
||||
Returns:
|
||||
The extracted text content
|
||||
"""
|
||||
content = message.content
|
||||
if isinstance(content, str):
|
||||
return content
|
||||
if isinstance(content, list):
|
||||
parts: list[str] = []
|
||||
for c in content:
|
||||
if isinstance(c, dict):
|
||||
if c.get("type") == "text" and "text" in c:
|
||||
parts.append(str(c["text"]))
|
||||
elif "content" in c and isinstance(c["content"], str):
|
||||
parts.append(c["content"])
|
||||
else:
|
||||
parts.append(str(c))
|
||||
return "".join(parts)
|
||||
return str(content)
|
||||
|
||||
|
||||
ResponseFormat = Union[ToolOutput[SchemaT], NativeOutput[SchemaT]]
|
||||
1177
libs/langchain_v1/langchain/agents/tool_node.py
Normal file
1177
libs/langchain_v1/langchain/agents/tool_node.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -190,9 +190,7 @@ class _MapReduceExtractor(Generic[ContextT]):
|
||||
if isinstance(model, str):
|
||||
model = init_chat_model(model)
|
||||
|
||||
self.model = (
|
||||
model.with_structured_output(response_format) if response_format else model
|
||||
)
|
||||
self.model = model.with_structured_output(response_format) if response_format else model
|
||||
self.map_prompt = map_prompt
|
||||
self.reduce_prompt = reduce_prompt
|
||||
self.reduce = reduce
|
||||
@@ -342,9 +340,7 @@ class _MapReduceExtractor(Generic[ContextT]):
|
||||
config: RunnableConfig,
|
||||
) -> dict[str, list[ExtractionResult]]:
|
||||
prompt = await self._aget_map_prompt(state, runtime)
|
||||
response = cast(
|
||||
"AIMessage", await self.model.ainvoke(prompt, config=config)
|
||||
)
|
||||
response = cast("AIMessage", await self.model.ainvoke(prompt, config=config))
|
||||
result = response if self.response_format else response.text()
|
||||
extraction_result: ExtractionResult = {
|
||||
"indexes": state["indexes"],
|
||||
@@ -375,9 +371,7 @@ class _MapReduceExtractor(Generic[ContextT]):
|
||||
config: RunnableConfig,
|
||||
) -> MapReduceNodeUpdate:
|
||||
prompt = await self._aget_reduce_prompt(state, runtime)
|
||||
response = cast(
|
||||
"AIMessage", await self.model.ainvoke(prompt, config=config)
|
||||
)
|
||||
response = cast("AIMessage", await self.model.ainvoke(prompt, config=config))
|
||||
result = response if self.response_format else response.text()
|
||||
return {"result": result}
|
||||
|
||||
|
||||
@@ -173,9 +173,7 @@ class _Extractor(Generic[ContextT]):
|
||||
if isinstance(model, str):
|
||||
model = init_chat_model(model)
|
||||
|
||||
self.model = (
|
||||
model.with_structured_output(response_format) if response_format else model
|
||||
)
|
||||
self.model = model.with_structured_output(response_format) if response_format else model
|
||||
self.initial_prompt = prompt
|
||||
self.refine_prompt = refine_prompt
|
||||
self.context_schema = context_schema
|
||||
@@ -188,9 +186,7 @@ class _Extractor(Generic[ContextT]):
|
||||
|
||||
# Choose default prompt based on structured output format
|
||||
default_prompt = (
|
||||
DEFAULT_STRUCTURED_INIT_PROMPT
|
||||
if self.response_format
|
||||
else DEFAULT_INIT_PROMPT
|
||||
DEFAULT_STRUCTURED_INIT_PROMPT if self.response_format else DEFAULT_INIT_PROMPT
|
||||
)
|
||||
|
||||
return resolve_prompt(
|
||||
@@ -209,9 +205,7 @@ class _Extractor(Generic[ContextT]):
|
||||
|
||||
# Choose default prompt based on structured output format
|
||||
default_prompt = (
|
||||
DEFAULT_STRUCTURED_INIT_PROMPT
|
||||
if self.response_format
|
||||
else DEFAULT_INIT_PROMPT
|
||||
DEFAULT_STRUCTURED_INIT_PROMPT if self.response_format else DEFAULT_INIT_PROMPT
|
||||
)
|
||||
|
||||
return await aresolve_prompt(
|
||||
@@ -246,9 +240,7 @@ class _Extractor(Generic[ContextT]):
|
||||
|
||||
# Choose default prompt based on structured output format
|
||||
default_prompt = (
|
||||
DEFAULT_STRUCTURED_REFINE_PROMPT
|
||||
if self.response_format
|
||||
else DEFAULT_REFINE_PROMPT
|
||||
DEFAULT_STRUCTURED_REFINE_PROMPT if self.response_format else DEFAULT_REFINE_PROMPT
|
||||
)
|
||||
|
||||
return resolve_prompt(
|
||||
@@ -283,9 +275,7 @@ class _Extractor(Generic[ContextT]):
|
||||
|
||||
# Choose default prompt based on structured output format
|
||||
default_prompt = (
|
||||
DEFAULT_STRUCTURED_REFINE_PROMPT
|
||||
if self.response_format
|
||||
else DEFAULT_REFINE_PROMPT
|
||||
DEFAULT_STRUCTURED_REFINE_PROMPT if self.response_format else DEFAULT_REFINE_PROMPT
|
||||
)
|
||||
|
||||
return await aresolve_prompt(
|
||||
@@ -340,16 +330,12 @@ class _Extractor(Generic[ContextT]):
|
||||
if "result" not in state or state["result"] == "":
|
||||
# Initial processing
|
||||
prompt = await self._aget_initial_prompt(state, runtime)
|
||||
response = cast(
|
||||
"AIMessage", await self.model.ainvoke(prompt, config=config)
|
||||
)
|
||||
response = cast("AIMessage", await self.model.ainvoke(prompt, config=config))
|
||||
result = response if self.response_format else response.text()
|
||||
return {"result": result}
|
||||
# Refinement
|
||||
prompt = await self._aget_refine_prompt(state, runtime)
|
||||
response = cast(
|
||||
"AIMessage", await self.model.ainvoke(prompt, config=config)
|
||||
)
|
||||
response = cast("AIMessage", await self.model.ainvoke(prompt, config=config))
|
||||
result = response if self.response_format else response.text()
|
||||
return {"result": result}
|
||||
|
||||
|
||||
@@ -67,9 +67,7 @@ def init_chat_model(
|
||||
model: Optional[str] = None,
|
||||
*,
|
||||
model_provider: Optional[str] = None,
|
||||
configurable_fields: Optional[
|
||||
Union[Literal["any"], list[str], tuple[str, ...]]
|
||||
] = None,
|
||||
configurable_fields: Optional[Union[Literal["any"], list[str], tuple[str, ...]]] = None,
|
||||
config_prefix: Optional[str] = None,
|
||||
**kwargs: Any,
|
||||
) -> Union[BaseChatModel, _ConfigurableModel]:
|
||||
@@ -446,9 +444,7 @@ def _init_chat_model_helper(
|
||||
|
||||
return ChatPerplexity(model=model, **kwargs)
|
||||
supported = ", ".join(_SUPPORTED_PROVIDERS)
|
||||
msg = (
|
||||
f"Unsupported {model_provider=}.\n\nSupported model providers are: {supported}"
|
||||
)
|
||||
msg = f"Unsupported {model_provider=}.\n\nSupported model providers are: {supported}"
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
@@ -501,18 +497,13 @@ def _attempt_infer_model_provider(model_name: str) -> Optional[str]:
|
||||
|
||||
|
||||
def _parse_model(model: str, model_provider: Optional[str]) -> tuple[str, str]:
|
||||
if (
|
||||
not model_provider
|
||||
and ":" in model
|
||||
and model.split(":")[0] in _SUPPORTED_PROVIDERS
|
||||
):
|
||||
if not model_provider and ":" in model and model.split(":")[0] in _SUPPORTED_PROVIDERS:
|
||||
model_provider = model.split(":")[0]
|
||||
model = ":".join(model.split(":")[1:])
|
||||
model_provider = model_provider or _attempt_infer_model_provider(model)
|
||||
if not model_provider:
|
||||
msg = (
|
||||
f"Unable to infer model provider for {model=}, please specify "
|
||||
f"model_provider directly."
|
||||
f"Unable to infer model provider for {model=}, please specify model_provider directly."
|
||||
)
|
||||
raise ValueError(msg)
|
||||
model_provider = model_provider.replace("-", "_").lower()
|
||||
@@ -522,9 +513,7 @@ def _parse_model(model: str, model_provider: Optional[str]) -> tuple[str, str]:
|
||||
def _check_pkg(pkg: str, *, pkg_kebab: Optional[str] = None) -> None:
|
||||
if not util.find_spec(pkg):
|
||||
pkg_kebab = pkg_kebab if pkg_kebab is not None else pkg.replace("_", "-")
|
||||
msg = (
|
||||
f"Unable to import {pkg}. Please install with `pip install -U {pkg_kebab}`"
|
||||
)
|
||||
msg = f"Unable to import {pkg}. Please install with `pip install -U {pkg_kebab}`"
|
||||
raise ImportError(msg)
|
||||
|
||||
|
||||
@@ -546,9 +535,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
|
||||
) -> None:
|
||||
self._default_config: dict = default_config or {}
|
||||
self._configurable_fields: Union[Literal["any"], list[str]] = (
|
||||
configurable_fields
|
||||
if configurable_fields == "any"
|
||||
else list(configurable_fields)
|
||||
configurable_fields if configurable_fields == "any" else list(configurable_fields)
|
||||
)
|
||||
self._config_prefix = (
|
||||
config_prefix + "_"
|
||||
@@ -604,9 +591,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
|
||||
if k.startswith(self._config_prefix)
|
||||
}
|
||||
if self._configurable_fields != "any":
|
||||
model_params = {
|
||||
k: v for k, v in model_params.items() if k in self._configurable_fields
|
||||
}
|
||||
model_params = {k: v for k, v in model_params.items() if k in self._configurable_fields}
|
||||
return model_params
|
||||
|
||||
def with_config(
|
||||
|
||||
@@ -19,9 +19,7 @@ _SUPPORTED_PROVIDERS = {
|
||||
|
||||
def _get_provider_list() -> str:
|
||||
"""Get formatted list of providers and their packages."""
|
||||
return "\n".join(
|
||||
f" - {p}: {pkg.replace('_', '-')}" for p, pkg in _SUPPORTED_PROVIDERS.items()
|
||||
)
|
||||
return "\n".join(f" - {p}: {pkg.replace('_', '-')}" for p, pkg in _SUPPORTED_PROVIDERS.items())
|
||||
|
||||
|
||||
def _parse_model_string(model_name: str) -> tuple[str, str]:
|
||||
@@ -117,10 +115,7 @@ def _infer_model_and_provider(
|
||||
def _check_pkg(pkg: str) -> None:
|
||||
"""Check if a package is installed."""
|
||||
if not util.find_spec(pkg):
|
||||
msg = (
|
||||
f"Could not import {pkg} python package. "
|
||||
f"Please install it with `pip install {pkg}`"
|
||||
)
|
||||
msg = f"Could not import {pkg} python package. Please install it with `pip install {pkg}`"
|
||||
raise ImportError(msg)
|
||||
|
||||
|
||||
@@ -182,9 +177,7 @@ def init_embeddings(
|
||||
"""
|
||||
if not model:
|
||||
providers = _SUPPORTED_PROVIDERS.keys()
|
||||
msg = (
|
||||
f"Must specify model name. Supported providers are: {', '.join(providers)}"
|
||||
)
|
||||
msg = f"Must specify model name. Supported providers are: {', '.join(providers)}"
|
||||
raise ValueError(msg)
|
||||
|
||||
provider, model_name = _infer_model_and_provider(model, provider=provider)
|
||||
|
||||
@@ -181,9 +181,7 @@ class CacheBackedEmbeddings(Embeddings):
|
||||
vectors: list[Union[list[float], None]] = self.document_embedding_store.mget(
|
||||
texts,
|
||||
)
|
||||
all_missing_indices: list[int] = [
|
||||
i for i, vector in enumerate(vectors) if vector is None
|
||||
]
|
||||
all_missing_indices: list[int] = [i for i, vector in enumerate(vectors) if vector is None]
|
||||
|
||||
for missing_indices in batch_iterate(self.batch_size, all_missing_indices):
|
||||
missing_texts = [texts[i] for i in missing_indices]
|
||||
@@ -212,12 +210,8 @@ class CacheBackedEmbeddings(Embeddings):
|
||||
Returns:
|
||||
A list of embeddings for the given texts.
|
||||
"""
|
||||
vectors: list[
|
||||
Union[list[float], None]
|
||||
] = await self.document_embedding_store.amget(texts)
|
||||
all_missing_indices: list[int] = [
|
||||
i for i, vector in enumerate(vectors) if vector is None
|
||||
]
|
||||
vectors: list[Union[list[float], None]] = await self.document_embedding_store.amget(texts)
|
||||
all_missing_indices: list[int] = [i for i, vector in enumerate(vectors) if vector is None]
|
||||
|
||||
# batch_iterate supports None batch_size which returns all elements at once
|
||||
# as a single batch.
|
||||
|
||||
@@ -66,33 +66,25 @@ class EncoderBackedStore(BaseStore[K, V]):
|
||||
"""Get the values associated with the given keys."""
|
||||
encoded_keys: list[str] = [self.key_encoder(key) for key in keys]
|
||||
values = self.store.mget(encoded_keys)
|
||||
return [
|
||||
self.value_deserializer(value) if value is not None else value
|
||||
for value in values
|
||||
]
|
||||
return [self.value_deserializer(value) if value is not None else value for value in values]
|
||||
|
||||
async def amget(self, keys: Sequence[K]) -> list[Optional[V]]:
|
||||
"""Get the values associated with the given keys."""
|
||||
encoded_keys: list[str] = [self.key_encoder(key) for key in keys]
|
||||
values = await self.store.amget(encoded_keys)
|
||||
return [
|
||||
self.value_deserializer(value) if value is not None else value
|
||||
for value in values
|
||||
]
|
||||
return [self.value_deserializer(value) if value is not None else value for value in values]
|
||||
|
||||
def mset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None:
|
||||
"""Set the values for the given keys."""
|
||||
encoded_pairs = [
|
||||
(self.key_encoder(key), self.value_serializer(value))
|
||||
for key, value in key_value_pairs
|
||||
(self.key_encoder(key), self.value_serializer(value)) for key, value in key_value_pairs
|
||||
]
|
||||
self.store.mset(encoded_pairs)
|
||||
|
||||
async def amset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None:
|
||||
"""Set the values for the given keys."""
|
||||
encoded_pairs = [
|
||||
(self.key_encoder(key), self.value_serializer(value))
|
||||
for key, value in key_value_pairs
|
||||
(self.key_encoder(key), self.value_serializer(value)) for key, value in key_value_pairs
|
||||
]
|
||||
await self.store.amset(encoded_pairs)
|
||||
|
||||
|
||||
@@ -50,6 +50,7 @@ test = [
|
||||
"pytest-watcher<1.0.0,>=0.2.6",
|
||||
"pytest-asyncio<1.0.0,>=0.23.2",
|
||||
"pytest-socket<1.0.0,>=0.6.0",
|
||||
"pytest-mock<4.0.0,>=3.12.0",
|
||||
"syrupy<5.0.0,>=4.0.2",
|
||||
"pytest-xdist<4.0.0,>=3.6.1",
|
||||
"blockbuster<1.6,>=1.5.18",
|
||||
@@ -88,6 +89,7 @@ langchain-openai = { path = "../partners/openai", editable = true }
|
||||
[tool.ruff]
|
||||
target-version = "py39"
|
||||
exclude = ["tests/integration_tests/examples/non-utf8-encoding.py"]
|
||||
line-length = 100
|
||||
|
||||
[tool.mypy]
|
||||
strict = "True"
|
||||
@@ -132,9 +134,13 @@ pyupgrade.keep-runtime-typing = true
|
||||
flake8-annotations.allow-star-arg-any = true
|
||||
|
||||
[tool.ruff.lint.per-file-ignores]
|
||||
"tests/*" = [
|
||||
"D", # Documentation rules
|
||||
"PLC0415", # Imports should be at the top. Not always desirable for tests
|
||||
"tests/**/*" = ["ALL"]
|
||||
"langchain/agents/*" = [
|
||||
"E501", # line too long
|
||||
"ANN401", # we use Any
|
||||
"A001", # input is shadowing builtin
|
||||
"A002", # input is shadowing builtin
|
||||
"PLR2004", # magic values are fine for this case
|
||||
]
|
||||
|
||||
[tool.ruff.lint.extend-per-file-ignores]
|
||||
|
||||
@@ -31,9 +31,7 @@ async def test_init_chat_model_chain() -> None:
|
||||
chain = prompt | model_with_config
|
||||
output = chain.invoke({"input": "bar"})
|
||||
assert isinstance(output, AIMessage)
|
||||
events = [
|
||||
event async for event in chain.astream_events({"input": "bar"}, version="v2")
|
||||
]
|
||||
events = [event async for event in chain.astream_events({"input": "bar"}, version="v2")]
|
||||
assert events
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,83 @@
|
||||
# serializer version: 1
|
||||
# name: test_react_agent_graph_structure[None-None-tools0]
|
||||
'''
|
||||
graph TD;
|
||||
__start__ --> model;
|
||||
model --> __end__;
|
||||
|
||||
'''
|
||||
# ---
|
||||
# name: test_react_agent_graph_structure[None-None-tools1]
|
||||
'''
|
||||
graph TD;
|
||||
__start__ --> model;
|
||||
model -.-> __end__;
|
||||
model -.-> tools;
|
||||
tools --> model;
|
||||
|
||||
'''
|
||||
# ---
|
||||
# name: test_react_agent_graph_structure[None-pre_model_hook-tools0]
|
||||
'''
|
||||
graph TD;
|
||||
__start__ --> pre_model_hook;
|
||||
pre_model_hook --> model;
|
||||
model --> __end__;
|
||||
|
||||
'''
|
||||
# ---
|
||||
# name: test_react_agent_graph_structure[None-pre_model_hook-tools1]
|
||||
'''
|
||||
graph TD;
|
||||
__start__ --> pre_model_hook;
|
||||
model -.-> __end__;
|
||||
model -.-> tools;
|
||||
pre_model_hook --> model;
|
||||
tools --> pre_model_hook;
|
||||
|
||||
'''
|
||||
# ---
|
||||
# name: test_react_agent_graph_structure[post_model_hook-None-tools0]
|
||||
'''
|
||||
graph TD;
|
||||
__start__ --> model;
|
||||
model --> post_model_hook;
|
||||
post_model_hook --> __end__;
|
||||
|
||||
'''
|
||||
# ---
|
||||
# name: test_react_agent_graph_structure[post_model_hook-None-tools1]
|
||||
'''
|
||||
graph TD;
|
||||
__start__ --> model;
|
||||
model --> post_model_hook;
|
||||
post_model_hook -.-> __end__;
|
||||
post_model_hook -.-> model;
|
||||
post_model_hook -.-> tools;
|
||||
tools --> model;
|
||||
|
||||
'''
|
||||
# ---
|
||||
# name: test_react_agent_graph_structure[post_model_hook-pre_model_hook-tools0]
|
||||
'''
|
||||
graph TD;
|
||||
__start__ --> pre_model_hook;
|
||||
model --> post_model_hook;
|
||||
pre_model_hook --> model;
|
||||
post_model_hook --> __end__;
|
||||
|
||||
'''
|
||||
# ---
|
||||
# name: test_react_agent_graph_structure[post_model_hook-pre_model_hook-tools1]
|
||||
'''
|
||||
graph TD;
|
||||
__start__ --> pre_model_hook;
|
||||
model --> post_model_hook;
|
||||
post_model_hook -.-> __end__;
|
||||
post_model_hook -.-> pre_model_hook;
|
||||
post_model_hook -.-> tools;
|
||||
pre_model_hook --> model;
|
||||
tools --> pre_model_hook;
|
||||
|
||||
'''
|
||||
# ---
|
||||
18
libs/langchain_v1/tests/unit_tests/agents/any_str.py
Normal file
18
libs/langchain_v1/tests/unit_tests/agents/any_str.py
Normal file
@@ -0,0 +1,18 @@
|
||||
import re
|
||||
from typing import Union
|
||||
|
||||
|
||||
class AnyStr(str):
|
||||
def __init__(self, prefix: Union[str, re.Pattern] = "") -> None:
|
||||
super().__init__()
|
||||
self.prefix = prefix
|
||||
|
||||
def __eq__(self, other: object) -> bool:
|
||||
return isinstance(other, str) and (
|
||||
other.startswith(self.prefix)
|
||||
if isinstance(self.prefix, str)
|
||||
else self.prefix.match(other)
|
||||
)
|
||||
|
||||
def __hash__(self) -> int:
|
||||
return hash((str(self), self.prefix))
|
||||
@@ -0,0 +1,17 @@
|
||||
name: langgraph-tests
|
||||
services:
|
||||
postgres-test:
|
||||
image: postgres:16
|
||||
ports:
|
||||
- "5442:5432"
|
||||
environment:
|
||||
POSTGRES_DB: postgres
|
||||
POSTGRES_USER: postgres
|
||||
POSTGRES_PASSWORD: postgres
|
||||
healthcheck:
|
||||
test: pg_isready -U postgres
|
||||
start_period: 10s
|
||||
timeout: 1s
|
||||
retries: 5
|
||||
interval: 60s
|
||||
start_interval: 1s
|
||||
16
libs/langchain_v1/tests/unit_tests/agents/compose-redis.yml
Normal file
16
libs/langchain_v1/tests/unit_tests/agents/compose-redis.yml
Normal file
@@ -0,0 +1,16 @@
|
||||
name: langgraph-tests-redis
|
||||
services:
|
||||
redis-test:
|
||||
image: redis:7-alpine
|
||||
ports:
|
||||
- "6379:6379"
|
||||
command: redis-server --maxmemory 256mb --maxmemory-policy allkeys-lru
|
||||
healthcheck:
|
||||
test: redis-cli ping
|
||||
start_period: 10s
|
||||
timeout: 1s
|
||||
retries: 5
|
||||
interval: 5s
|
||||
start_interval: 1s
|
||||
tmpfs:
|
||||
- /data # Use tmpfs for faster testing
|
||||
194
libs/langchain_v1/tests/unit_tests/agents/conftest.py
Normal file
194
libs/langchain_v1/tests/unit_tests/agents/conftest.py
Normal file
@@ -0,0 +1,194 @@
|
||||
import os
|
||||
from collections.abc import AsyncIterator, Iterator
|
||||
from uuid import UUID
|
||||
|
||||
import pytest
|
||||
from langgraph.checkpoint.base import BaseCheckpointSaver
|
||||
from langgraph.store.base import BaseStore
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from .conftest_checkpointer import (
|
||||
_checkpointer_memory,
|
||||
_checkpointer_postgres,
|
||||
_checkpointer_postgres_aio,
|
||||
_checkpointer_postgres_aio_pipe,
|
||||
_checkpointer_postgres_aio_pool,
|
||||
_checkpointer_postgres_pipe,
|
||||
_checkpointer_postgres_pool,
|
||||
_checkpointer_sqlite,
|
||||
_checkpointer_sqlite_aio,
|
||||
)
|
||||
from .conftest_store import (
|
||||
_store_memory,
|
||||
_store_postgres,
|
||||
_store_postgres_aio,
|
||||
_store_postgres_aio_pipe,
|
||||
_store_postgres_aio_pool,
|
||||
_store_postgres_pipe,
|
||||
_store_postgres_pool,
|
||||
)
|
||||
|
||||
# Global variables for checkpointer and store configurations
|
||||
FAST_MODE = os.getenv("LANGGRAPH_TEST_FAST", "true").lower() in ("true", "1", "yes")
|
||||
|
||||
SYNC_CHECKPOINTER_PARAMS = (
|
||||
["memory"]
|
||||
if FAST_MODE
|
||||
else [
|
||||
"memory",
|
||||
"sqlite",
|
||||
"postgres",
|
||||
"postgres_pipe",
|
||||
"postgres_pool",
|
||||
]
|
||||
)
|
||||
|
||||
ASYNC_CHECKPOINTER_PARAMS = (
|
||||
["memory"]
|
||||
if FAST_MODE
|
||||
else [
|
||||
"memory",
|
||||
"sqlite_aio",
|
||||
"postgres_aio",
|
||||
"postgres_aio_pipe",
|
||||
"postgres_aio_pool",
|
||||
]
|
||||
)
|
||||
|
||||
SYNC_STORE_PARAMS = (
|
||||
["in_memory"]
|
||||
if FAST_MODE
|
||||
else [
|
||||
"in_memory",
|
||||
"postgres",
|
||||
"postgres_pipe",
|
||||
"postgres_pool",
|
||||
]
|
||||
)
|
||||
|
||||
ASYNC_STORE_PARAMS = (
|
||||
["in_memory"]
|
||||
if FAST_MODE
|
||||
else [
|
||||
"in_memory",
|
||||
"postgres_aio",
|
||||
"postgres_aio_pipe",
|
||||
"postgres_aio_pool",
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def anyio_backend() -> str:
|
||||
return "asyncio"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def deterministic_uuids(mocker: MockerFixture) -> MockerFixture:
|
||||
side_effect = (UUID(f"00000000-0000-4000-8000-{i:012}", version=4) for i in range(10000))
|
||||
return mocker.patch("uuid.uuid4", side_effect=side_effect)
|
||||
|
||||
|
||||
# checkpointer fixtures
|
||||
|
||||
|
||||
@pytest.fixture(
|
||||
params=SYNC_STORE_PARAMS,
|
||||
)
|
||||
def sync_store(request: pytest.FixtureRequest) -> Iterator[BaseStore]:
|
||||
store_name = request.param
|
||||
if store_name is None:
|
||||
yield None
|
||||
elif store_name == "in_memory":
|
||||
with _store_memory() as store:
|
||||
yield store
|
||||
elif store_name == "postgres":
|
||||
with _store_postgres() as store:
|
||||
yield store
|
||||
elif store_name == "postgres_pipe":
|
||||
with _store_postgres_pipe() as store:
|
||||
yield store
|
||||
elif store_name == "postgres_pool":
|
||||
with _store_postgres_pool() as store:
|
||||
yield store
|
||||
else:
|
||||
msg = f"Unknown store {store_name}"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
|
||||
@pytest.fixture(
|
||||
params=ASYNC_STORE_PARAMS,
|
||||
)
|
||||
async def async_store(request: pytest.FixtureRequest) -> AsyncIterator[BaseStore]:
|
||||
store_name = request.param
|
||||
if store_name is None:
|
||||
yield None
|
||||
elif store_name == "in_memory":
|
||||
with _store_memory() as store:
|
||||
yield store
|
||||
elif store_name == "postgres_aio":
|
||||
async with _store_postgres_aio() as store:
|
||||
yield store
|
||||
elif store_name == "postgres_aio_pipe":
|
||||
async with _store_postgres_aio_pipe() as store:
|
||||
yield store
|
||||
elif store_name == "postgres_aio_pool":
|
||||
async with _store_postgres_aio_pool() as store:
|
||||
yield store
|
||||
else:
|
||||
msg = f"Unknown store {store_name}"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
|
||||
@pytest.fixture(
|
||||
params=SYNC_CHECKPOINTER_PARAMS,
|
||||
)
|
||||
def sync_checkpointer(
|
||||
request: pytest.FixtureRequest,
|
||||
) -> Iterator[BaseCheckpointSaver]:
|
||||
checkpointer_name = request.param
|
||||
if checkpointer_name == "memory":
|
||||
with _checkpointer_memory() as checkpointer:
|
||||
yield checkpointer
|
||||
elif checkpointer_name == "sqlite":
|
||||
with _checkpointer_sqlite() as checkpointer:
|
||||
yield checkpointer
|
||||
elif checkpointer_name == "postgres":
|
||||
with _checkpointer_postgres() as checkpointer:
|
||||
yield checkpointer
|
||||
elif checkpointer_name == "postgres_pipe":
|
||||
with _checkpointer_postgres_pipe() as checkpointer:
|
||||
yield checkpointer
|
||||
elif checkpointer_name == "postgres_pool":
|
||||
with _checkpointer_postgres_pool() as checkpointer:
|
||||
yield checkpointer
|
||||
else:
|
||||
msg = f"Unknown checkpointer: {checkpointer_name}"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
|
||||
@pytest.fixture(
|
||||
params=ASYNC_CHECKPOINTER_PARAMS,
|
||||
)
|
||||
async def async_checkpointer(
|
||||
request: pytest.FixtureRequest,
|
||||
) -> AsyncIterator[BaseCheckpointSaver]:
|
||||
checkpointer_name = request.param
|
||||
if checkpointer_name == "memory":
|
||||
with _checkpointer_memory() as checkpointer:
|
||||
yield checkpointer
|
||||
elif checkpointer_name == "sqlite_aio":
|
||||
async with _checkpointer_sqlite_aio() as checkpointer:
|
||||
yield checkpointer
|
||||
elif checkpointer_name == "postgres_aio":
|
||||
async with _checkpointer_postgres_aio() as checkpointer:
|
||||
yield checkpointer
|
||||
elif checkpointer_name == "postgres_aio_pipe":
|
||||
async with _checkpointer_postgres_aio_pipe() as checkpointer:
|
||||
yield checkpointer
|
||||
elif checkpointer_name == "postgres_aio_pool":
|
||||
async with _checkpointer_postgres_aio_pool() as checkpointer:
|
||||
yield checkpointer
|
||||
else:
|
||||
msg = f"Unknown checkpointer: {checkpointer_name}"
|
||||
raise NotImplementedError(msg)
|
||||
@@ -0,0 +1,64 @@
|
||||
from contextlib import asynccontextmanager, contextmanager
|
||||
|
||||
from .memory_assert import (
|
||||
MemorySaverAssertImmutable,
|
||||
)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _checkpointer_memory():
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _checkpointer_memory_aio():
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
# Placeholder functions for other checkpointer types that aren't available
|
||||
@contextmanager
|
||||
def _checkpointer_sqlite():
|
||||
# Fallback to memory for now
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _checkpointer_postgres():
|
||||
# Fallback to memory for now
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _checkpointer_postgres_pipe():
|
||||
# Fallback to memory for now
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _checkpointer_postgres_pool():
|
||||
# Fallback to memory for now
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _checkpointer_sqlite_aio():
|
||||
# Fallback to memory for now
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _checkpointer_postgres_aio():
|
||||
# Fallback to memory for now
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _checkpointer_postgres_aio_pipe():
|
||||
# Fallback to memory for now
|
||||
yield MemorySaverAssertImmutable()
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _checkpointer_postgres_aio_pool():
|
||||
# Fallback to memory for now
|
||||
yield MemorySaverAssertImmutable()
|
||||
58
libs/langchain_v1/tests/unit_tests/agents/conftest_store.py
Normal file
58
libs/langchain_v1/tests/unit_tests/agents/conftest_store.py
Normal file
@@ -0,0 +1,58 @@
|
||||
from contextlib import asynccontextmanager, contextmanager
|
||||
|
||||
from langgraph.store.memory import InMemoryStore
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _store_memory():
|
||||
store = InMemoryStore()
|
||||
yield store
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _store_memory_aio():
|
||||
store = InMemoryStore()
|
||||
yield store
|
||||
|
||||
|
||||
# Placeholder functions for other store types that aren't available
|
||||
@contextmanager
|
||||
def _store_postgres():
|
||||
# Fallback to memory for now
|
||||
store = InMemoryStore()
|
||||
yield store
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _store_postgres_pipe():
|
||||
# Fallback to memory for now
|
||||
store = InMemoryStore()
|
||||
yield store
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _store_postgres_pool():
|
||||
# Fallback to memory for now
|
||||
store = InMemoryStore()
|
||||
yield store
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _store_postgres_aio():
|
||||
# Fallback to memory for now
|
||||
store = InMemoryStore()
|
||||
yield store
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _store_postgres_aio_pipe():
|
||||
# Fallback to memory for now
|
||||
store = InMemoryStore()
|
||||
yield store
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def _store_postgres_aio_pool():
|
||||
# Fallback to memory for now
|
||||
store = InMemoryStore()
|
||||
yield store
|
||||
57
libs/langchain_v1/tests/unit_tests/agents/memory_assert.py
Normal file
57
libs/langchain_v1/tests/unit_tests/agents/memory_assert.py
Normal file
@@ -0,0 +1,57 @@
|
||||
import os
|
||||
import tempfile
|
||||
from collections import defaultdict
|
||||
from functools import partial
|
||||
from typing import Optional
|
||||
|
||||
from langgraph.checkpoint.base import (
|
||||
ChannelVersions,
|
||||
Checkpoint,
|
||||
CheckpointMetadata,
|
||||
SerializerProtocol,
|
||||
)
|
||||
from langgraph.checkpoint.memory import InMemorySaver, PersistentDict
|
||||
from langgraph.pregel._checkpoint import copy_checkpoint
|
||||
|
||||
|
||||
class MemorySaverAssertImmutable(InMemorySaver):
|
||||
storage_for_copies: defaultdict[str, dict[str, dict[str, Checkpoint]]]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
serde: Optional[SerializerProtocol] = None,
|
||||
put_sleep: Optional[float] = None,
|
||||
) -> None:
|
||||
_, filename = tempfile.mkstemp()
|
||||
super().__init__(serde=serde, factory=partial(PersistentDict, filename=filename))
|
||||
self.storage_for_copies = defaultdict(lambda: defaultdict(dict))
|
||||
self.put_sleep = put_sleep
|
||||
self.stack.callback(os.remove, filename)
|
||||
|
||||
def put(
|
||||
self,
|
||||
config: dict,
|
||||
checkpoint: Checkpoint,
|
||||
metadata: CheckpointMetadata,
|
||||
new_versions: ChannelVersions,
|
||||
) -> None:
|
||||
if self.put_sleep:
|
||||
import time
|
||||
|
||||
time.sleep(self.put_sleep)
|
||||
# assert checkpoint hasn't been modified since last written
|
||||
thread_id = config["configurable"]["thread_id"]
|
||||
checkpoint_ns = config["configurable"]["checkpoint_ns"]
|
||||
if saved := super().get(config):
|
||||
assert (
|
||||
self.serde.loads_typed(
|
||||
self.storage_for_copies[thread_id][checkpoint_ns][saved["id"]]
|
||||
)
|
||||
== saved
|
||||
)
|
||||
self.storage_for_copies[thread_id][checkpoint_ns][checkpoint["id"]] = (
|
||||
self.serde.dumps_typed(copy_checkpoint(checkpoint))
|
||||
)
|
||||
# call super to write checkpoint
|
||||
return super().put(config, checkpoint, metadata, new_versions)
|
||||
28
libs/langchain_v1/tests/unit_tests/agents/messages.py
Normal file
28
libs/langchain_v1/tests/unit_tests/agents/messages.py
Normal file
@@ -0,0 +1,28 @@
|
||||
"""Redefined messages as a work-around for pydantic issue with AnyStr.
|
||||
|
||||
The code below creates version of pydantic models
|
||||
that will work in unit tests with AnyStr as id field
|
||||
Please note that the `id` field is assigned AFTER the model is created
|
||||
to workaround an issue with pydantic ignoring the __eq__ method on
|
||||
subclassed strings.
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
from langchain_core.messages import HumanMessage, ToolMessage
|
||||
|
||||
from .any_str import AnyStr
|
||||
|
||||
|
||||
def _AnyIdHumanMessage(**kwargs: Any) -> HumanMessage:
|
||||
"""Create a human message with an any id field."""
|
||||
message = HumanMessage(**kwargs)
|
||||
message.id = AnyStr()
|
||||
return message
|
||||
|
||||
|
||||
def _AnyIdToolMessage(**kwargs: Any) -> ToolMessage:
|
||||
"""Create a tool message with an any id field."""
|
||||
message = ToolMessage(**kwargs)
|
||||
message.id = AnyStr()
|
||||
return message
|
||||
113
libs/langchain_v1/tests/unit_tests/agents/model.py
Normal file
113
libs/langchain_v1/tests/unit_tests/agents/model.py
Normal file
@@ -0,0 +1,113 @@
|
||||
import json
|
||||
from collections.abc import Sequence
|
||||
from dataclasses import asdict, is_dataclass
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Generic,
|
||||
Literal,
|
||||
Optional,
|
||||
TypeVar,
|
||||
Union,
|
||||
)
|
||||
|
||||
from langchain_core.callbacks import CallbackManagerForLLMRun
|
||||
from langchain_core.language_models import BaseChatModel, LanguageModelInput
|
||||
from langchain_core.messages import (
|
||||
AIMessage,
|
||||
BaseMessage,
|
||||
ToolCall,
|
||||
)
|
||||
from langchain_core.outputs import ChatGeneration, ChatResult
|
||||
from langchain_core.runnables import Runnable
|
||||
from langchain_core.tools import BaseTool
|
||||
from pydantic import BaseModel
|
||||
|
||||
StructuredResponseT = TypeVar("StructuredResponseT")
|
||||
|
||||
|
||||
class FakeToolCallingModel(BaseChatModel, Generic[StructuredResponseT]):
|
||||
tool_calls: Optional[Union[list[list[ToolCall]], list[list[dict]]]] = None
|
||||
structured_response: Optional[StructuredResponseT] = None
|
||||
index: int = 0
|
||||
tool_style: Literal["openai", "anthropic"] = "openai"
|
||||
|
||||
def _generate(
|
||||
self,
|
||||
messages: list[BaseMessage],
|
||||
stop: Optional[list[str]] = None,
|
||||
run_manager: Optional[CallbackManagerForLLMRun] = None,
|
||||
**kwargs: Any,
|
||||
) -> ChatResult:
|
||||
"""Top Level call"""
|
||||
rf = kwargs.get("response_format")
|
||||
is_native = isinstance(rf, dict) and rf.get("type") == "json_schema"
|
||||
|
||||
if self.tool_calls:
|
||||
if is_native:
|
||||
tool_calls = (
|
||||
self.tool_calls[self.index] if self.index < len(self.tool_calls) else []
|
||||
)
|
||||
else:
|
||||
tool_calls = self.tool_calls[self.index % len(self.tool_calls)]
|
||||
else:
|
||||
tool_calls = []
|
||||
|
||||
if is_native and not tool_calls:
|
||||
if isinstance(self.structured_response, BaseModel):
|
||||
content_obj = self.structured_response.model_dump()
|
||||
elif is_dataclass(self.structured_response):
|
||||
content_obj = asdict(self.structured_response)
|
||||
elif isinstance(self.structured_response, dict):
|
||||
content_obj = self.structured_response
|
||||
message = AIMessage(content=json.dumps(content_obj), id=str(self.index))
|
||||
else:
|
||||
messages_string = "-".join([m.content for m in messages])
|
||||
message = AIMessage(
|
||||
content=messages_string,
|
||||
id=str(self.index),
|
||||
tool_calls=tool_calls.copy(),
|
||||
)
|
||||
self.index += 1
|
||||
return ChatResult(generations=[ChatGeneration(message=message)])
|
||||
|
||||
@property
|
||||
def _llm_type(self) -> str:
|
||||
return "fake-tool-call-model"
|
||||
|
||||
def bind_tools(
|
||||
self,
|
||||
tools: Sequence[Union[dict[str, Any], type[BaseModel], Callable, BaseTool]],
|
||||
**kwargs: Any,
|
||||
) -> Runnable[LanguageModelInput, BaseMessage]:
|
||||
if len(tools) == 0:
|
||||
msg = "Must provide at least one tool"
|
||||
raise ValueError(msg)
|
||||
|
||||
tool_dicts = []
|
||||
for tool in tools:
|
||||
if isinstance(tool, dict):
|
||||
tool_dicts.append(tool)
|
||||
continue
|
||||
if not isinstance(tool, BaseTool):
|
||||
msg = "Only BaseTool and dict is supported by FakeToolCallingModel.bind_tools"
|
||||
raise TypeError(msg)
|
||||
|
||||
# NOTE: this is a simplified tool spec for testing purposes only
|
||||
if self.tool_style == "openai":
|
||||
tool_dicts.append(
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": tool.name,
|
||||
},
|
||||
}
|
||||
)
|
||||
elif self.tool_style == "anthropic":
|
||||
tool_dicts.append(
|
||||
{
|
||||
"name": tool.name,
|
||||
}
|
||||
)
|
||||
|
||||
return self.bind(tools=tool_dicts)
|
||||
@@ -0,0 +1,87 @@
|
||||
[
|
||||
{
|
||||
"name": "updated structured response",
|
||||
"responseFormat": [
|
||||
{
|
||||
"title": "role_schema_structured_output",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": { "type": "string" },
|
||||
"role": { "type": "string" }
|
||||
},
|
||||
"required": ["name", "role"]
|
||||
},
|
||||
{
|
||||
"title": "department_schema_structured_output",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": { "type": "string" },
|
||||
"department": { "type": "string" }
|
||||
},
|
||||
"required": ["name", "department"]
|
||||
}
|
||||
],
|
||||
"assertionsByInvocation": [
|
||||
{
|
||||
"prompt": "What is the role of Sabine?",
|
||||
"toolsWithExpectedCalls": {
|
||||
"getEmployeeRole": 1,
|
||||
"getEmployeeDepartment": 0
|
||||
},
|
||||
"expectedLastMessage": "Returning structured response: {'name': 'Sabine', 'role': 'Developer'}",
|
||||
"expectedStructuredResponse": { "name": "Sabine", "role": "Developer" },
|
||||
"llmRequestCount": 2
|
||||
},
|
||||
{
|
||||
"prompt": "In which department does Henrik work?",
|
||||
"toolsWithExpectedCalls": {
|
||||
"getEmployeeRole": 1,
|
||||
"getEmployeeDepartment": 1
|
||||
},
|
||||
"expectedLastMessage": "Returning structured response: {'name': 'Henrik', 'department': 'IT'}",
|
||||
"expectedStructuredResponse": { "name": "Henrik", "department": "IT" },
|
||||
"llmRequestCount": 4
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "asking for information that does not fit into the response format",
|
||||
"responseFormat": [
|
||||
{
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": { "type": "string" },
|
||||
"role": { "type": "string" }
|
||||
},
|
||||
"required": ["name", "role"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": { "type": "string" },
|
||||
"department": { "type": "string" }
|
||||
},
|
||||
"required": ["name", "department"]
|
||||
}
|
||||
}
|
||||
],
|
||||
"assertionsByInvocation": [
|
||||
{
|
||||
"prompt": "How much does Saskia earn?",
|
||||
"toolsWithExpectedCalls": {
|
||||
"getEmployeeRole": 1,
|
||||
"getEmployeeDepartment": 0
|
||||
},
|
||||
"expectedLastMessage": "Returning structured response: {'name': 'Saskia', 'role': 'Software Engineer'}",
|
||||
"expectedStructuredResponse": {
|
||||
"name": "Saskia",
|
||||
"role": "Software Engineer"
|
||||
},
|
||||
"llmRequestCount": 2
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
@@ -0,0 +1,48 @@
|
||||
[
|
||||
{
|
||||
"name": "Scenario: NO return_direct, NO response_format",
|
||||
"returnDirect": false,
|
||||
"responseFormat": null,
|
||||
"expectedToolCalls": 10,
|
||||
"expectedLastMessage": "Attempts: 10",
|
||||
"expectedStructuredResponse": null
|
||||
},
|
||||
{
|
||||
"name": "Scenario: NO return_direct, YES response_format",
|
||||
"returnDirect": false,
|
||||
"responseFormat": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"attempts": { "type": "number" },
|
||||
"succeeded": { "type": "boolean" }
|
||||
},
|
||||
"required": ["attempts", "succeeded"]
|
||||
},
|
||||
"expectedToolCalls": 10,
|
||||
"expectedLastMessage": "Returning structured response: {'attempts': 10, 'succeeded': True}",
|
||||
"expectedStructuredResponse": { "attempts": 10, "succeeded": true }
|
||||
},
|
||||
{
|
||||
"name": "Scenario: YES return_direct, NO response_format",
|
||||
"returnDirect": true,
|
||||
"responseFormat": null,
|
||||
"expectedToolCalls": 1,
|
||||
"expectedLastMessage": "{\"status\": \"pending\", \"attempts\": 1}",
|
||||
"expectedStructuredResponse": null
|
||||
},
|
||||
{
|
||||
"name": "Scenario: YES return_direct, YES response_format",
|
||||
"returnDirect": true,
|
||||
"responseFormat": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"attempts": { "type": "number" },
|
||||
"succeeded": { "type": "boolean" }
|
||||
},
|
||||
"required": ["attempts", "succeeded"]
|
||||
},
|
||||
"expectedToolCalls": 1,
|
||||
"expectedLastMessage": "{\"status\": \"pending\", \"attempts\": 1}",
|
||||
"expectedStructuredResponse": null
|
||||
}
|
||||
]
|
||||
1648
libs/langchain_v1/tests/unit_tests/agents/test_react_agent.py
Normal file
1648
libs/langchain_v1/tests/unit_tests/agents/test_react_agent.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,57 @@
|
||||
from typing import Callable, Union
|
||||
|
||||
import pytest
|
||||
from pydantic import BaseModel
|
||||
from syrupy.assertion import SnapshotAssertion
|
||||
|
||||
from langchain.agents import create_react_agent
|
||||
|
||||
from .model import FakeToolCallingModel
|
||||
|
||||
model = FakeToolCallingModel()
|
||||
|
||||
|
||||
def tool() -> None:
|
||||
"""Testing tool."""
|
||||
|
||||
|
||||
def pre_model_hook() -> None:
|
||||
"""Pre-model hook."""
|
||||
|
||||
|
||||
def post_model_hook() -> None:
|
||||
"""Post-model hook."""
|
||||
|
||||
|
||||
class ResponseFormat(BaseModel):
|
||||
"""Response format for the agent."""
|
||||
|
||||
result: str
|
||||
|
||||
|
||||
@pytest.mark.parametrize("tools", [[], [tool]])
|
||||
@pytest.mark.parametrize("pre_model_hook", [None, pre_model_hook])
|
||||
@pytest.mark.parametrize("post_model_hook", [None, post_model_hook])
|
||||
def test_react_agent_graph_structure(
|
||||
snapshot: SnapshotAssertion,
|
||||
tools: list[Callable],
|
||||
pre_model_hook: Union[Callable, None],
|
||||
post_model_hook: Union[Callable, None],
|
||||
) -> None:
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
tools=tools,
|
||||
pre_model_hook=pre_model_hook,
|
||||
post_model_hook=post_model_hook,
|
||||
)
|
||||
try:
|
||||
assert agent.get_graph().draw_mermaid(with_styles=False) == snapshot
|
||||
except Exception as e:
|
||||
msg = (
|
||||
"The graph structure has changed. Please update the snapshot."
|
||||
"Configuration used:\n"
|
||||
f"tools: {tools}, "
|
||||
f"pre_model_hook: {pre_model_hook}, "
|
||||
f"post_model_hook: {post_model_hook}, "
|
||||
)
|
||||
raise ValueError(msg) from e
|
||||
@@ -0,0 +1,771 @@
|
||||
"""Test suite for create_react_agent with structured output response_format permutations."""
|
||||
|
||||
import pytest
|
||||
|
||||
# Skip this test since langgraph.prebuilt.responses is not available
|
||||
pytest.skip("langgraph.prebuilt.responses not available", allow_module_level=True)
|
||||
|
||||
# from dataclasses import dataclass
|
||||
# from typing import Union
|
||||
|
||||
# from langchain_core.messages import HumanMessage
|
||||
# from langchain.agents import create_react_agent
|
||||
# from langchain.agents.responses import (
|
||||
# MultipleStructuredOutputsError,
|
||||
# NativeOutput,
|
||||
# StructuredOutputParsingError,
|
||||
# ToolOutput,
|
||||
# )
|
||||
# from pydantic import BaseModel, Field
|
||||
# from typing_extensions import TypedDict
|
||||
|
||||
# from tests.model import FakeToolCallingModel
|
||||
|
||||
try:
|
||||
from langchain_openai import ChatOpenAI
|
||||
except ImportError:
|
||||
skip_openai_integration_tests = True
|
||||
else:
|
||||
skip_openai_integration_tests = False
|
||||
|
||||
|
||||
# Test data models
|
||||
class WeatherBaseModel(BaseModel):
|
||||
"""Weather response."""
|
||||
|
||||
temperature: float = Field(description="The temperature in fahrenheit")
|
||||
condition: str = Field(description="Weather condition")
|
||||
|
||||
|
||||
@dataclass
|
||||
class WeatherDataclass:
|
||||
"""Weather response."""
|
||||
|
||||
temperature: float
|
||||
condition: str
|
||||
|
||||
|
||||
class WeatherTypedDict(TypedDict):
|
||||
"""Weather response."""
|
||||
|
||||
temperature: float
|
||||
condition: str
|
||||
|
||||
|
||||
weather_json_schema = {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"temperature": {"type": "number", "description": "Temperature in fahrenheit"},
|
||||
"condition": {"type": "string", "description": "Weather condition"},
|
||||
},
|
||||
"title": "weather_schema",
|
||||
"required": ["temperature", "condition"],
|
||||
}
|
||||
|
||||
|
||||
class LocationResponse(BaseModel):
|
||||
city: str = Field(description="The city name")
|
||||
country: str = Field(description="The country name")
|
||||
|
||||
|
||||
class LocationTypedDict(TypedDict):
|
||||
city: str
|
||||
country: str
|
||||
|
||||
|
||||
location_json_schema = {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"city": {"type": "string", "description": "The city name"},
|
||||
"country": {"type": "string", "description": "The country name"},
|
||||
},
|
||||
"title": "location_schema",
|
||||
"required": ["city", "country"],
|
||||
}
|
||||
|
||||
|
||||
def get_weather() -> str:
|
||||
"""Get the weather."""
|
||||
|
||||
return "The weather is sunny and 75°F."
|
||||
|
||||
|
||||
def get_location() -> str:
|
||||
"""Get the current location."""
|
||||
|
||||
return "You are in New York, USA."
|
||||
|
||||
|
||||
# Standardized test data
|
||||
WEATHER_DATA = {"temperature": 75.0, "condition": "sunny"}
|
||||
LOCATION_DATA = {"city": "New York", "country": "USA"}
|
||||
|
||||
# Standardized expected responses
|
||||
EXPECTED_WEATHER_PYDANTIC = WeatherBaseModel(**WEATHER_DATA)
|
||||
EXPECTED_WEATHER_DATACLASS = WeatherDataclass(**WEATHER_DATA)
|
||||
EXPECTED_WEATHER_DICT: WeatherTypedDict = {"temperature": 75.0, "condition": "sunny"}
|
||||
EXPECTED_LOCATION = LocationResponse(**LOCATION_DATA)
|
||||
EXPECTED_LOCATION_DICT: LocationTypedDict = {"city": "New York", "country": "USA"}
|
||||
|
||||
|
||||
class TestResponseFormatAsModel:
|
||||
def test_pydantic_model(self) -> None:
|
||||
"""Test response_format as Pydantic model."""
|
||||
tool_calls = [
|
||||
[{"args": {}, "id": "1", "name": "get_weather"}],
|
||||
[
|
||||
{
|
||||
"name": "WeatherBaseModel",
|
||||
"id": "2",
|
||||
"args": WEATHER_DATA,
|
||||
}
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel(tool_calls=tool_calls)
|
||||
|
||||
agent = create_react_agent(model, [get_weather], response_format=WeatherBaseModel)
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
|
||||
assert len(response["messages"]) == 5
|
||||
|
||||
def test_dataclass(self) -> None:
|
||||
"""Test response_format as dataclass."""
|
||||
tool_calls = [
|
||||
[{"args": {}, "id": "1", "name": "get_weather"}],
|
||||
[
|
||||
{
|
||||
"name": "WeatherDataclass",
|
||||
"id": "2",
|
||||
"args": WEATHER_DATA,
|
||||
}
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel(tool_calls=tool_calls)
|
||||
|
||||
agent = create_react_agent(model, [get_weather], response_format=WeatherDataclass)
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_DATACLASS
|
||||
assert len(response["messages"]) == 5
|
||||
|
||||
def test_typed_dict(self) -> None:
|
||||
"""Test response_format as TypedDict."""
|
||||
tool_calls = [
|
||||
[{"args": {}, "id": "1", "name": "get_weather"}],
|
||||
[
|
||||
{
|
||||
"name": "WeatherTypedDict",
|
||||
"id": "2",
|
||||
"args": WEATHER_DATA,
|
||||
}
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel(tool_calls=tool_calls)
|
||||
|
||||
agent = create_react_agent(model, [get_weather], response_format=WeatherTypedDict)
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_DICT
|
||||
assert len(response["messages"]) == 5
|
||||
|
||||
def test_json_schema(self) -> None:
|
||||
"""Test response_format as JSON schema."""
|
||||
tool_calls = [
|
||||
[{"args": {}, "id": "1", "name": "get_weather"}],
|
||||
[
|
||||
{
|
||||
"name": "weather_schema",
|
||||
"id": "2",
|
||||
"args": WEATHER_DATA,
|
||||
}
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel(tool_calls=tool_calls)
|
||||
|
||||
agent = create_react_agent(model, [get_weather], response_format=weather_json_schema)
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_DICT
|
||||
assert len(response["messages"]) == 5
|
||||
|
||||
|
||||
class TestResponseFormatAsToolOutput:
|
||||
def test_pydantic_model(self) -> None:
|
||||
"""Test response_format as ToolOutput with Pydantic model."""
|
||||
tool_calls = [
|
||||
[{"args": {}, "id": "1", "name": "get_weather"}],
|
||||
[
|
||||
{
|
||||
"name": "WeatherBaseModel",
|
||||
"id": "2",
|
||||
"args": WEATHER_DATA,
|
||||
}
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel(tool_calls=tool_calls)
|
||||
|
||||
agent = create_react_agent(
|
||||
model, [get_weather], response_format=ToolOutput(WeatherBaseModel)
|
||||
)
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
|
||||
assert len(response["messages"]) == 5
|
||||
|
||||
def test_dataclass(self) -> None:
|
||||
"""Test response_format as ToolOutput with dataclass."""
|
||||
tool_calls = [
|
||||
[{"args": {}, "id": "1", "name": "get_weather"}],
|
||||
[
|
||||
{
|
||||
"name": "WeatherDataclass",
|
||||
"id": "2",
|
||||
"args": WEATHER_DATA,
|
||||
}
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel(tool_calls=tool_calls)
|
||||
|
||||
agent = create_react_agent(
|
||||
model, [get_weather], response_format=ToolOutput(WeatherDataclass)
|
||||
)
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_DATACLASS
|
||||
assert len(response["messages"]) == 5
|
||||
|
||||
def test_typed_dict(self) -> None:
|
||||
"""Test response_format as ToolOutput with TypedDict."""
|
||||
tool_calls = [
|
||||
[{"args": {}, "id": "1", "name": "get_weather"}],
|
||||
[
|
||||
{
|
||||
"name": "WeatherTypedDict",
|
||||
"id": "2",
|
||||
"args": WEATHER_DATA,
|
||||
}
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel(tool_calls=tool_calls)
|
||||
|
||||
agent = create_react_agent(
|
||||
model, [get_weather], response_format=ToolOutput(WeatherTypedDict)
|
||||
)
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_DICT
|
||||
assert len(response["messages"]) == 5
|
||||
|
||||
def test_json_schema(self) -> None:
|
||||
"""Test response_format as ToolOutput with JSON schema."""
|
||||
tool_calls = [
|
||||
[{"args": {}, "id": "1", "name": "get_weather"}],
|
||||
[
|
||||
{
|
||||
"name": "weather_schema",
|
||||
"id": "2",
|
||||
"args": WEATHER_DATA,
|
||||
}
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel(tool_calls=tool_calls)
|
||||
|
||||
agent = create_react_agent(
|
||||
model, [get_weather], response_format=ToolOutput(weather_json_schema)
|
||||
)
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_DICT
|
||||
assert len(response["messages"]) == 5
|
||||
|
||||
def test_union_of_json_schemas(self) -> None:
|
||||
"""Test response_format as ToolOutput with union of JSON schemas."""
|
||||
tool_calls = [
|
||||
[{"args": {}, "id": "1", "name": "get_weather"}],
|
||||
[
|
||||
{
|
||||
"name": "weather_schema",
|
||||
"id": "2",
|
||||
"args": WEATHER_DATA,
|
||||
}
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel(tool_calls=tool_calls)
|
||||
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
[get_weather, get_location],
|
||||
response_format=ToolOutput({"oneOf": [weather_json_schema, location_json_schema]}),
|
||||
)
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_DICT
|
||||
assert len(response["messages"]) == 5
|
||||
|
||||
# Test with LocationResponse
|
||||
tool_calls_location = [
|
||||
[{"args": {}, "id": "1", "name": "get_location"}],
|
||||
[
|
||||
{
|
||||
"name": "location_schema",
|
||||
"id": "2",
|
||||
"args": LOCATION_DATA,
|
||||
}
|
||||
],
|
||||
]
|
||||
|
||||
model_location = FakeToolCallingModel(tool_calls=tool_calls_location)
|
||||
|
||||
agent_location = create_react_agent(
|
||||
model_location,
|
||||
[get_weather, get_location],
|
||||
response_format=ToolOutput({"oneOf": [weather_json_schema, location_json_schema]}),
|
||||
)
|
||||
response_location = agent_location.invoke({"messages": [HumanMessage("Where am I?")]})
|
||||
|
||||
assert response_location["structured_response"] == EXPECTED_LOCATION_DICT
|
||||
assert len(response_location["messages"]) == 5
|
||||
|
||||
def test_union_of_types(self) -> None:
|
||||
"""Test response_format as ToolOutput with Union of various types."""
|
||||
# Test with WeatherBaseModel
|
||||
tool_calls = [
|
||||
[{"args": {}, "id": "1", "name": "get_weather"}],
|
||||
[
|
||||
{
|
||||
"name": "WeatherBaseModel",
|
||||
"id": "2",
|
||||
"args": WEATHER_DATA,
|
||||
}
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel[Union[WeatherBaseModel, LocationResponse]](
|
||||
tool_calls=tool_calls
|
||||
)
|
||||
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
[get_weather, get_location],
|
||||
response_format=ToolOutput(Union[WeatherBaseModel, LocationResponse]),
|
||||
)
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
|
||||
assert len(response["messages"]) == 5
|
||||
|
||||
# Test with LocationResponse
|
||||
tool_calls_location = [
|
||||
[{"args": {}, "id": "1", "name": "get_location"}],
|
||||
[
|
||||
{
|
||||
"name": "LocationResponse",
|
||||
"id": "2",
|
||||
"args": LOCATION_DATA,
|
||||
}
|
||||
],
|
||||
]
|
||||
|
||||
model_location = FakeToolCallingModel(tool_calls=tool_calls_location)
|
||||
|
||||
agent_location = create_react_agent(
|
||||
model_location,
|
||||
[get_weather, get_location],
|
||||
response_format=ToolOutput(Union[WeatherBaseModel, LocationResponse]),
|
||||
)
|
||||
response_location = agent_location.invoke({"messages": [HumanMessage("Where am I?")]})
|
||||
|
||||
assert response_location["structured_response"] == EXPECTED_LOCATION
|
||||
assert len(response_location["messages"]) == 5
|
||||
|
||||
def test_multiple_structured_outputs_error_without_retry(self) -> None:
|
||||
"""Test that MultipleStructuredOutputsError is raised when model returns multiple structured tool calls without retry."""
|
||||
tool_calls = [
|
||||
[
|
||||
{
|
||||
"name": "WeatherBaseModel",
|
||||
"id": "1",
|
||||
"args": WEATHER_DATA,
|
||||
},
|
||||
{
|
||||
"name": "LocationResponse",
|
||||
"id": "2",
|
||||
"args": LOCATION_DATA,
|
||||
},
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel(tool_calls=tool_calls)
|
||||
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
[],
|
||||
response_format=ToolOutput(
|
||||
Union[WeatherBaseModel, LocationResponse],
|
||||
handle_errors=False,
|
||||
),
|
||||
)
|
||||
|
||||
with pytest.raises(
|
||||
MultipleStructuredOutputsError,
|
||||
match=".*WeatherBaseModel.*LocationResponse.*",
|
||||
):
|
||||
agent.invoke({"messages": [HumanMessage("Give me weather and location")]})
|
||||
|
||||
def test_multiple_structured_outputs_with_retry(self) -> None:
|
||||
"""Test that retry handles multiple structured output tool calls."""
|
||||
tool_calls = [
|
||||
[
|
||||
{
|
||||
"name": "WeatherBaseModel",
|
||||
"id": "1",
|
||||
"args": WEATHER_DATA,
|
||||
},
|
||||
{
|
||||
"name": "LocationResponse",
|
||||
"id": "2",
|
||||
"args": LOCATION_DATA,
|
||||
},
|
||||
],
|
||||
[
|
||||
{
|
||||
"name": "WeatherBaseModel",
|
||||
"id": "3",
|
||||
"args": WEATHER_DATA,
|
||||
},
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel(tool_calls=tool_calls)
|
||||
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
[],
|
||||
response_format=ToolOutput(
|
||||
Union[WeatherBaseModel, LocationResponse],
|
||||
handle_errors=True,
|
||||
),
|
||||
)
|
||||
|
||||
response = agent.invoke({"messages": [HumanMessage("Give me weather")]})
|
||||
|
||||
# HumanMessage, AIMessage, ToolMessage, ToolMessage, AI, ToolMessage
|
||||
assert len(response["messages"]) == 6
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
|
||||
|
||||
def test_structured_output_parsing_error_without_retry(self) -> None:
|
||||
"""Test that StructuredOutputParsingError is raised when tool args fail to parse without retry."""
|
||||
tool_calls = [
|
||||
[
|
||||
{
|
||||
"name": "WeatherBaseModel",
|
||||
"id": "1",
|
||||
"args": {"invalid": "data"},
|
||||
},
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel(tool_calls=tool_calls)
|
||||
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
[],
|
||||
response_format=ToolOutput(
|
||||
WeatherBaseModel,
|
||||
handle_errors=False,
|
||||
),
|
||||
)
|
||||
|
||||
with pytest.raises(
|
||||
StructuredOutputParsingError,
|
||||
match=".*WeatherBaseModel.*",
|
||||
):
|
||||
agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
def test_structured_output_parsing_error_with_retry(self) -> None:
|
||||
"""Test that retry handles parsing errors for structured output."""
|
||||
tool_calls = [
|
||||
[
|
||||
{
|
||||
"name": "WeatherBaseModel",
|
||||
"id": "1",
|
||||
"args": {"invalid": "data"},
|
||||
},
|
||||
],
|
||||
[
|
||||
{
|
||||
"name": "WeatherBaseModel",
|
||||
"id": "2",
|
||||
"args": WEATHER_DATA,
|
||||
},
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel(tool_calls=tool_calls)
|
||||
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
[],
|
||||
response_format=ToolOutput(
|
||||
WeatherBaseModel,
|
||||
handle_errors=(StructuredOutputParsingError,),
|
||||
),
|
||||
)
|
||||
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
# HumanMessage, AIMessage, ToolMessage, AIMessage, ToolMessage
|
||||
assert len(response["messages"]) == 5
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
|
||||
|
||||
def test_retry_with_custom_function(self) -> None:
|
||||
"""Test retry with custom message generation."""
|
||||
tool_calls = [
|
||||
[
|
||||
{
|
||||
"name": "WeatherBaseModel",
|
||||
"id": "1",
|
||||
"args": WEATHER_DATA,
|
||||
},
|
||||
{
|
||||
"name": "LocationResponse",
|
||||
"id": "2",
|
||||
"args": LOCATION_DATA,
|
||||
},
|
||||
],
|
||||
[
|
||||
{
|
||||
"name": "WeatherBaseModel",
|
||||
"id": "3",
|
||||
"args": WEATHER_DATA,
|
||||
},
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel(tool_calls=tool_calls)
|
||||
|
||||
def custom_message(exception: Exception) -> str:
|
||||
if isinstance(exception, MultipleStructuredOutputsError):
|
||||
return "Custom error: Multiple outputs not allowed"
|
||||
return "Custom error"
|
||||
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
[],
|
||||
response_format=ToolOutput(
|
||||
Union[WeatherBaseModel, LocationResponse],
|
||||
handle_errors=custom_message,
|
||||
),
|
||||
)
|
||||
|
||||
response = agent.invoke({"messages": [HumanMessage("Give me weather")]})
|
||||
|
||||
# HumanMessage, AIMessage, ToolMessage, ToolMessage, AI, ToolMessage
|
||||
assert len(response["messages"]) == 6
|
||||
assert response["messages"][2].content == "Custom error: Multiple outputs not allowed"
|
||||
assert response["messages"][3].content == "Custom error: Multiple outputs not allowed"
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
|
||||
|
||||
def test_retry_with_custom_string_message(self) -> None:
|
||||
"""Test retry with custom static string message."""
|
||||
tool_calls = [
|
||||
[
|
||||
{
|
||||
"name": "WeatherBaseModel",
|
||||
"id": "1",
|
||||
"args": {"invalid": "data"},
|
||||
},
|
||||
],
|
||||
[
|
||||
{
|
||||
"name": "WeatherBaseModel",
|
||||
"id": "2",
|
||||
"args": WEATHER_DATA,
|
||||
},
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel(tool_calls=tool_calls)
|
||||
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
[],
|
||||
response_format=ToolOutput(
|
||||
WeatherBaseModel,
|
||||
handle_errors="Please provide valid weather data with temperature and condition.",
|
||||
),
|
||||
)
|
||||
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert len(response["messages"]) == 5
|
||||
assert (
|
||||
response["messages"][2].content
|
||||
== "Please provide valid weather data with temperature and condition."
|
||||
)
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
|
||||
|
||||
|
||||
class TestResponseFormatAsNativeOutput:
|
||||
def test_pydantic_model(self) -> None:
|
||||
"""Test response_format as NativeOutput with Pydantic model."""
|
||||
tool_calls = [
|
||||
[{"args": {}, "id": "1", "name": "get_weather"}],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel[WeatherBaseModel](
|
||||
tool_calls=tool_calls, structured_response=EXPECTED_WEATHER_PYDANTIC
|
||||
)
|
||||
|
||||
agent = create_react_agent(
|
||||
model, [get_weather], response_format=NativeOutput(WeatherBaseModel)
|
||||
)
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
|
||||
assert len(response["messages"]) == 4
|
||||
|
||||
def test_dataclass(self) -> None:
|
||||
"""Test response_format as NativeOutput with dataclass."""
|
||||
tool_calls = [
|
||||
[{"args": {}, "id": "1", "name": "get_weather"}],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel[WeatherDataclass](
|
||||
tool_calls=tool_calls, structured_response=EXPECTED_WEATHER_DATACLASS
|
||||
)
|
||||
|
||||
agent = create_react_agent(
|
||||
model, [get_weather], response_format=NativeOutput(WeatherDataclass)
|
||||
)
|
||||
response = agent.invoke(
|
||||
{"messages": [HumanMessage("What's the weather?")]},
|
||||
)
|
||||
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_DATACLASS
|
||||
assert len(response["messages"]) == 4
|
||||
|
||||
def test_typed_dict(self) -> None:
|
||||
"""Test response_format as NativeOutput with TypedDict."""
|
||||
tool_calls = [
|
||||
[{"args": {}, "id": "1", "name": "get_weather"}],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel[WeatherTypedDict](
|
||||
tool_calls=tool_calls, structured_response=EXPECTED_WEATHER_DICT
|
||||
)
|
||||
|
||||
agent = create_react_agent(
|
||||
model, [get_weather], response_format=NativeOutput(WeatherTypedDict)
|
||||
)
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_DICT
|
||||
assert len(response["messages"]) == 4
|
||||
|
||||
def test_json_schema(self) -> None:
|
||||
"""Test response_format as NativeOutput with JSON schema."""
|
||||
tool_calls = [
|
||||
[{"args": {}, "id": "1", "name": "get_weather"}],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel[dict](
|
||||
tool_calls=tool_calls, structured_response=EXPECTED_WEATHER_DICT
|
||||
)
|
||||
|
||||
agent = create_react_agent(
|
||||
model, [get_weather], response_format=NativeOutput(weather_json_schema)
|
||||
)
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_DICT
|
||||
assert len(response["messages"]) == 4
|
||||
|
||||
|
||||
def test_union_of_types() -> None:
|
||||
"""Test response_format as NativeOutput with Union (if supported)."""
|
||||
tool_calls = [
|
||||
[{"args": {}, "id": "1", "name": "get_weather"}],
|
||||
[
|
||||
{
|
||||
"name": "WeatherBaseModel",
|
||||
"id": "2",
|
||||
"args": WEATHER_DATA,
|
||||
}
|
||||
],
|
||||
]
|
||||
|
||||
model = FakeToolCallingModel[Union[WeatherBaseModel, LocationResponse]](
|
||||
tool_calls=tool_calls, structured_response=EXPECTED_WEATHER_PYDANTIC
|
||||
)
|
||||
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
[get_weather, get_location],
|
||||
response_format=ToolOutput(Union[WeatherBaseModel, LocationResponse]),
|
||||
)
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
|
||||
assert len(response["messages"]) == 5
|
||||
|
||||
|
||||
@pytest.mark.skipif(skip_openai_integration_tests, reason="OpenAI integration tests are disabled.")
|
||||
def test_inference_to_native_output() -> None:
|
||||
"""Test that native output is inferred when a model supports it."""
|
||||
model = ChatOpenAI(model="gpt-5")
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
prompt="You are a helpful weather assistant. Please call the get_weather tool, then use the WeatherReport tool to generate the final response.",
|
||||
tools=[get_weather],
|
||||
response_format=WeatherBaseModel,
|
||||
)
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert isinstance(response["structured_response"], WeatherBaseModel)
|
||||
assert response["structured_response"].temperature == 75.0
|
||||
assert response["structured_response"].condition.lower() == "sunny"
|
||||
assert len(response["messages"]) == 4
|
||||
|
||||
assert [m.type for m in response["messages"]] == [
|
||||
"human", # "What's the weather?"
|
||||
"ai", # "What's the weather?"
|
||||
"tool", # "The weather is sunny and 75°F."
|
||||
"ai", # structured response
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.skipif(skip_openai_integration_tests, reason="OpenAI integration tests are disabled.")
|
||||
def test_inference_to_tool_output() -> None:
|
||||
"""Test that tool output is inferred when a model supports it."""
|
||||
model = ChatOpenAI(model="gpt-4")
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
prompt="You are a helpful weather assistant. Please call the get_weather tool, then use the WeatherReport tool to generate the final response.",
|
||||
tools=[get_weather],
|
||||
response_format=ToolOutput(WeatherBaseModel),
|
||||
)
|
||||
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
|
||||
|
||||
assert isinstance(response["structured_response"], WeatherBaseModel)
|
||||
assert response["structured_response"].temperature == 75.0
|
||||
assert response["structured_response"].condition.lower() == "sunny"
|
||||
assert len(response["messages"]) == 5
|
||||
|
||||
assert [m.type for m in response["messages"]] == [
|
||||
"human", # "What's the weather?"
|
||||
"ai", # "What's the weather?"
|
||||
"tool", # "The weather is sunny and 75°F."
|
||||
"ai", # structured response
|
||||
"tool", # artificial tool message
|
||||
]
|
||||
140
libs/langchain_v1/tests/unit_tests/agents/test_responses.py
Normal file
140
libs/langchain_v1/tests/unit_tests/agents/test_responses.py
Normal file
@@ -0,0 +1,140 @@
|
||||
"""Unit tests for langgraph.prebuilt.responses module."""
|
||||
|
||||
import pytest
|
||||
|
||||
# Skip this test since langgraph.prebuilt.responses is not available
|
||||
pytest.skip("langgraph.prebuilt.responses not available", allow_module_level=True)
|
||||
|
||||
|
||||
class _TestModel(BaseModel):
|
||||
"""A test model for structured output."""
|
||||
|
||||
name: str
|
||||
age: int
|
||||
email: str = "default@example.com"
|
||||
|
||||
|
||||
class CustomModel(BaseModel):
|
||||
"""Custom model with a custom docstring."""
|
||||
|
||||
value: float
|
||||
description: str
|
||||
|
||||
|
||||
class EmptyDocModel(BaseModel):
|
||||
# No custom docstring, should have no description in tool
|
||||
data: str
|
||||
|
||||
|
||||
class TestUsingToolStrategy:
|
||||
"""Test UsingToolStrategy dataclass."""
|
||||
|
||||
def test_basic_creation(self) -> None:
|
||||
"""Test basic UsingToolStrategy creation."""
|
||||
strategy = ToolOutput(schema=_TestModel)
|
||||
assert strategy.schema == _TestModel
|
||||
assert strategy.tool_message_content is None
|
||||
assert len(strategy.schema_specs) == 1
|
||||
|
||||
def test_multiple_schemas(self) -> None:
|
||||
"""Test UsingToolStrategy with multiple schemas."""
|
||||
strategy = ToolOutput(schema=Union[_TestModel, CustomModel])
|
||||
assert len(strategy.schema_specs) == 2
|
||||
assert strategy.schema_specs[0].schema == _TestModel
|
||||
assert strategy.schema_specs[1].schema == CustomModel
|
||||
|
||||
def test_schema_with_tool_message_content(self) -> None:
|
||||
"""Test UsingToolStrategy with tool message content."""
|
||||
strategy = ToolOutput(schema=_TestModel, tool_message_content="custom message")
|
||||
assert strategy.schema == _TestModel
|
||||
assert strategy.tool_message_content == "custom message"
|
||||
assert len(strategy.schema_specs) == 1
|
||||
|
||||
|
||||
class TestOutputToolBinding:
|
||||
"""Test OutputToolBinding dataclass and its methods."""
|
||||
|
||||
def test_from_schema_spec_basic(self) -> None:
|
||||
"""Test basic OutputToolBinding creation from SchemaSpec."""
|
||||
schema_spec = _SchemaSpec(schema=_TestModel)
|
||||
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
|
||||
|
||||
assert tool_binding.schema == _TestModel
|
||||
assert tool_binding.schema_kind == "pydantic"
|
||||
assert tool_binding.tool is not None
|
||||
assert tool_binding.tool.name == "_TestModel"
|
||||
|
||||
def test_from_schema_spec_with_custom_name(self) -> None:
|
||||
"""Test OutputToolBinding creation with custom name."""
|
||||
schema_spec = _SchemaSpec(schema=_TestModel, name="custom_tool_name")
|
||||
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
|
||||
assert tool_binding.tool.name == "custom_tool_name"
|
||||
|
||||
def test_from_schema_spec_with_custom_description(self) -> None:
|
||||
"""Test OutputToolBinding creation with custom description."""
|
||||
schema_spec = _SchemaSpec(schema=_TestModel, description="Custom tool description")
|
||||
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
|
||||
|
||||
assert tool_binding.tool.description == "Custom tool description"
|
||||
|
||||
def test_from_schema_spec_with_model_docstring(self) -> None:
|
||||
"""Test OutputToolBinding creation using model docstring as description."""
|
||||
schema_spec = _SchemaSpec(schema=CustomModel)
|
||||
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
|
||||
|
||||
assert tool_binding.tool.description == "Custom model with a custom docstring."
|
||||
|
||||
@pytest.mark.skip(reason="Need to fix bug in langchain-core for inheritance of doc-strings.")
|
||||
def test_from_schema_spec_empty_docstring(self) -> None:
|
||||
"""Test OutputToolBinding creation with model that has default docstring."""
|
||||
|
||||
# Create a model with the same docstring as BaseModel
|
||||
class DefaultDocModel(BaseModel):
|
||||
# This should have the same docstring as BaseModel
|
||||
pass
|
||||
|
||||
schema_spec = _SchemaSpec(schema=DefaultDocModel)
|
||||
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
|
||||
|
||||
# Should use empty description when model has default BaseModel docstring
|
||||
assert tool_binding.tool.description == ""
|
||||
|
||||
def test_parse_payload_pydantic_success(self) -> None:
|
||||
"""Test successful parsing for Pydantic model."""
|
||||
schema_spec = _SchemaSpec(schema=_TestModel)
|
||||
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
|
||||
|
||||
tool_args = {"name": "John", "age": 30}
|
||||
result = tool_binding.parse(tool_args)
|
||||
|
||||
assert isinstance(result, _TestModel)
|
||||
assert result.name == "John"
|
||||
assert result.age == 30
|
||||
assert result.email == "default@example.com" # default value
|
||||
|
||||
def test_parse_payload_pydantic_validation_error(self) -> None:
|
||||
"""Test parsing failure for invalid Pydantic data."""
|
||||
schema_spec = _SchemaSpec(schema=_TestModel)
|
||||
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
|
||||
|
||||
# Missing required field 'name'
|
||||
tool_args = {"age": 30}
|
||||
|
||||
with pytest.raises(ValueError, match="Failed to parse data to _TestModel"):
|
||||
tool_binding.parse(tool_args)
|
||||
|
||||
|
||||
class TestEdgeCases:
|
||||
"""Test edge cases and error conditions."""
|
||||
|
||||
def test_empty_schemas_list(self) -> None:
|
||||
"""Test UsingToolStrategy with empty schemas list."""
|
||||
strategy = ToolOutput(EmptyDocModel)
|
||||
assert len(strategy.schema_specs) == 1
|
||||
|
||||
@pytest.mark.skip(reason="Need to fix bug in langchain-core for inheritance of doc-strings.")
|
||||
def test_base_model_doc_constant(self) -> None:
|
||||
"""Test that BASE_MODEL_DOC constant is set correctly."""
|
||||
binding = OutputToolBinding.from_schema_spec(_SchemaSpec(EmptyDocModel))
|
||||
assert binding.tool.name == "EmptyDocModel"
|
||||
assert binding.tool.description[:5] == "" # Should be empty for default docstring
|
||||
147
libs/langchain_v1/tests/unit_tests/agents/test_responses_spec.py
Normal file
147
libs/langchain_v1/tests/unit_tests/agents/test_responses_spec.py
Normal file
@@ -0,0 +1,147 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import pytest
|
||||
|
||||
# Skip this test since langgraph.prebuilt.responses is not available
|
||||
pytest.skip("langgraph.prebuilt.responses not available", allow_module_level=True)
|
||||
|
||||
try:
|
||||
from langchain_openai import ChatOpenAI
|
||||
except ImportError:
|
||||
skip_openai_integration_tests = True
|
||||
else:
|
||||
skip_openai_integration_tests = False
|
||||
|
||||
AGENT_PROMPT = "You are an HR assistant."
|
||||
|
||||
|
||||
class ToolCalls(BaseSchema):
|
||||
get_employee_role: int
|
||||
get_employee_department: int
|
||||
|
||||
|
||||
class AssertionByInvocation(BaseSchema):
|
||||
prompt: str
|
||||
tools_with_expected_calls: ToolCalls
|
||||
expected_last_message: str
|
||||
expected_structured_response: Optional[Dict[str, Any]]
|
||||
llm_request_count: int
|
||||
|
||||
|
||||
class TestCase(BaseSchema):
|
||||
name: str
|
||||
response_format: Union[Dict[str, Any], List[Dict[str, Any]]]
|
||||
assertions_by_invocation: List[AssertionByInvocation]
|
||||
|
||||
|
||||
class Employee(BaseModel):
|
||||
name: str
|
||||
role: str
|
||||
department: str
|
||||
|
||||
|
||||
EMPLOYEES: list[Employee] = [
|
||||
Employee(name="Sabine", role="Developer", department="IT"),
|
||||
Employee(name="Henrik", role="Product Manager", department="IT"),
|
||||
Employee(name="Jessica", role="HR", department="People"),
|
||||
]
|
||||
|
||||
TEST_CASES = load_spec("responses", as_model=TestCase)
|
||||
|
||||
|
||||
def _make_tool(fn, *, name: str, description: str):
|
||||
mock = MagicMock(side_effect=lambda *, name: fn(name=name))
|
||||
InputModel = create_model(f"{name}_input", name=(str, ...))
|
||||
|
||||
@tool(name, description=description, args_schema=InputModel)
|
||||
def _wrapped(name: str):
|
||||
return mock(name=name)
|
||||
|
||||
return {"tool": _wrapped, "mock": mock}
|
||||
|
||||
|
||||
@pytest.mark.skipif(skip_openai_integration_tests, reason="OpenAI integration tests are disabled.")
|
||||
@pytest.mark.parametrize("case", TEST_CASES, ids=[c.name for c in TEST_CASES])
|
||||
def test_responses_integration_matrix(case: TestCase) -> None:
|
||||
if case.name == "asking for information that does not fit into the response format":
|
||||
pytest.xfail(
|
||||
"currently failing due to undefined behavior when model cannot conform to any of the structured response formats."
|
||||
)
|
||||
|
||||
def get_employee_role(*, name: str) -> Optional[str]:
|
||||
for e in EMPLOYEES:
|
||||
if e.name == name:
|
||||
return e.role
|
||||
return None
|
||||
|
||||
def get_employee_department(*, name: str) -> Optional[str]:
|
||||
for e in EMPLOYEES:
|
||||
if e.name == name:
|
||||
return e.department
|
||||
return None
|
||||
|
||||
role_tool = _make_tool(
|
||||
get_employee_role,
|
||||
name="get_employee_role",
|
||||
description="Get the employee role by name",
|
||||
)
|
||||
dept_tool = _make_tool(
|
||||
get_employee_department,
|
||||
name="get_employee_department",
|
||||
description="Get the employee department by name",
|
||||
)
|
||||
|
||||
response_format_spec = case.response_format
|
||||
if isinstance(response_format_spec, dict):
|
||||
response_format_spec = [response_format_spec]
|
||||
# Unwrap nested schema objects
|
||||
response_format_spec = [item.get("schema", item) for item in response_format_spec]
|
||||
if len(response_format_spec) == 1:
|
||||
tool_output = ToolOutput(response_format_spec[0])
|
||||
else:
|
||||
tool_output = ToolOutput({"oneOf": response_format_spec})
|
||||
|
||||
llm_request_count = 0
|
||||
|
||||
for assertion in case.assertions_by_invocation:
|
||||
|
||||
def on_request(request: httpx.Request) -> None:
|
||||
nonlocal llm_request_count
|
||||
llm_request_count += 1
|
||||
|
||||
http_client = httpx.Client(
|
||||
event_hooks={"request": [on_request]},
|
||||
)
|
||||
|
||||
model = ChatOpenAI(
|
||||
model="gpt-4o",
|
||||
temperature=0,
|
||||
http_client=http_client,
|
||||
)
|
||||
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
tools=[role_tool["tool"], dept_tool["tool"]],
|
||||
prompt=AGENT_PROMPT,
|
||||
response_format=tool_output,
|
||||
)
|
||||
|
||||
result = agent.invoke({"messages": [HumanMessage(assertion.prompt)]})
|
||||
|
||||
# Count tool calls
|
||||
assert role_tool["mock"].call_count == assertion.tools_with_expected_calls.get_employee_role
|
||||
assert (
|
||||
dept_tool["mock"].call_count
|
||||
== assertion.tools_with_expected_calls.get_employee_department
|
||||
)
|
||||
|
||||
# Count LLM calls
|
||||
assert llm_request_count == assertion.llm_request_count
|
||||
|
||||
# Check last message content
|
||||
last_message = result["messages"][-1]
|
||||
assert last_message.content == assertion.expected_last_message
|
||||
|
||||
# Check structured response
|
||||
structured_response_json = result["structured_response"]
|
||||
assert structured_response_json == assertion.expected_structured_response
|
||||
@@ -0,0 +1,107 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import pytest
|
||||
|
||||
# Skip this test since langgraph.prebuilt.responses is not available
|
||||
pytest.skip("langgraph.prebuilt.responses not available", allow_module_level=True)
|
||||
|
||||
try:
|
||||
from langchain_openai import ChatOpenAI
|
||||
except ImportError:
|
||||
skip_openai_integration_tests = True
|
||||
else:
|
||||
skip_openai_integration_tests = False
|
||||
|
||||
AGENT_PROMPT = """
|
||||
You are a strict polling bot.
|
||||
|
||||
- Only use the "poll_job" tool until it returns { status: "succeeded" }.
|
||||
- If status is "pending", call the tool again. Do not produce a final answer.
|
||||
- When it is "succeeded", return exactly: "Attempts: <number>" with no extra text.
|
||||
"""
|
||||
|
||||
|
||||
class TestCase(BaseSchema):
|
||||
name: str
|
||||
return_direct: bool
|
||||
response_format: Optional[Dict[str, Any]]
|
||||
expected_tool_calls: int
|
||||
expected_last_message: str
|
||||
expected_structured_response: Optional[Dict[str, Any]]
|
||||
|
||||
|
||||
TEST_CASES = load_spec("return_direct", as_model=TestCase)
|
||||
|
||||
|
||||
def _make_tool(return_direct: bool):
|
||||
attempts = 0
|
||||
|
||||
def _side_effect():
|
||||
nonlocal attempts
|
||||
attempts += 1
|
||||
return {
|
||||
"status": "succeeded" if attempts >= 10 else "pending",
|
||||
"attempts": attempts,
|
||||
}
|
||||
|
||||
mock = MagicMock(side_effect=_side_effect)
|
||||
|
||||
@tool(
|
||||
"pollJob",
|
||||
description=(
|
||||
"Check the status of a long-running job. "
|
||||
"Returns { status: 'pending' | 'succeeded', attempts: number }."
|
||||
),
|
||||
return_direct=return_direct,
|
||||
)
|
||||
def _wrapped():
|
||||
return mock()
|
||||
|
||||
return {"tool": _wrapped, "mock": mock}
|
||||
|
||||
|
||||
@pytest.mark.skipif(skip_openai_integration_tests, reason="OpenAI integration tests are disabled.")
|
||||
@pytest.mark.parametrize("case", TEST_CASES, ids=[c.name for c in TEST_CASES])
|
||||
def test_return_direct_integration_matrix(case: TestCase) -> None:
|
||||
poll_tool = _make_tool(case.return_direct)
|
||||
|
||||
model = ChatOpenAI(
|
||||
model="gpt-4o",
|
||||
temperature=0,
|
||||
)
|
||||
|
||||
if case.response_format:
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
tools=[poll_tool["tool"]],
|
||||
prompt=AGENT_PROMPT,
|
||||
response_format=ToolOutput(case.response_format),
|
||||
)
|
||||
else:
|
||||
agent = create_react_agent(
|
||||
model,
|
||||
tools=[poll_tool["tool"]],
|
||||
prompt=AGENT_PROMPT,
|
||||
)
|
||||
|
||||
result = agent.invoke(
|
||||
{
|
||||
"messages": [
|
||||
HumanMessage("Poll the job until it's done and tell me how many attempts it took.")
|
||||
]
|
||||
}
|
||||
)
|
||||
|
||||
# Count tool calls
|
||||
assert poll_tool["mock"].call_count == case.expected_tool_calls
|
||||
|
||||
# Check last message content
|
||||
last_message = result["messages"][-1]
|
||||
assert last_message.content == case.expected_last_message
|
||||
|
||||
# Check structured response
|
||||
if case.expected_structured_response is not None:
|
||||
structured_response_json = result["structured_response"]
|
||||
assert structured_response_json == case.expected_structured_response
|
||||
else:
|
||||
assert "structured_response" not in result
|
||||
1482
libs/langchain_v1/tests/unit_tests/agents/test_tool_node.py
Normal file
1482
libs/langchain_v1/tests/unit_tests/agents/test_tool_node.py
Normal file
File diff suppressed because it is too large
Load Diff
21
libs/langchain_v1/tests/unit_tests/agents/utils.py
Normal file
21
libs/langchain_v1/tests/unit_tests/agents/utils.py
Normal file
@@ -0,0 +1,21 @@
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
from pydantic import BaseModel, ConfigDict
|
||||
from pydantic.alias_generators import to_camel
|
||||
|
||||
|
||||
class BaseSchema(BaseModel):
|
||||
model_config = ConfigDict(
|
||||
alias_generator=to_camel,
|
||||
populate_by_name=True,
|
||||
from_attributes=True,
|
||||
)
|
||||
|
||||
|
||||
def load_spec(spec_name: str, as_model: type[BaseModel]) -> list[BaseModel]:
|
||||
with (Path(__file__).parent / "specifications" / f"{spec_name}.json").open(
|
||||
"r", encoding="utf-8"
|
||||
) as f:
|
||||
data = json.load(f)
|
||||
return [as_model(**item) for item in data]
|
||||
@@ -53,9 +53,7 @@ def pytest_addoption(parser: pytest.Parser) -> None:
|
||||
)
|
||||
|
||||
|
||||
def pytest_collection_modifyitems(
|
||||
config: pytest.Config, items: Sequence[pytest.Function]
|
||||
) -> None:
|
||||
def pytest_collection_modifyitems(config: pytest.Config, items: Sequence[pytest.Function]) -> None:
|
||||
"""Add implementations for handling custom markers.
|
||||
|
||||
At the moment, this adds support for a custom `requires` marker.
|
||||
|
||||
@@ -113,9 +113,7 @@ async def test_aembed_documents(cache_embeddings: CacheBackedEmbeddings) -> None
|
||||
vectors = await cache_embeddings.aembed_documents(texts)
|
||||
expected_vectors: list[list[float]] = [[1, 2.0], [2.0, 3.0], [1.0, 2.0], [3.0, 4.0]]
|
||||
assert vectors == expected_vectors
|
||||
keys = [
|
||||
key async for key in cache_embeddings.document_embedding_store.ayield_keys()
|
||||
]
|
||||
keys = [key async for key in cache_embeddings.document_embedding_store.ayield_keys()]
|
||||
assert len(keys) == 4
|
||||
# UUID is expected to be the same for the same text
|
||||
assert keys[0] == "test_namespace812b86c1-8ebf-5483-95c6-c95cf2b52d12"
|
||||
@@ -128,10 +126,7 @@ async def test_aembed_documents_batch(
|
||||
texts = ["1", "22", "a", "333", "RAISE_EXCEPTION"]
|
||||
with contextlib.suppress(ValueError):
|
||||
await cache_embeddings_batch.aembed_documents(texts)
|
||||
keys = [
|
||||
key
|
||||
async for key in cache_embeddings_batch.document_embedding_store.ayield_keys()
|
||||
]
|
||||
keys = [key async for key in cache_embeddings_batch.document_embedding_store.ayield_keys()]
|
||||
# only the first batch of three embeddings should exist
|
||||
assert len(keys) == 3
|
||||
# UUID is expected to be the same for the same text
|
||||
|
||||
@@ -13,9 +13,7 @@ def test_import_all() -> None:
|
||||
library_code = PKG_ROOT / "langchain"
|
||||
for path in library_code.rglob("*.py"):
|
||||
# Calculate the relative path to the module
|
||||
module_name = (
|
||||
path.relative_to(PKG_ROOT).with_suffix("").as_posix().replace("/", ".")
|
||||
)
|
||||
module_name = path.relative_to(PKG_ROOT).with_suffix("").as_posix().replace("/", ".")
|
||||
if module_name.endswith("__init__"):
|
||||
# Without init
|
||||
module_name = module_name.rsplit(".", 1)[0]
|
||||
@@ -39,9 +37,7 @@ def test_import_all_using_dir() -> None:
|
||||
library_code = PKG_ROOT / "langchain"
|
||||
for path in library_code.rglob("*.py"):
|
||||
# Calculate the relative path to the module
|
||||
module_name = (
|
||||
path.relative_to(PKG_ROOT).with_suffix("").as_posix().replace("/", ".")
|
||||
)
|
||||
module_name = path.relative_to(PKG_ROOT).with_suffix("").as_posix().replace("/", ".")
|
||||
if module_name.endswith("__init__"):
|
||||
# Without init
|
||||
module_name = module_name.rsplit(".", 1)[0]
|
||||
|
||||
54
libs/langchain_v1/uv.lock
generated
54
libs/langchain_v1/uv.lock
generated
@@ -1628,6 +1628,7 @@ test = [
|
||||
{ name = "pytest" },
|
||||
{ name = "pytest-asyncio" },
|
||||
{ name = "pytest-cov" },
|
||||
{ name = "pytest-mock" },
|
||||
{ name = "pytest-socket" },
|
||||
{ name = "pytest-watcher" },
|
||||
{ name = "pytest-xdist" },
|
||||
@@ -1687,6 +1688,7 @@ test = [
|
||||
{ name = "pytest", specifier = ">=8,<9" },
|
||||
{ name = "pytest-asyncio", specifier = ">=0.23.2,<1.0.0" },
|
||||
{ name = "pytest-cov", specifier = ">=4.0.0,<5.0.0" },
|
||||
{ name = "pytest-mock", specifier = ">=3.12.0,<4.0.0" },
|
||||
{ name = "pytest-socket", specifier = ">=0.6.0,<1.0.0" },
|
||||
{ name = "pytest-watcher", specifier = ">=0.2.6,<1.0.0" },
|
||||
{ name = "pytest-xdist", specifier = ">=3.6.1,<4.0.0" },
|
||||
@@ -1757,7 +1759,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "langchain-core"
|
||||
version = "0.3.72"
|
||||
version = "0.3.74"
|
||||
source = { editable = "../core" }
|
||||
dependencies = [
|
||||
{ name = "jsonpatch" },
|
||||
@@ -1808,7 +1810,7 @@ test = [
|
||||
test-integration = []
|
||||
typing = [
|
||||
{ name = "langchain-text-splitters", directory = "../text-splitters" },
|
||||
{ name = "mypy", specifier = ">=1.15,<1.16" },
|
||||
{ name = "mypy", specifier = ">=1.17.1,<1.18" },
|
||||
{ name = "types-pyyaml", specifier = ">=6.0.12.2,<7.0.0.0" },
|
||||
{ name = "types-requests", specifier = ">=2.28.11.5,<3.0.0.0" },
|
||||
]
|
||||
@@ -1937,7 +1939,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "langchain-openai"
|
||||
version = "0.3.28"
|
||||
version = "0.3.31"
|
||||
source = { editable = "../partners/openai" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
@@ -1948,14 +1950,14 @@ dependencies = [
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "langchain-core", editable = "../core" },
|
||||
{ name = "openai", specifier = ">=1.86.0,<2.0.0" },
|
||||
{ name = "openai", specifier = ">=1.99.9,<2.0.0" },
|
||||
{ name = "tiktoken", specifier = ">=0.7,<1" },
|
||||
]
|
||||
|
||||
[package.metadata.requires-dev]
|
||||
codespell = [{ name = "codespell", specifier = ">=2.2.0,<3.0.0" }]
|
||||
dev = [{ name = "langchain-core", editable = "../core" }]
|
||||
lint = [{ name = "ruff", specifier = ">=0.12.2,<0.13" }]
|
||||
lint = [{ name = "ruff", specifier = ">=0.12.8,<0.13" }]
|
||||
test = [
|
||||
{ name = "freezegun", specifier = ">=1.2.2,<2.0.0" },
|
||||
{ name = "langchain-core", editable = "../core" },
|
||||
@@ -1981,7 +1983,7 @@ test-integration = [
|
||||
]
|
||||
typing = [
|
||||
{ name = "langchain-core", editable = "../core" },
|
||||
{ name = "mypy", specifier = ">=1.10,<2.0" },
|
||||
{ name = "mypy", specifier = ">=1.17.1,<2.0" },
|
||||
{ name = "types-tqdm", specifier = ">=4.66.0.5,<5.0.0.0" },
|
||||
]
|
||||
|
||||
@@ -2035,7 +2037,7 @@ requires-dist = [
|
||||
|
||||
[package.metadata.requires-dev]
|
||||
codespell = [{ name = "codespell", specifier = ">=2.2.0,<3.0.0" }]
|
||||
lint = [{ name = "ruff", specifier = ">=0.12.2,<0.13" }]
|
||||
lint = [{ name = "ruff", specifier = ">=0.12.8,<0.13" }]
|
||||
test = [{ name = "langchain-core", editable = "../core" }]
|
||||
test-integration = []
|
||||
typing = [
|
||||
@@ -2049,10 +2051,14 @@ version = "0.3.9"
|
||||
source = { editable = "../text-splitters" }
|
||||
dependencies = [
|
||||
{ name = "langchain-core" },
|
||||
{ name = "pip" },
|
||||
]
|
||||
|
||||
[package.metadata]
|
||||
requires-dist = [{ name = "langchain-core", editable = "../core" }]
|
||||
requires-dist = [
|
||||
{ name = "langchain-core", editable = "../core" },
|
||||
{ name = "pip", specifier = ">=25.2" },
|
||||
]
|
||||
|
||||
[package.metadata.requires-dev]
|
||||
dev = [
|
||||
@@ -2061,7 +2067,7 @@ dev = [
|
||||
]
|
||||
lint = [
|
||||
{ name = "langchain-core", editable = "../core" },
|
||||
{ name = "ruff", specifier = ">=0.12.2,<0.13" },
|
||||
{ name = "ruff", specifier = ">=0.12.8,<0.13" },
|
||||
]
|
||||
test = [
|
||||
{ name = "freezegun", specifier = ">=1.2.2,<2.0.0" },
|
||||
@@ -2078,11 +2084,12 @@ test-integration = [
|
||||
{ name = "sentence-transformers", specifier = ">=3.0.1" },
|
||||
{ name = "spacy", specifier = ">=3.8.7,<4.0.0" },
|
||||
{ name = "thinc", specifier = ">=8.3.6,<9.0.0" },
|
||||
{ name = "tiktoken", specifier = ">=0.8.0,<1.0.0" },
|
||||
{ name = "transformers", specifier = ">=4.51.3,<5.0.0" },
|
||||
]
|
||||
typing = [
|
||||
{ name = "lxml-stubs", specifier = ">=0.5.1,<1.0.0" },
|
||||
{ name = "mypy", specifier = ">=1.15,<2.0" },
|
||||
{ name = "mypy", specifier = ">=1.17.1,<1.18" },
|
||||
{ name = "tiktoken", specifier = ">=0.8.0,<1.0.0" },
|
||||
{ name = "types-requests", specifier = ">=2.31.0.20240218,<3.0.0.0" },
|
||||
]
|
||||
@@ -2657,7 +2664,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "openai"
|
||||
version = "1.97.1"
|
||||
version = "1.101.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anyio" },
|
||||
@@ -2669,9 +2676,9 @@ dependencies = [
|
||||
{ name = "tqdm" },
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a6/57/1c471f6b3efb879d26686d31582997615e969f3bb4458111c9705e56332e/openai-1.97.1.tar.gz", hash = "sha256:a744b27ae624e3d4135225da9b1c89c107a2a7e5bc4c93e5b7b5214772ce7a4e", size = 494267, upload-time = "2025-07-22T13:10:12.607Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/00/7c/eaf06b62281f5ca4f774c4cff066e6ddfd6a027e0ac791be16acec3a95e3/openai-1.101.0.tar.gz", hash = "sha256:29f56df2236069686e64aca0e13c24a4ec310545afb25ef7da2ab1a18523f22d", size = 518415, upload-time = "2025-08-21T21:11:01.645Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ee/35/412a0e9c3f0d37c94ed764b8ac7adae2d834dbd20e69f6aca582118e0f55/openai-1.97.1-py3-none-any.whl", hash = "sha256:4e96bbdf672ec3d44968c9ea39d2c375891db1acc1794668d8149d5fa6000606", size = 764380, upload-time = "2025-07-22T13:10:10.689Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c8/a6/0e39baa335bbd1c66c7e0a41dbbec10c5a15ab95c1344e7f7beb28eee65a/openai-1.101.0-py3-none-any.whl", hash = "sha256:6539a446cce154f8d9fb42757acdfd3ed9357ab0d34fcac11096c461da87133b", size = 810772, upload-time = "2025-08-21T21:10:59.215Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2936,6 +2943,15 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/34/e7/ae39f538fd6844e982063c3a5e4598b8ced43b9633baa3a85ef33af8c05c/pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8", size = 6984598, upload-time = "2025-07-01T09:16:27.732Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pip"
|
||||
version = "25.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/20/16/650289cd3f43d5a2fadfd98c68bd1e1e7f2550a1a5326768cddfbcedb2c5/pip-25.2.tar.gz", hash = "sha256:578283f006390f85bb6282dffb876454593d637f5d1be494b5202ce4877e71f2", size = 1840021, upload-time = "2025-07-30T21:50:15.401Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/b7/3f/945ef7ab14dc4f9d7f40288d2df998d1837ee0888ec3659c813487572faa/pip-25.2-py3-none-any.whl", hash = "sha256:6d67a2b4e7f14d8b31b8b52648866fa717f45a1eb70e83002f4331d07e953717", size = 1752557, upload-time = "2025-07-30T21:50:13.323Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pluggy"
|
||||
version = "1.6.0"
|
||||
@@ -3395,6 +3411,18 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/4b/8b78d126e275efa2379b1c2e09dc52cf70df16fc3b90613ef82531499d73/pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a", size = 21949, upload-time = "2023-05-24T18:44:54.079Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pytest-mock"
|
||||
version = "3.14.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pytest" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/71/28/67172c96ba684058a4d24ffe144d64783d2a270d0af0d9e792737bddc75c/pytest_mock-3.14.1.tar.gz", hash = "sha256:159e9edac4c451ce77a5cdb9fc5d1100708d2dd4ba3c3df572f14097351af80e", size = 33241, upload-time = "2025-05-26T13:58:45.167Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/b2/05/77b60e520511c53d1c1ca75f1930c7dd8e971d0c4379b7f4b3f9644685ba/pytest_mock-3.14.1-py3-none-any.whl", hash = "sha256:178aefcd11307d874b4cd3100344e7e2d888d9791a6a1d9bfe90fbc1b74fd1d0", size = 9923, upload-time = "2025-05-26T13:58:43.487Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pytest-recording"
|
||||
version = "0.13.4"
|
||||
|
||||
Reference in New Issue
Block a user