Mirror of https://github.com/hwchase17/langchain.git (synced 2026-02-19 13:25:35 +00:00)

Compare commits: 6 commits, branch `langchain-...` against `master`.

| SHA1 |
|---|
| 18230f625f |
| 83f81d65af |
| 5c6f8fe0a6 |
| 5053436dcf |
| 3686bcbd96 |
| 9c160e2368 |
`print_sys_info`: the list of tracked packages that do not carry the `langchain` prefix now covers `deepagents` and `deepagents-cli` alongside `langserve` and `langsmith`.

```diff
@@ -41,8 +41,9 @@ def print_sys_info(*, additional_pkgs: Sequence[str] = ()) -> None:
     """
     # Packages that do not start with "langchain" prefix.
     other_langchain_packages = [
         "langserve",
         "langsmith",
         "deepagents",
         "deepagents-cli",
     ]

     langchain_pkgs = [
```
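For context, a rough, hypothetical sketch of how such a package list is typically consumed when reporting system info: look up each name with `importlib.metadata` and mark anything that is not installed. The helper name `report_versions` and the output format are invented for illustration and are not the actual `print_sys_info` implementation.

```python
# Hypothetical sketch (not the real print_sys_info): turning a package list
# like other_langchain_packages into an installed-version report.
from importlib import metadata


def report_versions(packages: list[str]) -> dict[str, str]:
    """Return a name -> version map, marking packages that are not installed."""
    versions: dict[str, str] = {}
    for name in packages:
        try:
            versions[name] = metadata.version(name)
        except metadata.PackageNotFoundError:
            versions[name] = "not installed"
    return versions


if __name__ == "__main__":
    pkgs = ["langserve", "langsmith", "deepagents", "deepagents-cli"]
    for name, version in report_versions(pkgs).items():
        print(f"{name:>20}: {version}")
```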
`merge_lists`: an existing entry now matches an incoming chunk only if the `index` matches and the `id`s are not inconsistent, rather than matching on `index` alone.

```diff
@@ -117,7 +117,15 @@ def merge_lists(left: list | None, *others: list | None) -> list | None:
                 to_merge = [
                     i
                     for i, e_left in enumerate(merged)
-                    if "index" in e_left and e_left["index"] == e["index"]
+                    if (
+                        "index" in e_left
+                        and e_left["index"] == e["index"]  # index matches
+                        and (  # IDs not inconsistent
+                            e_left.get("id") is None
+                            or e.get("id") is None
+                            or e_left["id"] == e["id"]
+                        )
+                    )
                 ]
                 if to_merge:
                     # TODO: Remove this once merge_dict is updated with special
```
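Why the extra ID guard matters: with index-only matching, two parallel tool calls that a provider streams with the same `index` but different `id`s would be squashed into one entry. Below is a minimal, self-contained sketch of the new matching rule over plain dicts; it is not the library's `merge_lists`, and the chunk shape is simplified to just `index` and `id`.

```python
# Minimal sketch of the updated matching rule (not the actual merge_lists):
# a left-hand candidate matches an incoming chunk only if the index matches
# AND the IDs are not inconsistent (either side missing, or equal).
def chunks_match(e_left: dict, e: dict) -> bool:
    return (
        "index" in e_left
        and e_left["index"] == e["index"]  # index matches
        and (  # IDs not inconsistent
            e_left.get("id") is None
            or e.get("id") is None
            or e_left["id"] == e["id"]
        )
    )


# Parallel calls sharing index 0 but carrying different IDs stay separate ...
assert not chunks_match({"index": 0, "id": "tooluse_ABC"}, {"index": 0, "id": "tooluse_DEF"})
# ... while a continuation chunk with id=None still merges into its original.
assert chunks_match({"index": 0, "id": "id1"}, {"index": 0, "id": None})
```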
`test_get_ls_params` (chat model tests): integer temperature values are now covered directly, as a regression test for issue #35300.

```diff
@@ -1206,6 +1206,13 @@ def test_get_ls_params() -> None:
     ls_params = llm._get_ls_params(temperature=0.2)
     assert ls_params["ls_temperature"] == 0.2
 
+    # Test integer temperature values (regression test for issue #35300)
+    ls_params = llm._get_ls_params(temperature=0)
+    assert ls_params["ls_temperature"] == 0
+
+    ls_params = llm._get_ls_params(temperature=1)
+    assert ls_params["ls_temperature"] == 1
+
     ls_params = llm._get_ls_params(max_tokens=2048)
     assert ls_params["ls_max_tokens"] == 2048
 
```
The standalone chat-model test `test_get_ls_params_int_temperature` is removed, since its checks are folded into `test_get_ls_params` above.

```diff
@@ -1213,40 +1220,6 @@ def test_get_ls_params() -> None:
     assert ls_params["ls_stop"] == ["stop"]
 
 
-def test_get_ls_params_int_temperature() -> None:
-    class IntTempModel(BaseChatModel):
-        model: str = "foo"
-        temperature: int = 0
-        max_tokens: int = 1024
-
-        def _generate(
-            self,
-            messages: list[BaseMessage],
-            stop: list[str] | None = None,
-            run_manager: CallbackManagerForLLMRun | None = None,
-            **kwargs: Any,
-        ) -> ChatResult:
-            raise NotImplementedError
-
-        @property
-        def _llm_type(self) -> str:
-            return "fake-chat-model"
-
-    llm = IntTempModel()
-
-    # Integer temperature from self attribute
-    ls_params = llm._get_ls_params()
-    assert ls_params["ls_temperature"] == 0
-
-    # Integer temperature from kwargs
-    ls_params = llm._get_ls_params(temperature=1)
-    assert ls_params["ls_temperature"] == 1
-
-    # Float temperature from kwargs still works
-    ls_params = llm._get_ls_params(temperature=0.5)
-    assert ls_params["ls_temperature"] == 0.5
-
-
 def test_model_profiles() -> None:
     model = GenericFakeChatModel(messages=iter([]))
     assert model.profile is None
```
The corresponding LLM-side tests get the same treatment: integer temperature checks move into `test_get_ls_params`, and the separate `BaseLLM`-based test is removed.

```diff
@@ -272,43 +272,15 @@ def test_get_ls_params() -> None:
     ls_params = llm._get_ls_params(temperature=0.2)
     assert ls_params["ls_temperature"] == 0.2
 
+    # Test integer temperature values (regression test for issue #35300)
+    ls_params = llm._get_ls_params(temperature=0)
+    assert ls_params["ls_temperature"] == 0
+
+    ls_params = llm._get_ls_params(temperature=1)
+    assert ls_params["ls_temperature"] == 1
+
     ls_params = llm._get_ls_params(max_tokens=2048)
     assert ls_params["ls_max_tokens"] == 2048
 
     ls_params = llm._get_ls_params(stop=["stop"])
     assert ls_params["ls_stop"] == ["stop"]
-
-
-def test_get_ls_params_int_temperature() -> None:
-    class IntTempModel(BaseLLM):
-        model: str = "foo"
-        temperature: int = 0
-        max_tokens: int = 1024
-
-        @override
-        def _generate(
-            self,
-            prompts: list[str],
-            stop: list[str] | None = None,
-            run_manager: CallbackManagerForLLMRun | None = None,
-            **kwargs: Any,
-        ) -> LLMResult:
-            raise NotImplementedError
-
-        @property
-        def _llm_type(self) -> str:
-            return "fake-model"
-
-    llm = IntTempModel()
-
-    # Integer temperature from self attribute
-    ls_params = llm._get_ls_params()
-    assert ls_params["ls_temperature"] == 0
-
-    # Integer temperature from kwargs
-    ls_params = llm._get_ls_params(temperature=1)
-    assert ls_params["ls_temperature"] == 1
-
-    # Float temperature from kwargs still works
-    ls_params = llm._get_ls_params(temperature=0.5)
-    assert ls_params["ls_temperature"] == 0.5
```
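The consolidated tests above guard the regression referenced as issue #35300: an integer `temperature`, notably `0`, must survive into `ls_temperature` rather than being dropped. A plausible failure mode for such a bug, shown below as a hedged sketch, is a truthiness check on the parameter where an explicit `is not None` check is needed; the helpers here are illustrative only and are not the library's `_get_ls_params`.

```python
# Illustrative only: not LangChain's _get_ls_params. Shows how a truthiness
# check silently drops temperature=0, while an explicit None check keeps it.
def ls_params_buggy(temperature: float | None = None) -> dict:
    params = {}
    if temperature:  # BUG: 0 and 0.0 are falsy and get dropped
        params["ls_temperature"] = temperature
    return params


def ls_params_fixed(temperature: float | None = None) -> dict:
    params = {}
    if temperature is not None:  # keeps 0 / 0.0, only skips "unset"
        params["ls_temperature"] = temperature
    return params


assert "ls_temperature" not in ls_params_buggy(temperature=0)
assert ls_params_fixed(temperature=0)["ls_temperature"] == 0
assert ls_params_fixed(temperature=0.5)["ls_temperature"] == 0.5
```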
New `test_merge_tool_calls_parallel_same_index`: parallel tool calls that share an `index` but carry different `id`s must stay separate, while a streaming continuation chunk with `id=None` still merges into its originating chunk.

```diff
@@ -916,6 +916,47 @@ def test_merge_tool_calls() -> None:
     assert len(merged) == 2
 
 
+def test_merge_tool_calls_parallel_same_index() -> None:
+    """Test parallel tool calls with same index but different IDs."""
+    # Two parallel tool calls with the same index but different IDs
+    left = create_tool_call_chunk(
+        name="read_file", args='{"path": "foo.txt"}', id="tooluse_ABC", index=0
+    )
+    right = create_tool_call_chunk(
+        name="search_text", args='{"query": "bar"}', id="tooluse_DEF", index=0
+    )
+    merged = merge_lists([left], [right])
+    assert merged is not None
+    assert len(merged) == 2
+    assert merged[0]["name"] == "read_file"
+    assert merged[0]["id"] == "tooluse_ABC"
+    assert merged[1]["name"] == "search_text"
+    assert merged[1]["id"] == "tooluse_DEF"
+
+    # Streaming continuation: same index, id=None on continuation chunk
+    # should still merge correctly with the original chunk
+    first = create_tool_call_chunk(name="tool1", args="", id="id1", index=0)
+    continuation = create_tool_call_chunk(
+        name=None, args='{"key": "value"}', id=None, index=0
+    )
+    merged = merge_lists([first], [continuation])
+    assert merged is not None
+    assert len(merged) == 1
+    assert merged[0]["name"] == "tool1"
+    assert merged[0]["args"] == '{"key": "value"}'
+    assert merged[0]["id"] == "id1"
+
+    # Three parallel tool calls all with the same index
+    tc1 = create_tool_call_chunk(name="tool_a", args="{}", id="id_a", index=0)
+    tc2 = create_tool_call_chunk(name="tool_b", args="{}", id="id_b", index=0)
+    tc3 = create_tool_call_chunk(name="tool_c", args="{}", id="id_c", index=0)
+    merged = merge_lists([tc1], [tc2], [tc3])
+    assert merged is not None
+    assert len(merged) == 3
+    assert [m["name"] for m in merged] == ["tool_a", "tool_b", "tool_c"]
+    assert [m["id"] for m in merged] == ["id_a", "id_b", "id_c"]
+
+
 def test_tool_message_serdes() -> None:
     message = ToolMessage(
         "foo", artifact={"bar": {"baz": 123}}, tool_call_id="1", status="error"
```
`_supports_provider_strategy`: the Gemini exception is narrowed so that only pre-3-series Gemini models are blocked from combining tool use with provider-side structured output.

```diff
@@ -495,9 +495,14 @@ def _supports_provider_strategy(
     if (
         model_profile is not None
         and model_profile.get("structured_output")
-        # We make an exception for Gemini models, which currently do not support
-        # simultaneous tool use with structured output
-        and not (tools and isinstance(model_name, str) and "gemini" in model_name.lower())
+        # We make an exception for Gemini < 3-series models, which currently do not support
+        # simultaneous tool use with structured output; 3-series can.
+        and not (
+            tools
+            and isinstance(model_name, str)
+            and "gemini" in model_name.lower()
+            and "gemini-3" not in model_name.lower()
+        )
     ):
         return True
 
```
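The exception is plain substring matching on the model name. As a standalone sketch of just that name-based rule (the function name `gemini_blocks_provider_strategy` is hypothetical, and the real `_supports_provider_strategy` also consults the model profile and the tools argument), here is how a few model IDs behave:

```python
# Hypothetical helper mirroring only the name-based part of the check:
# with tools present, Gemini models are blocked unless they are 3-series.
def gemini_blocks_provider_strategy(model_name: str, *, has_tools: bool) -> bool:
    name = model_name.lower()
    return has_tools and "gemini" in name and "gemini-3" not in name


assert gemini_blocks_provider_strategy("gemini-2.5-flash", has_tools=True)
assert not gemini_blocks_provider_strategy("gemini-3-pro-preview", has_tools=True)
# "Latest" aliases contain no "gemini-3" substring, so they stay blocked for now.
assert gemini_blocks_provider_strategy("gemini-flash-latest", has_tools=True)
# Without tools, the exception never applies.
assert not gemini_blocks_provider_strategy("gemini-2.5-flash", has_tools=False)
# Non-Gemini models are unaffected by this clause.
assert not gemini_blocks_provider_strategy("gpt-4o-mini", has_tools=True)
```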
Test imports: `_supports_provider_strategy` is now imported from `langchain.agents.factory`.

```diff
@@ -16,6 +16,7 @@ from pydantic import BaseModel, Field
 from typing_extensions import TypedDict
 
 from langchain.agents import create_agent
+from langchain.agents.factory import _supports_provider_strategy
 from langchain.agents.middleware.types import (
     AgentMiddleware,
     ModelCallResult,
```
New `TestSupportsProviderStrategy` unit tests cover Gemini 2.x, Gemini 3, and the "latest" aliases.

```diff
@@ -897,3 +898,44 @@ def test_union_of_types() -> None:
 
     assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
     assert len(response["messages"]) == 5
+
+
+class TestSupportsProviderStrategy:
+    """Unit tests for `_supports_provider_strategy`."""
+
+    @staticmethod
+    def _make_structured_model(model_name: str):
+        class GeminiTestChatModel(GenericFakeChatModel):
+            model_name: str
+
+        return GeminiTestChatModel(
+            messages=iter(
+                [
+                    AIMessage(content="test-response"),
+                ]
+            ),
+            profile={"structured_output": True},
+            model_name=model_name,
+        )
+
+    def test_blocks_gemini_v2_with_tools(self) -> None:
+        """Gemini 2 series models cannot use provider strategy with tools."""
+        model = self._make_structured_model("gemini-2.5-flash")
+        assert not _supports_provider_strategy(model, tools=[get_weather])
+
+    def test_allows_gemini_v3_with_tools(self) -> None:
+        """Gemini 3 series models support structured output alongside tools."""
+        model = self._make_structured_model("gemini-3-pro-preview")
+        assert _supports_provider_strategy(model, tools=[get_weather])
+
+    @pytest.mark.parametrize(
+        "alias",
+        [
+            "gemini-flash-latest",
+            "gemini-flash-lite-latest",
+        ],
+    )
+    def test_blocks_gemini_latest_aliases(self, alias: str) -> None:
+        """Latest aliases stay blocked until they point to Gemini 3."""
+        model = self._make_structured_model(alias)
+        assert not _supports_provider_strategy(model, tools=[get_weather])
```
`_oai_structured_outputs_parser`: the parsed payload is now checked against `None` instead of truthiness, so valid but falsy parsed objects are no longer dropped.

```diff
@@ -3765,7 +3765,7 @@ def _convert_to_openai_response_format(
 def _oai_structured_outputs_parser(
     ai_msg: AIMessage, schema: type[_BM]
 ) -> PydanticBaseModel | None:
-    if parsed := ai_msg.additional_kwargs.get("parsed"):
+    if (parsed := ai_msg.additional_kwargs.get("parsed")) is not None:
         if isinstance(parsed, dict):
             return schema(**parsed)
         return parsed
```
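The one-line change matters because `if parsed := ...:` tests truthiness, and a parsed Pydantic object can be valid yet falsy, for example a model that defines `__len__` and is empty. The sketch below reproduces that difference with a toy parser; `ToyBox` and the function names are invented for illustration and are not the OpenAI integration's code.

```python
# Illustrative toy (not langchain-openai's parser): truthiness vs. None check
# when the parsed object is valid but falsy.
from pydantic import BaseModel


class ToyBox(BaseModel):
    items: list[str]

    def __len__(self) -> int:  # makes an empty ToyBox falsy
        return len(self.items)


def parse_truthy(additional_kwargs: dict) -> ToyBox | None:
    if parsed := additional_kwargs.get("parsed"):  # drops falsy-but-valid objects
        return parsed
    return None


def parse_not_none(additional_kwargs: dict) -> ToyBox | None:
    if (parsed := additional_kwargs.get("parsed")) is not None:  # only skips "missing"
        return parsed
    return None


empty = ToyBox(items=[])
assert parse_truthy({"parsed": empty}) is None      # old behavior: result lost
assert parse_not_none({"parsed": empty}) == empty   # new behavior: result preserved
```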
New `test_structured_outputs_parser_valid_falsy_response`: a valid but falsy parsed object, an empty `LunchBox`, must round-trip through the parser.

```diff
@@ -1388,6 +1388,26 @@ def test_structured_outputs_parser() -> None:
     assert result == parsed_response
 
 
+def test_structured_outputs_parser_valid_falsy_response() -> None:
+    class LunchBox(BaseModel):
+        sandwiches: list[str]
+
+        def __len__(self) -> int:
+            return len(self.sandwiches)
+
+    # prepare a valid *but falsy* response object, an empty LunchBox
+    parsed_response = LunchBox(sandwiches=[])
+    assert len(parsed_response) == 0
+    llm_output = AIMessage(
+        content='{"sandwiches": []}', additional_kwargs={"parsed": parsed_response}
+    )
+    output_parser = RunnableLambda(
+        partial(_oai_structured_outputs_parser, schema=LunchBox)
+    )
+    result = output_parser.invoke(llm_output)
+    assert result == parsed_response
+
+
 def test__construct_lc_result_from_responses_api_error_handling() -> None:
     """Test that errors in the response are properly raised."""
     response = Response(
```