core: Bump ruff version to 0.12 (#31846)

Christophe Bornet, 2025-07-07 16:02:51 +02:00, committed by GitHub
parent 73552883c3
commit 8aed3b61a9
16 changed files with 1642 additions and 1615 deletions


@@ -130,7 +130,7 @@ class BaseLanguageModel(
     )

     @field_validator("verbose", mode="before")
-    def set_verbose(cls, verbose: Optional[bool]) -> bool:
+    def set_verbose(cls, verbose: Optional[bool]) -> bool:  # noqa: FBT001
         """If verbose is None, set it.

         This allows users to pass in None as verbose to access the global setting.
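
Note on the FBT001 suppressions that recur in this commit: ruff's flake8-boolean-trap rule FBT001 flags boolean positional parameters, because call sites like set_verbose(True) hide what the flag means. Where the signature is fixed by a framework (Pydantic invokes the validator above positionally), the commit suppresses the rule; where the signature is owned by this codebase, the parameter is made keyword-only instead. A minimal sketch of the two options, with illustrative names not taken from the diff:

    from typing import Optional


    def set_flag(value: Optional[bool]) -> bool:  # noqa: FBT001
        # Signature dictated by a framework that passes the value
        # positionally, so the boolean-trap rule is suppressed, not fixed.
        return bool(value)


    def resolve(*, enabled: Optional[bool] = None) -> bool:
        # Keyword-only: callers must write resolve(enabled=True),
        # which is exactly the readability FBT001 is after.
        return bool(enabled)


    resolve(enabled=True)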


@@ -129,7 +129,7 @@ def create_base_retry_decorator(
 )


-def _resolve_cache(cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
+def _resolve_cache(*, cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
     """Resolve the cache."""
     if isinstance(cache, BaseCache):
         llm_cache = cache
@@ -155,7 +155,7 @@ def _resolve_cache(cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
 def get_prompts(
     params: dict[str, Any],
     prompts: list[str],
-    cache: Optional[Union[BaseCache, bool, None]] = None,
+    cache: Optional[Union[BaseCache, bool, None]] = None,  # noqa: FBT001
 ) -> tuple[dict[int, list], str, list[int], list[str]]:
     """Get prompts that are already cached.
@@ -176,7 +176,7 @@ def get_prompts(
     missing_prompts = []
     missing_prompt_idxs = []
     existing_prompts = {}
-    llm_cache = _resolve_cache(cache)
+    llm_cache = _resolve_cache(cache=cache)
     for i, prompt in enumerate(prompts):
         if llm_cache:
             cache_val = llm_cache.lookup(prompt, llm_string)
@@ -191,7 +191,7 @@ def get_prompts(
 async def aget_prompts(
     params: dict[str, Any],
     prompts: list[str],
-    cache: Optional[Union[BaseCache, bool, None]] = None,
+    cache: Optional[Union[BaseCache, bool, None]] = None,  # noqa: FBT001
 ) -> tuple[dict[int, list], str, list[int], list[str]]:
     """Get prompts that are already cached. Async version.
@@ -211,7 +211,7 @@ async def aget_prompts(
     missing_prompts = []
     missing_prompt_idxs = []
     existing_prompts = {}
-    llm_cache = _resolve_cache(cache)
+    llm_cache = _resolve_cache(cache=cache)
     for i, prompt in enumerate(prompts):
         if llm_cache:
             cache_val = await llm_cache.alookup(prompt, llm_string)
@@ -224,7 +224,7 @@ async def aget_prompts(
 def update_cache(
-    cache: Union[BaseCache, bool, None],
+    cache: Union[BaseCache, bool, None],  # noqa: FBT001
     existing_prompts: dict[int, list],
     llm_string: str,
     missing_prompt_idxs: list[int],
@@ -247,7 +247,7 @@ def update_cache(
     Raises:
         ValueError: If the cache is not set and cache is True.
     """
-    llm_cache = _resolve_cache(cache)
+    llm_cache = _resolve_cache(cache=cache)
     for i, result in enumerate(new_results.generations):
         existing_prompts[missing_prompt_idxs[i]] = result
         prompt = prompts[missing_prompt_idxs[i]]
@@ -257,7 +257,7 @@ def update_cache(
 async def aupdate_cache(
-    cache: Union[BaseCache, bool, None],
+    cache: Union[BaseCache, bool, None],  # noqa: FBT001
     existing_prompts: dict[int, list],
     llm_string: str,
     missing_prompt_idxs: list[int],
@@ -280,7 +280,7 @@ async def aupdate_cache(
     Raises:
         ValueError: If the cache is not set and cache is True.
     """
-    llm_cache = _resolve_cache(cache)
+    llm_cache = _resolve_cache(cache=cache)
     for i, result in enumerate(new_results.generations):
         existing_prompts[missing_prompt_idxs[i]] = result
         prompt = prompts[missing_prompt_idxs[i]]


@@ -1341,8 +1341,8 @@ def _first_max_tokens(
                 excluded.content = list(reversed(excluded.content))
             for _ in range(1, num_block):
                 excluded.content = excluded.content[:-1]
-                if token_counter(messages[:idx] + [excluded]) <= max_tokens:
-                    messages = messages[:idx] + [excluded]
+                if token_counter([*messages[:idx], excluded]) <= max_tokens:
+                    messages = [*messages[:idx], excluded]
                     idx += 1
                     included_partial = True
                     break
@@ -1393,7 +1393,7 @@ def _first_max_tokens(
             if partial_strategy == "last":
                 content_splits = list(reversed(content_splits))
             excluded.content = "".join(content_splits)
-            messages = messages[:idx] + [excluded]
+            messages = [*messages[:idx], excluded]
             idx += 1

     if end_on:
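
The rewrites above replace list concatenation with literal unpacking, the form ruff's RUF005 rule prefers; the two spellings are equivalent. A quick sketch:

    messages = ["a", "b", "c"]
    idx = 2
    excluded = "x"

    # Concatenation and unpacking build the same list; RUF005 favors the latter.
    assert messages[:idx] + [excluded] == [*messages[:idx], excluded] == ["a", "b", "x"]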


@@ -97,3 +97,5 @@ class LLMResult(BaseModel):
             self.generations == other.generations
             and self.llm_output == other.llm_output
         )
+
+    __hash__ = None  # type: ignore[assignment]
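
Background on the recurring __hash__ = None additions: a Python class that defines __eq__ in its body already has __hash__ set to None implicitly, so instances are unhashable; spelling it out documents that fact and satisfies ruff's eq-without-hash check (PLW1641, if memory of the rule code serves). The type: ignore is needed because the stubs declare __hash__ as a method. A sketch of the behavior:

    class Point:
        def __init__(self, x: int) -> None:
            self.x = x

        def __eq__(self, other: object) -> bool:
            return isinstance(other, Point) and self.x == other.x

        # Defining __eq__ alone already disables hashing; the explicit
        # assignment just makes that visible to readers and linters.
        __hash__ = None  # type: ignore[assignment]


    try:
        {Point(1)}
    except TypeError as err:
        print(err)  # unhashable type: 'Point'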


@@ -4205,6 +4205,8 @@ class RunnableGenerator(Runnable[Input, Output]):
             return False
         return False

+    __hash__ = None  # type: ignore[assignment]
+
     @override
     def __repr__(self) -> str:
         return f"RunnableGenerator({self.name})"
@@ -4588,6 +4590,8 @@ class RunnableLambda(Runnable[Input, Output]):
             return False
         return False

+    __hash__ = None  # type: ignore[assignment]
+
     def __repr__(self) -> str:
         """A string representation of this Runnable."""
         if self._repr is None:


@@ -3,7 +3,6 @@
 from __future__ import annotations

 from collections.abc import Mapping
-from itertools import starmap
 from typing import (
     TYPE_CHECKING,
     Any,
@@ -206,7 +205,7 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
         configs = get_config_list(config, len(inputs))
         return await gather_with_concurrency(
             configs[0].get("max_concurrency"),
-            *starmap(ainvoke, zip(runnables, actual_inputs, configs)),
+            *map(ainvoke, runnables, actual_inputs, configs),
         )

     @override
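
Dropping starmap works because map already accepts multiple iterables and passes one element from each per call, so starmap(f, zip(a, b, c)) and map(f, a, b, c) produce the same results, and the itertools import becomes unnecessary. A sketch with an arbitrary three-argument function:

    from itertools import starmap


    def combine(a: int, b: int, c: int) -> int:
        return a + b + c


    xs, ys, zs = [1, 2], [10, 20], [100, 200]
    assert list(starmap(combine, zip(xs, ys, zs))) == [111, 222]
    assert list(map(combine, xs, ys, zs)) == [111, 222]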


@@ -776,7 +776,7 @@ class ChildTool(BaseTool):
     def run(
         self,
         tool_input: Union[str, dict[str, Any]],
-        verbose: Optional[bool] = None,
+        verbose: Optional[bool] = None,  # noqa: FBT001
         start_color: Optional[str] = "green",
         color: Optional[str] = "green",
         callbacks: Callbacks = None,
@@ -888,7 +888,7 @@ class ChildTool(BaseTool):
     async def arun(
         self,
         tool_input: Union[str, dict],
-        verbose: Optional[bool] = None,
+        verbose: Optional[bool] = None,  # noqa: FBT001
         start_color: Optional[str] = "green",
         color: Optional[str] = "green",
         callbacks: Callbacks = None,


@@ -130,6 +130,8 @@ class RunLogPatch:
     def __eq__(self, other: object) -> bool:
         return isinstance(other, RunLogPatch) and self.ops == other.ops

+    __hash__ = None  # type: ignore[assignment]
+

 class RunLog(RunLogPatch):
     """Run log."""
@@ -174,6 +176,8 @@ class RunLog(RunLogPatch):
         # Then compare that the ops are the same
         return super().__eq__(other)

+    __hash__ = None  # type: ignore[assignment]
+

 T = TypeVar("T")


@@ -27,7 +27,7 @@ repository = "https://github.com/langchain-ai/langchain"

 [dependency-groups]
 lint = [
-    "ruff<0.12.0,>=0.11.2",
+    "ruff<0.13.0,>=0.12.2",
 ]
 typing = [
     "mypy<1.16,>=1.15",
@@ -95,12 +95,12 @@ ignore = [
     "TC003",  # Doesn't play well with Pydantic
     "TD002",  # Missing author in TODO
     "TD003",  # Missing issue link in TODO
-    "UP007",  # Doesn't play well with Pydantic in Python 3.9
     # TODO rules
     "ANN401",
     "BLE",
     "ERA",
+    "PLC0415",
     "PLR2004",
 ]
 flake8-type-checking.runtime-evaluated-base-classes = ["pydantic.BaseModel","langchain_core.load.serializable.Serializable","langchain_core.runnables.base.RunnableSerializable"]
@@ -108,6 +108,7 @@ flake8-annotations.allow-star-arg-any = true
 flake8-annotations.mypy-init-return = true
 pydocstyle.convention = "google"
 pydocstyle.ignore-var-parameters = true
+pyupgrade.keep-runtime-typing = true

 [tool.coverage.run]
 omit = [ "tests/*",]
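
The two ruff config changes are linked: the blanket UP007 ignore goes away, and pyupgrade.keep-runtime-typing = true takes over the same job, telling the pyupgrade rules not to rewrite Optional[...]/Union[...] annotations into PEP 604 unions that Python 3.9 cannot evaluate at runtime (which Pydantic does when building models). PLC0415 (imports not at the top of the file) is parked in the TODO list rather than fixed now. A sketch of the kind of annotation the setting preserves (hypothetical model, not from the commit):

    from typing import Optional

    from pydantic import BaseModel


    class Settings(BaseModel):
        # keep-runtime-typing leaves this as Optional[int]; rewriting it
        # to `int | None` would break at runtime on Python 3.9 when
        # Pydantic evaluates the annotation.
        retries: Optional[int] = None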


@@ -329,6 +329,7 @@ class StreamingModel(NoStreamingModel):

 @pytest.mark.parametrize("disable_streaming", [True, False, "tool_calling"])
 def test_disable_streaming(
+    *,
     disable_streaming: Union[bool, Literal["tool_calling"]],
 ) -> None:
     model = StreamingModel(disable_streaming=disable_streaming)
@@ -353,6 +354,7 @@ def test_disable_streaming(
 @pytest.mark.parametrize("disable_streaming", [True, False, "tool_calling"])
 async def test_disable_streaming_async(
+    *,
     disable_streaming: Union[bool, Literal["tool_calling"]],
 ) -> None:
     model = StreamingModel(disable_streaming=disable_streaming)
@@ -379,6 +381,7 @@ async def test_disable_streaming_async(
 @pytest.mark.parametrize("disable_streaming", [True, False, "tool_calling"])
 def test_disable_streaming_no_streaming_model(
+    *,
     disable_streaming: Union[bool, Literal["tool_calling"]],
 ) -> None:
     model = NoStreamingModel(disable_streaming=disable_streaming)
@@ -393,6 +396,7 @@ def test_disable_streaming_no_streaming_model(
 @pytest.mark.parametrize("disable_streaming", [True, False, "tool_calling"])
 async def test_disable_streaming_no_streaming_model_async(
+    *,
     disable_streaming: Union[bool, Literal["tool_calling"]],
 ) -> None:
     model = NoStreamingModel(disable_streaming=disable_streaming)
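
The bare * added to these test signatures makes the parametrized boolean argument keyword-only, which satisfies FBT001 without changing behavior: pytest binds parametrized values to test functions by parameter name, so keyword-only parameters are filled in as before. A minimal sketch under that assumption:

    import pytest


    @pytest.mark.parametrize("flag", [True, False])
    def test_flag(
        *,
        flag: bool,
    ) -> None:
        # pytest passes `flag` by keyword, so the bare * is transparent here.
        assert flag in (True, False)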


@@ -21,6 +21,8 @@ class NonBoolObj:
     def __repr__(self) -> str:
         return self.__class__.__name__

+    __hash__ = None  # type: ignore[assignment]
+

 def test_simple_serialization() -> None:
     class Foo(Serializable):
@@ -100,6 +102,8 @@ def test__is_field_useful() -> None:
         def __eq__(self, other: object) -> bool:
             return self  # type: ignore[return-value]

+        __hash__ = None  # type: ignore[assignment]
+
     default_x = ArrayObj()
     default_y = NonBoolObj()


@@ -266,7 +266,7 @@ def test_prompt_jinja2_missing_input_variables(
     suffix = "Ending with {{ bar }}"

     # Test when missing in suffix
-    with pytest.warns(UserWarning):
+    with pytest.warns(UserWarning, match="Missing variables: {'bar'}"):
         FewShotPromptTemplate(
             input_variables=[],
             suffix=suffix,
@@ -284,7 +284,7 @@
     ).input_variables == ["bar"]

     # Test when missing in prefix
-    with pytest.warns(UserWarning):
+    with pytest.warns(UserWarning, match="Missing variables: {'foo'}"):
         FewShotPromptTemplate(
             input_variables=["bar"],
             suffix=suffix,
@@ -311,7 +311,7 @@
     """Test error is raised when there are too many input variables."""
     prefix = "Starting with {{ foo }}"
     suffix = "Ending with {{ bar }}"
-    with pytest.warns(UserWarning):
+    with pytest.warns(UserWarning, match="Extra variables:"):
         FewShotPromptTemplate(
             input_variables=["bar", "foo", "extra", "thing"],
             suffix=suffix,


@@ -509,7 +509,7 @@ def test_prompt_jinja2_missing_input_variables() -> None:
     """Test error is raised when input variables are not provided."""
     template = "This is a {{ foo }} test."
     input_variables: list = []
-    with pytest.warns(UserWarning):
+    with pytest.warns(UserWarning, match="Missing variables: {'foo'}"):
         PromptTemplate(
             input_variables=input_variables,
             template=template,
@@ -526,7 +526,7 @@ def test_prompt_jinja2_extra_input_variables() -> None:
     """Test error is raised when there are too many input variables."""
     template = "This is a {{ foo }} test."
     input_variables = ["foo", "bar"]
-    with pytest.warns(UserWarning):
+    with pytest.warns(UserWarning, match="Extra variables: {'bar'}"):
         PromptTemplate(
             input_variables=input_variables,
             template=template,
@@ -543,7 +543,9 @@ def test_prompt_jinja2_wrong_input_variables() -> None:
     """Test error is raised when name of input variable is wrong."""
     template = "This is a {{ foo }} test."
     input_variables = ["bar"]
-    with pytest.warns(UserWarning):
+    with pytest.warns(
+        UserWarning, match="Missing variables: {'foo'} Extra variables: {'bar'}"
+    ):
         PromptTemplate(
             input_variables=input_variables,
             template=template,
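
Each bare pytest.warns(UserWarning) gains a match= pattern, tying the assertion to the actual warning text; ruff's flake8-pytest-style flags overly broad warns blocks (PT030, as best I recall). match is applied with re.search against the message, and the braces in these patterns stay literal because {'foo'} is not a valid regex quantifier. A sketch:

    import warnings

    import pytest


    def emit() -> None:
        warnings.warn("Missing variables: {'foo'}", UserWarning, stacklevel=2)


    def test_emit() -> None:
        # match= is re.search'd against the message; {'foo'} matches literally.
        with pytest.warns(UserWarning, match="Missing variables: {'foo'}"):
            emit()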


@@ -10,6 +10,8 @@ class AnyStr(str):
     def __eq__(self, other: object) -> bool:
         return isinstance(other, str)

+    __hash__ = str.__hash__
+

 # The code below creates version of pydantic models
 # that will work in unit tests with AnyStr as id field
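
Unlike the __hash__ = None cases above, AnyStr must stay hashable, presumably because instances end up as dict keys or in sets in the tests; it restores the parent class's hash instead of disabling it. A condensed sketch of the same pattern:

    class AnyStr(str):
        def __eq__(self, other: object) -> bool:
            return isinstance(other, str)

        # Defining __eq__ would implicitly set __hash__ to None; reusing
        # str.__hash__ keeps instances valid as dict keys and set members.
        __hash__ = str.__hash__


    assert AnyStr("anything") == "something else"
    assert hash(AnyStr("abc")) == hash("abc")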


@@ -884,6 +884,7 @@ def test_validation_error_handling_callable() -> None:
     ],
 )
 def test_validation_error_handling_non_validation_error(
+    *,
     handler: Union[
         bool, str, Callable[[Union[ValidationError, ValidationErrorV1]], str]
     ],
@@ -949,6 +950,7 @@ async def test_async_validation_error_handling_callable() -> None:
     ],
 )
 async def test_async_validation_error_handling_non_validation_error(
+    *,
     handler: Union[
         bool, str, Callable[[Union[ValidationError, ValidationErrorV1]], str]
     ],
@@ -2331,6 +2333,9 @@ def test_tool_return_output_mixin() -> None:
         def __eq__(self, other: object) -> bool:
             return isinstance(other, self.__class__) and self.x == other.x

+        def __hash__(self) -> int:
+            return hash(self.x)
+
     @tool
     def foo(x: int) -> Bar:
         """Foo."""

File diff suppressed because it is too large.