core: Bump ruff version to 0.9 (#29201)

Also run some preview autofixes and formatting

---------

Co-authored-by: Erick Friis <erick@langchain.dev>
Christophe Bornet 2025-01-22 01:20:09 +01:00 committed by GitHub
parent 6f95db81b7
commit e4a78dfc2a
45 changed files with 460 additions and 336 deletions


@ -214,7 +214,7 @@ def beta(
old_doc = inspect.cleandoc(old_doc or "").strip("\n") or ""
components = [message, addendum]
details = " ".join([component.strip() for component in components if component])
new_doc = f".. beta::\n" f" {details}\n\n" f"{old_doc}\n"
new_doc = f".. beta::\n {details}\n\n{old_doc}\n"
if inspect.iscoroutinefunction(obj):
finalized = finalize(awarning_emitting_wrapper, new_doc)
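Side note on the change above: this is ruff's implicit-string-concatenation fix (presumably ISC001 plus the formatter), which joins adjacent f-string literals into one. A minimal sketch of why the two spellings are interchangeable (variable values here are illustrative):

details = "This API is in beta."
old_doc = "Original docstring."
# Adjacent string literals are concatenated at compile time, so the
# multi-literal form and the single literal build the same value.
joined = f".. beta::\n" f"    {details}\n\n" f"{old_doc}\n"
single = f".. beta::\n    {details}\n\n{old_doc}\n"
assert joined == single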


@ -238,6 +238,7 @@ def deprecated(
exclude=obj.exclude,
),
)
elif isinstance(obj, FieldInfoV2):
wrapped = None
if not _obj_type:


@ -385,8 +385,7 @@ async def _ahandle_event_for_handler(
)
except Exception as e:
logger.warning(
f"Error in {handler.__class__.__name__}.{event_name} callback:"
f" {repr(e)}"
f"Error in {handler.__class__.__name__}.{event_name} callback: {repr(e)}"
)
if handler.raise_error:
raise e


@ -1,3 +1,4 @@
import operator
import uuid
from collections.abc import Sequence
from typing import Any, Optional, cast
@ -80,5 +81,5 @@ class InMemoryDocumentIndex(DocumentIndex):
count = document.page_content.count(query)
counts_by_doc.append((document, count))
counts_by_doc.sort(key=lambda x: x[1], reverse=True)
counts_by_doc.sort(key=operator.itemgetter(1), reverse=True)
return [doc.model_copy() for doc, count in counts_by_doc[: self.top_k]]
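For reference, operator.itemgetter(1) is the stdlib equivalent of the lambda x: x[1] it replaces above; a quick self-contained check:

import operator

pairs = [("a", 3), ("b", 1), ("c", 2)]
# itemgetter(1) returns a callable fetching element 1 of its argument,
# so it works as a sort key exactly like `lambda x: x[1]`.
pairs.sort(key=operator.itemgetter(1), reverse=True)
assert pairs == [("a", 3), ("c", 2), ("b", 1)]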


@ -390,7 +390,7 @@ class BaseLanguageModel(
"Counting tokens in tool schemas is not yet supported. Ignoring tools.",
stacklevel=2,
)
return sum([self.get_num_tokens(get_buffer_string([m])) for m in messages])
return sum(self.get_num_tokens(get_buffer_string([m])) for m in messages)
@classmethod
def _all_required_field_names(cls) -> set:
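The fix above (presumably flake8-comprehensions' C419) drops the brackets so sum() consumes a generator rather than materializing a list first; the result is identical:

# Both spellings yield 30; the generator avoids a temporary list.
assert sum([n * n for n in range(5)]) == 30
assert sum(n * n for n in range(5)) == 30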


@ -347,8 +347,7 @@ class BaseLLM(BaseLanguageModel[str], ABC):
"""Get standard params for tracing."""
# get default provider from class name
default_provider = self.__class__.__name__
if default_provider.endswith("LLM"):
default_provider = default_provider[:-3]
default_provider = default_provider.removesuffix("LLM")
default_provider = default_provider.lower()
ls_params = LangSmithParams(ls_provider=default_provider, ls_model_type="llm")
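str.removesuffix (Python 3.9+) is a drop-in replacement for the slice it supersedes above, since it also leaves the string untouched when the suffix is absent; the class names below are made up:

# Strips the suffix only when present, exactly like the guarded slice.
assert "OpenAILLM".removesuffix("LLM") == "OpenAI"
assert "FakeChatModel".removesuffix("LLM") == "FakeChatModel"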


@ -1008,7 +1008,10 @@ def convert_to_openai_messages(
)
raise ValueError(err)
content.append(
{"type": "image_url", "image_url": block["image_url"]}
{
"type": "image_url",
"image_url": block["image_url"],
}
)
# Anthropic and Bedrock converse format
elif (block.get("type") == "image") or "image" in block:
@ -1127,7 +1130,10 @@ def convert_to_openai_messages(
)
raise ValueError(msg)
content.append(
{"type": "text", "text": json.dumps(block["json"])}
{
"type": "text",
"text": json.dumps(block["json"]),
}
)
elif (
block.get("type") == "guard_content"


@ -227,7 +227,7 @@ class MarkdownListOutputParser(ListOutputParser):
def get_format_instructions(self) -> str:
"""Return the format instructions for the Markdown list output."""
return "Your response should be a markdown list, " "eg: `- foo\n- bar\n- baz`"
return "Your response should be a markdown list, eg: `- foo\n- bar\n- baz`"
def parse(self, text: str) -> list[str]:
"""Parse the output of an LLM call.


@ -1398,9 +1398,7 @@ def _create_template_from_message_type(
elif len(template) == 2 and isinstance(template[1], bool):
var_name_wrapped, is_optional = template
if not isinstance(var_name_wrapped, str):
msg = (
"Expected variable name to be a string." f" Got: {var_name_wrapped}"
)
msg = f"Expected variable name to be a string. Got: {var_name_wrapped}"
raise ValueError(msg)
if var_name_wrapped[0] != "{" or var_name_wrapped[-1] != "}":
msg = (


@ -2,7 +2,7 @@ from importlib import metadata
from langchain_core._api.deprecation import warn_deprecated
## Create namespaces for pydantic v1 and v2.
# Create namespaces for pydantic v1 and v2.
# This code must stay at the top of the file before other modules may
# attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules.
#


@ -248,14 +248,14 @@ class InMemoryRateLimiter(BaseRateLimiter):
if not blocking:
return self._consume()
while not self._consume():
while not self._consume(): # noqa: ASYNC110
# This code ignores the ASYNC110 warning which is a false positive in this
# case.
# There is no external actor that can mark that the Event is done
# since the tokens are managed by the rate limiter itself.
# It needs to wake up to re-fill the tokens.
# https://docs.astral.sh/ruff/rules/async-busy-wait/
await asyncio.sleep(self.check_every_n_seconds) # ruff: noqa: ASYNC110
await asyncio.sleep(self.check_every_n_seconds)
return True
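The point of the move above: a trailing # noqa: ASYNC110 suppresses the rule on that line only, while the old # ruff: noqa: ASYNC110 spelling is parsed as a file-level exemption. A minimal sketch of the intended pattern (helper names are hypothetical):

import asyncio
from typing import Callable

async def wait_until(check: Callable[[], bool], interval: float) -> None:
    # Deliberate busy-wait: no external actor sets an Event here, so the
    # async-busy-wait rule is silenced on this line alone.
    while not check():  # noqa: ASYNC110
        await asyncio.sleep(interval)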


@ -2862,7 +2862,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
# calculate context dependencies
specs_by_pos = groupby(
[tup for tup in all_specs if tup[0].id.startswith(CONTEXT_CONFIG_PREFIX)],
lambda x: x[1],
itemgetter(1),
)
next_deps: set[str] = set()
deps_by_pos: dict[int, set[str]] = {}
@ -3006,7 +3006,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
for i, step in enumerate(self.steps):
# mark each step as a child run
config = patch_config(
config, callbacks=run_manager.get_child(f"seq:step:{i+1}")
config, callbacks=run_manager.get_child(f"seq:step:{i + 1}")
)
context = copy_context()
context.run(_set_config_context, config)
@ -3046,7 +3046,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
for i, step in enumerate(self.steps):
# mark each step as a child run
config = patch_config(
config, callbacks=run_manager.get_child(f"seq:step:{i+1}")
config, callbacks=run_manager.get_child(f"seq:step:{i + 1}")
)
context = copy_context()
context.run(_set_config_context, config)
@ -3131,7 +3131,8 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
[
# each step a child run of the corresponding root run
patch_config(
config, callbacks=rm.get_child(f"seq:step:{stepidx+1}")
config,
callbacks=rm.get_child(f"seq:step:{stepidx + 1}"),
)
for i, (rm, config) in enumerate(zip(run_managers, configs))
if i not in failed_inputs_map
@ -3163,7 +3164,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
[
# each step a child run of the corresponding root run
patch_config(
config, callbacks=rm.get_child(f"seq:step:{i+1}")
config, callbacks=rm.get_child(f"seq:step:{i + 1}")
)
for rm, config in zip(run_managers, configs)
],
@ -3260,7 +3261,8 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
[
# each step a child run of the corresponding root run
patch_config(
config, callbacks=rm.get_child(f"seq:step:{stepidx+1}")
config,
callbacks=rm.get_child(f"seq:step:{stepidx + 1}"),
)
for i, (rm, config) in enumerate(zip(run_managers, configs))
if i not in failed_inputs_map
@ -3292,7 +3294,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
[
# each step a child run of the corresponding root run
patch_config(
config, callbacks=rm.get_child(f"seq:step:{i+1}")
config, callbacks=rm.get_child(f"seq:step:{i + 1}")
)
for rm, config in zip(run_managers, configs)
],
@ -3339,7 +3341,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
final_pipeline = cast(Iterator[Output], input)
for idx, step in enumerate(steps):
config = patch_config(
config, callbacks=run_manager.get_child(f"seq:step:{idx+1}")
config, callbacks=run_manager.get_child(f"seq:step:{idx + 1}")
)
if idx == 0:
final_pipeline = step.transform(final_pipeline, config, **kwargs)
@ -3368,7 +3370,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]):
for idx, step in enumerate(steps):
config = patch_config(
config,
callbacks=run_manager.get_child(f"seq:step:{idx+1}"),
callbacks=run_manager.get_child(f"seq:step:{idx + 1}"),
)
if idx == 0:
final_pipeline = step.atransform(final_pipeline, config, **kwargs)
@ -4400,7 +4402,7 @@ class RunnableLambda(Runnable[Input, Output]):
if dict_keys := get_function_first_arg_dict_keys(func):
return create_model_v2(
self.get_name("Input"),
field_definitions={key: (Any, ...) for key in dict_keys},
field_definitions=dict.fromkeys(dict_keys, (Any, ...)),
)
return super().get_input_schema(config)
@ -4524,7 +4526,7 @@ class RunnableLambda(Runnable[Input, Output]):
def __repr__(self) -> str:
"""A string representation of this Runnable."""
if hasattr(self, "func") and isinstance(self.func, itemgetter):
return f"RunnableLambda({str(self.func)[len('operator.'):]})"
return f"RunnableLambda({str(self.func)[len('operator.') :]})"
elif hasattr(self, "func"):
return f"RunnableLambda({get_lambda_source(self.func) or '...'})"
elif hasattr(self, "afunc"):
@ -4785,8 +4787,7 @@ class RunnableLambda(Runnable[Input, Output]):
recursion_limit = config["recursion_limit"]
if recursion_limit <= 0:
msg = (
f"Recursion limit reached when invoking "
f"{self} with input {final}."
f"Recursion limit reached when invoking {self} with input {final}."
)
raise RecursionError(msg)
for chunk in output.stream(
@ -4909,8 +4910,7 @@ class RunnableLambda(Runnable[Input, Output]):
recursion_limit = config["recursion_limit"]
if recursion_limit <= 0:
msg = (
f"Recursion limit reached when invoking "
f"{self} with input {final}."
f"Recursion limit reached when invoking {self} with input {final}."
)
raise RecursionError(msg)
async for chunk in output.astream(
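Many hunks in this file are ruff 0.9's formatter reaching inside f-string replacement fields for the first time: operator spacing ({i+1} becomes {i + 1}) and slice spacing with non-trivial bounds ([len('operator.'):] becomes [len('operator.') :]) are normalized. The values are unchanged:

i = 0
# Formatting inside the braces is cosmetic; both render "seq:step:1".
assert f"seq:step:{i+1}" == f"seq:step:{i + 1}"
s = "operator.itemgetter(1)"
# PEP 8 spaces out ':' when a slice bound is an expression.
assert s[len("operator.") :] == "itemgetter(1)"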


@ -110,8 +110,8 @@ COPIABLE_KEYS = [
DEFAULT_RECURSION_LIMIT = 25
var_child_runnable_config = ContextVar(
"child_runnable_config", default=RunnableConfig()
var_child_runnable_config: ContextVar[RunnableConfig | None] = ContextVar(
"child_runnable_config", default=None
)
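This hunk is behavioral rather than cosmetic (presumably ruff's mutable-contextvar-default rule, B039): a RunnableConfig() built once at import time would be shared across every context. A sketch of the safer pattern, with illustrative names:

from contextvars import ContextVar
from typing import Optional

# Default to None and materialize per use, so no single mutable dict
# leaks mutations across contexts.
var_config: ContextVar[Optional[dict]] = ContextVar("config", default=None)

def current_config() -> dict:
    cfg = var_config.get()
    return {} if cfg is None else cfg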


@ -242,17 +242,20 @@ def draw_ascii(vertices: Mapping[str, str], edges: Sequence[LangEdge]) -> str:
"""
# NOTE: coordinates might be negative, so we need to shift
# everything to the positive plane before we actually draw it.
xlist = []
ylist = []
xlist: list[float] = []
ylist: list[float] = []
sug = _build_sugiyama_layout(vertices, edges)
for vertex in sug.g.sV:
# NOTE: moving boxes w/2 to the left
xlist.append(vertex.view.xy[0] - vertex.view.w / 2.0)
xlist.append(vertex.view.xy[0] + vertex.view.w / 2.0)
ylist.append(vertex.view.xy[1])
ylist.append(vertex.view.xy[1] + vertex.view.h)
xlist.extend(
(
vertex.view.xy[0] - vertex.view.w / 2.0,
vertex.view.xy[0] + vertex.view.w / 2.0,
)
)
ylist.extend((vertex.view.xy[1], vertex.view.xy[1] + vertex.view.h))
for edge in sug.g.sE:
for x, y in edge.view._pts:


@ -590,9 +590,7 @@ class RunnableWithMessageHistory(RunnableBindingBase):
if missing_keys and parameter_names:
example_input = {self.input_messages_key: "foo"}
example_configurable = {
missing_key: "[your-value-here]" for missing_key in missing_keys
}
example_configurable = dict.fromkeys(missing_keys, "[your-value-here]")
example_config = {"configurable": example_configurable}
msg = (
f"Missing keys {sorted(missing_keys)} in config['configurable'] "


@ -248,7 +248,7 @@ class RunnableRetry(RunnableBindingBase[Input, Output]):
result = cast(list[Output], [e] * len(inputs))
outputs: list[Union[Output, Exception]] = []
for idx, _ in enumerate(inputs):
for idx in range(len(inputs)):
if idx in results_map:
outputs.append(results_map[idx])
else:
@ -314,7 +314,7 @@ class RunnableRetry(RunnableBindingBase[Input, Output]):
result = cast(list[Output], [e] * len(inputs))
outputs: list[Union[Output, Exception]] = []
for idx, _ in enumerate(inputs):
for idx in range(len(inputs)):
if idx in results_map:
outputs.append(results_map[idx])
else:
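Because the loop body only uses the index, ruff rewrites for idx, _ in enumerate(inputs) to for idx in range(len(inputs)) (presumably FURB148); both visit the same indices:

inputs = ["a", "b", "c"]
results_map = {0: "A", 2: "C"}
outputs = []
# Index-only loop: enumerate's second element was never consumed.
for idx in range(len(inputs)):
    outputs.append(results_map.get(idx, inputs[idx]))
assert outputs == ["A", "b", "C"]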


@ -1,6 +1,7 @@
from __future__ import annotations
from collections.abc import AsyncIterator, Iterator, Mapping
from itertools import starmap
from typing import (
Any,
Callable,
@ -189,10 +190,7 @@ class RouterRunnable(RunnableSerializable[RouterInput, Output]):
configs = get_config_list(config, len(inputs))
return await gather_with_concurrency(
configs[0].get("max_concurrency"),
*(
ainvoke(runnable, input, config)
for runnable, input, config in zip(runnables, actual_inputs, configs)
),
*starmap(ainvoke, zip(runnables, actual_inputs, configs)),
)
def stream(
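itertools.starmap(f, iterable) calls f(*args) for each tuple, which is exactly what the replaced generator expression spelled out; a toy equivalent with stand-in names:

from itertools import starmap

def invoke(runnable: str, value: int) -> str:
    # Stand-in for the real ainvoke(runnable, input, config) call.
    return f"{runnable}:{value}"

args = list(zip(["r1", "r2"], [1, 2]))
assert list(starmap(invoke, args)) == [
    invoke(runnable, value) for runnable, value in args
]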


@ -863,7 +863,7 @@ async def _astream_events_implementation_v1(
tags=log_entry["tags"],
metadata=log_entry["metadata"],
data=data,
parent_ids=[], # Not supported in v1
parent_ids=[], # Not supported in v1
)
# Finally, we take care of the streaming output from the root chain


@ -160,7 +160,7 @@ class FunctionCallbackHandler(BaseTracer):
def _on_tool_start(self, run: Run) -> None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f'{get_colored_text("[tool/start]", color="green")} '
f"{get_colored_text('[tool/start]', color='green')} "
+ get_bolded_text(f"[{crumbs}] Entering Tool run with input:\n")
+ f'"{run.inputs["input"].strip()}"'
)
@ -169,7 +169,7 @@ class FunctionCallbackHandler(BaseTracer):
crumbs = self.get_breadcrumbs(run)
if run.outputs:
self.function_callback(
f'{get_colored_text("[tool/end]", color="blue")} '
f"{get_colored_text('[tool/end]', color='blue')} "
+ get_bolded_text(
f"[{crumbs}] [{elapsed(run)}] Exiting Tool run with output:\n"
)
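These two hunks show ruff 0.9 normalizing quotes inside f-strings: the outer quote flips to the project's double-quote style and nested literals become single-quoted, as pre-3.12 targets require inner quotes to differ from the outer ones. Illustratively, with a simplified stand-in helper:

def get_colored_text(text: str, color: str) -> str:
    # Simplified stand-in for the real helper.
    return f"<{color}>{text}</{color}>"

# Old: f'{get_colored_text("[tool/start]", color="green")} '
# The new spelling below is equivalent; only the quote styles swapped.
label = f"{get_colored_text('[tool/start]', color='green')} "
assert label == "<green>[tool/start]</green> "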


@ -44,7 +44,7 @@ class StrictFormatter(Formatter):
Raises:
ValueError: If any input variables are not used in the format string.
"""
dummy_inputs = {input_variable: "foo" for input_variable in input_variables}
dummy_inputs = dict.fromkeys(input_variables, "foo")
super().format(format_string, **dummy_inputs)


@ -122,8 +122,7 @@ def parse_tag(template: str, l_del: str, r_del: str) -> tuple[tuple[str, str], s
ChevronError: If the tag is unclosed.
ChevronError: If the set delimiter tag is unclosed.
"""
global _CURRENT_LINE
global _LAST_TAG_LINE
global _CURRENT_LINE, _LAST_TAG_LINE
tag_types = {
"!": "comment",
@ -140,7 +139,7 @@ def parse_tag(template: str, l_del: str, r_del: str) -> tuple[tuple[str, str], s
try:
tag, template = template.split(r_del, 1)
except ValueError as e:
msg = "unclosed tag " f"at line {_CURRENT_LINE}"
msg = f"unclosed tag at line {_CURRENT_LINE}"
raise ChevronError(msg) from e
# Find the type meaning of the first character
@ -161,7 +160,7 @@ def parse_tag(template: str, l_del: str, r_del: str) -> tuple[tuple[str, str], s
# Otherwise we should complain
else:
msg = "unclosed set delimiter tag\n" f"at line {_CURRENT_LINE}"
msg = f"unclosed set delimiter tag\nat line {_CURRENT_LINE}"
raise ChevronError(msg)
elif (
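Merging the two global statements is another of the autofixes (one global statement may declare several names); a minimal sketch:

_CURRENT_LINE = 1
_LAST_TAG_LINE = 0

def advance_line() -> None:
    # A single statement declares both module-level names.
    global _CURRENT_LINE, _LAST_TAG_LINE
    _LAST_TAG_LINE = _CURRENT_LINE
    _CURRENT_LINE += 1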


@ -389,6 +389,7 @@ if PYDANTIC_MAJOR_VERSION == 2:
else:
msg = f"Expected a Pydantic model. Got {type(model)}"
raise TypeError(msg)
elif PYDANTIC_MAJOR_VERSION == 1:
from pydantic import BaseModel as BaseModelV1_
@ -397,6 +398,7 @@ elif PYDANTIC_MAJOR_VERSION == 1:
) -> dict[str, FieldInfoV1]:
"""Get the field names of a Pydantic model."""
return model.__fields__ # type: ignore
else:
msg = f"Unsupported Pydantic version: {PYDANTIC_MAJOR_VERSION}"
raise ValueError(msg)


@ -189,7 +189,7 @@ class InMemoryVectorStore(VectorStore):
for doc, vector in zip(documents, vectors):
doc_id = next(id_iterator)
doc_id_ = doc_id if doc_id else str(uuid.uuid4())
doc_id_ = doc_id or str(uuid.uuid4())
ids_.append(doc_id_)
self.store[doc_id_] = {
"id": doc_id_,
@ -221,7 +221,7 @@ class InMemoryVectorStore(VectorStore):
for doc, vector in zip(documents, vectors):
doc_id = next(id_iterator)
doc_id_ = doc_id if doc_id else str(uuid.uuid4())
doc_id_ = doc_id or str(uuid.uuid4())
ids_.append(doc_id_)
self.store[doc_id_] = {
"id": doc_id_,
@ -258,8 +258,7 @@ class InMemoryVectorStore(VectorStore):
@deprecated(
alternative="VectorStore.add_documents",
message=(
"This was a beta API that was added in 0.2.11. "
"It'll be removed in 0.3.0."
"This was a beta API that was added in 0.2.11. It'll be removed in 0.3.0."
),
since="0.2.29",
removal="1.0",
@ -268,7 +267,7 @@ class InMemoryVectorStore(VectorStore):
vectors = self.embedding.embed_documents([item.page_content for item in items])
ids = []
for item, vector in zip(items, vectors):
doc_id = item.id if item.id else str(uuid.uuid4())
doc_id = item.id or str(uuid.uuid4())
ids.append(doc_id)
self.store[doc_id] = {
"id": doc_id,
@ -284,8 +283,7 @@ class InMemoryVectorStore(VectorStore):
@deprecated(
alternative="VectorStore.aadd_documents",
message=(
"This was a beta API that was added in 0.2.11. "
"It'll be removed in 0.3.0."
"This was a beta API that was added in 0.2.11. It'll be removed in 0.3.0."
),
since="0.2.29",
removal="1.0",
@ -298,7 +296,7 @@ class InMemoryVectorStore(VectorStore):
)
ids = []
for item, vector in zip(items, vectors):
doc_id = item.id if item.id else str(uuid.uuid4())
doc_id = item.id or str(uuid.uuid4())
ids.append(doc_id)
self.store[doc_id] = {
"id": doc_id,

libs/core/poetry.lock (generated)

@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "annotated-types"
@ -2654,29 +2654,29 @@ files = [
[[package]]
name = "ruff"
version = "0.5.7"
version = "0.9.2"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"},
{file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"},
{file = "ruff-0.5.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eaf3d86a1fdac1aec8a3417a63587d93f906c678bb9ed0b796da7b59c1114a1e"},
{file = "ruff-0.5.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a01c34400097b06cf8a6e61b35d6d456d5bd1ae6961542de18ec81eaf33b4cb8"},
{file = "ruff-0.5.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcc8054f1a717e2213500edaddcf1dbb0abad40d98e1bd9d0ad364f75c763eea"},
{file = "ruff-0.5.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f70284e73f36558ef51602254451e50dd6cc479f8b6f8413a95fcb5db4a55fc"},
{file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a78ad870ae3c460394fc95437d43deb5c04b5c29297815a2a1de028903f19692"},
{file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ccd078c66a8e419475174bfe60a69adb36ce04f8d4e91b006f1329d5cd44bcf"},
{file = "ruff-0.5.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e31c9bad4ebf8fdb77b59cae75814440731060a09a0e0077d559a556453acbb"},
{file = "ruff-0.5.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d796327eed8e168164346b769dd9a27a70e0298d667b4ecee6877ce8095ec8e"},
{file = "ruff-0.5.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a09ea2c3f7778cc635e7f6edf57d566a8ee8f485f3c4454db7771efb692c499"},
{file = "ruff-0.5.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a36d8dcf55b3a3bc353270d544fb170d75d2dff41eba5df57b4e0b67a95bb64e"},
{file = "ruff-0.5.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9369c218f789eefbd1b8d82a8cf25017b523ac47d96b2f531eba73770971c9e5"},
{file = "ruff-0.5.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b88ca3db7eb377eb24fb7c82840546fb7acef75af4a74bd36e9ceb37a890257e"},
{file = "ruff-0.5.7-py3-none-win32.whl", hash = "sha256:33d61fc0e902198a3e55719f4be6b375b28f860b09c281e4bdbf783c0566576a"},
{file = "ruff-0.5.7-py3-none-win_amd64.whl", hash = "sha256:083bbcbe6fadb93cd86709037acc510f86eed5a314203079df174c40bbbca6b3"},
{file = "ruff-0.5.7-py3-none-win_arm64.whl", hash = "sha256:2dca26154ff9571995107221d0aeaad0e75a77b5a682d6236cf89a58c70b76f4"},
{file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"},
{file = "ruff-0.9.2-py3-none-linux_armv6l.whl", hash = "sha256:80605a039ba1454d002b32139e4970becf84b5fee3a3c3bf1c2af6f61a784347"},
{file = "ruff-0.9.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b9aab82bb20afd5f596527045c01e6ae25a718ff1784cb92947bff1f83068b00"},
{file = "ruff-0.9.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:fbd337bac1cfa96be615f6efcd4bc4d077edbc127ef30e2b8ba2a27e18c054d4"},
{file = "ruff-0.9.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82b35259b0cbf8daa22a498018e300b9bb0174c2bbb7bcba593935158a78054d"},
{file = "ruff-0.9.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b6a9701d1e371bf41dca22015c3f89769da7576884d2add7317ec1ec8cb9c3c"},
{file = "ruff-0.9.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9cc53e68b3c5ae41e8faf83a3b89f4a5d7b2cb666dff4b366bb86ed2a85b481f"},
{file = "ruff-0.9.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:8efd9da7a1ee314b910da155ca7e8953094a7c10d0c0a39bfde3fcfd2a015684"},
{file = "ruff-0.9.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3292c5a22ea9a5f9a185e2d131dc7f98f8534a32fb6d2ee7b9944569239c648d"},
{file = "ruff-0.9.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a605fdcf6e8b2d39f9436d343d1f0ff70c365a1e681546de0104bef81ce88df"},
{file = "ruff-0.9.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c547f7f256aa366834829a08375c297fa63386cbe5f1459efaf174086b564247"},
{file = "ruff-0.9.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d18bba3d3353ed916e882521bc3e0af403949dbada344c20c16ea78f47af965e"},
{file = "ruff-0.9.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:b338edc4610142355ccf6b87bd356729b62bf1bc152a2fad5b0c7dc04af77bfe"},
{file = "ruff-0.9.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:492a5e44ad9b22a0ea98cf72e40305cbdaf27fac0d927f8bc9e1df316dcc96eb"},
{file = "ruff-0.9.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:af1e9e9fe7b1f767264d26b1075ac4ad831c7db976911fa362d09b2d0356426a"},
{file = "ruff-0.9.2-py3-none-win32.whl", hash = "sha256:71cbe22e178c5da20e1514e1e01029c73dc09288a8028a5d3446e6bba87a5145"},
{file = "ruff-0.9.2-py3-none-win_amd64.whl", hash = "sha256:c5e1d6abc798419cf46eed03f54f2e0c3adb1ad4b801119dedf23fcaf69b55b5"},
{file = "ruff-0.9.2-py3-none-win_arm64.whl", hash = "sha256:a1b63fa24149918f8b37cef2ee6fff81f24f0d74b6f0bdc37bc3e1f2143e41c6"},
{file = "ruff-0.9.2.tar.gz", hash = "sha256:b5eceb334d55fae5f316f783437392642ae18e16dcf4f1858d55d3c2a0f8f5d0"},
]
[[package]]
@ -3134,4 +3134,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<4.0"
content-hash = "9d0d2645af208f4f334c2e35f80f39bdc438546d3c66a7aec7e8a67f99c04d28"
content-hash = "2a262498da93ae3991e5eade787affab38f1ddef5212cb745eafdd0a40f0e986"


@ -82,7 +82,8 @@ classmethod-decorators = [ "classmethod", "langchain_core.utils.pydantic.pre_ini
"scripts/**" = [ "S",]
[tool.poetry.group.lint.dependencies]
ruff = "^0.5"
ruff = "^0.9.2"
[tool.poetry.group.typing.dependencies]
@ -92,12 +93,14 @@ types-requests = "^2.28.11.5"
types-jinja2 = "^2.11.9"
[tool.poetry.group.dev.dependencies]
jupyter = "^1.0.0"
setuptools = "^67.6.1"
grandalf = "^0.8"
[tool.poetry.group.test.dependencies]
pytest = "^8"
freezegun = "^1.2.2"
@ -118,14 +121,17 @@ version = ">=1.26.0,<3"
python = ">=3.12"
[tool.poetry.group.test_integration.dependencies]
[tool.poetry.group.typing.dependencies.langchain-text-splitters]
path = "../text-splitters"
develop = true
[tool.poetry.group.test.dependencies.langchain-tests]
path = "../standard-tests"
develop = true


@ -29,7 +29,10 @@ def test_add_message_implementation_only() -> None:
assert store[1] == HumanMessage(content="World")
chat_history.add_messages(
[HumanMessage(content="Hello"), HumanMessage(content="World")]
[
HumanMessage(content="Hello"),
HumanMessage(content="World"),
]
)
assert len(store) == 4
assert store[2] == HumanMessage(content="Hello")
@ -61,7 +64,10 @@ def test_bulk_message_implementation_only() -> None:
assert store[1] == HumanMessage(content="World")
chat_history.add_messages(
[HumanMessage(content="Hello"), HumanMessage(content="World")]
[
HumanMessage(content="Hello"),
HumanMessage(content="World"),
]
)
assert len(store) == 4
assert store[2] == HumanMessage(content="Hello")
@ -85,7 +91,10 @@ async def test_async_interface() -> None:
chat_history = BulkAddHistory()
await chat_history.aadd_messages(
[HumanMessage(content="Hello"), HumanMessage(content="World")]
[
HumanMessage(content="Hello"),
HumanMessage(content="World"),
]
)
assert await chat_history.aget_messages() == [
HumanMessage(content="Hello"),


@ -11,5 +11,8 @@ def test_deterministic_fake_embeddings() -> None:
assert fake.embed_query(text) != fake.embed_query("Goodbye world!")
assert fake.embed_documents([text, text]) == fake.embed_documents([text, text])
assert fake.embed_documents([text, text]) != fake.embed_documents(
[text, "Goodbye world!"]
[
text,
"Goodbye world!",
]
)


@ -227,7 +227,7 @@ def test_global_cache_batch() -> None:
assert results[0].content == results[1].content
assert {results[0].content, results[1].content}.issubset({"hello", "goodbye"})
## RACE CONDITION -- note behavior is different from async
# RACE CONDITION -- note behavior is different from async
# Now, reset cache and test the race condition
# For now we just hard-code the result, if this changes
# we can investigate further


@ -663,7 +663,7 @@ def create_image_data() -> str:
def create_base64_image(format: str = "jpeg") -> str:
data = create_image_data()
return f"data:image/{format};base64,{data}" # noqa: E501
return f"data:image/{format};base64,{data}"
def test_convert_to_openai_messages_single_message() -> None:


@ -613,6 +613,6 @@ def test_unicode_handling() -> None:
parser = SimpleJsonOutputParser(pydantic_object=Sample)
format_instructions = parser.get_format_instructions()
assert (
"科学文章的标题" in format_instructions
), "Unicode characters should not be escaped"
assert "科学文章的标题" in format_instructions, (
"Unicode characters should not be escaped"
)


@ -123,7 +123,8 @@ def test_numbered_list() -> None:
def test_markdown_list() -> None:
parser = MarkdownListOutputParser()
text1 = (
"Your response should be a numbered - not a list item - list with each item on a new line." # noqa: E501
"Your response should be a numbered - not a list item - "
"list with each item on a new line."
"For example: \n- foo\n- bar\n- baz"
)


@ -123,9 +123,7 @@ def test_create_system_message_prompt_template_from_template_partial() -> None:
partial_variables={"instructions": json_prompt_instructions},
)
assert graph_analyst_template.format(history="history") == SystemMessage(
content="\n Your instructions are:\n "
" {}\n History:\n "
"history\n "
content="\n Your instructions are:\n {}\n History:\n history\n "
)
@ -234,7 +232,11 @@ def test_chat_prompt_template_from_messages(
"""Test creating a chat prompt template from messages."""
chat_prompt_template = ChatPromptTemplate.from_messages(messages)
assert sorted(chat_prompt_template.input_variables) == sorted(
["context", "foo", "bar"]
[
"context",
"foo",
"bar",
]
)
assert len(chat_prompt_template.messages) == 4
@ -377,7 +379,11 @@ def test_chat_prompt_template_with_messages(
messages + [HumanMessage(content="foo")]
)
assert sorted(chat_prompt_template.input_variables) == sorted(
["context", "foo", "bar"]
[
"context",
"foo",
"bar",
]
)
assert len(chat_prompt_template.messages) == 5
prompt_value = chat_prompt_template.format_prompt(
@ -835,7 +841,10 @@ async def test_messages_prompt_accepts_list() -> None:
# Assert still raises a nice error
prompt = ChatPromptTemplate(
[("system", "You are a {foo}"), MessagesPlaceholder("history")]
[
("system", "You are a {foo}"),
MessagesPlaceholder("history"),
]
)
with pytest.raises(TypeError):
prompt.invoke([("user", "Hi there")]) # type: ignore
@ -872,7 +881,11 @@ def test_chat_input_schema(snapshot: SnapshotAssertion) -> None:
def test_chat_prompt_w_msgs_placeholder_ser_des(snapshot: SnapshotAssertion) -> None:
prompt = ChatPromptTemplate.from_messages(
[("system", "foo"), MessagesPlaceholder("bar"), ("human", "baz")]
[
("system", "foo"),
MessagesPlaceholder("bar"),
("human", "baz"),
]
)
assert dumpd(MessagesPlaceholder("bar")) == snapshot(name="placeholder")
assert load(dumpd(MessagesPlaceholder("bar"))) == MessagesPlaceholder("bar")


@ -243,7 +243,7 @@ def test_prompt_jinja2_functionality(
)
output = prompt.format(foo="hello", bar="bye")
expected_output = (
"Starting with hello\n\n" "happy: sad\n\n" "tall: short\n\n" "Ending with bye"
"Starting with hello\n\nhappy: sad\n\ntall: short\n\nEnding with bye"
)
assert output == expected_output


@ -49,12 +49,12 @@ def test_ensure_config() -> None:
},
)
config = ctx.run(ensure_config, cast(RunnableConfig, arg))
assert (
len(arg["callbacks"]) == 1
), "ensure_config should not modify the original config"
assert (
json.dumps({**arg, "callbacks": []}) == arg_str
), "ensure_config should not modify the original config"
assert len(arg["callbacks"]) == 1, (
"ensure_config should not modify the original config"
)
assert json.dumps({**arg, "callbacks": []}) == arg_str, (
"ensure_config should not modify the original config"
)
assert config is not arg
assert config["callbacks"] is not arg["callbacks"]
assert config["metadata"] is not arg["metadata"]


@ -215,7 +215,11 @@ async def test_abatch() -> None:
)
with pytest.raises(RuntimeError):
await runnable_with_single.abatch(
[{"text": "foo"}, {"text": "bar"}, {"text": "baz"}]
[
{"text": "foo"},
{"text": "bar"},
{"text": "baz"},
]
)
actual = await runnable_with_single.abatch(
[{"text": "foo"}, {"text": "bar"}, {"text": "baz"}], return_exceptions=True


@ -622,32 +622,29 @@ def test_lambda_schemas(snapshot: SnapshotAssertion) -> None:
"byebye": input["yo"],
}
assert (
_normalize_schema(
RunnableLambda(
aget_values_typed # type: ignore[arg-type]
).get_input_jsonschema()
)
== _normalize_schema(
{
"$defs": {
"InputType": {
"properties": {
"variable_name": {
"title": "Variable " "Name",
"type": "string",
},
"yo": {"title": "Yo", "type": "integer"},
},
"required": ["variable_name", "yo"],
"title": "InputType",
"type": "object",
}
},
"allOf": [{"$ref": "#/$defs/InputType"}],
"title": "aget_values_typed_input",
}
)
)
assert _normalize_schema(
RunnableLambda(
aget_values_typed # type: ignore[arg-type]
).get_input_jsonschema()
) == _normalize_schema(
{
"$defs": {
"InputType": {
"properties": {
"variable_name": {
"title": "Variable Name",
"type": "string",
},
"yo": {"title": "Yo", "type": "integer"},
},
"required": ["variable_name", "yo"],
"title": "InputType",
"type": "object",
}
},
"allOf": [{"$ref": "#/$defs/InputType"}],
"title": "aget_values_typed_input",
}
)
if PYDANTIC_VERSION >= (2, 9):
@ -2793,7 +2790,10 @@ async def test_router_runnable(
assert result == "4"
result2 = chain.batch(
[{"key": "math", "question": "2 + 2"}, {"key": "english", "question": "2 + 2"}]
[
{"key": "math", "question": "2 + 2"},
{"key": "english", "question": "2 + 2"},
]
)
assert result2 == ["4", "2"]
@ -2801,7 +2801,10 @@ async def test_router_runnable(
assert result == "4"
result2 = await chain.abatch(
[{"key": "math", "question": "2 + 2"}, {"key": "english", "question": "2 + 2"}]
[
{"key": "math", "question": "2 + 2"},
{"key": "english", "question": "2 + 2"},
]
)
assert result2 == ["4", "2"]
@ -2855,7 +2858,10 @@ async def test_higher_order_lambda_runnable(
assert result == "4"
result2 = chain.batch(
[{"key": "math", "question": "2 + 2"}, {"key": "english", "question": "2 + 2"}]
[
{"key": "math", "question": "2 + 2"},
{"key": "english", "question": "2 + 2"},
]
)
assert result2 == ["4", "2"]
@ -2863,7 +2869,10 @@ async def test_higher_order_lambda_runnable(
assert result == "4"
result2 = await chain.abatch(
[{"key": "math", "question": "2 + 2"}, {"key": "english", "question": "2 + 2"}]
[
{"key": "math", "question": "2 + 2"},
{"key": "english", "question": "2 + 2"},
]
)
assert result2 == ["4", "2"]
@ -3058,7 +3067,10 @@ def test_map_stream() -> None:
assert len(streamed_chunks) == len(llm_res)
chain_pick_two = chain.assign(hello=RunnablePick("llm").pipe(llm)).pick(
["llm", "hello"]
[
"llm",
"hello",
]
)
assert chain_pick_two.get_output_jsonschema() == {
@ -5441,7 +5453,11 @@ def test_schema_for_prompt_and_chat_model() -> None:
chain = prompt | chat
assert (
chain.invoke(
{"model_json_schema": "hello", "_private": "goodbye", "json": "json"}
{
"model_json_schema": "hello",
"_private": "goodbye",
"json": "json",
}
).content
== chat_res
)


@ -795,7 +795,10 @@ async def test_astream_events_from_model() -> None:
async def test_event_stream_with_simple_chain() -> None:
"""Test as event stream."""
template = ChatPromptTemplate.from_messages(
[("system", "You are Cat Agent 007"), ("human", "{question}")]
[
("system", "You are Cat Agent 007"),
("human", "{question}"),
]
).with_config({"run_name": "my_template", "tags": ["my_template"]})
infinite_cycle = cycle(
@ -1681,7 +1684,10 @@ async def test_event_stream_with_retry() -> None:
async def test_with_llm() -> None:
"""Test with regular llm."""
prompt = ChatPromptTemplate.from_messages(
[("system", "You are Cat Agent 007"), ("human", "{question}")]
[
("system", "You are Cat Agent 007"),
("human", "{question}"),
]
).with_config({"run_name": "my_template", "tags": ["my_template"]})
llm = FakeStreamingListLLM(responses=["abc"])
@ -1730,7 +1736,7 @@ async def test_with_llm() -> None:
{
"data": {
"input": {
"prompts": ["System: You are Cat Agent 007\n" "Human: hello"]
"prompts": ["System: You are Cat Agent 007\nHuman: hello"]
}
},
"event": "on_llm_start",
@ -1743,7 +1749,7 @@ async def test_with_llm() -> None:
{
"data": {
"input": {
"prompts": ["System: You are Cat Agent 007\n" "Human: hello"]
"prompts": ["System: You are Cat Agent 007\nHuman: hello"]
},
"output": {
"generations": [
@ -1918,7 +1924,10 @@ async def test_runnable_with_message_history() -> None:
return InMemoryHistory(messages=store[session_id])
infinite_cycle = cycle(
[AIMessage(content="hello", id="ai3"), AIMessage(content="world", id="ai4")]
[
AIMessage(content="hello", id="ai3"),
AIMessage(content="world", id="ai4"),
]
)
prompt = ChatPromptTemplate.from_messages(


@ -55,12 +55,12 @@ def _with_nulled_run_id(events: Sequence[StreamEvent]) -> list[StreamEvent]:
for event in events:
assert "run_id" in event, f"Event {event} does not have a run_id."
assert "parent_ids" in event, f"Event {event} does not have parent_ids."
assert isinstance(
event["run_id"], str
), f"Event {event} run_id is not a string."
assert isinstance(
event["parent_ids"], list
), f"Event {event} parent_ids is not a list."
assert isinstance(event["run_id"], str), (
f"Event {event} run_id is not a string."
)
assert isinstance(event["parent_ids"], list), (
f"Event {event} parent_ids is not a list."
)
return cast(
list[StreamEvent],
@ -828,7 +828,10 @@ async def test_astream_with_model_in_chain() -> None:
async def test_event_stream_with_simple_chain() -> None:
"""Test as event stream."""
template = ChatPromptTemplate.from_messages(
[("system", "You are Cat Agent 007"), ("human", "{question}")]
[
("system", "You are Cat Agent 007"),
("human", "{question}"),
]
).with_config({"run_name": "my_template", "tags": ["my_template"]})
infinite_cycle = cycle(
@ -1628,7 +1631,10 @@ async def test_event_stream_with_retry() -> None:
async def test_with_llm() -> None:
"""Test with regular llm."""
prompt = ChatPromptTemplate.from_messages(
[("system", "You are Cat Agent 007"), ("human", "{question}")]
[
("system", "You are Cat Agent 007"),
("human", "{question}"),
]
).with_config({"run_name": "my_template", "tags": ["my_template"]})
llm = FakeStreamingListLLM(responses=["abc"])
@ -1677,7 +1683,7 @@ async def test_with_llm() -> None:
{
"data": {
"input": {
"prompts": ["System: You are Cat Agent 007\n" "Human: hello"]
"prompts": ["System: You are Cat Agent 007\nHuman: hello"]
}
},
"event": "on_llm_start",
@ -1690,7 +1696,7 @@ async def test_with_llm() -> None:
{
"data": {
"input": {
"prompts": ["System: You are Cat Agent 007\n" "Human: hello"]
"prompts": ["System: You are Cat Agent 007\nHuman: hello"]
},
"output": {
"generations": [
@ -1865,7 +1871,10 @@ async def test_runnable_with_message_history() -> None:
return InMemoryHistory(messages=store[session_id])
infinite_cycle = cycle(
[AIMessage(content="hello", id="ai3"), AIMessage(content="world", id="ai4")]
[
AIMessage(content="hello", id="ai3"),
AIMessage(content="world", id="ai4"),
]
)
prompt = ChatPromptTemplate.from_messages(
@ -2372,7 +2381,10 @@ async def test_with_explicit_config() -> None:
return x
chain = passthrough_to_trigger_issue | model.with_config(
{"tags": ["hello"], "callbacks": callbacks}
{
"tags": ["hello"],
"callbacks": callbacks,
}
)
return await chain.ainvoke(query)


@ -333,7 +333,7 @@ async def test_runnable_sequence_parallel_trace_nesting(method: str) -> None:
"other_thing": "RunnableParallel<chain_result,other_thing>",
"after": "RunnableSequence",
}
assert len(posts) == sum([1 if isinstance(n, str) else len(n) for n in name_order])
assert len(posts) == sum(1 if isinstance(n, str) else len(n) for n in name_order)
prev_dotted_order = None
dotted_order_map = {}
id_map = {}
@ -360,9 +360,9 @@ async def test_runnable_sequence_parallel_trace_nesting(method: str) -> None:
if prev_dotted_order is not None and not str(
expected_parents[name]
).startswith("RunnableParallel"):
assert (
dotted_order > prev_dotted_order
), f"{name} not after {name_order[i-1]}"
assert dotted_order > prev_dotted_order, (
f"{name} not after {name_order[i - 1]}"
)
prev_dotted_order = dotted_order
if name in dotted_order_map:
msg = f"Duplicate name {name}"
@ -377,9 +377,9 @@ async def test_runnable_sequence_parallel_trace_nesting(method: str) -> None:
dotted_order = dotted_order_map[name]
if parent_ is not None:
parent_dotted_order = dotted_order_map[parent_]
assert dotted_order.startswith(
parent_dotted_order
), f"{name}, {parent_dotted_order} not in {dotted_order}"
assert dotted_order.startswith(parent_dotted_order), (
f"{name}, {parent_dotted_order} not in {dotted_order}"
)
assert str(parent_id_map[name]) == str(id_map[parent_])
else:
assert dotted_order.split(".")[0] == dotted_order


@ -47,48 +47,47 @@ def test_message_init() -> None:
def test_message_chunks() -> None:
assert AIMessageChunk(content="I am", id="ai3") + AIMessageChunk(
content=" indeed."
) == AIMessageChunk(content="I am indeed.", id="ai3"), (
"MessageChunk + MessageChunk should be a MessageChunk"
)
assert AIMessageChunk(content="I am", id="ai2") + HumanMessageChunk(
content=" indeed.", id="human1"
) == AIMessageChunk(content="I am indeed.", id="ai2"), (
"MessageChunk + MessageChunk should be a MessageChunk "
"of same class as the left side"
)
assert AIMessageChunk(
content="", additional_kwargs={"foo": "bar"}
) + AIMessageChunk(content="", additional_kwargs={"baz": "foo"}) == AIMessageChunk(
content="", additional_kwargs={"foo": "bar", "baz": "foo"}
), (
"MessageChunk + MessageChunk should be a MessageChunk "
"with merged additional_kwargs"
)
assert AIMessageChunk(
content="", additional_kwargs={"function_call": {"name": "web_search"}}
) + AIMessageChunk(
content="", additional_kwargs={"function_call": {"arguments": None}}
) + AIMessageChunk(
content="", additional_kwargs={"function_call": {"arguments": "{\n"}}
) + AIMessageChunk(
content="",
additional_kwargs={"function_call": {"arguments": ' "query": "turtles"\n}'}},
) == AIMessageChunk(
content="I am indeed.", id="ai3"
), "MessageChunk + MessageChunk should be a MessageChunk"
assert (
AIMessageChunk(content="I am", id="ai2")
+ HumanMessageChunk(content=" indeed.", id="human1")
== AIMessageChunk(content="I am indeed.", id="ai2")
), "MessageChunk + MessageChunk should be a MessageChunk of same class as the left side" # noqa: E501
assert (
AIMessageChunk(content="", additional_kwargs={"foo": "bar"})
+ AIMessageChunk(content="", additional_kwargs={"baz": "foo"})
== AIMessageChunk(content="", additional_kwargs={"foo": "bar", "baz": "foo"})
), "MessageChunk + MessageChunk should be a MessageChunk with merged additional_kwargs" # noqa: E501
assert (
AIMessageChunk(
content="", additional_kwargs={"function_call": {"name": "web_search"}}
)
+ AIMessageChunk(
content="", additional_kwargs={"function_call": {"arguments": None}}
)
+ AIMessageChunk(
content="", additional_kwargs={"function_call": {"arguments": "{\n"}}
)
+ AIMessageChunk(
content="",
additional_kwargs={
"function_call": {"arguments": ' "query": "turtles"\n}'}
},
)
== AIMessageChunk(
content="",
additional_kwargs={
"function_call": {
"name": "web_search",
"arguments": '{\n "query": "turtles"\n}',
}
},
)
), "MessageChunk + MessageChunk should be a MessageChunk with merged additional_kwargs" # noqa: E501
content="",
additional_kwargs={
"function_call": {
"name": "web_search",
"arguments": '{\n "query": "turtles"\n}',
}
},
), (
"MessageChunk + MessageChunk should be a MessageChunk "
"with merged additional_kwargs"
)
# Test tool calls
assert (
@ -181,97 +180,107 @@ def test_message_chunks() -> None:
def test_chat_message_chunks() -> None:
assert ChatMessageChunk(role="User", content="I am", id="ai4") + ChatMessageChunk(
role="User", content=" indeed."
) == ChatMessageChunk(
id="ai4", role="User", content="I am indeed."
), "ChatMessageChunk + ChatMessageChunk should be a ChatMessageChunk"
) == ChatMessageChunk(id="ai4", role="User", content="I am indeed."), (
"ChatMessageChunk + ChatMessageChunk should be a ChatMessageChunk"
)
with pytest.raises(ValueError):
ChatMessageChunk(role="User", content="I am") + ChatMessageChunk(
role="Assistant", content=" indeed."
)
assert (
ChatMessageChunk(role="User", content="I am")
+ AIMessageChunk(content=" indeed.")
== ChatMessageChunk(role="User", content="I am indeed.")
), "ChatMessageChunk + other MessageChunk should be a ChatMessageChunk with the left side's role" # noqa: E501
assert ChatMessageChunk(role="User", content="I am") + AIMessageChunk(
content=" indeed."
) == ChatMessageChunk(role="User", content="I am indeed."), (
"ChatMessageChunk + other MessageChunk should be a ChatMessageChunk "
"with the left side's role"
)
assert AIMessageChunk(content="I am") + ChatMessageChunk(
role="User", content=" indeed."
) == AIMessageChunk(
content="I am indeed."
), "Other MessageChunk + ChatMessageChunk should be a MessageChunk as the left side"
) == AIMessageChunk(content="I am indeed."), (
"Other MessageChunk + ChatMessageChunk should be a MessageChunk "
"as the left side"
)
def test_complex_ai_message_chunks() -> None:
assert AIMessageChunk(content=["I am"], id="ai4") + AIMessageChunk(
content=[" indeed."]
) == AIMessageChunk(
id="ai4", content=["I am", " indeed."]
), "Content concatenation with arrays of strings should naively combine"
) == AIMessageChunk(id="ai4", content=["I am", " indeed."]), (
"Content concatenation with arrays of strings should naively combine"
)
assert AIMessageChunk(content=[{"index": 0, "text": "I am"}]) + AIMessageChunk(
content=" indeed."
) == AIMessageChunk(
content=[{"index": 0, "text": "I am"}, " indeed."]
), "Concatenating mixed content arrays should naively combine them"
) == AIMessageChunk(content=[{"index": 0, "text": "I am"}, " indeed."]), (
"Concatenating mixed content arrays should naively combine them"
)
assert (
AIMessageChunk(content=[{"index": 0, "text": "I am"}])
+ AIMessageChunk(content=[{"index": 0, "text": " indeed."}])
== AIMessageChunk(content=[{"index": 0, "text": "I am indeed."}])
), "Concatenating when both content arrays are dicts with the same index should merge" # noqa: E501
assert AIMessageChunk(content=[{"index": 0, "text": "I am"}]) + AIMessageChunk(
content=[{"index": 0, "text": " indeed."}]
) == AIMessageChunk(content=[{"index": 0, "text": "I am indeed."}]), (
"Concatenating when both content arrays are dicts with the same index "
"should merge"
)
assert AIMessageChunk(content=[{"index": 0, "text": "I am"}]) + AIMessageChunk(
content=[{"text": " indeed."}]
) == AIMessageChunk(content=[{"index": 0, "text": "I am"}, {"text": " indeed."}]), (
"Concatenating when one chunk is missing an index should not merge or throw"
)
assert AIMessageChunk(content=[{"index": 0, "text": "I am"}]) + AIMessageChunk(
content=[{"index": 2, "text": " indeed."}]
) == AIMessageChunk(
content=[{"index": 0, "text": "I am"}, {"text": " indeed."}]
), "Concatenating when one chunk is missing an index should not merge or throw" # noqa: E501
content=[{"index": 0, "text": "I am"}, {"index": 2, "text": " indeed."}]
), (
"Concatenating when both content arrays are dicts with a gap between indexes "
"should not result in a holey array"
)
assert (
AIMessageChunk(content=[{"index": 0, "text": "I am"}])
+ AIMessageChunk(content=[{"index": 2, "text": " indeed."}])
== AIMessageChunk(
content=[{"index": 0, "text": "I am"}, {"index": 2, "text": " indeed."}]
)
), "Concatenating when both content arrays are dicts with a gap between indexes should not result in a holey array" # noqa: E501
assert AIMessageChunk(content=[{"index": 0, "text": "I am"}]) + AIMessageChunk(
content=[{"index": 1, "text": " indeed."}]
) == AIMessageChunk(
content=[{"index": 0, "text": "I am"}, {"index": 1, "text": " indeed."}]
), (
"Concatenating when both content arrays are dicts with separate indexes "
"should not merge"
)
assert (
AIMessageChunk(content=[{"index": 0, "text": "I am"}])
+ AIMessageChunk(content=[{"index": 1, "text": " indeed."}])
== AIMessageChunk(
content=[{"index": 0, "text": "I am"}, {"index": 1, "text": " indeed."}]
)
), "Concatenating when both content arrays are dicts with separate indexes should not merge" # noqa: E501
assert AIMessageChunk(
content=[{"index": 0, "text": "I am", "type": "text_block"}]
) + AIMessageChunk(
content=[{"index": 0, "text": " indeed.", "type": "text_block"}]
) == AIMessageChunk(
content=[{"index": 0, "text": "I am indeed.", "type": "text_block"}]
), (
"Concatenating when both content arrays are dicts with the same index and type "
"should merge"
)
assert (
AIMessageChunk(content=[{"index": 0, "text": "I am", "type": "text_block"}])
+ AIMessageChunk(
content=[{"index": 0, "text": " indeed.", "type": "text_block"}]
)
== AIMessageChunk(
content=[{"index": 0, "text": "I am indeed.", "type": "text_block"}]
)
), "Concatenating when both content arrays are dicts with the same index and type should merge" # noqa: E501
assert AIMessageChunk(
content=[{"index": 0, "text": "I am", "type": "text_block"}]
) + AIMessageChunk(
content=[{"index": 0, "text": " indeed.", "type": "text_block_delta"}]
) == AIMessageChunk(
content=[{"index": 0, "text": "I am indeed.", "type": "text_block"}]
), (
"Concatenating when both content arrays are dicts with the same index "
"and different types should merge without updating type"
)
assert (
AIMessageChunk(content=[{"index": 0, "text": "I am", "type": "text_block"}])
+ AIMessageChunk(
content=[{"index": 0, "text": " indeed.", "type": "text_block_delta"}]
)
== AIMessageChunk(
content=[{"index": 0, "text": "I am indeed.", "type": "text_block"}]
)
), "Concatenating when both content arrays are dicts with the same index and different types should merge without updating type" # noqa: E501
assert (
AIMessageChunk(content=[{"index": 0, "text": "I am", "type": "text_block"}])
+ AIMessageChunk(content="", response_metadata={"extra": "value"})
== AIMessageChunk(
content=[{"index": 0, "text": "I am", "type": "text_block"}],
response_metadata={"extra": "value"},
)
), "Concatenating when one content is an array and one is an empty string should not add a new item, but should concat other fields" # noqa: E501
assert AIMessageChunk(
content=[{"index": 0, "text": "I am", "type": "text_block"}]
) + AIMessageChunk(
content="", response_metadata={"extra": "value"}
) == AIMessageChunk(
content=[{"index": 0, "text": "I am", "type": "text_block"}],
response_metadata={"extra": "value"},
), (
"Concatenating when one content is an array and one is an empty string "
"should not add a new item, but should concat other fields"
)
def test_function_message_chunks() -> None:
@ -290,9 +299,9 @@ def test_function_message_chunks() -> None:
def test_ai_message_chunks() -> None:
assert AIMessageChunk(example=True, content="I am") + AIMessageChunk(
example=True, content=" indeed."
) == AIMessageChunk(
example=True, content="I am indeed."
), "AIMessageChunk + AIMessageChunk should be a AIMessageChunk"
) == AIMessageChunk(example=True, content="I am indeed."), (
"AIMessageChunk + AIMessageChunk should be a AIMessageChunk"
)
with pytest.raises(ValueError):
AIMessageChunk(example=True, content="I am") + AIMessageChunk(


@ -5,24 +5,25 @@ from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
def test_generation_chunk() -> None:
assert GenerationChunk(text="Hello, ") + GenerationChunk(
text="world!"
) == GenerationChunk(
text="Hello, world!"
), "GenerationChunk + GenerationChunk should be a GenerationChunk"
) == GenerationChunk(text="Hello, world!"), (
"GenerationChunk + GenerationChunk should be a GenerationChunk"
)
assert (
GenerationChunk(text="Hello, ")
+ GenerationChunk(text="world!", generation_info={"foo": "bar"})
== GenerationChunk(text="Hello, world!", generation_info={"foo": "bar"})
), "GenerationChunk + GenerationChunk should be a GenerationChunk with merged generation_info" # noqa: E501
assert GenerationChunk(text="Hello, ") + GenerationChunk(
text="world!", generation_info={"foo": "bar"}
) == GenerationChunk(text="Hello, world!", generation_info={"foo": "bar"}), (
"GenerationChunk + GenerationChunk should be a GenerationChunk "
"with merged generation_info"
)
assert (
GenerationChunk(text="Hello, ")
+ GenerationChunk(text="world!", generation_info={"foo": "bar"})
+ GenerationChunk(text="!", generation_info={"baz": "foo"})
== GenerationChunk(
text="Hello, world!!", generation_info={"foo": "bar", "baz": "foo"}
)
), "GenerationChunk + GenerationChunk should be a GenerationChunk with merged generation_info" # noqa: E501
assert GenerationChunk(text="Hello, ") + GenerationChunk(
text="world!", generation_info={"foo": "bar"}
) + GenerationChunk(text="!", generation_info={"baz": "foo"}) == GenerationChunk(
text="Hello, world!!", generation_info={"foo": "bar", "baz": "foo"}
), (
"GenerationChunk + GenerationChunk should be a GenerationChunk "
"with merged generation_info"
)
def test_chat_generation_chunk() -> None:
@ -30,31 +31,32 @@ def test_chat_generation_chunk() -> None:
message=HumanMessageChunk(content="Hello, ")
) + ChatGenerationChunk(
message=HumanMessageChunk(content="world!")
) == ChatGenerationChunk(
message=HumanMessageChunk(content="Hello, world!")
), "ChatGenerationChunk + ChatGenerationChunk should be a ChatGenerationChunk"
) == ChatGenerationChunk(message=HumanMessageChunk(content="Hello, world!")), (
"ChatGenerationChunk + ChatGenerationChunk should be a ChatGenerationChunk"
)
assert (
ChatGenerationChunk(message=HumanMessageChunk(content="Hello, "))
+ ChatGenerationChunk(
message=HumanMessageChunk(content="world!"), generation_info={"foo": "bar"}
)
== ChatGenerationChunk(
message=HumanMessageChunk(content="Hello, world!"),
generation_info={"foo": "bar"},
)
), "GenerationChunk + GenerationChunk should be a GenerationChunk with merged generation_info"  # noqa: E501
assert ChatGenerationChunk(
message=HumanMessageChunk(content="Hello, ")
) + ChatGenerationChunk(
message=HumanMessageChunk(content="world!"), generation_info={"foo": "bar"}
) == ChatGenerationChunk(
message=HumanMessageChunk(content="Hello, world!"),
generation_info={"foo": "bar"},
), (
"GenerationChunk + GenerationChunk should be a GenerationChunk "
"with merged generation_info"
)
assert (
ChatGenerationChunk(message=HumanMessageChunk(content="Hello, "))
+ ChatGenerationChunk(
message=HumanMessageChunk(content="world!"), generation_info={"foo": "bar"}
)
+ ChatGenerationChunk(
message=HumanMessageChunk(content="!"), generation_info={"baz": "foo"}
)
== ChatGenerationChunk(
message=HumanMessageChunk(content="Hello, world!!"),
generation_info={"foo": "bar", "baz": "foo"},
)
), "GenerationChunk + GenerationChunk should be a GenerationChunk with merged generation_info" # noqa: E501
assert ChatGenerationChunk(
message=HumanMessageChunk(content="Hello, ")
) + ChatGenerationChunk(
message=HumanMessageChunk(content="world!"), generation_info={"foo": "bar"}
) + ChatGenerationChunk(
message=HumanMessageChunk(content="!"), generation_info={"baz": "foo"}
) == ChatGenerationChunk(
message=HumanMessageChunk(content="Hello, world!!"),
generation_info={"foo": "bar", "baz": "foo"},
), (
"GenerationChunk + GenerationChunk should be a GenerationChunk "
"with merged generation_info"
)


@ -1571,7 +1571,12 @@ def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None:
}
assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
assert tool_.invoke(
{"name": "foo", "args": {"x": 5, "y": "bar"}, "id": "123", "type": "tool_call"}
{
"name": "foo",
"args": {"x": 5, "y": "bar"},
"id": "123",
"type": "tool_call",
}
) == ToolMessage("bar", tool_call_id="123", name="foo")
expected_error = (
ValidationError if not isinstance(tool_, InjectedTool) else TypeError
@ -1614,7 +1619,12 @@ def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None:
}
assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
assert tool_.invoke(
{"name": "foo", "args": {"x": 5, "y": "bar"}, "id": "123", "type": "tool_call"}
{
"name": "foo",
"args": {"x": 5, "y": "bar"},
"id": "123",
"type": "tool_call",
}
) == ToolMessage("bar", tool_call_id="123", name="foo")
expected_error = (
ValidationError if not isinstance(tool_, InjectedTool) else TypeError
@ -1654,7 +1664,12 @@ def test_tool_injected_arg() -> None:
}
assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
assert tool_.invoke(
{"name": "foo", "args": {"x": 5, "y": "bar"}, "id": "123", "type": "tool_call"}
{
"name": "foo",
"args": {"x": 5, "y": "bar"},
"id": "123",
"type": "tool_call",
}
) == ToolMessage("bar", tool_call_id="123", name="foo")
expected_error = (
ValidationError if not isinstance(tool_, InjectedTool) else TypeError
@ -1715,7 +1730,12 @@ def test_tool_inherited_injected_arg() -> None:
}
assert tool_.invoke({"x": 5, "y": "bar"}) == "bar"
assert tool_.invoke(
{"name": "foo", "args": {"x": 5, "y": "bar"}, "id": "123", "type": "tool_call"}
{
"name": "foo",
"args": {"x": 5, "y": "bar"},
"id": "123",
"type": "tool_call",
}
) == ToolMessage("bar", tool_call_id="123", name="foo")
expected_error = (
ValidationError if not isinstance(tool_, InjectedTool) else TypeError
@ -1987,6 +2007,7 @@ def test__get_all_basemodel_annotations_v2(use_v1_namespace: bool) -> None:
class ModelA(BaseModel1, Generic[A], extra="allow"):
a: A
else:
from pydantic import BaseModel as BaseModel2
from pydantic import ConfigDict
@ -2272,7 +2293,12 @@ def test_tool_injected_tool_call_id() -> None:
return ToolMessage(x, tool_call_id=tool_call_id) # type: ignore
assert foo.invoke(
{"type": "tool_call", "args": {"x": 0}, "name": "foo", "id": "bar"}
{
"type": "tool_call",
"args": {"x": 0},
"name": "foo",
"id": "bar",
}
) == ToolMessage(0, tool_call_id="bar") # type: ignore
with pytest.raises(ValueError):
@ -2284,7 +2310,12 @@ def test_tool_injected_tool_call_id() -> None:
return ToolMessage(x, tool_call_id=tool_call_id) # type: ignore
assert foo2.invoke(
{"type": "tool_call", "args": {"x": 0}, "name": "foo", "id": "bar"}
{
"type": "tool_call",
"args": {"x": 0},
"name": "foo",
"id": "bar",
}
) == ToolMessage(0, tool_call_id="bar") # type: ignore
@ -2321,7 +2352,12 @@ def test_tool_return_output_mixin() -> None:
return Bar(x=x)
assert foo.invoke(
{"type": "tool_call", "args": {"x": 0}, "name": "foo", "id": "bar"}
{
"type": "tool_call",
"args": {"x": 0},
"name": "foo",
"id": "bar",
}
) == Bar(x=0)


@ -54,9 +54,9 @@ async def test_same_event_loop() -> None:
# To verify that the producer and consumer are running in parallel, we
# expect the delta_time to be smaller than the sleep delay in the producer
# * # of items = 30 ms
assert (
math.isclose(delta_time, 0, abs_tol=0.010) is True
), f"delta_time: {delta_time}"
assert math.isclose(delta_time, 0, abs_tol=0.010) is True, (
f"delta_time: {delta_time}"
)
async def test_queue_for_streaming_via_sync_call() -> None:
@ -107,9 +107,9 @@ async def test_queue_for_streaming_via_sync_call() -> None:
# To verify that the producer and consumer are running in parallel, we
# expect the delta_time to be smaller than the sleep delay in the producer
# * # of items = 30 ms
assert (
math.isclose(delta_time, 0, abs_tol=0.010) is True
), f"delta_time: {delta_time}"
assert math.isclose(delta_time, 0, abs_tol=0.010) is True, (
f"delta_time: {delta_time}"
)
def test_send_to_closed_stream() -> None:


@ -13,8 +13,8 @@ from typing import (
from typing import TypedDict as TypingTypedDict
import pytest
from pydantic import BaseModel as BaseModelV2Maybe # pydantic: ignore
from pydantic import Field as FieldV2Maybe # pydantic: ignore
from pydantic import BaseModel as BaseModelV2Maybe # pydantic: ignore
from pydantic import Field as FieldV2Maybe # pydantic: ignore
from typing_extensions import (
TypedDict as ExtensionsTypedDict,
)


@ -1,3 +1,5 @@
import operator
import pytest
from langchain_core.utils.usage import _dict_int_op
@ -6,7 +8,7 @@ from langchain_core.utils.usage import _dict_int_op
def test_dict_int_op_add() -> None:
left = {"a": 1, "b": 2}
right = {"b": 3, "c": 4}
result = _dict_int_op(left, right, lambda x, y: x + y)
result = _dict_int_op(left, right, operator.add)
assert result == {"a": 1, "b": 5, "c": 4}
@ -20,7 +22,7 @@ def test_dict_int_op_subtract() -> None:
def test_dict_int_op_nested() -> None:
left = {"a": 1, "b": {"c": 2, "d": 3}}
right = {"a": 2, "b": {"c": 1, "e": 4}}
result = _dict_int_op(left, right, lambda x, y: x + y)
result = _dict_int_op(left, right, operator.add)
assert result == {"a": 3, "b": {"c": 3, "d": 3, "e": 4}}
@ -28,11 +30,11 @@ def test_dict_int_op_max_depth_exceeded() -> None:
left = {"a": {"b": {"c": 1}}}
right = {"a": {"b": {"c": 2}}}
with pytest.raises(ValueError):
_dict_int_op(left, right, lambda x, y: x + y, max_depth=2)
_dict_int_op(left, right, operator.add, max_depth=2)
def test_dict_int_op_invalid_types() -> None:
left = {"a": 1, "b": "string"}
right = {"a": 2, "b": 3}
with pytest.raises(ValueError):
_dict_int_op(left, right, lambda x, y: x + y)
_dict_int_op(left, right, operator.add)
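operator.add is the functional form of + (the same reimplemented-operator rewrite as itemgetter earlier), so the _dict_int_op tests pass an equivalent callable:

import operator

# operator.add(x, y) == x + y for any operands supporting +.
assert operator.add(2, 3) == 5
assert operator.add("a", "b") == "ab"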