Mirror of https://github.com/hwchase17/langchain.git, synced 2025-07-13 00:16:01 +00:00

anthropic[patch]: ruff fixes and rules (#31899)

* bump ruff deps
* add more thorough ruff rules
* fix said rules

Parent: e7eac27241
Commit: 2a7645300c
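Most of the mechanical fixes in this diff apply flake8-errmsg (EM101/EM102): exception messages move out of the ``raise`` expression into a ``msg`` variable, so tracebacks don't print the message twice. A minimal sketch of the before/after pattern, with illustrative names that are not from this commit:

.. code-block:: python

    # Before: ruff EM102 flags an f-string passed directly to an exception.
    def check_positive(x: int) -> None:
        if x <= 0:
            raise ValueError(f"expected a positive value, got {x}")


    # After: bind the message to a variable, then raise.
    def check_positive_fixed(x: int) -> None:
        if x <= 0:
            msg = f"expected a positive value, got {x}"
            raise ValueError(msg)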
@@ -180,13 +180,13 @@ select = [
     "YTT", # flake8-2020
 ]
 ignore = [
-    "D100",
-    "D101",
-    "D102",
-    "D103",
-    "D104",
-    "D105",
-    "D107",
+    "D100", # Missing docstring in public module
+    "D101", # Missing docstring in public class
+    "D102", # Missing docstring in public method
+    "D103", # Missing docstring in public function
+    "D104", # Missing docstring in public package
+    "D105", # Missing docstring in magic method
+    "D107", # Missing docstring in __init__
     "COM812", # Messes with the formatter
     "ISC001", # Messes with the formatter
     "PERF203", # Rarely useful
@@ -6,9 +6,9 @@ from langchain_anthropic.chat_models import (
 from langchain_anthropic.llms import Anthropic, AnthropicLLM

 __all__ = [
-    "ChatAnthropicMessages",
-    "ChatAnthropic",
-    "convert_to_anthropic_tool",
     "Anthropic",
     "AnthropicLLM",
+    "ChatAnthropic",
+    "ChatAnthropicMessages",
+    "convert_to_anthropic_tool",
 ]

@@ -6,6 +6,8 @@ for each instance of ChatAnthropic.
 Logic is largely replicated from anthropic._base_client.
 """

+from __future__ import annotations
+
 import asyncio
 import os
 from functools import lru_cache

@@ -17,7 +19,7 @@ _NOT_GIVEN: Any = object()


 class _SyncHttpxClientWrapper(anthropic.DefaultHttpxClient):
-    """Borrowed from anthropic._base_client"""
+    """Borrowed from anthropic._base_client."""

     def __del__(self) -> None:
         if self.is_closed:

@@ -30,7 +32,7 @@ class _SyncHttpxClientWrapper(anthropic.DefaultHttpxClient):


 class _AsyncHttpxClientWrapper(anthropic.DefaultAsyncHttpxClient):
-    """Borrowed from anthropic._base_client"""
+    """Borrowed from anthropic._base_client."""

     def __del__(self) -> None:
         if self.is_closed:

@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import copy
 import json
 import re

@@ -111,18 +113,19 @@ def _is_builtin_tool(tool: Any) -> bool:


 def _format_image(url: str) -> dict:
-    """
-    Converts part["image_url"]["url"] strings (OpenAI format)
-    to the correct Anthropic format:
+    """Convert part["image_url"]["url"] strings (OpenAI format) to Anthropic format.

-    {
-        "type": "base64",
-        "media_type": "image/jpeg",
-        "data": "/9j/4AAQSkZJRg...",
-    }
+    {
+        "type": "base64",
+        "media_type": "image/jpeg",
+        "data": "/9j/4AAQSkZJRg...",
+    }

     Or

-    {
-        "type": "url",
-        "url": "https://example.com/image.jpg",
-    }
+    {
+        "type": "url",
+        "url": "https://example.com/image.jpg",
+    }
     """
     # Base64 encoded image
@@ -146,11 +149,14 @@ def _format_image(url: str) -> dict:
             "url": url,
         }

-    raise ValueError(
+    msg = (
         "Malformed url parameter."
         " Must be either an image URL (https://example.com/image.jpg)"
         " or base64 encoded string (data:image/png;base64,'/9j/4AAQSk'...)"
     )
+    raise ValueError(
+        msg,
+    )


 def _merge_messages(

@@ -177,8 +183,8 @@ def _merge_messages(
                         "content": curr.content,
                         "tool_use_id": curr.tool_call_id,
                         "is_error": curr.status == "error",
-                    }
-                ]
+                    },
+                ],
             )
         last = merged[-1] if merged else None
         if any(

@@ -234,10 +240,13 @@ def _format_data_content_block(block: dict) -> dict:
                 },
             }
         else:
-            raise ValueError(
+            msg = (
                 "Anthropic only supports 'url' and 'base64' source_type for image "
                 "content blocks."
             )
+            raise ValueError(
+                msg,
+            )

     elif block["type"] == "file":
         if block["source_type"] == "url":
@@ -292,7 +302,6 @@ def _format_messages(
     messages: Sequence[BaseMessage],
 ) -> tuple[Union[str, list[dict], None], list[dict]]:
     """Format messages for anthropic."""
-
     """
     [
         {

@@ -308,8 +317,9 @@ def _format_messages(
     for i, message in enumerate(merged_messages):
         if message.type == "system":
             if system is not None:
-                raise ValueError("Received multiple non-consecutive system messages.")
-            elif isinstance(message.content, list):
+                msg = "Received multiple non-consecutive system messages."
+                raise ValueError(msg)
+            if isinstance(message.content, list):
                 system = [
                     (
                         block

@@ -328,8 +338,9 @@ def _format_messages(
         if not isinstance(message.content, str):
             # parse as dict
             if not isinstance(message.content, list):
+                msg = "Anthropic message content must be str or list of dicts"
                 raise ValueError(
-                    "Anthropic message content must be str or list of dicts"
+                    msg,
                 )

             # populate content

@@ -339,8 +350,9 @@ def _format_messages(
                     content.append({"type": "text", "text": block})
                 elif isinstance(block, dict):
                     if "type" not in block:
-                        raise ValueError("Dict content block must have a type key")
-                    elif block["type"] == "image_url":
+                        msg = "Dict content block must have a type key"
+                        raise ValueError(msg)
+                    if block["type"] == "image_url":
                         # convert format
                         source = _format_image(block["image_url"]["url"])
                         content.append({"type": "image", "source": source})

@@ -358,7 +370,9 @@ def _format_messages(
                             if tc["id"] == block["id"]
                         ]
                         content.extend(
-                            _lc_tool_calls_to_anthropic_tool_use_blocks(overlapping)
+                            _lc_tool_calls_to_anthropic_tool_use_blocks(
+                                overlapping,
+                            ),
                         )
                     else:
                         block.pop("text", None)

@@ -398,7 +412,7 @@ def _format_messages(
                                 for k, v in block.items()
                                 if k
                                 in ("type", "text", "cache_control", "citations")
-                            }
+                            },
                         )
                     elif block["type"] == "thinking":
                         content.append(

@@ -407,7 +421,7 @@ def _format_messages(
                                 for k, v in block.items()
                                 if k
                                 in ("type", "thinking", "cache_control", "signature")
-                            }
+                            },
                         )
                     elif block["type"] == "redacted_thinking":
                         content.append(

@@ -415,13 +429,13 @@ def _format_messages(
                                 k: v
                                 for k, v in block.items()
                                 if k in ("type", "cache_control", "data")
-                            }
+                            },
                         )
                     elif block["type"] == "tool_result":
                         tool_content = _format_messages(
-                            [HumanMessage(block["content"])]
+                            [HumanMessage(block["content"])],
                         )[1][0]["content"]
-                        content.append({**block, **{"content": tool_content}})
+                        content.append({**block, "content": tool_content})
                     elif block["type"] in (
                         "code_execution_tool_result",
                         "mcp_tool_result",
@@ -439,15 +453,18 @@ def _format_messages(
                                     "is_error",  # for mcp_tool_result
                                     "cache_control",
                                 )
-                            }
+                            },
                         )
                     else:
                         content.append(block)
                 else:
-                    raise ValueError(
+                    msg = (
                         f"Content blocks must be str or dict, instead was: "
                         f"{type(block)}"
                     )
+                    raise ValueError(
+                        msg,
+                    )
         else:
             content = message.content

@@ -481,8 +498,7 @@ def _handle_anthropic_bad_request(e: anthropic.BadRequestError) -> None:
         message = "Received only system message(s). "
         warnings.warn(message)
         raise e
-    else:
-        raise
+    raise


 class ChatAnthropic(BaseChatModel):

@@ -635,17 +651,17 @@ class ChatAnthropic(BaseChatModel):
        .. code-block:: python

            [{'name': 'GetWeather',
-               'args': {'location': 'Los Angeles, CA'},
-               'id': 'toolu_01KzpPEAgzura7hpBqwHbWdo'},
+             'args': {'location': 'Los Angeles, CA'},
+             'id': 'toolu_01KzpPEAgzura7hpBqwHbWdo'},
             {'name': 'GetWeather',
-               'args': {'location': 'New York, NY'},
-               'id': 'toolu_01JtgbVGVJbiSwtZk3Uycezx'},
+             'args': {'location': 'New York, NY'},
+             'id': 'toolu_01JtgbVGVJbiSwtZk3Uycezx'},
             {'name': 'GetPopulation',
-               'args': {'location': 'Los Angeles, CA'},
-               'id': 'toolu_01429aygngesudV9nTbCKGuw'},
+             'args': {'location': 'Los Angeles, CA'},
+             'id': 'toolu_01429aygngesudV9nTbCKGuw'},
             {'name': 'GetPopulation',
-               'args': {'location': 'New York, NY'},
-               'id': 'toolu_01JPktyd44tVMeBcPPnFSEJG'}]
+             'args': {'location': 'New York, NY'},
+             'id': 'toolu_01JPktyd44tVMeBcPPnFSEJG'}]

        See ``ChatAnthropic.bind_tools()`` method for more.

@@ -673,7 +689,7 @@ class ChatAnthropic(BaseChatModel):
        See ``ChatAnthropic.with_structured_output()`` for more.

    Image input:
-       See `multimodal guides <https://python.langchain.com/docs/how_to/multimodal_inputs/>`_
+       See `multimodal guides <https://python.langchain.com/docs/how_to/multimodal_inputs/>`__
        for more detail.

        .. code-block:: python
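The code block above is truncated in this view. A sketch of what such a call looks like, assuming the cross-provider content-block keys (``source_type``, ``data``, ``mime_type``) handled by ``_format_data_content_block`` elsewhere in this diff; the image URL is a stand-in:

.. code-block:: python

    import base64

    import httpx
    from langchain_anthropic import ChatAnthropic

    # Hypothetical image; any publicly reachable JPEG works here.
    image_url = "https://example.com/image.jpg"
    image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")

    llm = ChatAnthropic(model="claude-3-5-sonnet-latest")
    message = {
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe this image."},
            {
                "type": "image",
                "source_type": "base64",
                "data": image_data,
                "mime_type": "image/jpeg",
            },
        ],
    }
    llm.invoke([message])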
@@ -717,7 +733,7 @@ class ChatAnthropic(BaseChatModel):
        .. dropdown:: Files API

            You can also pass in files that are managed through Anthropic's
-           `Files API <https://docs.anthropic.com/en/docs/build-with-claude/files>`_:
+           `Files API <https://docs.anthropic.com/en/docs/build-with-claude/files>`__:

            .. code-block:: python
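A sketch of the Files API usage the dropdown describes. The beta header name and the file ID are assumptions based on Anthropic's Files API documentation, not values from this diff; ``extra_headers`` via ``model_kwargs`` is the mechanism used by the tests later in this commit:

.. code-block:: python

    from langchain_anthropic import ChatAnthropic

    llm = ChatAnthropic(
        model="claude-3-5-sonnet-latest",
        # Beta flag assumed from Anthropic's Files API documentation.
        model_kwargs={"extra_headers": {"anthropic-beta": "files-api-2025-04-14"}},
    )
    input_message = {
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe this image."},
            {
                "type": "image",
                # Hypothetical ID returned by a prior Files API upload.
                "source": {"type": "file", "file_id": "file_011CNha8iCJcU1wXNR6q4V8w"},
            },
        ],
    }
    llm.invoke([input_message])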
@@ -744,7 +760,7 @@ class ChatAnthropic(BaseChatModel):
            llm.invoke([input_message])

    PDF input:
-       See `multimodal guides <https://python.langchain.com/docs/how_to/multimodal_inputs/>`_
+       See `multimodal guides <https://python.langchain.com/docs/how_to/multimodal_inputs/>`__
        for more detail.

        .. code-block:: python

@@ -782,7 +798,7 @@ class ChatAnthropic(BaseChatModel):
        .. dropdown:: Files API

            You can also pass in files that are managed through Anthropic's
-           `Files API <https://docs.anthropic.com/en/docs/build-with-claude/files>`_:
+           `Files API <https://docs.anthropic.com/en/docs/build-with-claude/files>`__:

            .. code-block:: python

@@ -810,7 +826,7 @@ class ChatAnthropic(BaseChatModel):

    Extended thinking:
        Claude 3.7 Sonnet supports an
-       `extended thinking <https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking>`_
+       `extended thinking <https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking>`__
        feature, which will output the step-by-step reasoning process that led to its
        final answer.
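A sketch of enabling the feature. ``thinking`` is a real constructor parameter (this diff references ``self.thinking`` in the structured-output path below); the budget value is arbitrary:

.. code-block:: python

    from langchain_anthropic import ChatAnthropic

    llm = ChatAnthropic(
        model="claude-3-7-sonnet-latest",
        max_tokens=5000,
        thinking={"type": "enabled", "budget_tokens": 2000},
    )
    response = llm.invoke("What is the cube root of 50.653?")
    # response.content is a list containing "thinking" and "text" blocks.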
@@ -838,10 +854,10 @@ class ChatAnthropic(BaseChatModel):

    Citations:
        Anthropic supports a
-       `citations <https://docs.anthropic.com/en/docs/build-with-claude/citations>`_
+       `citations <https://docs.anthropic.com/en/docs/build-with-claude/citations>`__
        feature that lets Claude attach context to its answers based on source
        documents supplied by the user. When
-       `document content blocks <https://docs.anthropic.com/en/docs/build-with-claude/citations#document-types>`_
+       `document content blocks <https://docs.anthropic.com/en/docs/build-with-claude/citations#document-types>`__
        with ``"citations": {"enabled": True}`` are included in a query, Claude may
        generate citations in its response.
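A sketch of a citations-enabled query, mirroring the document-block format exercised by ``test_citations`` later in this diff:

.. code-block:: python

    from langchain_anthropic import ChatAnthropic

    llm = ChatAnthropic(model="claude-3-5-haiku-latest")
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "document",
                    "source": {
                        "type": "text",
                        "media_type": "text/plain",
                        "data": "The grass is green. The sky is blue.",
                    },
                    "citations": {"enabled": True},
                },
                {"type": "text", "text": "What color is the grass and sky?"},
            ],
        },
    ]
    response = llm.invoke(messages)
    # Text blocks in response.content may carry a "citations" key.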
@@ -924,7 +940,7 @@ class ChatAnthropic(BaseChatModel):
        or by setting ``stream_usage=False`` when initializing ChatAnthropic.

    Prompt caching:
-       See LangChain `docs <https://python.langchain.com/docs/integrations/chat/anthropic/#built-in-tools>`_
+       See LangChain `docs <https://python.langchain.com/docs/integrations/chat/anthropic/#built-in-tools>`__
        for more detail.

        .. code-block:: python
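The code block here is truncated in this view. A sketch of the ``cache_control`` marker it describes; the system prompt text is a stand-in:

.. code-block:: python

    from langchain_anthropic import ChatAnthropic

    long_system_prompt = "<several thousand tokens of reference material>"

    llm = ChatAnthropic(model="claude-3-5-sonnet-latest")
    messages = [
        {
            "role": "system",
            "content": [
                {
                    "type": "text",
                    "text": long_system_prompt,
                    "cache_control": {"type": "ephemeral"},
                },
            ],
        },
        {"role": "user", "content": "Summarize the key points."},
    ]
    response = llm.invoke(messages)
    # Cache writes and reads are reported in response.usage_metadata details.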
@@ -1000,11 +1016,11 @@ class ChatAnthropic(BaseChatModel):
                }
            }

-       See `Claude documentation <https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#1-hour-cache-duration-beta>`_
+       See `Claude documentation <https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#1-hour-cache-duration-beta>`__
        for detail.

    Token-efficient tool use (beta):
-       See LangChain `docs <https://python.langchain.com/docs/integrations/chat/anthropic/>`_
+       See LangChain `docs <https://python.langchain.com/docs/integrations/chat/anthropic/>`__
        for more detail.

        .. code-block:: python
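A sketch of opting in; the beta header value is the same one used by ``test_tool_use`` later in this diff:

.. code-block:: python

    from langchain_anthropic import ChatAnthropic


    def get_weather(location: str) -> str:
        """Get the weather at a location."""
        return "It's sunny."


    llm = ChatAnthropic(
        model="claude-3-7-sonnet-latest",
        temperature=0,
        model_kwargs={
            "extra_headers": {"anthropic-beta": "token-efficient-tools-2025-02-19"},
        },
    )
    llm_with_tools = llm.bind_tools([get_weather])
    response = llm_with_tools.invoke("What's the weather in San Francisco?")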
@@ -1041,7 +1057,7 @@ class ChatAnthropic(BaseChatModel):
            Total tokens: 408

    Built-in tools:
-       See LangChain `docs <https://python.langchain.com/docs/integrations/chat/anthropic/>`_
+       See LangChain `docs <https://python.langchain.com/docs/integrations/chat/anthropic/>`__
        for more detail.

        .. dropdown:: Web search
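A sketch using a server-side built-in tool, with the exact tool dict from ``test_builtin_tools`` later in this diff:

.. code-block:: python

    from langchain_anthropic import ChatAnthropic

    llm = ChatAnthropic(model="claude-3-7-sonnet-latest")
    tool = {"type": "text_editor_20250124", "name": "str_replace_editor"}
    llm_with_tools = llm.bind_tools([tool])
    response = llm_with_tools.invoke(
        "There's a syntax error in my primes.py file. Can you help me fix it?",
    )
    # The model responds with tool_calls targeting the built-in editor.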
@@ -1266,7 +1282,9 @@ class ChatAnthropic(BaseChatModel):
        }

    def _get_ls_params(
-        self, stop: Optional[list[str]] = None, **kwargs: Any
+        self,
+        stop: Optional[list[str]] = None,
+        **kwargs: Any,
    ) -> LangSmithParams:
        """Get standard params for tracing."""
        params = self._get_invocation_params(stop=stop, **kwargs)

@@ -1286,8 +1304,7 @@ class ChatAnthropic(BaseChatModel):
    @classmethod
    def build_extra(cls, values: dict) -> Any:
        all_required_field_names = get_pydantic_field_names(cls)
-        values = _build_model_kwargs(values, all_required_field_names)
-        return values
+        return _build_model_kwargs(values, all_required_field_names)

    @cached_property
    def _client_params(self) -> dict[str, Any]:

@@ -1361,14 +1378,12 @@ class ChatAnthropic(BaseChatModel):
    def _create(self, payload: dict) -> Any:
        if "betas" in payload:
            return self._client.beta.messages.create(**payload)
-        else:
-            return self._client.messages.create(**payload)
+        return self._client.messages.create(**payload)

    async def _acreate(self, payload: dict) -> Any:
        if "betas" in payload:
            return await self._async_client.beta.messages.create(**payload)
-        else:
-            return await self._async_client.messages.create(**payload)
+        return await self._async_client.messages.create(**payload)

    def _stream(
        self,
@@ -1496,7 +1511,10 @@ class ChatAnthropic(BaseChatModel):
    ) -> ChatResult:
        if self.streaming:
            stream_iter = self._stream(
-                messages, stop=stop, run_manager=run_manager, **kwargs
+                messages,
+                stop=stop,
+                run_manager=run_manager,
+                **kwargs,
            )
            return generate_from_stream(stream_iter)
        payload = self._get_request_payload(messages, stop=stop, **kwargs)

@@ -1515,7 +1533,10 @@ class ChatAnthropic(BaseChatModel):
    ) -> ChatResult:
        if self.streaming:
            stream_iter = self._astream(
-                messages, stop=stop, run_manager=run_manager, **kwargs
+                messages,
+                stop=stop,
+                run_manager=run_manager,
+                **kwargs,
            )
            return await agenerate_from_stream(stream_iter)
        payload = self._get_request_payload(messages, stop=stop, **kwargs)

@@ -1558,7 +1579,7 @@ class ChatAnthropic(BaseChatModel):
        tools: Sequence[Union[dict[str, Any], type, Callable, BaseTool]],
        *,
        tool_choice: Optional[
-            Union[dict[str, str], Literal["any", "auto"], str]
+            Union[dict[str, str], Literal["any", "auto"], str]  # noqa: PYI051
        ] = None,
        parallel_tool_calls: Optional[bool] = None,
        **kwargs: Any,

@@ -1716,10 +1737,13 @@ class ChatAnthropic(BaseChatModel):
        elif isinstance(tool_choice, str):
            kwargs["tool_choice"] = {"type": "tool", "name": tool_choice}
        else:
-            raise ValueError(
+            msg = (
                f"Unrecognized 'tool_choice' type {tool_choice=}. Expected dict, "
                f"str, or None."
            )
+            raise ValueError(
+                msg,
+            )

        if parallel_tool_calls is not None:
            disable_parallel_tool_use = not parallel_tool_calls

@@ -1861,7 +1885,8 @@ class ChatAnthropic(BaseChatModel):
        tool_name = formatted_tool["name"]
        if self.thinking is not None and self.thinking.get("type") == "enabled":
            llm = self._get_llm_for_structured_output_when_thinking_is_enabled(
-                schema, formatted_tool
+                schema,
+                formatted_tool,
            )
        else:
            llm = self.bind_tools(

@@ -1875,24 +1900,27 @@ class ChatAnthropic(BaseChatModel):

        if isinstance(schema, type) and is_basemodel_subclass(schema):
            output_parser: OutputParserLike = PydanticToolsParser(
-                tools=[schema], first_tool_only=True
+                tools=[schema],
+                first_tool_only=True,
            )
        else:
            output_parser = JsonOutputKeyToolsParser(
-                key_name=tool_name, first_tool_only=True
+                key_name=tool_name,
+                first_tool_only=True,
            )

        if include_raw:
            parser_assign = RunnablePassthrough.assign(
-                parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
+                parsed=itemgetter("raw") | output_parser,
+                parsing_error=lambda _: None,
            )
            parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
            parser_with_fallback = parser_assign.with_fallbacks(
-                [parser_none], exception_key="parsing_error"
+                [parser_none],
+                exception_key="parsing_error",
            )
            return RunnableMap(raw=llm) | parser_with_fallback
-        else:
-            return llm | output_parser
+        return llm | output_parser

    @beta()
    def get_num_tokens_from_messages(

@@ -1909,6 +1937,8 @@ class ChatAnthropic(BaseChatModel):
            messages: The message inputs to tokenize.
            tools: If provided, sequence of dict, BaseModel, function, or BaseTools
                to be converted to tool schemas.
+            kwargs: Additional keyword arguments are passed to the
+                :meth:`~langchain_anthropic.chat_models.ChatAnthropic.bind` method.

        Basic usage:
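The docstring truncates here in this view. A minimal usage sketch; the method is decorated ``@beta()`` and counts tokens via Anthropic's API, so it needs network access and credentials:

.. code-block:: python

    from langchain_core.messages import HumanMessage

    from langchain_anthropic import ChatAnthropic

    llm = ChatAnthropic(model="claude-3-5-sonnet-latest")
    num_tokens = llm.get_num_tokens_from_messages([HumanMessage("Hello, world!")])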
@@ -1985,7 +2015,7 @@ def convert_to_anthropic_tool(
    if isinstance(tool, dict) and all(
        k in tool for k in ("name", "description", "input_schema")
    ):
-        anthropic_formatted = AnthropicTool(tool)  # type: ignore
+        anthropic_formatted = AnthropicTool(tool)  # type: ignore[misc]
    else:
        oai_formatted = convert_to_openai_tool(tool)["function"]
        anthropic_formatted = AnthropicTool(

@@ -2032,17 +2062,15 @@ class _AnthropicToolUse(TypedDict):
 def _lc_tool_calls_to_anthropic_tool_use_blocks(
     tool_calls: list[ToolCall],
 ) -> list[_AnthropicToolUse]:
-    blocks = []
-    for tool_call in tool_calls:
-        blocks.append(
-            _AnthropicToolUse(
-                type="tool_use",
-                name=tool_call["name"],
-                input=tool_call["args"],
-                id=cast(str, tool_call["id"]),
-            )
-        )
-    return blocks
+    return [
+        _AnthropicToolUse(
+            type="tool_use",
+            name=tool_call["name"],
+            input=tool_call["args"],
+            id=cast(str, tool_call["id"]),
+        )
+        for tool_call in tool_calls
+    ]


 def _make_message_chunk_from_anthropic_event(
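The rewrite above replaces an append loop with a single comprehension (ruff's PERF401 pattern, from the newly selected PERF rules). The same transformation on a toy example:

.. code-block:: python

    def make_block(name: str) -> dict:
        return {"type": "tool_use", "name": name}


    names = ["GetWeather", "GetPopulation"]

    # Before: building a list via repeated .append() calls.
    blocks = []
    for name in names:
        blocks.append(make_block(name))

    # After: one comprehension, as in the rewritten helper above.
    blocks = [make_block(name) for name in names]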
@@ -2107,7 +2135,7 @@ def _make_message_chunk_from_anthropic_event(
            tool_call_chunks = []
        message_chunk = AIMessageChunk(
            content=[content_block],
-            tool_call_chunks=tool_call_chunks,  # type: ignore
+            tool_call_chunks=tool_call_chunks,
        )
        block_start_event = event
    elif event.type == "content_block_delta":

@@ -2122,14 +2150,10 @@ def _make_message_chunk_from_anthropic_event(
            if "citation" in content_block:
                content_block["citations"] = [content_block.pop("citation")]
            message_chunk = AIMessageChunk(content=[content_block])
-        elif event.delta.type == "thinking_delta":
-            content_block = event.delta.model_dump()
-            if "text" in content_block and content_block["text"] is None:
-                content_block.pop("text")
-            content_block["index"] = event.index
-            content_block["type"] = "thinking"
-            message_chunk = AIMessageChunk(content=[content_block])
-        elif event.delta.type == "signature_delta":
+        elif (
+            event.delta.type == "thinking_delta"
+            or event.delta.type == "signature_delta"
+        ):
            content_block = event.delta.model_dump()
            if "text" in content_block and content_block["text"] is None:
                content_block.pop("text")

@@ -2155,7 +2179,7 @@ def _make_message_chunk_from_anthropic_event(
            tool_call_chunks = []
        message_chunk = AIMessageChunk(
            content=[content_block],
-            tool_call_chunks=tool_call_chunks,  # type: ignore
+            tool_call_chunks=tool_call_chunks,
        )
    elif event.type == "message_delta" and stream_usage:
        usage_metadata = UsageMetadata(
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import json
 from typing import (
     Any,

@@ -66,7 +68,7 @@ def get_system_message(tools: list[dict]) -> str:
                    parameter_description=parameter.get("description"),
                )
                for name, parameter in tool["parameters"]["properties"].items()
-            ]
+            ],
            ),
        }
        for tool in tools

@@ -79,7 +81,7 @@ def get_system_message(tools: list[dict]) -> str:
                formatted_parameters=tool["formatted_parameters"],
            )
            for tool in tools_data
-        ]
+        ],
    )
    return SYSTEM_PROMPT_FORMAT.format(formatted_tools=tools_formatted)

@@ -111,18 +113,20 @@ def _xml_to_function_call(invoke: Any, tools: list[dict]) -> dict[str, Any]:
    if len(filtered_tools) > 0 and not isinstance(arguments, str):
        tool = filtered_tools[0]
        for key, value in arguments.items():
-            if key in tool["parameters"]["properties"]:
-                if "type" in tool["parameters"]["properties"][key]:
-                    if tool["parameters"]["properties"][key][
-                        "type"
-                    ] == "array" and not isinstance(value, list):
-                        arguments[key] = [value]
-                    if (
-                        tool["parameters"]["properties"][key]["type"] != "object"
-                        and isinstance(value, dict)
-                        and len(value.keys()) == 1
-                    ):
-                        arguments[key] = list(value.values())[0]
+            if (
+                key in tool["parameters"]["properties"]
+                and "type" in tool["parameters"]["properties"][key]
+            ):
+                if tool["parameters"]["properties"][key][
+                    "type"
+                ] == "array" and not isinstance(value, list):
+                    arguments[key] = [value]
+                if (
+                    tool["parameters"]["properties"][key]["type"] != "object"
+                    and isinstance(value, dict)
+                    and len(value.keys()) == 1
+                ):
+                    arguments[key] = next(iter(value.values()))

    return {
        "function": {

@@ -134,9 +138,7 @@ def _xml_to_function_call(invoke: Any, tools: list[dict]) -> dict[str, Any]:


 def _xml_to_tool_calls(elem: Any, tools: list[dict]) -> list[dict[str, Any]]:
-    """
-    Convert an XML element and its children into a dictionary of dictionaries.
-    """
+    """Convert an XML element and its children into a dictionary of dictionaries."""
     invokes = elem.findall("invoke")

     return [_xml_to_function_call(invoke, tools) for invoke in invokes]
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import re
 import warnings
 from collections.abc import AsyncIterator, Iterator, Mapping

@@ -85,8 +87,7 @@ class _AnthropicCommon(BaseLanguageModel):
    @classmethod
    def build_extra(cls, values: dict) -> Any:
        all_required_field_names = get_pydantic_field_names(cls)
-        values = _build_model_kwargs(values, all_required_field_names)
-        return values
+        return _build_model_kwargs(values, all_required_field_names)

    @model_validator(mode="after")
    def validate_environment(self) -> Self:

@@ -125,11 +126,12 @@ class _AnthropicCommon(BaseLanguageModel):
    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
-        return {**{}, **self._default_params}
+        return {**self._default_params}

    def _get_anthropic_stop(self, stop: Optional[list[str]] = None) -> list[str]:
        if not self.HUMAN_PROMPT or not self.AI_PROMPT:
-            raise NameError("Please ensure the anthropic package is loaded")
+            msg = "Please ensure the anthropic package is loaded"
+            raise NameError(msg)

        if stop is None:
            stop = []

@@ -152,6 +154,7 @@ class AnthropicLLM(LLM, _AnthropicCommon):
            from langchain_anthropic import AnthropicLLM

            model = AnthropicLLM()
+
    """

    model_config = ConfigDict(

@@ -166,7 +169,7 @@ class AnthropicLLM(LLM, _AnthropicCommon):
        warnings.warn(
            "This Anthropic LLM is deprecated. "
            "Please use `from langchain_anthropic import ChatAnthropic` "
-            "instead"
+            "instead",
        )
        return values

@@ -199,7 +202,9 @@ class AnthropicLLM(LLM, _AnthropicCommon):
        }

    def _get_ls_params(
-        self, stop: Optional[list[str]] = None, **kwargs: Any
+        self,
+        stop: Optional[list[str]] = None,
+        **kwargs: Any,
    ) -> LangSmithParams:
        """Get standard params for tracing."""
        params = super()._get_ls_params(stop=stop, **kwargs)

@@ -213,7 +218,8 @@ class AnthropicLLM(LLM, _AnthropicCommon):

    def _wrap_prompt(self, prompt: str) -> str:
        if not self.HUMAN_PROMPT or not self.AI_PROMPT:
-            raise NameError("Please ensure the anthropic package is loaded")
+            msg = "Please ensure the anthropic package is loaded"
+            raise NameError(msg)

        if prompt.startswith(self.HUMAN_PROMPT):
            return prompt  # Already wrapped.

@@ -238,6 +244,8 @@ class AnthropicLLM(LLM, _AnthropicCommon):
        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.
+            run_manager: Optional callback manager for LLM run.
+            kwargs: Additional keyword arguments to pass to the model.

        Returns:
            The string generated by the model.

@@ -253,7 +261,10 @@ class AnthropicLLM(LLM, _AnthropicCommon):
        if self.streaming:
            completion = ""
            for chunk in self._stream(
-                prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
+                prompt=prompt,
+                stop=stop,
+                run_manager=run_manager,
+                **kwargs,
            ):
                completion += chunk.text
            return completion

@@ -281,7 +292,10 @@ class AnthropicLLM(LLM, _AnthropicCommon):
        if self.streaming:
            completion = ""
            async for chunk in self._astream(
-                prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
+                prompt=prompt,
+                stop=stop,
+                run_manager=run_manager,
+                **kwargs,
            ):
                completion += chunk.text
            return completion

@@ -308,8 +322,12 @@ class AnthropicLLM(LLM, _AnthropicCommon):
        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.
+            run_manager: Optional callback manager for LLM run.
+            kwargs: Additional keyword arguments to pass to the model.

        Returns:
            A generator representing the stream of tokens from Anthropic.

        Example:
+
            .. code-block:: python

@@ -319,12 +337,16 @@ class AnthropicLLM(LLM, _AnthropicCommon):
                generator = anthropic.stream(prompt)
                for token in generator:
                    yield token
+
        """
        stop = self._get_anthropic_stop(stop)
        params = {**self._default_params, **kwargs}

        for token in self.client.completions.create(
-            prompt=self._wrap_prompt(prompt), stop_sequences=stop, stream=True, **params
+            prompt=self._wrap_prompt(prompt),
+            stop_sequences=stop,
+            stream=True,
+            **params,
        ):
            chunk = GenerationChunk(text=token.completion)

@@ -344,8 +366,12 @@ class AnthropicLLM(LLM, _AnthropicCommon):
        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.
+            run_manager: Optional callback manager for LLM run.
+            kwargs: Additional keyword arguments to pass to the model.

        Returns:
            A generator representing the stream of tokens from Anthropic.

        Example:
            .. code-block:: python

@@ -354,6 +380,7 @@ class AnthropicLLM(LLM, _AnthropicCommon):
                generator = anthropic.stream(prompt)
                for token in generator:
                    yield token
+
        """
        stop = self._get_anthropic_stop(stop)
        params = {**self._default_params, **kwargs}

@@ -372,15 +399,16 @@ class AnthropicLLM(LLM, _AnthropicCommon):

    def get_num_tokens(self, text: str) -> int:
        """Calculate number of tokens."""
-        raise NotImplementedError(
+        msg = (
            "Anthropic's legacy count_tokens method was removed in anthropic 0.39.0 "
            "and langchain-anthropic 0.3.0. Please use "
            "ChatAnthropic.get_num_tokens_from_messages instead."
        )
+        raise NotImplementedError(
+            msg,
+        )


 @deprecated(since="0.1.0", removal="1.0.0", alternative="AnthropicLLM")
 class Anthropic(AnthropicLLM):
     """Anthropic large language model."""
-
-    pass
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from typing import Any, Optional, Union, cast

 from langchain_core.messages import AIMessage, ToolCall

@@ -27,8 +29,12 @@ class ToolsOutputParser(BaseGenerationOutputParser):
        Args:
            result: A list of Generations to be parsed. The Generations are assumed
                to be different candidate outputs for a single model input.
+            partial: (Not used) Whether the result is a partial result. If True, the
+                parser may return a partial result, which may not be complete or valid.

        Returns:
            Structured output.
+
        """
        if not result or not isinstance(result[0], ChatGeneration):
            return None if self.first_tool_only else []

@@ -53,8 +59,7 @@ class ToolsOutputParser(BaseGenerationOutputParser):

        if self.first_tool_only:
            return tool_calls[0] if tool_calls else None
-        else:
-            return [tool_call for tool_call in tool_calls]
+        return list(tool_calls)

    def _pydantic_parse(self, tool_call: dict) -> BaseModel:
        cls_ = {schema.__name__: schema for schema in self.pydantic_schemas or []}[

@@ -80,8 +85,7 @@ def extract_tool_calls(content: Union[str, list[Union[str, dict]]]) -> list[ToolCall]:
            if block["type"] != "tool_use":
                continue
            tool_calls.append(
-                tool_call(name=block["name"], args=block["input"], id=block["id"])
+                tool_call(name=block["name"], args=block["input"], id=block["id"]),
            )
        return tool_calls
-    else:
-        return []
+    return []
@@ -60,8 +60,58 @@ plugins = ['pydantic.mypy']
 target-version = "py39"

 [tool.ruff.lint]
-select = ["E", "F", "I", "T201", "UP", "S"]
-ignore = [ "UP007", ]
+select = [
+    "A", # flake8-builtins
+    "ASYNC", # flake8-async
+    "C4", # flake8-comprehensions
+    "COM", # flake8-commas
+    "D", # pydocstyle
+    "DOC", # pydoclint
+    "E", # pycodestyle error
+    "EM", # flake8-errmsg
+    "F", # pyflakes
+    "FA", # flake8-future-annotations
+    "FBT", # flake8-boolean-trap
+    "FLY", # flake8-flynt
+    "I", # isort
+    "ICN", # flake8-import-conventions
+    "INT", # flake8-gettext
+    "ISC", # isort-comprehensions
+    "PGH", # pygrep-hooks
+    "PIE", # flake8-pie
+    "PERF", # flake8-perf
+    "PYI", # flake8-pyi
+    "Q", # flake8-quotes
+    "RET", # flake8-return
+    "RSE", # flake8-rst-docstrings
+    "RUF", # ruff
+    "S", # flake8-bandit
+    "SLF", # flake8-self
+    "SLOT", # flake8-slots
+    "SIM", # flake8-simplify
+    "T10", # flake8-debugger
+    "T20", # flake8-print
+    "TID", # flake8-tidy-imports
+    "UP", # pyupgrade
+    "W", # pycodestyle warning
+    "YTT", # flake8-2020
+]
+ignore = [
+    "D100", # Missing docstring in public module
+    "D101", # Missing docstring in public class
+    "D102", # Missing docstring in public method
+    "D103", # Missing docstring in public function
+    "D104", # Missing docstring in public package
+    "D105", # Missing docstring in magic method
+    "D107", # Missing docstring in __init__
+    "D214", # Section over-indented, doesn't play well with reStructuredText
+    "COM812", # Messes with the formatter
+    "ISC001", # Messes with the formatter
+    "PERF203", # Rarely useful
+    "UP007", # non-pep604-annotation-union
+    "UP045", # non-pep604-annotation-optional
+    "SIM105", # Rarely useful
+]

 [tool.coverage.run]
 omit = ["tests/*"]
@@ -78,4 +128,5 @@ asyncio_mode = "auto"
 "tests/**/*.py" = [
     "S101", # Tests need assertions
     "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes
+    "SLF001", # Private member access in tests
 ]
@@ -20,9 +20,7 @@ def remove_response_headers(response: dict) -> dict:

 @pytest.fixture(scope="session")
 def vcr_config(_base_vcr_config: dict) -> dict:  # noqa: F811
-    """
-    Extend the default configuration coming from langchain_tests.
-    """
+    """Extend the default configuration coming from langchain_tests."""
     config = _base_vcr_config.copy()
     config["before_record_request"] = remove_request_headers
     config["before_record_response"] = remove_response_headers
|
||||
"""Test ChatAnthropic chat model."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
@ -43,10 +45,7 @@ def test_stream() -> None:
|
||||
chunks_with_model_name = 0
|
||||
for token in llm.stream("I'm Pickle Rick"):
|
||||
assert isinstance(token.content, str)
|
||||
if full is None:
|
||||
full = cast(BaseMessageChunk, token)
|
||||
else:
|
||||
full = full + token
|
||||
full = cast(BaseMessageChunk, token) if full is None else full + token
|
||||
assert isinstance(token, AIMessageChunk)
|
||||
if token.usage_metadata is not None:
|
||||
if token.usage_metadata.get("input_tokens"):
|
||||
@ -55,11 +54,14 @@ def test_stream() -> None:
|
||||
chunks_with_output_token_counts += 1
|
||||
chunks_with_model_name += int("model_name" in token.response_metadata)
|
||||
if chunks_with_input_token_counts != 1 or chunks_with_output_token_counts != 1:
|
||||
raise AssertionError(
|
||||
msg = (
|
||||
"Expected exactly one chunk with input or output token counts. "
|
||||
"AIMessageChunk aggregation adds counts. Check that "
|
||||
"this is behaving properly."
|
||||
)
|
||||
raise AssertionError(
|
||||
msg,
|
||||
)
|
||||
assert chunks_with_model_name == 1
|
||||
# check token usage is populated
|
||||
assert isinstance(full, AIMessageChunk)
|
||||
@ -85,10 +87,7 @@ async def test_astream() -> None:
|
||||
chunks_with_output_token_counts = 0
|
||||
async for token in llm.astream("I'm Pickle Rick"):
|
||||
assert isinstance(token.content, str)
|
||||
if full is None:
|
||||
full = cast(BaseMessageChunk, token)
|
||||
else:
|
||||
full = full + token
|
||||
full = cast(BaseMessageChunk, token) if full is None else full + token
|
||||
assert isinstance(token, AIMessageChunk)
|
||||
if token.usage_metadata is not None:
|
||||
if token.usage_metadata.get("input_tokens"):
|
||||
@ -96,11 +95,14 @@ async def test_astream() -> None:
|
||||
if token.usage_metadata.get("output_tokens"):
|
||||
chunks_with_output_token_counts += 1
|
||||
if chunks_with_input_token_counts != 1 or chunks_with_output_token_counts != 1:
|
||||
raise AssertionError(
|
||||
msg = (
|
||||
"Expected exactly one chunk with input or output token counts. "
|
||||
"AIMessageChunk aggregation adds counts. Check that "
|
||||
"this is behaving properly."
|
||||
)
|
||||
raise AssertionError(
|
||||
msg,
|
||||
)
|
||||
# check token usage is populated
|
||||
assert isinstance(full, AIMessageChunk)
|
||||
assert full.usage_metadata is not None
|
||||
@ -167,7 +169,8 @@ async def test_abatch_tags() -> None:
|
||||
llm = ChatAnthropicMessages(model_name=MODEL_NAME) # type: ignore[call-arg, call-arg]
|
||||
|
||||
result = await llm.abatch(
|
||||
["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
|
||||
["I'm Pickle Rick", "I'm not Pickle Rick"],
|
||||
config={"tags": ["foo"]},
|
||||
)
|
||||
for token in result:
|
||||
assert isinstance(token.content, str)
|
||||
@@ -187,8 +190,8 @@ async def test_async_tool_use() -> None:
                "type": "object",
                "properties": {"location": {"type": "string"}},
            },
-        }
-    ]
+        },
+    ],
    )
    response = await llm_with_tools.ainvoke("what's the weather in san francisco, ca")
    assert isinstance(response, AIMessage)

@@ -202,16 +205,16 @@ async def test_async_tool_use() -> None:

    # Test streaming
    first = True
-    chunks = []  # type: ignore
+    chunks: list[BaseMessage | BaseMessageChunk] = []
    async for chunk in llm_with_tools.astream(
-        "what's the weather in san francisco, ca"
+        "what's the weather in san francisco, ca",
    ):
-        chunks = chunks + [chunk]
+        chunks = [*chunks, chunk]
        if first:
            gathered = chunk
            first = False
        else:
-            gathered = gathered + chunk  # type: ignore
+            gathered = gathered + chunk  # type: ignore[assignment]
    assert len(chunks) > 1
    assert isinstance(gathered, AIMessageChunk)
    assert isinstance(gathered.tool_call_chunks, list)

@@ -244,12 +247,12 @@ def test_invoke() -> None:
    """Test invoke tokens from ChatAnthropicMessages."""
    llm = ChatAnthropicMessages(model_name=MODEL_NAME)  # type: ignore[call-arg, call-arg]

-    result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
+    result = llm.invoke("I'm Pickle Rick", config={"tags": ["foo"]})
    assert isinstance(result.content, str)


 def test_system_invoke() -> None:
-    """Test invoke tokens with a system message"""
+    """Test invoke tokens with a system message."""
    llm = ChatAnthropicMessages(model_name=MODEL_NAME)  # type: ignore[call-arg, call-arg]

    prompt = ChatPromptTemplate.from_messages(

@@ -260,7 +263,7 @@ def test_system_invoke() -> None:
                "STAY IN CHARACTER",
            ),
            ("human", "Are you a mathematician?"),
-        ]
+        ],
    )

    chain = prompt | llm

@@ -282,7 +285,7 @@ def test_anthropic_generate() -> None:
    """Test generate method of anthropic."""
    chat = ChatAnthropic(model=MODEL_NAME)
    chat_messages: list[list[BaseMessage]] = [
-        [HumanMessage(content="How many toes do dogs have?")]
+        [HumanMessage(content="How many toes do dogs have?")],
    ]
    messages_copy = [messages.copy() for messages in chat_messages]
    result: LLMResult = chat.generate(chat_messages)

@@ -330,7 +333,7 @@ async def test_anthropic_async_streaming_callback() -> None:
        verbose=True,
    )
    chat_messages: list[BaseMessage] = [
-        HumanMessage(content="How many toes do dogs have?")
+        HumanMessage(content="How many toes do dogs have?"),
    ]
    async for token in chat.astream(chat_messages):
        assert isinstance(token, AIMessageChunk)

@@ -352,8 +355,8 @@ def test_anthropic_multimodal() -> None:
                    },
                },
                {"type": "text", "text": "What is this a logo for?"},
-            ]
-        )
+            ],
+        ),
    ]
    response = chat.invoke(messages)
    assert isinstance(response, AIMessage)

@@ -368,7 +371,9 @@ def test_streaming() -> None:
    callback_manager = CallbackManager([callback_handler])

    llm = ChatAnthropicMessages(  # type: ignore[call-arg, call-arg]
-        model_name=MODEL_NAME, streaming=True, callback_manager=callback_manager
+        model_name=MODEL_NAME,
+        streaming=True,
+        callback_manager=callback_manager,
    )

    response = llm.generate([[HumanMessage(content="I'm Pickle Rick")]])

@@ -382,7 +387,9 @@ async def test_astreaming() -> None:
    callback_manager = CallbackManager([callback_handler])

    llm = ChatAnthropicMessages(  # type: ignore[call-arg, call-arg]
-        model_name=MODEL_NAME, streaming=True, callback_manager=callback_manager
+        model_name=MODEL_NAME,
+        streaming=True,
+        callback_manager=callback_manager,
    )

    response = await llm.agenerate([[HumanMessage(content="I'm Pickle Rick")]])
@@ -421,19 +428,19 @@ def test_tool_use() -> None:
        temperature=0,
        # Add extra headers to also test token-efficient tools
        model_kwargs={
-            "extra_headers": {"anthropic-beta": "token-efficient-tools-2025-02-19"}
+            "extra_headers": {"anthropic-beta": "token-efficient-tools-2025-02-19"},
        },
    )
    llm_with_tools = llm.bind_tools([tool_definition])
    first = True
-    chunks = []  # type: ignore
+    chunks: list[BaseMessage | BaseMessageChunk] = []
    for chunk in llm_with_tools.stream(query):
-        chunks = chunks + [chunk]
+        chunks = [*chunks, chunk]
        if first:
            gathered = chunk
            first = False
        else:
-            gathered = gathered + chunk  # type: ignore
+            gathered = gathered + chunk  # type: ignore[assignment]
    assert len(chunks) > 1
    assert isinstance(gathered.content, list)
    assert len(gathered.content) == 2

@@ -470,17 +477,17 @@ def test_tool_use() -> None:
            query,
            gathered,
            ToolMessage(content="sunny and warm", tool_call_id=tool_call["id"]),
-        ]
+        ],
    )
-    chunks = []  # type: ignore
+    chunks = []
    first = True
    for chunk in stream:
-        chunks = chunks + [chunk]
+        chunks = [*chunks, chunk]
        if first:
            gathered = chunk
            first = False
        else:
-            gathered = gathered + chunk  # type: ignore
+            gathered = gathered + chunk  # type: ignore[assignment]
    assert len(chunks) > 1


@@ -489,14 +496,14 @@ def test_builtin_tools() -> None:
    tool = {"type": "text_editor_20250124", "name": "str_replace_editor"}
    llm_with_tools = llm.bind_tools([tool])
    response = llm_with_tools.invoke(
-        "There's a syntax error in my primes.py file. Can you help me fix it?"
+        "There's a syntax error in my primes.py file. Can you help me fix it?",
    )
    assert isinstance(response, AIMessage)
    assert response.tool_calls


 class GenerateUsername(BaseModel):
-    "Get a username based on someone's name and hair color."
+    """Get a username based on someone's name and hair color."""

     name: str
     hair_color: str

@@ -508,7 +515,7 @@ def test_disable_parallel_tool_calling() -> None:
    result = llm_with_tools.invoke(
        "Use the GenerateUsername tool to generate user names for:\n\n"
        "Sally with green hair\n"
-        "Bob with blue hair"
+        "Bob with blue hair",
    )
    assert isinstance(result, AIMessage)
    assert len(result.tool_calls) == 1

@@ -523,7 +530,7 @@ def test_anthropic_with_empty_text_block() -> None:
        return "OK"

    model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0).bind_tools(
-        [type_letter]
+        [type_letter],
    )

    messages = [

@@ -531,7 +538,7 @@ def test_anthropic_with_empty_text_block() -> None:
            content="Repeat the given string using the provided tools. Do not write "
            "anything else or provide any explanations. For example, "
            "if the string is 'abc', you must print the "
-            "letters 'a', 'b', and 'c' one at a time and in that order. "
+            "letters 'a', 'b', and 'c' one at a time and in that order. ",
        ),
        HumanMessage(content="dog"),
        AIMessage(

@@ -572,7 +579,7 @@ def test_with_structured_output() -> None:
            "type": "object",
            "properties": {"location": {"type": "string"}},
        },
-    }
+    },
    )
    response = structured_llm.invoke("what's the weather in san francisco, ca")
    assert isinstance(response, dict)

@@ -593,10 +600,11 @@ def test_get_num_tokens_from_messages() -> None:
    # Test tool use
    @tool(parse_docstring=True)
    def get_weather(location: str) -> str:
-        """Get the current weather in a given location
+        """Get the current weather in a given location.

        Args:
            location: The city and state, e.g. San Francisco, CA
+
        """
        return "Sunny"

@@ -634,7 +642,7 @@ def test_get_num_tokens_from_messages() -> None:


 class GetWeather(BaseModel):
-    """Get the current weather in a given location"""
+    """Get the current weather in a given location."""

     location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
@@ -666,9 +674,9 @@ def test_pdf_document_input() -> None:
                        "media_type": "application/pdf",
                    },
                },
-            ]
-        )
-    ]
+            ],
+        ),
+    ],
    )
    assert isinstance(result, AIMessage)
    assert isinstance(result.content, str)

@@ -694,7 +702,7 @@ def test_citations() -> None:
                },
                {"type": "text", "text": "What color is the grass and sky?"},
            ],
-        }
+        },
    ]
    response = llm.invoke(messages)
    assert isinstance(response, AIMessage)

@@ -704,10 +712,7 @@ def test_citations() -> None:
    # Test streaming
    full: Optional[BaseMessageChunk] = None
    for chunk in llm.stream(messages):
-        if full is None:
-            full = cast(BaseMessageChunk, chunk)
-        else:
-            full = full + chunk
+        full = cast(BaseMessageChunk, chunk) if full is None else full + chunk
    assert isinstance(full, AIMessageChunk)
    assert isinstance(full.content, list)
    assert any("citations" in block for block in full.content)

@@ -718,7 +723,7 @@ def test_citations() -> None:
        "role": "user",
        "content": "Can you comment on the citations you just made?",
    }
-    _ = llm.invoke(messages + [full, next_message])
+    _ = llm.invoke([*messages, full, next_message])


 @pytest.mark.vcr

@@ -742,10 +747,7 @@ def test_thinking() -> None:
    # Test streaming
    full: Optional[BaseMessageChunk] = None
    for chunk in llm.stream([input_message]):
-        if full is None:
-            full = cast(BaseMessageChunk, chunk)
-        else:
-            full = full + chunk
+        full = cast(BaseMessageChunk, chunk) if full is None else full + chunk
    assert isinstance(full, AIMessageChunk)
    assert isinstance(full.content, list)
    assert any("thinking" in block for block in full.content)

@@ -784,10 +786,7 @@ def test_redacted_thinking() -> None:
    # Test streaming
    full: Optional[BaseMessageChunk] = None
    for chunk in llm.stream([input_message]):
-        if full is None:
-            full = cast(BaseMessageChunk, chunk)
-        else:
-            full = full + chunk
+        full = cast(BaseMessageChunk, chunk) if full is None else full + chunk
    assert isinstance(full, AIMessageChunk)
    assert isinstance(full.content, list)
    stream_has_reasoning = False

@@ -864,7 +863,7 @@ def test_image_tool_calling() -> None:
                "media_type": "image/jpeg",
                "data": image_data,
            },
-        }
+        },
    )
    messages = [
        SystemMessage("you're a good assistant"),

@@ -878,7 +877,7 @@ def test_image_tool_calling() -> None:
                    "id": "foo",
                    "name": "color_picker",
                },
-            ]
+            ],
        ),
        HumanMessage(
            [

@@ -889,12 +888,12 @@ def test_image_tool_calling() -> None:
                    {
                        "type": "text",
                        "text": "green is a great pick! that's my sister's favorite color",  # noqa: E501
-                    }
+                    },
                ],
                "is_error": False,
            },
            {"type": "text", "text": "what's my sister's favorite color"},
-        ]
+        ],
    ),
 ]
 llm = ChatAnthropic(model="claude-3-5-sonnet-latest")

@@ -914,7 +913,7 @@ def test_web_search() -> None:
            {
                "type": "text",
                "text": "How do I update a web app to TypeScript 5.5?",
-            }
+            },
        ],
    }
    response = llm_with_tools.invoke([input_message])

@@ -962,7 +961,7 @@ def test_code_execution() -> None:
                    "Calculate the mean and standard deviation of "
                    "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]"
                ),
-            }
+            },
        ],
    }
    response = llm_with_tools.invoke([input_message])

@@ -999,7 +998,7 @@ def test_remote_mcp() -> None:
            "name": "deepwiki",
            "tool_configuration": {"enabled": True, "allowed_tools": ["ask_question"]},
            "authorization_token": "PLACEHOLDER",
-        }
+        },
    ]

    llm = ChatAnthropic(

@@ -1018,7 +1017,7 @@ def test_remote_mcp() -> None:
                    "What transport protocols does the 2025-03-26 version of the MCP "
                    "spec (modelcontextprotocol/modelcontextprotocol) support?"
                ),
-            }
+            },
        ],
    }
    response = llm.invoke([input_message])

@@ -1132,9 +1131,9 @@ def test_search_result_tool_message() -> None:
                        "To request vacation days, submit a leave request form "
                        "through the HR portal. Approval will be sent by email."
                    ),
-                }
+                },
            ],
-        }
+        },
    ]

    tool_call = {

@@ -1182,7 +1181,7 @@ def test_search_result_top_level() -> None:
                        "To request vacation days, submit a leave request form "
                        "through the HR portal. Approval will be sent by email."
                    ),
-                }
+                },
            ],
        },
        {

@@ -1194,14 +1193,14 @@ def test_search_result_top_level() -> None:
                    {
                        "type": "text",
                        "text": "Managers have 3 days to approve a request.",
-                    }
+                    },
                ],
            },
            {
                "type": "text",
                "text": "How do I request vacation days?",
            },
-        ]
+        ],
    )
    result = llm.invoke([input_message])
    assert isinstance(result, AIMessage)
@@ -4,4 +4,3 @@ import pytest
 @pytest.mark.compile
 def test_placeholder() -> None:
     """Used for compiling integration tests without running any real tests."""
-    pass
@@ -1,5 +1,7 @@
 """Test ChatAnthropic chat model."""

+from __future__ import annotations
+
 from enum import Enum
 from typing import Optional

@@ -46,7 +48,8 @@ async def test_abatch_tags() -> None:
    llm = ChatAnthropicTools(model_name=MODEL_NAME)  # type: ignore[call-arg, call-arg]

    result = await llm.abatch(
-        ["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
+        ["I'm Pickle Rick", "I'm not Pickle Rick"],
+        config={"tags": ["foo"]},
    )
    for token in result:
        assert isinstance(token.content, str)

@@ -73,12 +76,12 @@ def test_invoke() -> None:
    """Test invoke tokens from ChatAnthropicTools."""
    llm = ChatAnthropicTools(model_name=MODEL_NAME)  # type: ignore[call-arg, call-arg]

-    result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
+    result = llm.invoke("I'm Pickle Rick", config={"tags": ["foo"]})
    assert isinstance(result.content, str)


 def test_system_invoke() -> None:
-    """Test invoke tokens with a system message"""
+    """Test invoke tokens with a system message."""
    llm = ChatAnthropicTools(model_name=MODEL_NAME)  # type: ignore[call-arg, call-arg]

    prompt = ChatPromptTemplate.from_messages(

@@ -89,7 +92,7 @@ def test_system_invoke() -> None:
                "STAY IN CHARACTER",
            ),
            ("human", "Are you a mathematician?"),
-        ]
+        ],
    )

    chain = prompt | llm

@@ -128,19 +131,24 @@ def test_anthropic_complex_structured_output() -> None:
        """Relevant information about an email."""

        sender: Optional[str] = Field(
-            None, description="The sender's name, if available"
+            None,
+            description="The sender's name, if available",
        )
        sender_phone_number: Optional[str] = Field(
-            None, description="The sender's phone number, if available"
+            None,
+            description="The sender's phone number, if available",
        )
        sender_address: Optional[str] = Field(
-            None, description="The sender's address, if available"
+            None,
+            description="The sender's address, if available",
        )
        action_items: list[str] = Field(
-            ..., description="A list of action items requested by the email"
+            ...,
+            description="A list of action items requested by the email",
        )
        topic: str = Field(
-            ..., description="High level description of what the email is about"
+            ...,
+            description="High level description of what the email is about",
        )
        tone: ToneEnum = Field(..., description="The tone of the email.")

@@ -150,7 +158,7 @@ def test_anthropic_complex_structured_output() -> None:
            (
                "human",
                "What can you tell me about the following email? Make sure to answer in the correct format: {email}",  # noqa: E501
            ),
-        ]
+        ],
    )

    llm = ChatAnthropicTools(  # type: ignore[call-arg, call-arg]

@@ -163,7 +171,7 @@ def test_anthropic_complex_structured_output() -> None:

    response = extraction_chain.invoke(
        {
-            "email": "From: Erick. The email is about the new project. The tone is positive. The action items are to send the report and to schedule a meeting."  # noqa: E501
-        }
+            "email": "From: Erick. The email is about the new project. The tone is positive. The action items are to send the report and to schedule a meeting.",  # noqa: E501
+        },
    )
    assert isinstance(response, Email)
@@ -1,4 +1,4 @@
-"""Standard LangChain interface tests"""
+"""Standard LangChain interface tests."""

 from pathlib import Path
 from typing import Literal, cast

@@ -87,9 +87,9 @@ class TestAnthropicStandard(ChatModelIntegrationTests):
                        "type": "text",
                        "text": input_,
                        "cache_control": {"type": "ephemeral"},
-                    }
+                    },
                ],
-            }
+            },
        ],
        stream,
    )

@@ -118,9 +118,9 @@ class TestAnthropicStandard(ChatModelIntegrationTests):
                        "type": "text",
                        "text": input_,
                        "cache_control": {"type": "ephemeral"},
-                    }
+                    },
                ],
-            }
+            },
        ],
        stream,
    )

@@ -134,22 +134,18 @@ class TestAnthropicStandard(ChatModelIntegrationTests):
                        "type": "text",
                        "text": input_,
                        "cache_control": {"type": "ephemeral"},
-                    }
+                    },
                ],
-            }
+            },
        ],
        stream,
    )


-def _invoke(llm: ChatAnthropic, input_: list, stream: bool) -> AIMessage:
+def _invoke(llm: ChatAnthropic, input_: list, stream: bool) -> AIMessage:  # noqa: FBT001
    if stream:
        full = None
        for chunk in llm.stream(input_):
-            if full is None:
-                full = cast(BaseMessageChunk, chunk)
-            else:
-                full = full + chunk
+            full = cast(BaseMessageChunk, chunk) if full is None else full + chunk
        return cast(AIMessage, full)
-    else:
-        return cast(AIMessage, llm.invoke(input_))
+    return cast(AIMessage, llm.invoke(input_))
@ -1,5 +1,7 @@
"""A fake callback handler for testing purposes."""

from __future__ import annotations

from typing import Any, Union

from langchain_core.callbacks import BaseCallbackHandler
@ -252,5 +254,5 @@ class FakeCallbackHandler(BaseCallbackHandler, BaseFakeCallbackHandlerMixin):
self.on_retriever_error_common()

# Overriding since BaseModel has __deepcopy__ method as well
def __deepcopy__(self, memo: dict) -> "FakeCallbackHandler": # type: ignore
def __deepcopy__(self, memo: dict) -> FakeCallbackHandler: # type: ignore[override]
return self

@ -1,5 +1,7 @@
"""Test chat model integration."""

from __future__ import annotations

import os
from typing import Any, Callable, Literal, Optional, cast
from unittest.mock import MagicMock, patch
@ -187,7 +189,7 @@ def test__merge_messages() -> None:
"text": None,
"name": "blah",
},
]
],
),
ToolMessage("buz output", tool_call_id="1", status="error"), # type: ignore[misc]
ToolMessage(
@ -234,7 +236,7 @@ def test__merge_messages() -> None:
"text": None,
"name": "blah",
},
]
],
),
HumanMessage( # type: ignore[misc]
[
@ -266,7 +268,7 @@ def test__merge_messages() -> None:
"is_error": False,
},
{"type": "text", "text": "next thing"},
]
],
),
]
actual = _merge_messages(messages)
@ -277,7 +279,7 @@ def test__merge_messages() -> None:
ToolMessage("buz output", tool_call_id="1"), # type: ignore[misc]
ToolMessage( # type: ignore[misc]
content=[
{"type": "tool_result", "content": "blah output", "tool_use_id": "2"}
{"type": "tool_result", "content": "blah output", "tool_use_id": "2"},
],
tool_call_id="2",
),
@ -292,8 +294,8 @@ def test__merge_messages() -> None:
"is_error": False,
},
{"type": "tool_result", "content": "blah output", "tool_use_id": "2"},
]
)
],
),
]
actual = _merge_messages(messages)
assert expected == actual
@ -310,7 +312,7 @@ def test__merge_messages_mutation() -> None:
]
expected = [
HumanMessage( # type: ignore[misc]
[{"type": "text", "text": "bar"}, {"type": "text", "text": "next thing"}]
[{"type": "text", "text": "bar"}, {"type": "text", "text": "next thing"}],
),
]
actual = _merge_messages(messages)
@ -327,7 +329,7 @@ def test__format_image() -> None:
@pytest.fixture()
def pydantic() -> type[BaseModel]:
class dummy_function(BaseModel):
"""dummy function"""
"""Dummy function."""

arg1: int = Field(..., description="foo")
arg2: Literal["bar", "baz"] = Field(..., description="one of 'bar', 'baz'")
@ -338,13 +340,14 @@ def pydantic() -> type[BaseModel]:
@pytest.fixture()
def function() -> Callable:
def dummy_function(arg1: int, arg2: Literal["bar", "baz"]) -> None:
"""dummy function
"""Dummy function.

Args:
----
arg1: foo
arg2: one of 'bar', 'baz'
"""
pass

""" # noqa: D401

return dummy_function

@ -358,7 +361,7 @@ def dummy_tool() -> BaseTool:
class DummyFunction(BaseTool): # type: ignore[override]
args_schema: type[BaseModel] = Schema
name: str = "dummy_function"
description: str = "dummy function"
description: str = "Dummy function."

def _run(self, *args: Any, **kwargs: Any) -> Any:
pass
@ -370,7 +373,7 @@ def dummy_tool() -> BaseTool:
def json_schema() -> dict:
return {
"title": "dummy_function",
"description": "dummy function",
"description": "Dummy function.",
"type": "object",
"properties": {
"arg1": {"description": "foo", "type": "integer"},
@ -388,7 +391,7 @@ def json_schema() -> dict:
def openai_function() -> dict:
return {
"name": "dummy_function",
"description": "dummy function",
"description": "Dummy function.",
"parameters": {
"type": "object",
"properties": {
@ -413,7 +416,7 @@ def test_convert_to_anthropic_tool(
) -> None:
expected = {
"name": "dummy_function",
"description": "dummy function",
"description": "Dummy function.",
"input_schema": {
"type": "object",
"properties": {
@ -429,7 +432,7 @@ def test_convert_to_anthropic_tool(
}

for fn in (pydantic, function, dummy_tool, json_schema, expected, openai_function):
actual = convert_to_anthropic_tool(fn) # type: ignore
actual = convert_to_anthropic_tool(fn)
assert actual == expected


@ -461,7 +464,7 @@ def test__format_messages_with_tool_calls() -> None:
"type": "base64",
"media_type": "image/jpeg",
},
}
},
],
tool_call_id="3",
)
@ -478,7 +481,7 @@ def test__format_messages_with_tool_calls() -> None:
"name": "bar",
"id": "1",
"input": {"baz": "buzz"},
}
},
],
},
{
@ -489,7 +492,7 @@ def test__format_messages_with_tool_calls() -> None:
"content": "blurb",
"tool_use_id": "1",
"is_error": False,
}
},
],
},
{
@ -500,7 +503,7 @@ def test__format_messages_with_tool_calls() -> None:
"name": "bar",
"id": "2",
"input": {"baz": "buzz"},
}
},
],
},
{
@ -516,7 +519,7 @@ def test__format_messages_with_tool_calls() -> None:
"type": "base64",
"media_type": "image/jpeg",
},
}
},
],
"tool_use_id": "2",
"is_error": False,
@ -531,7 +534,7 @@ def test__format_messages_with_tool_calls() -> None:
"type": "base64",
"media_type": "image/jpeg",
},
}
},
],
"tool_use_id": "3",
"is_error": False,
@ -579,7 +582,7 @@ def test__format_messages_with_str_content_and_tool_calls() -> None:
"content": "blurb",
"tool_use_id": "1",
"is_error": False,
}
},
],
},
],
@ -624,7 +627,7 @@ def test__format_messages_with_list_content_and_tool_calls() -> None:
"content": "blurb",
"tool_use_id": "1",
"is_error": False,
}
},
],
},
],
@ -676,7 +679,7 @@ def test__format_messages_with_tool_use_blocks_and_tool_calls() -> None:
"content": "blurb",
"tool_use_id": "1",
"is_error": False,
}
},
],
},
],
@ -690,7 +693,7 @@ def test__format_messages_with_cache_control() -> None:
SystemMessage(
[
{"type": "text", "text": "foo", "cache_control": {"type": "ephemeral"}},
]
],
),
HumanMessage(
[
@ -699,11 +702,11 @@ def test__format_messages_with_cache_control() -> None:
"type": "text",
"text": "foo",
},
]
],
),
]
expected_system = [
{"type": "text", "text": "foo", "cache_control": {"type": "ephemeral"}}
{"type": "text", "text": "foo", "cache_control": {"type": "ephemeral"}},
]
expected_messages = [
{
@ -712,7 +715,7 @@ def test__format_messages_with_cache_control() -> None:
{"type": "text", "text": "foo", "cache_control": {"type": "ephemeral"}},
{"type": "text", "text": "foo"},
],
}
},
]
actual_system, actual_messages = _format_messages(messages)
assert expected_system == actual_system
@ -733,8 +736,8 @@ def test__format_messages_with_cache_control() -> None:
"data": "<base64 data>",
"cache_control": {"type": "ephemeral"},
},
]
)
],
),
]
actual_system, actual_messages = _format_messages(messages)
assert actual_system is None
@ -756,7 +759,7 @@ def test__format_messages_with_cache_control() -> None:
"cache_control": {"type": "ephemeral"},
},
],
}
},
]
assert actual_messages == expected_messages

@ -773,8 +776,8 @@ def test__format_messages_with_citations() -> None:
"citations": {"enabled": True},
},
{"type": "text", "text": "What color is the grass and sky?"},
]
)
],
),
]
expected_messages = [
{
@ -791,7 +794,7 @@ def test__format_messages_with_citations() -> None:
},
{"type": "text", "text": "What color is the grass and sky?"},
],
}
},
]
actual_system, actual_messages = _format_messages(input_messages)
assert actual_system is None
@ -843,7 +846,7 @@ def test__format_messages_openai_image_format() -> None:
},
},
],
}
},
]
assert actual_messages == expected_messages

@ -856,7 +859,7 @@ def test__format_messages_with_multiple_system() -> None:
SystemMessage(
[
{"type": "text", "text": "foo", "cache_control": {"type": "ephemeral"}},
]
],
),
]
expected_system = [
@ -880,7 +883,8 @@ def test_anthropic_api_key_is_secret_string() -> None:


def test_anthropic_api_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
monkeypatch: MonkeyPatch,
capsys: CaptureFixture,
) -> None:
"""Test that the API key is masked when passed from an environment variable."""
monkeypatch.setenv("ANTHROPIC_API_KEY ", "secret-api-key")
@ -920,7 +924,7 @@ def test_anthropic_uses_actual_secret_value_from_secretstr() -> None:


class GetWeather(BaseModel):
"""Get the current weather in a given location"""
"""Get the current weather in a given location."""

location: str = Field(..., description="The city and state, e.g. San Francisco, CA")

@ -931,14 +935,16 @@ def test_anthropic_bind_tools_tool_choice() -> None:
anthropic_api_key="secret-api-key",
)
chat_model_with_tools = chat_model.bind_tools(
[GetWeather], tool_choice={"type": "tool", "name": "GetWeather"}
[GetWeather],
tool_choice={"type": "tool", "name": "GetWeather"},
)
assert cast(RunnableBinding, chat_model_with_tools).kwargs["tool_choice"] == {
"type": "tool",
"name": "GetWeather",
}
chat_model_with_tools = chat_model.bind_tools(
[GetWeather], tool_choice="GetWeather"
[GetWeather],
tool_choice="GetWeather",
)
assert cast(RunnableBinding, chat_model_with_tools).kwargs["tool_choice"] == {
"type": "tool",
@ -946,11 +952,11 @@ def test_anthropic_bind_tools_tool_choice() -> None:
}
chat_model_with_tools = chat_model.bind_tools([GetWeather], tool_choice="auto")
assert cast(RunnableBinding, chat_model_with_tools).kwargs["tool_choice"] == {
"type": "auto"
"type": "auto",
}
chat_model_with_tools = chat_model.bind_tools([GetWeather], tool_choice="any")
assert cast(RunnableBinding, chat_model_with_tools).kwargs["tool_choice"] == {
"type": "any"
"type": "any",
}


@ -1021,7 +1027,6 @@ class FakeTracer(BaseTracer):

def _persist_run(self, run: Run) -> None:
"""Persist a run."""
pass

def on_chat_model_start(self, *args: Any, **kwargs: Any) -> Run:
self.chat_model_start_inputs.append({"args": args, "kwargs": kwargs})
@ -1036,7 +1041,7 @@ def test_mcp_tracing() -> None:
"url": "https://mcp.deepwiki.com/mcp",
"name": "deepwiki",
"authorization_token": "PLACEHOLDER",
}
},
]

llm = ChatAnthropic(

@ -95,7 +95,8 @@ def test_tools_output_parser_empty_content() -> None:
chart_type: Literal["pie", "line", "bar"]

output_parser = ToolsOutputParser(
first_tool_only=True, pydantic_schemas=[ChartType]
first_tool_only=True,
pydantic_schemas=[ChartType],
)
message = AIMessage(
"",
@ -105,7 +106,7 @@ def test_tools_output_parser_empty_content() -> None:
"args": {"chart_type": "pie"},
"id": "foo",
"type": "tool_call",
}
},
],
)
actual = output_parser.invoke(message)

@ -1,4 +1,4 @@
"""Standard LangChain interface tests"""
"""Standard LangChain interface tests."""

import pytest
from langchain_core.language_models import BaseChatModel

@ -477,7 +477,7 @@ requires-dist = [
[package.metadata.requires-dev]
codespell = [{ name = "codespell", specifier = ">=2.2.0,<3.0.0" }]
dev = [{ name = "langchain-core", editable = "../../core" }]
lint = [{ name = "ruff", specifier = ">=0.5,<1.0" }]
lint = [{ name = "ruff", specifier = ">=0.12.2,<0.13" }]
test = [
{ name = "defusedxml", specifier = ">=0.7.1,<1.0.0" },
{ name = "freezegun", specifier = ">=1.2.2,<2.0.0" },
@ -534,7 +534,7 @@ dev = [
{ name = "jupyter", specifier = ">=1.0.0,<2.0.0" },
{ name = "setuptools", specifier = ">=67.6.1,<68.0.0" },
]
lint = [{ name = "ruff", specifier = ">=0.11.2,<0.12.0" }]
lint = [{ name = "ruff", specifier = ">=0.12.2,<0.13" }]
test = [
{ name = "blockbuster", specifier = "~=1.5.18" },
{ name = "freezegun", specifier = ">=1.2.2,<2.0.0" },
@ -598,7 +598,7 @@ requires-dist = [

[package.metadata.requires-dev]
codespell = [{ name = "codespell", specifier = ">=2.2.0,<3.0.0" }]
lint = [{ name = "ruff", specifier = ">=0.9.2,<1.0.0" }]
lint = [{ name = "ruff", specifier = ">=0.12.2,<0.13" }]
test = [{ name = "langchain-core", editable = "../../core" }]
test-integration = []
typing = [
@ -1513,27 +1513,27 @@ wheels = [

[[package]]
name = "ruff"
version = "0.5.7"
version = "0.12.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/bf/2b/69e5e412f9d390adbdbcbf4f64d6914fa61b44b08839a6584655014fc524/ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5", size = 2449817, upload-time = "2024-08-08T15:43:07.467Z" }
sdist = { url = "https://files.pythonhosted.org/packages/6c/3d/d9a195676f25d00dbfcf3cf95fdd4c685c497fcfa7e862a44ac5e4e96480/ruff-0.12.2.tar.gz", hash = "sha256:d7b4f55cd6f325cb7621244f19c873c565a08aff5a4ba9c69aa7355f3f7afd3e", size = 4432239, upload-time = "2025-07-03T16:40:19.566Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/6b/eb/06e06aaf96af30a68e83b357b037008c54a2ddcbad4f989535007c700394/ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a", size = 9570571, upload-time = "2024-08-08T15:41:56.537Z" },
{ url = "https://files.pythonhosted.org/packages/a4/10/1be32aeaab8728f78f673e7a47dd813222364479b2d6573dbcf0085e83ea/ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be", size = 8685138, upload-time = "2024-08-08T15:42:02.833Z" },
{ url = "https://files.pythonhosted.org/packages/3d/1d/c218ce83beb4394ba04d05e9aa2ae6ce9fba8405688fe878b0fdb40ce855/ruff-0.5.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eaf3d86a1fdac1aec8a3417a63587d93f906c678bb9ed0b796da7b59c1114a1e", size = 8266785, upload-time = "2024-08-08T15:42:08.321Z" },
{ url = "https://files.pythonhosted.org/packages/26/79/7f49509bd844476235b40425756def366b227a9714191c91f02fb2178635/ruff-0.5.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a01c34400097b06cf8a6e61b35d6d456d5bd1ae6961542de18ec81eaf33b4cb8", size = 9983964, upload-time = "2024-08-08T15:42:12.419Z" },
{ url = "https://files.pythonhosted.org/packages/bf/b1/939836b70bf9fcd5e5cd3ea67fdb8abb9eac7631351d32f26544034a35e4/ruff-0.5.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcc8054f1a717e2213500edaddcf1dbb0abad40d98e1bd9d0ad364f75c763eea", size = 9359490, upload-time = "2024-08-08T15:42:16.713Z" },
{ url = "https://files.pythonhosted.org/packages/32/7d/b3db19207de105daad0c8b704b2c6f2a011f9c07017bd58d8d6e7b8eba19/ruff-0.5.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f70284e73f36558ef51602254451e50dd6cc479f8b6f8413a95fcb5db4a55fc", size = 10170833, upload-time = "2024-08-08T15:42:20.54Z" },
{ url = "https://files.pythonhosted.org/packages/a2/45/eae9da55f3357a1ac04220230b8b07800bf516e6dd7e1ad20a2ff3b03b1b/ruff-0.5.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a78ad870ae3c460394fc95437d43deb5c04b5c29297815a2a1de028903f19692", size = 10896360, upload-time = "2024-08-08T15:42:25.2Z" },
{ url = "https://files.pythonhosted.org/packages/99/67/4388b36d145675f4c51ebec561fcd4298a0e2550c81e629116f83ce45a39/ruff-0.5.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ccd078c66a8e419475174bfe60a69adb36ce04f8d4e91b006f1329d5cd44bcf", size = 10477094, upload-time = "2024-08-08T15:42:29.553Z" },
{ url = "https://files.pythonhosted.org/packages/e1/9c/f5e6ed1751dc187a4ecf19a4970dd30a521c0ee66b7941c16e292a4043fb/ruff-0.5.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e31c9bad4ebf8fdb77b59cae75814440731060a09a0e0077d559a556453acbb", size = 11480896, upload-time = "2024-08-08T15:42:33.772Z" },
{ url = "https://files.pythonhosted.org/packages/c8/3b/2b683be597bbd02046678fc3fc1c199c641512b20212073b58f173822bb3/ruff-0.5.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d796327eed8e168164346b769dd9a27a70e0298d667b4ecee6877ce8095ec8e", size = 10179702, upload-time = "2024-08-08T15:42:38.038Z" },
{ url = "https://files.pythonhosted.org/packages/f1/38/c2d94054dc4b3d1ea4c2ba3439b2a7095f08d1c8184bc41e6abe2a688be7/ruff-0.5.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a09ea2c3f7778cc635e7f6edf57d566a8ee8f485f3c4454db7771efb692c499", size = 9982855, upload-time = "2024-08-08T15:42:42.031Z" },
{ url = "https://files.pythonhosted.org/packages/7d/e7/1433db2da505ffa8912dcf5b28a8743012ee780cbc20ad0bf114787385d9/ruff-0.5.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a36d8dcf55b3a3bc353270d544fb170d75d2dff41eba5df57b4e0b67a95bb64e", size = 9433156, upload-time = "2024-08-08T15:42:45.339Z" },
{ url = "https://files.pythonhosted.org/packages/e0/36/4fa43250e67741edeea3d366f59a1dc993d4d89ad493a36cbaa9889895f2/ruff-0.5.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9369c218f789eefbd1b8d82a8cf25017b523ac47d96b2f531eba73770971c9e5", size = 9782971, upload-time = "2024-08-08T15:42:49.354Z" },
{ url = "https://files.pythonhosted.org/packages/80/0e/8c276103d518e5cf9202f70630aaa494abf6fc71c04d87c08b6d3cd07a4b/ruff-0.5.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b88ca3db7eb377eb24fb7c82840546fb7acef75af4a74bd36e9ceb37a890257e", size = 10247775, upload-time = "2024-08-08T15:42:53.294Z" },
{ url = "https://files.pythonhosted.org/packages/cb/b9/673096d61276f39291b729dddde23c831a5833d98048349835782688a0ec/ruff-0.5.7-py3-none-win32.whl", hash = "sha256:33d61fc0e902198a3e55719f4be6b375b28f860b09c281e4bdbf783c0566576a", size = 7841772, upload-time = "2024-08-08T15:42:57.488Z" },
{ url = "https://files.pythonhosted.org/packages/67/1c/4520c98bfc06b9c73cd1457686d4d3935d40046b1ddea08403e5a6deff51/ruff-0.5.7-py3-none-win_amd64.whl", hash = "sha256:083bbcbe6fadb93cd86709037acc510f86eed5a314203079df174c40bbbca6b3", size = 8699779, upload-time = "2024-08-08T15:43:00.429Z" },
{ url = "https://files.pythonhosted.org/packages/38/23/b3763a237d2523d40a31fe2d1a301191fe392dd48d3014977d079cf8c0bd/ruff-0.5.7-py3-none-win_arm64.whl", hash = "sha256:2dca26154ff9571995107221d0aeaad0e75a77b5a682d6236cf89a58c70b76f4", size = 8091891, upload-time = "2024-08-08T15:43:04.162Z" },
{ url = "https://files.pythonhosted.org/packages/74/b6/2098d0126d2d3318fd5bec3ad40d06c25d377d95749f7a0c5af17129b3b1/ruff-0.12.2-py3-none-linux_armv6l.whl", hash = "sha256:093ea2b221df1d2b8e7ad92fc6ffdca40a2cb10d8564477a987b44fd4008a7be", size = 10369761, upload-time = "2025-07-03T16:39:38.847Z" },
{ url = "https://files.pythonhosted.org/packages/b1/4b/5da0142033dbe155dc598cfb99262d8ee2449d76920ea92c4eeb9547c208/ruff-0.12.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:09e4cf27cc10f96b1708100fa851e0daf21767e9709e1649175355280e0d950e", size = 11155659, upload-time = "2025-07-03T16:39:42.294Z" },
{ url = "https://files.pythonhosted.org/packages/3e/21/967b82550a503d7c5c5c127d11c935344b35e8c521f52915fc858fb3e473/ruff-0.12.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:8ae64755b22f4ff85e9c52d1f82644abd0b6b6b6deedceb74bd71f35c24044cc", size = 10537769, upload-time = "2025-07-03T16:39:44.75Z" },
{ url = "https://files.pythonhosted.org/packages/33/91/00cff7102e2ec71a4890fb7ba1803f2cdb122d82787c7d7cf8041fe8cbc1/ruff-0.12.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3eb3a6b2db4d6e2c77e682f0b988d4d61aff06860158fdb413118ca133d57922", size = 10717602, upload-time = "2025-07-03T16:39:47.652Z" },
{ url = "https://files.pythonhosted.org/packages/9b/eb/928814daec4e1ba9115858adcda44a637fb9010618721937491e4e2283b8/ruff-0.12.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:73448de992d05517170fc37169cbca857dfeaeaa8c2b9be494d7bcb0d36c8f4b", size = 10198772, upload-time = "2025-07-03T16:39:49.641Z" },
{ url = "https://files.pythonhosted.org/packages/50/fa/f15089bc20c40f4f72334f9145dde55ab2b680e51afb3b55422effbf2fb6/ruff-0.12.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b8b94317cbc2ae4a2771af641739f933934b03555e51515e6e021c64441532d", size = 11845173, upload-time = "2025-07-03T16:39:52.069Z" },
{ url = "https://files.pythonhosted.org/packages/43/9f/1f6f98f39f2b9302acc161a4a2187b1e3a97634fe918a8e731e591841cf4/ruff-0.12.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:45fc42c3bf1d30d2008023a0a9a0cfb06bf9835b147f11fe0679f21ae86d34b1", size = 12553002, upload-time = "2025-07-03T16:39:54.551Z" },
{ url = "https://files.pythonhosted.org/packages/d8/70/08991ac46e38ddd231c8f4fd05ef189b1b94be8883e8c0c146a025c20a19/ruff-0.12.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce48f675c394c37e958bf229fb5c1e843e20945a6d962cf3ea20b7a107dcd9f4", size = 12171330, upload-time = "2025-07-03T16:39:57.55Z" },
{ url = "https://files.pythonhosted.org/packages/88/a9/5a55266fec474acfd0a1c73285f19dd22461d95a538f29bba02edd07a5d9/ruff-0.12.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:793d8859445ea47591272021a81391350205a4af65a9392401f418a95dfb75c9", size = 11774717, upload-time = "2025-07-03T16:39:59.78Z" },
{ url = "https://files.pythonhosted.org/packages/87/e5/0c270e458fc73c46c0d0f7cf970bb14786e5fdb88c87b5e423a4bd65232b/ruff-0.12.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6932323db80484dda89153da3d8e58164d01d6da86857c79f1961934354992da", size = 11646659, upload-time = "2025-07-03T16:40:01.934Z" },
{ url = "https://files.pythonhosted.org/packages/b7/b6/45ab96070c9752af37f0be364d849ed70e9ccede07675b0ec4e3ef76b63b/ruff-0.12.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:6aa7e623a3a11538108f61e859ebf016c4f14a7e6e4eba1980190cacb57714ce", size = 10604012, upload-time = "2025-07-03T16:40:04.363Z" },
{ url = "https://files.pythonhosted.org/packages/86/91/26a6e6a424eb147cc7627eebae095cfa0b4b337a7c1c413c447c9ebb72fd/ruff-0.12.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2a4a20aeed74671b2def096bdf2eac610c7d8ffcbf4fb0e627c06947a1d7078d", size = 10176799, upload-time = "2025-07-03T16:40:06.514Z" },
{ url = "https://files.pythonhosted.org/packages/f5/0c/9f344583465a61c8918a7cda604226e77b2c548daf8ef7c2bfccf2b37200/ruff-0.12.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:71a4c550195612f486c9d1f2b045a600aeba851b298c667807ae933478fcef04", size = 11241507, upload-time = "2025-07-03T16:40:08.708Z" },
{ url = "https://files.pythonhosted.org/packages/1c/b7/99c34ded8fb5f86c0280278fa89a0066c3760edc326e935ce0b1550d315d/ruff-0.12.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:4987b8f4ceadf597c927beee65a5eaf994c6e2b631df963f86d8ad1bdea99342", size = 11717609, upload-time = "2025-07-03T16:40:10.836Z" },
{ url = "https://files.pythonhosted.org/packages/51/de/8589fa724590faa057e5a6d171e7f2f6cffe3287406ef40e49c682c07d89/ruff-0.12.2-py3-none-win32.whl", hash = "sha256:369ffb69b70cd55b6c3fc453b9492d98aed98062db9fec828cdfd069555f5f1a", size = 10523823, upload-time = "2025-07-03T16:40:13.203Z" },
{ url = "https://files.pythonhosted.org/packages/94/47/8abf129102ae4c90cba0c2199a1a9b0fa896f6f806238d6f8c14448cc748/ruff-0.12.2-py3-none-win_amd64.whl", hash = "sha256:dca8a3b6d6dc9810ed8f328d406516bf4d660c00caeaef36eb831cf4871b0639", size = 11629831, upload-time = "2025-07-03T16:40:15.478Z" },
{ url = "https://files.pythonhosted.org/packages/e2/1f/72d2946e3cc7456bb837e88000eb3437e55f80db339c840c04015a11115d/ruff-0.12.2-py3-none-win_arm64.whl", hash = "sha256:48d6c6bfb4761df68bc05ae630e24f506755e702d4fb08f08460be778c7ccb12", size = 10735334, upload-time = "2025-07-03T16:40:17.677Z" },
]
[[package]]