From e02eed5489846a9dd40ea34858c0fc985b8ce095 Mon Sep 17 00:00:00 2001 From: ccurme Date: Tue, 5 Aug 2025 16:17:32 -0300 Subject: [PATCH 1/2] feat: standard outputs (#32287) Co-authored-by: Mason Daugherty Co-authored-by: Eugene Yurtsev Co-authored-by: Mason Daugherty Co-authored-by: Nuno Campos --- .github/workflows/codspeed.yml | 1 + docs/api_reference/create_api_rst.py | 16 +- docs/docs/contributing/how_to/testing.mdx | 41 + libs/core/langchain_core/callbacks/base.py | 37 +- libs/core/langchain_core/callbacks/manager.py | 160 +- .../callbacks/streaming_stdout.py | 7 +- libs/core/langchain_core/callbacks/usage.py | 14 +- .../langchain_core/language_models/_utils.py | 36 + .../langchain_core/language_models/base.py | 5 +- .../language_models/fake_chat_models.py | 72 +- libs/core/langchain_core/messages/__init__.py | 54 + libs/core/langchain_core/messages/ai.py | 46 +- .../langchain_core/messages/content_blocks.py | 1424 ++++++- libs/core/langchain_core/messages/modifier.py | 2 +- libs/core/langchain_core/messages/tool.py | 92 +- libs/core/langchain_core/messages/utils.py | 218 +- .../langchain_core/output_parsers/base.py | 64 +- .../langchain_core/output_parsers/json.py | 7 +- .../langchain_core/output_parsers/list.py | 9 +- .../output_parsers/openai_functions.py | 33 +- .../output_parsers/openai_tools.py | 137 +- .../langchain_core/output_parsers/pydantic.py | 5 +- .../output_parsers/transform.py | 65 +- .../core/langchain_core/output_parsers/xml.py | 19 +- libs/core/langchain_core/prompt_values.py | 118 +- libs/core/langchain_core/runnables/base.py | 25 +- libs/core/langchain_core/runnables/config.py | 4 +- libs/core/langchain_core/runnables/graph.py | 6 +- libs/core/langchain_core/tools/base.py | 46 +- libs/core/langchain_core/tools/convert.py | 24 +- libs/core/langchain_core/tools/retriever.py | 9 +- libs/core/langchain_core/tools/structured.py | 10 +- libs/core/langchain_core/tracers/base.py | 21 +- libs/core/langchain_core/tracers/core.py | 31 +- .../langchain_core/tracers/event_stream.py | 17 +- libs/core/langchain_core/tracers/langchain.py | 17 +- .../core/langchain_core/tracers/log_stream.py | 3 +- .../langchain_core/utils/function_calling.py | 2 +- libs/core/langchain_core/v1/__init__.py | 1 + libs/core/langchain_core/v1/chat_models.py | 1047 +++++ libs/core/langchain_core/v1/messages.py | 755 ++++ libs/core/pyproject.toml | 3 + .../tests/benchmarks/test_async_callbacks.py | 8 +- libs/core/tests/unit_tests/fake/callbacks.py | 3 +- .../unit_tests/fake/test_fake_chat_model.py | 14 +- .../language_models/chat_models/test_base.py | 96 +- .../messages/test_content_block_factories.py | 974 +++++ .../tests/unit_tests/messages/test_imports.py | 18 + .../messages/test_response_metadata.py | 343 ++ .../messages/test_response_metadata.py.bak | 361 ++ .../tests/unit_tests/messages/test_utils.py | 31 +- .../output_parsers/test_base_parsers.py | 66 +- .../output_parsers/test_openai_tools.py | 353 +- .../prompts/__snapshots__/test_chat.ambr | 82 +- .../runnables/__snapshots__/test_graph.ambr | 2312 +++++++--- .../__snapshots__/test_runnable.ambr | 2572 ++++++++--- .../tests/unit_tests/runnables/test_graph.py | 4 +- .../unit_tests/runnables/test_runnable.py | 24 +- libs/core/tests/unit_tests/test_messages.py | 269 +- libs/core/tests/unit_tests/test_tools.py | 347 +- .../tracers/test_async_base_tracer.py | 42 +- .../unit_tests/tracers/test_base_tracer.py | 43 +- .../agents/output_parsers/openai_functions.py | 6 +- .../agents/output_parsers/openai_tools.py | 6 +- 
.../langchain/agents/output_parsers/tools.py | 13 +- .../langchain/callbacks/streaming_aiter.py | 5 +- .../callbacks/streaming_aiter_final_only.py | 7 +- libs/langchain/langchain/chat_models/base.py | 53 +- .../langchain/smith/evaluation/progress.py | 5 +- .../format_scratchpad/test_openai_tools.py | 7 +- .../tests/unit_tests/agents/test_agent.py | 2 +- .../callbacks/fake_callback_handler.py | 3 +- .../tests/unit_tests/chat_models/test_base.py | 17 + .../unit_tests/llms/test_fake_chat_model.py | 8 +- .../langchain_openai/chat_models/_compat.py | 468 +- .../langchain_openai/chat_models/base.py | 50 +- .../openai/langchain_openai/v1/__init__.py | 3 + .../v1/chat_models/__init__.py | 3 + .../langchain_openai/v1/chat_models/base.py | 3762 +++++++++++++++++ libs/partners/openai/pyproject.toml | 2 + .../cassettes/test_function_calling.yaml.gz | Bin 0 -> 7912 bytes .../test_parsed_pydantic_schema.yaml.gz | Bin 0 -> 4616 bytes .../tests/cassettes/test_web_search.yaml.gz | Bin 24336 -> 27998 bytes .../chat_models/test_base.py | 2 + .../chat_models/test_responses_api.py | 648 ++- .../tests/unit_tests/chat_models/test_base.py | 259 +- .../unit_tests/chat_models/test_imports.py | 3 + .../chat_models/test_responses_stream.py | 84 +- .../openai/tests/unit_tests/fake/callbacks.py | 3 +- .../openai/tests/unit_tests/test_imports.py | 4 + uv.lock | 10 +- 91 files changed, 16350 insertions(+), 1748 deletions(-) create mode 100644 libs/core/langchain_core/v1/__init__.py create mode 100644 libs/core/langchain_core/v1/chat_models.py create mode 100644 libs/core/langchain_core/v1/messages.py create mode 100644 libs/core/tests/unit_tests/messages/test_content_block_factories.py create mode 100644 libs/core/tests/unit_tests/messages/test_response_metadata.py create mode 100644 libs/core/tests/unit_tests/messages/test_response_metadata.py.bak create mode 100644 libs/partners/openai/langchain_openai/v1/__init__.py create mode 100644 libs/partners/openai/langchain_openai/v1/chat_models/__init__.py create mode 100644 libs/partners/openai/langchain_openai/v1/chat_models/base.py create mode 100644 libs/partners/openai/tests/cassettes/test_function_calling.yaml.gz create mode 100644 libs/partners/openai/tests/cassettes/test_parsed_pydantic_schema.yaml.gz diff --git a/.github/workflows/codspeed.yml b/.github/workflows/codspeed.yml index c6836d192c4..6d6025082df 100644 --- a/.github/workflows/codspeed.yml +++ b/.github/workflows/codspeed.yml @@ -20,6 +20,7 @@ jobs: codspeed: name: 'Benchmark' runs-on: ubuntu-latest + if: ${{ !contains(github.event.pull_request.labels.*.name, 'codspeed-ignore') }} strategy: matrix: include: diff --git a/docs/api_reference/create_api_rst.py b/docs/api_reference/create_api_rst.py index f4c5f977b3f..a37d213d725 100644 --- a/docs/api_reference/create_api_rst.py +++ b/docs/api_reference/create_api_rst.py @@ -217,7 +217,11 @@ def _load_package_modules( # Get the full namespace of the module namespace = str(relative_module_name).replace(".py", "").replace("/", ".") # Keep only the top level namespace - top_namespace = namespace.split(".")[0] + # (but make special exception for content_blocks and messages.v1) + if namespace == "messages.content_blocks" or namespace == "messages.v1": + top_namespace = namespace # Keep full namespace for content_blocks + else: + top_namespace = namespace.split(".")[0] try: # If submodule is present, we need to construct the paths in a slightly @@ -283,7 +287,7 @@ def _construct_doc( .. 
toctree:: :hidden: :maxdepth: 2 - + """ index_autosummary = """ """ @@ -365,9 +369,9 @@ def _construct_doc( module_doc += f"""\ :template: {template} - + {class_["qualified_name"]} - + """ index_autosummary += f""" {class_["qualified_name"]} @@ -550,8 +554,8 @@ def _build_index(dirs: List[str]) -> None: integrations = sorted(dir_ for dir_ in dirs if dir_ not in main_) doc = """# LangChain Python API Reference -Welcome to the LangChain Python API reference. This is a reference for all -`langchain-x` packages. +Welcome to the LangChain Python API reference. This is a reference for all +`langchain-x` packages. For user guides see [https://python.langchain.com](https://python.langchain.com). diff --git a/docs/docs/contributing/how_to/testing.mdx b/docs/docs/contributing/how_to/testing.mdx index cc5a1155c32..853ad44e7e3 100644 --- a/docs/docs/contributing/how_to/testing.mdx +++ b/docs/docs/contributing/how_to/testing.mdx @@ -124,6 +124,47 @@ start "" htmlcov/index.html || open htmlcov/index.html ``` +## Snapshot Testing + +Some tests use [syrupy](https://github.com/tophat/syrupy) for snapshot testing, which captures the output of functions and compares them to stored snapshots. This is particularly useful for testing JSON schema generation and other structured outputs. + +### Updating Snapshots + +To update snapshots when the expected output has legitimately changed: + +```bash +uv run --group test pytest path/to/test.py --snapshot-update +``` + +### Pydantic Version Compatibility Issues + +Pydantic generates different JSON schemas across versions, which can cause snapshot test failures in CI when tests run with different Pydantic versions than what was used to generate the snapshots. + +**Symptoms:** +- CI fails with snapshot mismatches showing differences like missing or extra fields. +- Tests pass locally but fail in CI with different Pydantic versions + +**Solution:** +Locally update snapshots using the same Pydantic version that CI uses: + +1. **Identify the failing Pydantic version** from CI logs (e.g., `2.7.0`, `2.8.0`, `2.9.0`) + +2. **Update snapshots with that version:** + ```bash + uv run --with "pydantic==2.9.0" --group test pytest tests/unit_tests/path/to/test.py::test_name --snapshot-update + ``` + +3. **Verify compatibility across supported versions:** + ```bash + # Test with the version you used to update + uv run --with "pydantic==2.9.0" --group test pytest tests/unit_tests/path/to/test.py::test_name + + # Test with other supported versions + uv run --with "pydantic==2.8.0" --group test pytest tests/unit_tests/path/to/test.py::test_name + ``` + +**Note:** Some tests use `@pytest.mark.skipif` decorators to only run with specific Pydantic version ranges (e.g., `PYDANTIC_VERSION_AT_LEAST_210`). Make sure to understand these constraints when updating snapshots. + ## Coverage Code coverage (i.e. the amount of code that is covered by unit tests) helps identify areas of the code that are potentially more or less brittle. 
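Tying together the snapshot-update steps from the Pydantic compatibility section above: a sketch of a loop for re-running one snapshot test under each Pydantic version in the CI matrix, assuming the same `uv`-based commands shown above. The version list and test path are placeholders to adapt to your own CI matrix:

```bash
# Re-run one snapshot test against each Pydantic version that CI exercises.
# The versions and test path below are illustrative; match them to your CI matrix.
for v in 2.7.0 2.8.0 2.9.0; do
  echo "=== pydantic $v ==="
  uv run --with "pydantic==$v" --group test \
    pytest tests/unit_tests/path/to/test.py::test_name
done
```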
diff --git a/libs/core/langchain_core/callbacks/base.py b/libs/core/langchain_core/callbacks/base.py index 5365fcb9ef1..b6be125af48 100644 --- a/libs/core/langchain_core/callbacks/base.py +++ b/libs/core/langchain_core/callbacks/base.py @@ -7,6 +7,8 @@ from typing import TYPE_CHECKING, Any, Optional, Union from typing_extensions import Self +from langchain_core.v1.messages import AIMessage, AIMessageChunk, MessageV1 + if TYPE_CHECKING: from collections.abc import Sequence from uuid import UUID @@ -66,7 +68,9 @@ class LLMManagerMixin: self, token: str, *, - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + chunk: Optional[ + Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk] + ] = None, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, @@ -75,8 +79,8 @@ class LLMManagerMixin: Args: token (str): The new token. - chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk, - containing content and other information. + chunk (GenerationChunk | ChatGenerationChunk | AIMessageChunk): The new + generated chunk, containing content and other information. run_id (UUID): The run ID. This is the ID of the current run. parent_run_id (UUID): The parent run ID. This is the ID of the parent run. kwargs (Any): Additional keyword arguments. @@ -84,7 +88,7 @@ class LLMManagerMixin: def on_llm_end( self, - response: LLMResult, + response: Union[LLMResult, AIMessage], *, run_id: UUID, parent_run_id: Optional[UUID] = None, @@ -93,7 +97,7 @@ class LLMManagerMixin: """Run when LLM ends running. Args: - response (LLMResult): The response which was generated. + response (LLMResult | AIMessage): The response which was generated. run_id (UUID): The run ID. This is the ID of the current run. parent_run_id (UUID): The parent run ID. This is the ID of the parent run. kwargs (Any): Additional keyword arguments. @@ -261,7 +265,7 @@ class CallbackManagerMixin: def on_chat_model_start( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], + messages: Union[list[list[BaseMessage]], list[MessageV1]], *, run_id: UUID, parent_run_id: Optional[UUID] = None, @@ -439,6 +443,9 @@ class BaseCallbackHandler( run_inline: bool = False """Whether to run the callback inline.""" + accepts_new_messages: bool = False + """Whether the callback accepts new message format.""" + @property def ignore_llm(self) -> bool: """Whether to ignore LLM callbacks.""" @@ -509,7 +516,7 @@ class AsyncCallbackHandler(BaseCallbackHandler): async def on_chat_model_start( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], + messages: Union[list[list[BaseMessage]], list[MessageV1]], *, run_id: UUID, parent_run_id: Optional[UUID] = None, @@ -540,7 +547,9 @@ class AsyncCallbackHandler(BaseCallbackHandler): self, token: str, *, - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + chunk: Optional[ + Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk] + ] = None, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[list[str]] = None, @@ -550,8 +559,8 @@ class AsyncCallbackHandler(BaseCallbackHandler): Args: token (str): The new token. - chunk (GenerationChunk | ChatGenerationChunk): The new generated chunk, - containing content and other information. + chunk (GenerationChunk | ChatGenerationChunk | AIMessageChunk): The new + generated chunk, containing content and other information. run_id (UUID): The run ID. This is the ID of the current run. parent_run_id (UUID): The parent run ID. This is the ID of the parent run. 
tags (Optional[list[str]]): The tags. @@ -560,7 +569,7 @@ class AsyncCallbackHandler(BaseCallbackHandler): async def on_llm_end( self, - response: LLMResult, + response: Union[LLMResult, AIMessage], *, run_id: UUID, parent_run_id: Optional[UUID] = None, @@ -570,7 +579,7 @@ class AsyncCallbackHandler(BaseCallbackHandler): """Run when LLM ends running. Args: - response (LLMResult): The response which was generated. + response (LLMResult | AIMessage): The response which was generated. run_id (UUID): The run ID. This is the ID of the current run. parent_run_id (UUID): The parent run ID. This is the ID of the parent run. tags (Optional[list[str]]): The tags. @@ -594,8 +603,8 @@ class AsyncCallbackHandler(BaseCallbackHandler): parent_run_id: The parent run ID. This is the ID of the parent run. tags: The tags. kwargs (Any): Additional keyword arguments. - - response (LLMResult): The response which was generated before - the error occurred. + - response (LLMResult | AIMessage): The response which was generated + before the error occurred. """ async def on_chain_start( diff --git a/libs/core/langchain_core/callbacks/manager.py b/libs/core/langchain_core/callbacks/manager.py index 56fc1bb67ba..93efa45d336 100644 --- a/libs/core/langchain_core/callbacks/manager.py +++ b/libs/core/langchain_core/callbacks/manager.py @@ -37,8 +37,16 @@ from langchain_core.callbacks.base import ( ) from langchain_core.callbacks.stdout import StdOutCallbackHandler from langchain_core.messages import BaseMessage, get_buffer_string +from langchain_core.messages.utils import convert_from_v1_message +from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, LLMResult from langchain_core.tracers.schemas import Run from langchain_core.utils.env import env_var_is_set +from langchain_core.v1.messages import ( + AIMessage, + AIMessageChunk, + MessageV1, + MessageV1Types, +) if TYPE_CHECKING: from collections.abc import AsyncGenerator, Coroutine, Generator, Sequence @@ -47,7 +55,7 @@ if TYPE_CHECKING: from langchain_core.agents import AgentAction, AgentFinish from langchain_core.documents import Document - from langchain_core.outputs import ChatGenerationChunk, GenerationChunk, LLMResult + from langchain_core.outputs import GenerationChunk from langchain_core.runnables.config import RunnableConfig logger = logging.getLogger(__name__) @@ -243,6 +251,46 @@ def shielded(func: Func) -> Func: return cast("Func", wrapped) +def _convert_llm_events( + event_name: str, args: tuple[Any, ...], kwargs: dict[str, Any] +) -> tuple[tuple[Any, ...], dict[str, Any]]: + args_list = list(args) + if ( + event_name == "on_chat_model_start" + and isinstance(args_list[1], list) + and args_list[1] + and isinstance(args_list[1][0], MessageV1Types) + ): + batch = [ + convert_from_v1_message(item) + for item in args_list[1] + if isinstance(item, MessageV1Types) + ] + args_list[1] = [batch] + elif ( + event_name == "on_llm_new_token" + and "chunk" in kwargs + and isinstance(kwargs["chunk"], MessageV1Types) + ): + chunk = kwargs["chunk"] + kwargs["chunk"] = ChatGenerationChunk(text=chunk.text, message=chunk) + elif event_name == "on_llm_end" and isinstance(args_list[0], MessageV1Types): + args_list[0] = LLMResult( + generations=[ + [ + ChatGeneration( + text=args_list[0].text, + message=convert_from_v1_message(args_list[0]), + ) + ] + ] + ) + else: + pass + + return tuple(args_list), kwargs + + def handle_event( handlers: list[BaseCallbackHandler], event_name: str, @@ -271,6 +319,8 @@ def handle_event( if ignore_condition_name is None or not 
getattr( handler, ignore_condition_name ): + if not handler.accepts_new_messages: + args, kwargs = _convert_llm_events(event_name, args, kwargs) event = getattr(handler, event_name)(*args, **kwargs) if asyncio.iscoroutine(event): coros.append(event) @@ -365,6 +415,8 @@ async def _ahandle_event_for_handler( ) -> None: try: if ignore_condition_name is None or not getattr(handler, ignore_condition_name): + if not handler.accepts_new_messages: + args, kwargs = _convert_llm_events(event_name, args, kwargs) event = getattr(handler, event_name) if asyncio.iscoroutinefunction(event): await event(*args, **kwargs) @@ -674,7 +726,9 @@ class CallbackManagerForLLMRun(RunManager, LLMManagerMixin): self, token: str, *, - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + chunk: Optional[ + Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk] + ] = None, **kwargs: Any, ) -> None: """Run when LLM generates a new token. @@ -699,11 +753,11 @@ class CallbackManagerForLLMRun(RunManager, LLMManagerMixin): **kwargs, ) - def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: + def on_llm_end(self, response: Union[LLMResult, AIMessage], **kwargs: Any) -> None: """Run when LLM ends running. Args: - response (LLMResult): The LLM result. + response (LLMResult | AIMessage): The LLM result. **kwargs (Any): Additional keyword arguments. """ if not self.handlers: @@ -729,8 +783,8 @@ class CallbackManagerForLLMRun(RunManager, LLMManagerMixin): Args: error (Exception or KeyboardInterrupt): The error. kwargs (Any): Additional keyword arguments. - - response (LLMResult): The response which was generated before - the error occurred. + - response (LLMResult | AIMessage): The response which was generated + before the error occurred. """ if not self.handlers: return @@ -770,7 +824,9 @@ class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin): self, token: str, *, - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + chunk: Optional[ + Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk] + ] = None, **kwargs: Any, ) -> None: """Run when LLM generates a new token. @@ -796,11 +852,13 @@ class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin): ) @shielded - async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: + async def on_llm_end( + self, response: Union[LLMResult, AIMessage], **kwargs: Any + ) -> None: """Run when LLM ends running. Args: - response (LLMResult): The LLM result. + response (LLMResult | AIMessage): The LLM result. **kwargs (Any): Additional keyword arguments. """ if not self.handlers: @@ -827,11 +885,8 @@ class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin): Args: error (Exception or KeyboardInterrupt): The error. kwargs (Any): Additional keyword arguments. - - response (LLMResult): The response which was generated before - the error occurred. - - - + - response (LLMResult | AIMessage): The response which was generated + before the error occurred. """ if not self.handlers: return @@ -1354,7 +1409,7 @@ class CallbackManager(BaseCallbackManager): def on_chat_model_start( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], + messages: Union[list[list[BaseMessage]], list[MessageV1]], run_id: Optional[UUID] = None, **kwargs: Any, ) -> list[CallbackManagerForLLMRun]: @@ -1362,7 +1417,7 @@ class CallbackManager(BaseCallbackManager): Args: serialized (dict[str, Any]): The serialized LLM. - messages (list[list[BaseMessage]]): The list of messages. 
+ messages (list[list[BaseMessage | MessageV1]]): The list of messages. run_id (UUID, optional): The ID of the run. Defaults to None. **kwargs (Any): Additional keyword arguments. @@ -1370,6 +1425,32 @@ class CallbackManager(BaseCallbackManager): list[CallbackManagerForLLMRun]: A callback manager for each list of messages as an LLM run. """ + if messages and isinstance(messages[0], MessageV1Types): + run_id_ = run_id if run_id is not None else uuid.uuid4() + handle_event( + self.handlers, + "on_chat_model_start", + "ignore_chat_model", + serialized, + messages, + run_id=run_id_, + parent_run_id=self.parent_run_id, + tags=self.tags, + metadata=self.metadata, + **kwargs, + ) + return [ + CallbackManagerForLLMRun( + run_id=run_id_, + handlers=self.handlers, + inheritable_handlers=self.inheritable_handlers, + parent_run_id=self.parent_run_id, + tags=self.tags, + inheritable_tags=self.inheritable_tags, + metadata=self.metadata, + inheritable_metadata=self.inheritable_metadata, + ) + ] managers = [] for message_list in messages: if run_id is not None: @@ -1864,7 +1945,7 @@ class AsyncCallbackManager(BaseCallbackManager): async def on_chat_model_start( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], + messages: Union[list[list[BaseMessage]], list[MessageV1]], run_id: Optional[UUID] = None, **kwargs: Any, ) -> list[AsyncCallbackManagerForLLMRun]: @@ -1872,7 +1953,7 @@ class AsyncCallbackManager(BaseCallbackManager): Args: serialized (dict[str, Any]): The serialized LLM. - messages (list[list[BaseMessage]]): The list of messages. + messages (list[list[BaseMessage | MessageV1]]): The list of messages. run_id (UUID, optional): The ID of the run. Defaults to None. **kwargs (Any): Additional keyword arguments. @@ -1881,10 +1962,51 @@ class AsyncCallbackManager(BaseCallbackManager): async callback managers, one for each LLM Run corresponding to each inner message list. 
""" + if messages and isinstance(messages[0], MessageV1Types): + run_id_ = run_id if run_id is not None else uuid.uuid4() + inline_tasks = [] + non_inline_tasks = [] + for handler in self.handlers: + task = ahandle_event( + [handler], + "on_chat_model_start", + "ignore_chat_model", + serialized, + messages, + run_id=run_id_, + parent_run_id=self.parent_run_id, + tags=self.tags, + metadata=self.metadata, + **kwargs, + ) + if handler.run_inline: + inline_tasks.append(task) + else: + non_inline_tasks.append(task) + managers = [ + AsyncCallbackManagerForLLMRun( + run_id=run_id_, + handlers=self.handlers, + inheritable_handlers=self.inheritable_handlers, + parent_run_id=self.parent_run_id, + tags=self.tags, + inheritable_tags=self.inheritable_tags, + metadata=self.metadata, + inheritable_metadata=self.inheritable_metadata, + ) + ] + # Run inline tasks sequentially + for task in inline_tasks: + await task + + # Run non-inline tasks concurrently + if non_inline_tasks: + await asyncio.gather(*non_inline_tasks) + + return managers inline_tasks = [] non_inline_tasks = [] managers = [] - for message_list in messages: if run_id is not None: run_id_ = run_id diff --git a/libs/core/langchain_core/callbacks/streaming_stdout.py b/libs/core/langchain_core/callbacks/streaming_stdout.py index f8dbe518eac..31ac544f55a 100644 --- a/libs/core/langchain_core/callbacks/streaming_stdout.py +++ b/libs/core/langchain_core/callbacks/streaming_stdout.py @@ -3,7 +3,7 @@ from __future__ import annotations import sys -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, Union from typing_extensions import override @@ -13,6 +13,7 @@ if TYPE_CHECKING: from langchain_core.agents import AgentAction, AgentFinish from langchain_core.messages import BaseMessage from langchain_core.outputs import LLMResult + from langchain_core.v1.messages import AIMessage, MessageV1 class StreamingStdOutCallbackHandler(BaseCallbackHandler): @@ -32,7 +33,7 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler): def on_chat_model_start( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], + messages: Union[list[list[BaseMessage]], list[MessageV1]], **kwargs: Any, ) -> None: """Run when LLM starts running. @@ -54,7 +55,7 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler): sys.stdout.write(token) sys.stdout.flush() - def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: + def on_llm_end(self, response: Union[LLMResult, AIMessage], **kwargs: Any) -> None: """Run when LLM ends running. 
Args: diff --git a/libs/core/langchain_core/callbacks/usage.py b/libs/core/langchain_core/callbacks/usage.py index 0249cadec1f..5aef7e714dd 100644 --- a/libs/core/langchain_core/callbacks/usage.py +++ b/libs/core/langchain_core/callbacks/usage.py @@ -4,14 +4,16 @@ import threading from collections.abc import Generator from contextlib import contextmanager from contextvars import ContextVar -from typing import Any, Optional +from typing import Any, Optional, Union from typing_extensions import override from langchain_core.callbacks import BaseCallbackHandler from langchain_core.messages import AIMessage from langchain_core.messages.ai import UsageMetadata, add_usage +from langchain_core.messages.utils import convert_from_v1_message from langchain_core.outputs import ChatGeneration, LLMResult +from langchain_core.v1.messages import AIMessage as AIMessageV1 class UsageMetadataCallbackHandler(BaseCallbackHandler): @@ -58,9 +60,17 @@ class UsageMetadataCallbackHandler(BaseCallbackHandler): return str(self.usage_metadata) @override - def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: + def on_llm_end( + self, response: Union[LLMResult, AIMessageV1], **kwargs: Any + ) -> None: """Collect token usage.""" # Check for usage_metadata (langchain-core >= 0.2.2) + if isinstance(response, AIMessageV1): + response = LLMResult( + generations=[ + [ChatGeneration(message=convert_from_v1_message(response))] + ] + ) try: generation = response.generations[0][0] except IndexError: diff --git a/libs/core/langchain_core/language_models/_utils.py b/libs/core/langchain_core/language_models/_utils.py index 883f8c855ea..ec814261763 100644 --- a/libs/core/langchain_core/language_models/_utils.py +++ b/libs/core/langchain_core/language_models/_utils.py @@ -1,8 +1,10 @@ +import copy import re from collections.abc import Sequence from typing import Optional from langchain_core.messages import BaseMessage +from langchain_core.v1.messages import MessageV1 def _is_openai_data_block(block: dict) -> bool: @@ -138,3 +140,37 @@ def _normalize_messages(messages: Sequence[BaseMessage]) -> list[BaseMessage]: formatted_messages.append(formatted_message) return formatted_messages + + +def _normalize_messages_v1(messages: Sequence[MessageV1]) -> list[MessageV1]: + """Extend support for message formats. + + Chat models implement support for images in OpenAI Chat Completions format, as well + as other multimodal data as standard data blocks. This function extends support to + audio and file data in OpenAI Chat Completions format by converting them to standard + data blocks. 
+ """ + formatted_messages = [] + for message in messages: + formatted_message = message + if isinstance(message.content, list): + for idx, block in enumerate(message.content): + if ( + isinstance(block, dict) + # Subset to (PDF) files and audio, as most relevant chat models + # support images in OAI format (and some may not yet support the + # standard data block format) + and block.get("type") in {"file", "input_audio"} + and _is_openai_data_block(block) # type: ignore[arg-type] + ): + if formatted_message is message: + formatted_message = copy.copy(message) + # Also shallow-copy content + formatted_message.content = list(formatted_message.content) + + formatted_message.content[idx] = ( # type: ignore[call-overload] + _convert_openai_format_to_data_block(block) # type: ignore[arg-type] + ) + formatted_messages.append(formatted_message) + + return formatted_messages diff --git a/libs/core/langchain_core/language_models/base.py b/libs/core/langchain_core/language_models/base.py index a9e7e4e64cc..e421d9ab842 100644 --- a/libs/core/langchain_core/language_models/base.py +++ b/libs/core/langchain_core/language_models/base.py @@ -31,6 +31,7 @@ from langchain_core.messages import ( from langchain_core.prompt_values import PromptValue from langchain_core.runnables import Runnable, RunnableSerializable from langchain_core.utils import get_pydantic_field_names +from langchain_core.v1.messages import AIMessage as AIMessageV1 if TYPE_CHECKING: from langchain_core.outputs import LLMResult @@ -85,7 +86,9 @@ def _get_token_ids_default_method(text: str) -> list[int]: LanguageModelInput = Union[PromptValue, str, Sequence[MessageLikeRepresentation]] LanguageModelOutput = Union[BaseMessage, str] LanguageModelLike = Runnable[LanguageModelInput, LanguageModelOutput] -LanguageModelOutputVar = TypeVar("LanguageModelOutputVar", BaseMessage, str) +LanguageModelOutputVar = TypeVar( + "LanguageModelOutputVar", BaseMessage, str, AIMessageV1 +) def _get_verbosity() -> bool: diff --git a/libs/core/langchain_core/language_models/fake_chat_models.py b/libs/core/langchain_core/language_models/fake_chat_models.py index 184a4fcb154..a4ecd87d627 100644 --- a/libs/core/langchain_core/language_models/fake_chat_models.py +++ b/libs/core/langchain_core/language_models/fake_chat_models.py @@ -3,7 +3,7 @@ import asyncio import re import time -from collections.abc import AsyncIterator, Iterator +from collections.abc import AsyncIterator, Iterable, Iterator from typing import Any, Optional, Union, cast from typing_extensions import override @@ -16,6 +16,10 @@ from langchain_core.language_models.chat_models import BaseChatModel, SimpleChat from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult from langchain_core.runnables import RunnableConfig +from langchain_core.v1.chat_models import BaseChatModel as BaseChatModelV1 +from langchain_core.v1.messages import AIMessage as AIMessageV1 +from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1 +from langchain_core.v1.messages import MessageV1 class FakeMessagesListChatModel(BaseChatModel): @@ -367,3 +371,69 @@ class ParrotFakeChatModel(BaseChatModel): @property def _llm_type(self) -> str: return "parrot-fake-chat-model" + + +class GenericFakeChatModelV1(BaseChatModelV1): + """Generic fake chat model that can be used to test the chat model interface.""" + + messages: Optional[Iterator[Union[AIMessageV1, str]]] = None + message_chunks: 
Optional[Iterable[Union[AIMessageChunkV1, str]]] = None + + @override + def _invoke( + self, + messages: list[MessageV1], + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AIMessageV1: + """Top Level call.""" + if self.messages is None: + error_msg = "Messages iterator is not set." + raise ValueError(error_msg) + message = next(self.messages) + return AIMessageV1(content=message) if isinstance(message, str) else message + + @override + def _stream( + self, + messages: list[MessageV1], + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[AIMessageChunkV1]: + """Top Level call.""" + if self.message_chunks is None: + error_msg = "Message chunks iterator is not set." + raise ValueError(error_msg) + for chunk in self.message_chunks: + if isinstance(chunk, str): + yield AIMessageChunkV1(chunk) + else: + yield chunk + + @property + def _llm_type(self) -> str: + return "generic-fake-chat-model" + + +class ParrotFakeChatModelV1(BaseChatModelV1): + """Generic fake chat model that can be used to test the chat model interface. + + * Chat model should be usable in both sync and async tests + """ + + @override + def _invoke( + self, + messages: list[MessageV1], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AIMessageV1: + """Top Level call.""" + if isinstance(messages[-1], AIMessageV1): + return messages[-1] + return AIMessageV1(content=messages[-1].content) + + @property + def _llm_type(self) -> str: + return "parrot-fake-chat-model" diff --git a/libs/core/langchain_core/messages/__init__.py b/libs/core/langchain_core/messages/__init__.py index fe87e964af2..0faf0447295 100644 --- a/libs/core/langchain_core/messages/__init__.py +++ b/libs/core/langchain_core/messages/__init__.py @@ -33,6 +33,24 @@ if TYPE_CHECKING: ) from langchain_core.messages.chat import ChatMessage, ChatMessageChunk from langchain_core.messages.content_blocks import ( + Annotation, + AudioContentBlock, + Citation, + CodeInterpreterCall, + CodeInterpreterOutput, + CodeInterpreterResult, + ContentBlock, + DataContentBlock, + FileContentBlock, + ImageContentBlock, + NonStandardAnnotation, + NonStandardContentBlock, + PlainTextContentBlock, + ReasoningContentBlock, + TextContentBlock, + VideoContentBlock, + WebSearchCall, + WebSearchResult, convert_to_openai_data_block, convert_to_openai_image_block, is_data_content_block, @@ -65,24 +83,42 @@ if TYPE_CHECKING: __all__ = ( "AIMessage", "AIMessageChunk", + "Annotation", "AnyMessage", + "AudioContentBlock", "BaseMessage", "BaseMessageChunk", "ChatMessage", "ChatMessageChunk", + "Citation", + "CodeInterpreterCall", + "CodeInterpreterOutput", + "CodeInterpreterResult", + "ContentBlock", + "DataContentBlock", + "FileContentBlock", "FunctionMessage", "FunctionMessageChunk", "HumanMessage", "HumanMessageChunk", + "ImageContentBlock", "InvalidToolCall", "MessageLikeRepresentation", + "NonStandardAnnotation", + "NonStandardContentBlock", + "PlainTextContentBlock", + "ReasoningContentBlock", "RemoveMessage", "SystemMessage", "SystemMessageChunk", + "TextContentBlock", "ToolCall", "ToolCallChunk", "ToolMessage", "ToolMessageChunk", + "VideoContentBlock", + "WebSearchCall", + "WebSearchResult", "_message_from_dict", "convert_to_messages", "convert_to_openai_data_block", @@ -103,25 +139,43 @@ __all__ = ( _dynamic_imports = { "AIMessage": "ai", "AIMessageChunk": "ai", + "Annotation": "content_blocks", + "AudioContentBlock": "content_blocks", "BaseMessage": 
"base", "BaseMessageChunk": "base", "merge_content": "base", "message_to_dict": "base", "messages_to_dict": "base", + "Citation": "content_blocks", + "ContentBlock": "content_blocks", "ChatMessage": "chat", "ChatMessageChunk": "chat", + "CodeInterpreterCall": "content_blocks", + "CodeInterpreterOutput": "content_blocks", + "CodeInterpreterResult": "content_blocks", + "DataContentBlock": "content_blocks", + "FileContentBlock": "content_blocks", "FunctionMessage": "function", "FunctionMessageChunk": "function", "HumanMessage": "human", "HumanMessageChunk": "human", + "NonStandardAnnotation": "content_blocks", + "NonStandardContentBlock": "content_blocks", + "PlainTextContentBlock": "content_blocks", + "ReasoningContentBlock": "content_blocks", "RemoveMessage": "modifier", "SystemMessage": "system", "SystemMessageChunk": "system", + "WebSearchCall": "content_blocks", + "WebSearchResult": "content_blocks", + "ImageContentBlock": "content_blocks", "InvalidToolCall": "tool", + "TextContentBlock": "content_blocks", "ToolCall": "tool", "ToolCallChunk": "tool", "ToolMessage": "tool", "ToolMessageChunk": "tool", + "VideoContentBlock": "content_blocks", "AnyMessage": "utils", "MessageLikeRepresentation": "utils", "_message_from_dict": "utils", diff --git a/libs/core/langchain_core/messages/ai.py b/libs/core/langchain_core/messages/ai.py index c81187dc3f6..b980761a121 100644 --- a/libs/core/langchain_core/messages/ai.py +++ b/libs/core/langchain_core/messages/ai.py @@ -8,11 +8,7 @@ from typing import Any, Literal, Optional, Union, cast from pydantic import model_validator from typing_extensions import NotRequired, Self, TypedDict, override -from langchain_core.messages.base import ( - BaseMessage, - BaseMessageChunk, - merge_content, -) +from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content from langchain_core.messages.tool import ( InvalidToolCall, ToolCall, @@ -20,15 +16,9 @@ from langchain_core.messages.tool import ( default_tool_chunk_parser, default_tool_parser, ) -from langchain_core.messages.tool import ( - invalid_tool_call as create_invalid_tool_call, -) -from langchain_core.messages.tool import ( - tool_call as create_tool_call, -) -from langchain_core.messages.tool import ( - tool_call_chunk as create_tool_call_chunk, -) +from langchain_core.messages.tool import invalid_tool_call as create_invalid_tool_call +from langchain_core.messages.tool import tool_call as create_tool_call +from langchain_core.messages.tool import tool_call_chunk as create_tool_call_chunk from langchain_core.utils._merge import merge_dicts, merge_lists from langchain_core.utils.json import parse_partial_json from langchain_core.utils.usage import _dict_int_op @@ -37,6 +27,16 @@ logger = logging.getLogger(__name__) _LC_ID_PREFIX = "run-" +"""Internal tracing/callback system identifier. + +Used for: +- Tracing. Every LangChain operation (LLM call, chain execution, tool use, etc.) 
gets a unique run_id (UUID) +- Enables tracking parent-child relationships between operations +""" + +_LC_AUTO_PREFIX = "lc_" +"""LangChain auto-generated ID prefix for messages and content blocks.""" class InputTokenDetails(TypedDict, total=False): @@ -428,17 +428,27 @@ def add_ai_message_chunks( chunk_id = None candidates = [left.id] + [o.id for o in others] - # first pass: pick the first non-run-* id + # first pass: pick the first provider-assigned id (non-run-* and non-lc_*) for id_ in candidates: - if id_ and not id_.startswith(_LC_ID_PREFIX): + if ( + id_ + and not id_.startswith(_LC_ID_PREFIX) + and not id_.startswith(_LC_AUTO_PREFIX) + ): chunk_id = id_ break else: - # second pass: no provider-assigned id found, just take the first non-null + # second pass: prefer lc_* ids over run-* ids for id_ in candidates: - if id_: + if id_ and id_.startswith(_LC_AUTO_PREFIX): chunk_id = id_ break + else: + # third pass: take any remaining id (run-* ids) + for id_ in candidates: + if id_: + chunk_id = id_ + break return left.__class__( example=left.example, diff --git a/libs/core/langchain_core/messages/content_blocks.py b/libs/core/langchain_core/messages/content_blocks.py index 83a66fb123a..025f16eeb37 100644 --- a/libs/core/langchain_core/messages/content_blocks.py +++ b/libs/core/langchain_core/messages/content_blocks.py @@ -1,110 +1,935 @@ -"""Types for content blocks.""" +"""Standard, multimodal content blocks for Large Language Model I/O. + +.. warning:: + This module is under active development. The API is unstable and subject to + change in future releases. + +This module provides a standardized data structure for representing inputs to and +outputs from Large Language Models. The core abstraction is the **Content Block**, a +``TypedDict`` that can represent a piece of text, an image, a tool call, or other +structured data. + +Data **not yet mapped** to a standard block may be represented using the +``NonStandardContentBlock``, which allows for provider-specific data to be included +without losing the benefits of type checking and validation. + +Furthermore, provider-specific fields **within** a standard block are fully supported +by default. However, since current type checkers do not recognize this, we are temporarily +applying type ignore comments to suppress warnings. In the future, +`PEP 728 <https://peps.python.org/pep-0728/>`__ will add an extra param, ``extra_items=Any``. +When this is supported, we will apply it to block signatures to signify to type checkers +that additional provider-specific fields are allowed. + +**Example with PEP 728 provider-specific fields:** + +.. code-block:: python + + # Note `extra_items=Any` + class TextContentBlock(TypedDict, extra_items=Any): + type: Literal["text"] + id: NotRequired[str] + text: str + annotations: NotRequired[list[Annotation]] + index: NotRequired[int] + +.. code-block:: python + + from langchain_core.messages.content_blocks import TextContentBlock + + my_block: TextContentBlock = { + # Add required fields + "type": "text", + "text": "Hello, world!", + # Additional fields not specified in the TypedDict + # These are valid with PEP 728 and are typed as Any + "openai_metadata": {"model": "gpt-4", "temperature": 0.7}, + "anthropic_usage": {"input_tokens": 10, "output_tokens": 20}, + "custom_field": "any value", + } + + openai_data = my_block["openai_metadata"] # Type: Any + +.. note:: + PEP 728 is enabled with ``# type: ignore[call-arg]`` comments to suppress warnings + from type checkers that don't yet support it.
The functionality works correctly + in Python 3.13+ and will be fully supported as the ecosystem catches up. + +**Rationale** + +Different LLM providers use distinct and incompatible API schemas. This module +introduces a unified, provider-agnostic format to standardize these interactions. A +message to or from a model is simply a `list` of `ContentBlock` objects, allowing for +the natural interleaving of text, images, and other content in a single, ordered +sequence. + +An adapter for a specific provider is responsible for translating this standard list of +blocks into the format required by its API. + +**Key Block Types** + +The module defines several types of content blocks, including: + +- ``TextContentBlock``: Standard text. +- ``ImageContentBlock``, ``Audio...``, ``Video...``, ``PlainText...``, ``File...``: For multimodal data. +- ``ToolCallContentBlock``, ``ToolOutputContentBlock``: For function calling. +- ``ReasoningContentBlock``: To capture a model's thought process. +- ``Citation``: For annotations that link generated text to a source document. + +**Example Usage** + +.. code-block:: python + + # Direct construction: + from langchain_core.messages.content_blocks import TextContentBlock, ImageContentBlock + + multimodal_message: AIMessage = [ + TextContentBlock(type="text", text="What is shown in this image?"), + ImageContentBlock( + type="image", + url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png", + mime_type="image/png", + ), + ] + + from langchain_core.messages.content_blocks import create_text_block, create_image_block + + # Using factory functions: + multimodal_message: AIMessage = [ + create_text_block("What is shown in this image?"), + create_image_block( + url="https://www.langchain.com/images/brand/langchain_logo_text_w_white.png", + mime_type="image/png", + ), + ] +""" # noqa: E501 import warnings -from typing import Any, Literal, Union +from typing import Any, Literal, Optional, Union +from uuid import uuid4 -from pydantic import TypeAdapter, ValidationError -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict, get_args, get_origin -class BaseDataContentBlock(TypedDict, total=False): - """Base class for data content blocks.""" +def _ensure_id(id_val: Optional[str]) -> str: + """Ensure the ID is a valid string, generating a new UUID if not provided. + + Auto-generated UUIDs are prefixed by ``'lc_'`` to indicate they are + LangChain-generated IDs. + + Args: + id_val: Optional string ID value to validate. + + Returns: + A valid string ID, either the provided value or a new UUID. + """ + return id_val or str(f"lc_{uuid4()}") + + +class Citation(TypedDict): + """Annotation for citing data from a document. + + .. note:: + ``start/end`` indices refer to the **response text**, + not the source text. This means that the indices are relative to the model's + response, not the original document (as specified in the ``url``). + + .. note:: + ``create_citation`` may also be used as a factory to create a ``Citation``. + Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + + """ + + type: Literal["citation"] + """Type of the content block. Used for discrimination.""" + + id: NotRequired[str] + """Content block identifier. 
Either: + + - Generated by the provider (e.g., OpenAI's file ID) + - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``) + """ + + url: NotRequired[str] + """URL of the document source.""" + + # For future consideration, if needed: + # provenance: NotRequired[str] + # """Provenance of the document, e.g., "Wikipedia", "arXiv", etc. + + # Included for future compatibility; not currently implemented. + # """ + + title: NotRequired[str] + """Source document title. + + For example, the page title for a web page or the title of a paper. + """ + + start_index: NotRequired[int] + """Start index of the **response text** (``TextContentBlock.text``) for which the + annotation applies.""" + + end_index: NotRequired[int] + """End index of the **response text** (``TextContentBlock.text``) for which the + annotation applies.""" + + cited_text: NotRequired[str] + """Excerpt of source text being cited.""" + + # NOTE: not including spans for the raw document text (such as `text_start_index` + # and `text_end_index`) as this is not currently supported by any provider. The + # thinking is that the `cited_text` should be sufficient for most use cases, and it + # is difficult to reliably extract spans from the raw document text across file + # formats or encoding schemes. + + extras: NotRequired[dict[str, Any]] + """Provider-specific metadata.""" + + +class NonStandardAnnotation(TypedDict): + """Provider-specific annotation format.""" + + type: Literal["non_standard_annotation"] + """Type of the content block. Used for discrimination.""" + + id: NotRequired[str] + """Content block identifier. Either: + + - Generated by the provider (e.g., OpenAI's file ID) + - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``) + """ + + value: dict[str, Any] + """Provider-specific annotation data.""" + + +Annotation = Union[Citation, NonStandardAnnotation] + + +class TextContentBlock(TypedDict): + """Text output from an LLM. + + This typically represents the main text content of a message, such as the response + from a language model or the text of a user message. + + .. note:: + ``create_text_block`` may also be used as a factory to create a + ``TextContentBlock``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + + """ + + type: Literal["text"] + """Type of the content block. Used for discrimination.""" + + id: NotRequired[str] + """Content block identifier. Either: + + - Generated by the provider (e.g., OpenAI's file ID) + - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``) + """ + + text: str + """Block text.""" + + annotations: NotRequired[list[Annotation]] + """Citations and other annotations.""" + + index: NotRequired[int] + """Index of block in aggregate response. Used during streaming.""" + + extras: NotRequired[dict[str, Any]] + """Provider-specific metadata.""" + + +class ToolCall(TypedDict): + """Represents a request to call a tool. + + Example: + + .. code-block:: python + + { + "name": "foo", + "args": {"a": 1}, + "id": "123" + } + + This represents a request to call the tool named "foo" with arguments {"a": 1} + and an identifier of "123". + + .. note:: + ``create_tool_call`` may also be used as a factory to create a + ``ToolCall``.
Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + + """ + + type: Literal["tool_call"] + """Used for discrimination.""" + + id: Optional[str] + """An identifier associated with the tool call. + + An identifier is needed to associate a tool call request with a tool + call result in events when multiple concurrent tool calls are made. + """ + # TODO: Consider making this NotRequired[str] in the future. + + name: str + """The name of the tool to be called.""" + + args: dict[str, Any] + """The arguments to the tool call.""" + + index: NotRequired[int] + """Index of block in aggregate response. Used during streaming.""" + + extras: NotRequired[dict[str, Any]] + """Provider-specific metadata.""" + + +class ToolCallChunk(TypedDict): + """A chunk of a tool call (e.g., as part of a stream). + + When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), + all string attributes are concatenated. Chunks are only merged if their + values of ``index`` are equal and not ``None``. + + Example: + + .. code-block:: python + + left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)] + right_chunks = [ToolCallChunk(name=None, args='1}', index=0)] + + ( + AIMessageChunk(content="", tool_call_chunks=left_chunks) + + AIMessageChunk(content="", tool_call_chunks=right_chunks) + ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)] + """ + + # TODO: Consider making fields NotRequired[str] in the future. + + type: NotRequired[Literal["tool_call_chunk"]] + """Used for serialization.""" + + id: Optional[str] + """An identifier associated with the tool call.""" + + name: Optional[str] + """The name of the tool to be called.""" + + args: Optional[str] + """The arguments to the tool call.""" + + index: Optional[int] + """The index of the tool call in a sequence.""" + + extras: NotRequired[dict[str, Any]] + """Provider-specific metadata.""" + + +class InvalidToolCall(TypedDict): + """Allowance for errors made by the LLM. + + Here we add an ``error`` key to surface errors made during generation + (e.g., invalid JSON arguments). + """ + + # TODO: Consider making fields NotRequired[str] in the future. + + type: Literal["invalid_tool_call"] + """Used for discrimination.""" + + id: Optional[str] + """An identifier associated with the tool call.""" + + name: Optional[str] + """The name of the tool to be called.""" + + args: Optional[str] + """The arguments to the tool call.""" + + error: Optional[str] + """An error message associated with the tool call.""" + + index: NotRequired[int] + """Index of block in aggregate response. Used during streaming.""" + + extras: NotRequired[dict[str, Any]] + """Provider-specific metadata.""" + + +# Note: These are not standard tool calls, but rather provider-specific built-in tools. +# Web search +class WebSearchCall(TypedDict): + """Built-in web search tool call.""" + + type: Literal["web_search_call"] + """Type of the content block. Used for discrimination.""" + + id: NotRequired[str] + """Content block identifier. Either: + + - Generated by the provider (e.g., OpenAI's file ID) + - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``) + """ + + query: NotRequired[str] + """The search query used in the web search tool call.""" + + index: NotRequired[int] + """Index of block in aggregate response.
Used during streaming.""" + + extras: NotRequired[dict[str, Any]] + """Provider-specific metadata.""" + + +class WebSearchResult(TypedDict): + """Result of a built-in web search tool call.""" + + type: Literal["web_search_result"] + """Type of the content block. Used for discrimination.""" + + id: NotRequired[str] + """Content block identifier. Either: + + - Generated by the provider (e.g., OpenAI's file ID) + - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``) + """ + + urls: NotRequired[list[str]] + """List of URLs returned by the web search tool call.""" + + index: NotRequired[int] + """Index of block in aggregate response. Used during streaming.""" + + extras: NotRequired[dict[str, Any]] + """Provider-specific metadata.""" + + +class CodeInterpreterCall(TypedDict): + """Built-in code interpreter tool call.""" + + type: Literal["code_interpreter_call"] + """Type of the content block. Used for discrimination.""" + + id: NotRequired[str] + """Content block identifier. Either: + + - Generated by the provider (e.g., OpenAI's file ID) + - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``) + """ + + language: NotRequired[str] + """The name of the programming language used in the code interpreter tool call.""" + + code: NotRequired[str] + """The code to be executed by the code interpreter.""" + + index: NotRequired[int] + """Index of block in aggregate response. Used during streaming.""" + + extras: NotRequired[dict[str, Any]] + """Provider-specific metadata.""" + + +class CodeInterpreterOutput(TypedDict): + """Output of a single code interpreter tool call. + + Full output of a code interpreter tool call is represented by + ``CodeInterpreterResult``, which is a list of these blocks. + """ + + type: Literal["code_interpreter_output"] + """Type of the content block. Used for discrimination.""" + + id: NotRequired[str] + """Content block identifier. Either: + + - Generated by the provider (e.g., OpenAI's file ID) + - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``) + """ + + return_code: NotRequired[int] + """Return code of the executed code. + + Example: ``0`` for success, non-zero for failure. + """ + + stderr: NotRequired[str] + """Standard error output of the executed code.""" + + stdout: NotRequired[str] + """Standard output of the executed code.""" + + file_ids: NotRequired[list[str]] + """List of file IDs generated by the code interpreter.""" + + index: NotRequired[int] + """Index of block in aggregate response. Used during streaming.""" + + extras: NotRequired[dict[str, Any]] + """Provider-specific metadata.""" + + +class CodeInterpreterResult(TypedDict): + """Result of a code interpreter tool call.""" + + type: Literal["code_interpreter_result"] + """Type of the content block. Used for discrimination.""" + + id: NotRequired[str] + """Content block identifier. Either: + + - Generated by the provider (e.g., OpenAI's file ID) + - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``) + """ + + output: list[CodeInterpreterOutput] + """List of outputs from the code interpreter tool call.""" + + index: NotRequired[int] + """Index of block in aggregate response. Used during streaming.""" + + extras: NotRequired[dict[str, Any]] + """Provider-specific metadata.""" + + +class ReasoningContentBlock(TypedDict): + """Reasoning output from an LLM. + + .. note:: + ``create_reasoning_block`` may also be used as a factory to create a + ``ReasoningContentBlock``.
Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + + """ + + type: Literal["reasoning"] + """Type of the content block. Used for discrimination.""" + + id: NotRequired[str] + """Content block identifier. Either: + + - Generated by the provider (e.g., OpenAI's file ID) + - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``) + """ + + reasoning: NotRequired[str] + """Reasoning text. + + Either the thought summary or the raw reasoning text itself. This is often parsed + from ``<think>`` tags in the model's response. + """ + + index: NotRequired[int] + """Index of block in aggregate response. Used during streaming.""" + + extras: NotRequired[dict[str, Any]] + """Provider-specific metadata.""" + + +# Note: `title` and `context` are fields that could be used to provide additional +# information about the file, such as a description or summary of its content. +# E.g. with Claude, you can provide a context for a file which is passed to the model. +class ImageContentBlock(TypedDict): + """Image data. + + .. note:: + ``create_image_block`` may also be used as a factory to create an + ``ImageContentBlock``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + + """ + + type: Literal["image"] + """Type of the content block. Used for discrimination.""" + + id: NotRequired[str] + """Content block identifier. Either: + + - Generated by the provider (e.g., OpenAI's file ID) + - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``) + """ + + file_id: NotRequired[str] + """ID of the image file, e.g., from a file storage system.""" mime_type: NotRequired[str] - """MIME type of the content block (if needed).""" + """MIME type of the image. Required for base64. -class URLContentBlock(BaseDataContentBlock): - """Content block for data from a URL.""" + `Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml#image>`__ + """ - type: Literal["image", "audio", "file"] - """Type of the content block.""" - source_type: Literal["url"] - """Source type (url).""" - url: str - """URL for data.""" + index: NotRequired[int] + """Index of block in aggregate response. Used during streaming.""" - -class Base64ContentBlock(BaseDataContentBlock): - """Content block for inline data from a base64 string.""" + url: NotRequired[str] + """URL of the image.""" - type: Literal["image", "audio", "file"] - """Type of the content block.""" - source_type: Literal["base64"] - """Source type (base64).""" - data: str + base64: NotRequired[str] """Data as a base64 string.""" + extras: NotRequired[dict[str, Any]] + """Provider-specific metadata.""" -class PlainTextContentBlock(BaseDataContentBlock): - """Content block for plain text data (e.g., from a document).""" + +class VideoContentBlock(TypedDict): + """Video data. + + .. note:: + ``create_video_block`` may also be used as a factory to create a + ``VideoContentBlock``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + + """ + + type: Literal["video"] + """Type of the content block. Used for discrimination.""" + + id: NotRequired[str] + """Content block identifier. Either: + + - Generated by the provider (e.g., OpenAI's file ID) + - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``) + """ + + file_id: NotRequired[str] + """ID of the video file, e.g., from a file storage system.""" + + mime_type: NotRequired[str] + """MIME type of the video.
Required for base64. + + `Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml>`__ + """ + + index: NotRequired[int] + """Index of block in aggregate response. Used during streaming.""" + + url: NotRequired[str] + """URL of the video.""" + + base64: NotRequired[str] + """Data as a base64 string.""" + + extras: NotRequired[dict[str, Any]] + """Provider-specific metadata.""" + + +class AudioContentBlock(TypedDict): + """Audio data. + + .. note:: + ``create_audio_block`` may also be used as a factory to create an + ``AudioContentBlock``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + + """ + + type: Literal["audio"] + """Type of the content block. Used for discrimination.""" + + id: NotRequired[str] + """Content block identifier. Either: + + - Generated by the provider (e.g., OpenAI's file ID) + - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``) + """ + + file_id: NotRequired[str] + """ID of the audio file, e.g., from a file storage system.""" + + mime_type: NotRequired[str] + """MIME type of the audio. Required for base64. + + `Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml>`__ + """ + + index: NotRequired[int] + """Index of block in aggregate response. Used during streaming.""" + + url: NotRequired[str] + """URL of the audio.""" + + base64: NotRequired[str] + """Data as a base64 string.""" + + extras: NotRequired[dict[str, Any]] + """Provider-specific metadata.""" + + +class PlainTextContentBlock(TypedDict): + """Plaintext data (e.g., from a document). + + .. note:: + Title and context are optional fields that may be passed to the model. See + Anthropic `example `__. + + .. note:: + ``create_plaintext_block`` may also be used as a factory to create a + ``PlainTextContentBlock``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + + """ + + type: Literal["text-plain"] + """Type of the content block. Used for discrimination.""" + + id: NotRequired[str] + """Content block identifier. Either: + + - Generated by the provider (e.g., OpenAI's file ID) + - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``) + """ + + file_id: NotRequired[str] + """ID of the plaintext file, e.g., from a file storage system.""" + + mime_type: Literal["text/plain"] + """MIME type of the file. Required for base64.""" + + index: NotRequired[int] + """Index of block in aggregate response. Used during streaming.""" + + url: NotRequired[str] + """URL of the plaintext.""" + + base64: NotRequired[str] + """Data as a base64 string.""" + + text: NotRequired[str] + """Plaintext content. This is optional if the data is provided as base64.""" + + title: NotRequired[str] + """Title of the text data, e.g., the title of a document.""" + + context: NotRequired[str] + """Context for the text, e.g., a description or summary of the text's content.""" + + extras: NotRequired[dict[str, Any]] + """Provider-specific metadata.""" + + +class FileContentBlock(TypedDict): + """File data that doesn't fit into other multimodal blocks. + + This block is intended for files that are not images, audio, or plaintext. For + example, it can be used for PDFs, Word documents, etc. + + If the file is an image, audio, or plaintext, you should use the corresponding + content block type (e.g., ``ImageContentBlock``, ``AudioContentBlock``, + ``PlainTextContentBlock``). + + .. note:: + ``create_file_block`` may also be used as a factory to create a + ``FileContentBlock``.
Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + + """ type: Literal["file"] - """Type of the content block.""" - source_type: Literal["text"] - """Source type (text).""" - text: str - """Text data.""" + """Type of the content block. Used for discrimination.""" + + id: NotRequired[str] + """Content block identifier. Either: + + - Generated by the provider (e.g., OpenAI's file ID) + - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``) + """ + + file_id: NotRequired[str] + """ID of the file, e.g., from a file storage system.""" + + mime_type: NotRequired[str] + """MIME type of the file. Required for base64. + + `Examples from IANA <https://www.iana.org/assignments/media-types/media-types.xhtml>`__ + """ + + index: NotRequired[int] + """Index of block in aggregate response. Used during streaming.""" + + url: NotRequired[str] + """URL of the file.""" + + base64: NotRequired[str] + """Data as a base64 string.""" + + extras: NotRequired[dict[str, Any]] + """Provider-specific metadata.""" -class IDContentBlock(TypedDict): - """Content block for data specified by an identifier.""" - - type: Literal["image", "audio", "file"] - """Type of the content block.""" - source_type: Literal["id"] - """Source type (id).""" - id: str - """Identifier for data source.""" +# Future modalities to consider: +# - 3D models +# - Tabular data +class NonStandardContentBlock(TypedDict): + """Provider-specific data. + + This block contains data for which there is not yet a standard type. + + The purpose of this block should be to simply hold a provider-specific payload. + If a provider's non-standard output includes reasoning and tool calls, it should be + the adapter's job to parse that payload and emit the corresponding standard + ``ReasoningContentBlock`` and ``ToolCall`` blocks. + + .. note:: + ``create_non_standard_block`` may also be used as a factory to create a + ``NonStandardContentBlock``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + + """ + + type: Literal["non_standard"] + """Type of the content block. Used for discrimination.""" + + id: NotRequired[str] + """Content block identifier. Either: + + - Generated by the provider (e.g., OpenAI's file ID) + - Generated by LangChain upon creation (``UUID4`` prefixed with ``'lc_'``) + """ + + value: dict[str, Any] + """Provider-specific data.""" + + index: NotRequired[int] + """Index of block in aggregate response.
Used during streaming.""" + + +# --- Aliases --- DataContentBlock = Union[ - URLContentBlock, - Base64ContentBlock, + ImageContentBlock, + VideoContentBlock, + AudioContentBlock, PlainTextContentBlock, - IDContentBlock, + FileContentBlock, ] -_DataContentBlockAdapter: TypeAdapter[DataContentBlock] = TypeAdapter(DataContentBlock) +ToolContentBlock = Union[ + ToolCall, + CodeInterpreterCall, + CodeInterpreterOutput, + CodeInterpreterResult, + WebSearchCall, + WebSearchResult, +] + +ContentBlock = Union[ + TextContentBlock, + ToolCall, + InvalidToolCall, + ToolCallChunk, + ReasoningContentBlock, + NonStandardContentBlock, + DataContentBlock, + ToolContentBlock, +] -def is_data_content_block( - content_block: dict, -) -> bool: +def _extract_typedict_type_values(union_type: Any) -> set[str]: + """Extract the values of the 'type' field from a TypedDict union type.""" + result: set[str] = set() + for value in get_args(union_type): + annotation = value.__annotations__["type"] + if get_origin(annotation) is Literal: + result.update(get_args(annotation)) + else: + msg = f"{value} 'type' is not a Literal" + raise ValueError(msg) + return result + + +KNOWN_BLOCK_TYPES = { + "text", + "text-plain", + "tool_call", + "invalid_tool_call", + "tool_call_chunk", + "reasoning", + "non_standard", + "image", + "audio", + "file", + "video", + "code_interpreter_call", + "code_interpreter_output", + "code_interpreter_result", + "web_search_call", + "web_search_result", +} + + +def is_data_content_block(block: dict) -> bool: """Check if the content block is a standard data content block. Args: - content_block: The content block to check. + block: The content block to check. Returns: True if the content block is a data content block, False otherwise. """ - try: - _ = _DataContentBlockAdapter.validate_python(content_block) - except ValidationError: - return False - else: - return True + return block.get("type") in ( + "audio", + "image", + "video", + "file", + "text-plain", + ) and any( + key in block + for key in ( + "url", + "base64", + "file_id", + "text", + "source_type", # backwards compatibility + ) + ) -def convert_to_openai_image_block(content_block: dict[str, Any]) -> dict: +def convert_to_openai_image_block(block: dict[str, Any]) -> dict: """Convert image content block to format expected by OpenAI Chat Completions API.""" - if content_block["source_type"] == "url": + if "url" in block: return { "type": "image_url", "image_url": { - "url": content_block["url"], + "url": block["url"], }, } - if content_block["source_type"] == "base64": - if "mime_type" not in content_block: + if "base64" in block or block.get("source_type") == "base64": + if "mime_type" not in block: error_message = "mime_type key is required for base64 data." raise ValueError(error_message) - mime_type = content_block["mime_type"] + mime_type = block["mime_type"] + base64_data = block["data"] if "data" in block else block["base64"] return { "type": "image_url", "image_url": { - "url": f"data:{mime_type};base64,{content_block['data']}", + "url": f"data:{mime_type};base64,{base64_data}", }, } error_message = "Unsupported source type. Only 'url' and 'base64' are supported." 
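A minimal usage sketch of the predicate and converter above, assuming both helpers remain importable from ``langchain_core.messages.content_blocks`` (the block dicts are illustrative placeholders):

.. code-block:: python

    from langchain_core.messages.content_blocks import (
        convert_to_openai_image_block,
        is_data_content_block,
    )

    # New-style block: discriminated by `type` plus a data key (url/base64/file_id).
    block = {"type": "image", "mime_type": "image/png", "base64": "iVBORw0KGgo..."}
    assert is_data_content_block(block)

    # Old-style blocks still pass via the `source_type` compatibility key.
    legacy = {
        "type": "image",
        "source_type": "base64",
        "mime_type": "image/png",
        "data": "iVBORw0KGgo...",
    }
    assert is_data_content_block(legacy)

    convert_to_openai_image_block(block)
    # -> {'type': 'image_url',
    #     'image_url': {'url': 'data:image/png;base64,iVBORw0KGgo...'}}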
@@ -117,8 +942,9 @@ def convert_to_openai_data_block(block: dict) -> dict: formatted_block = convert_to_openai_image_block(block) elif block["type"] == "file": - if block["source_type"] == "base64": - file = {"file_data": f"data:{block['mime_type']};base64,{block['data']}"} + if "base64" in block or block.get("source_type") == "base64": + base64_data = block["data"] if "source_type" in block else block["base64"] + file = {"file_data": f"data:{block['mime_type']};base64,{base64_data}"} if filename := block.get("filename"): file["filename"] = filename elif (metadata := block.get("metadata")) and ("filename" in metadata): @@ -126,30 +952,484 @@ def convert_to_openai_data_block(block: dict) -> dict: else: warnings.warn( "OpenAI may require a filename for file inputs. Specify a filename " - "in the content block: {'type': 'file', 'source_type': 'base64', " - "'mime_type': 'application/pdf', 'data': '...', " - "'filename': 'my-pdf'}", + "in the content block: {'type': 'file', 'mime_type': " + "'application/pdf', 'base64': '...', 'filename': 'my-pdf'}", stacklevel=1, ) formatted_block = {"type": "file", "file": file} - elif block["source_type"] == "id": - formatted_block = {"type": "file", "file": {"file_id": block["id"]}} + elif "file_id" in block or block.get("source_type") == "id": + file_id = block["id"] if "source_type" in block else block["file_id"] + formatted_block = {"type": "file", "file": {"file_id": file_id}} else: - error_msg = "source_type base64 or id is required for file blocks." + error_msg = "Keys base64 or file_id required for file blocks." raise ValueError(error_msg) elif block["type"] == "audio": - if block["source_type"] == "base64": + if "base64" in block or block.get("source_type") == "base64": + base64_data = block["data"] if "source_type" in block else block["base64"] audio_format = block["mime_type"].split("/")[-1] formatted_block = { "type": "input_audio", - "input_audio": {"data": block["data"], "format": audio_format}, + "input_audio": {"data": base64_data, "format": audio_format}, } else: - error_msg = "source_type base64 is required for audio blocks." + error_msg = "Key base64 is required for audio blocks." raise ValueError(error_msg) else: error_msg = f"Block of type {block['type']} is not supported." raise ValueError(error_msg) return formatted_block + + +def create_text_block( + text: str, + *, + id: Optional[str] = None, + annotations: Optional[list[Annotation]] = None, + index: Optional[int] = None, +) -> TextContentBlock: + """Create a ``TextContentBlock``. + + Args: + text: The text content of the block. + id: Content block identifier. Generated automatically if not provided. + annotations: Citations and other annotations for the text. + index: Index of block in aggregate response. Used during streaming. + + Returns: + A properly formatted ``TextContentBlock``. + + .. note:: + The ``id`` is generated automatically if not provided, using a UUID4 format + prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + + """ + block = TextContentBlock( + type="text", + text=text, + id=_ensure_id(id), + ) + if annotations is not None: + block["annotations"] = annotations + if index is not None: + block["index"] = index + return block + + +def create_image_block( + *, + url: Optional[str] = None, + base64: Optional[str] = None, + file_id: Optional[str] = None, + mime_type: Optional[str] = None, + id: Optional[str] = None, + index: Optional[int] = None, +) -> ImageContentBlock: + """Create an ``ImageContentBlock``. + + Args: + url: URL of the image. 
+ base64: Base64-encoded image data. + file_id: ID of the image file from a file storage system. + mime_type: MIME type of the image. Required for base64 data. + id: Content block identifier. Generated automatically if not provided. + index: Index of block in aggregate response. Used during streaming. + + Returns: + A properly formatted ``ImageContentBlock``. + + Raises: + ValueError: If no image source is provided or if ``base64`` is used without + ``mime_type``. + + .. note:: + The ``id`` is generated automatically if not provided, using a UUID4 format + prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + + """ + if not any([url, base64, file_id]): + msg = "Must provide one of: url, base64, or file_id" + raise ValueError(msg) + + if base64 and not mime_type: + msg = "mime_type is required when using base64 data" + raise ValueError(msg) + + block = ImageContentBlock(type="image", id=_ensure_id(id)) + + if url is not None: + block["url"] = url + if base64 is not None: + block["base64"] = base64 + if file_id is not None: + block["file_id"] = file_id + if mime_type is not None: + block["mime_type"] = mime_type + if index is not None: + block["index"] = index + + return block + + +def create_video_block( + *, + url: Optional[str] = None, + base64: Optional[str] = None, + file_id: Optional[str] = None, + mime_type: Optional[str] = None, + id: Optional[str] = None, + index: Optional[int] = None, +) -> VideoContentBlock: + """Create a ``VideoContentBlock``. + + Args: + url: URL of the video. + base64: Base64-encoded video data. + file_id: ID of the video file from a file storage system. + mime_type: MIME type of the video. Required for base64 data. + id: Content block identifier. Generated automatically if not provided. + index: Index of block in aggregate response. Used during streaming. + + Returns: + A properly formatted ``VideoContentBlock``. + + Raises: + ValueError: If no video source is provided or if ``base64`` is used without + ``mime_type``. + + .. note:: + The ``id`` is generated automatically if not provided, using a UUID4 format + prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + + """ + if not any([url, base64, file_id]): + msg = "Must provide one of: url, base64, or file_id" + raise ValueError(msg) + + if base64 and not mime_type: + msg = "mime_type is required when using base64 data" + raise ValueError(msg) + + block = VideoContentBlock(type="video", id=_ensure_id(id)) + + if url is not None: + block["url"] = url + if base64 is not None: + block["base64"] = base64 + if file_id is not None: + block["file_id"] = file_id + if mime_type is not None: + block["mime_type"] = mime_type + if index is not None: + block["index"] = index + + return block + + +def create_audio_block( + *, + url: Optional[str] = None, + base64: Optional[str] = None, + file_id: Optional[str] = None, + mime_type: Optional[str] = None, + id: Optional[str] = None, + index: Optional[int] = None, +) -> AudioContentBlock: + """Create an ``AudioContentBlock``. + + Args: + url: URL of the audio. + base64: Base64-encoded audio data. + file_id: ID of the audio file from a file storage system. + mime_type: MIME type of the audio. Required for base64 data. + id: Content block identifier. Generated automatically if not provided. + index: Index of block in aggregate response. Used during streaming. + + Returns: + A properly formatted ``AudioContentBlock``. + + Raises: + ValueError: If no audio source is provided or if ``base64`` is used without + ``mime_type``. + + .. 
note:: + The ``id`` is generated automatically if not provided, using a UUID4 format + prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + + """ + if not any([url, base64, file_id]): + msg = "Must provide one of: url, base64, or file_id" + raise ValueError(msg) + + if base64 and not mime_type: + msg = "mime_type is required when using base64 data" + raise ValueError(msg) + + block = AudioContentBlock(type="audio", id=_ensure_id(id)) + + if url is not None: + block["url"] = url + if base64 is not None: + block["base64"] = base64 + if file_id is not None: + block["file_id"] = file_id + if mime_type is not None: + block["mime_type"] = mime_type + if index is not None: + block["index"] = index + + return block + + +def create_file_block( + *, + url: Optional[str] = None, + base64: Optional[str] = None, + file_id: Optional[str] = None, + mime_type: Optional[str] = None, + id: Optional[str] = None, + index: Optional[int] = None, +) -> FileContentBlock: + """Create a ``FileContentBlock``. + + Args: + url: URL of the file. + base64: Base64-encoded file data. + file_id: ID of the file from a file storage system. + mime_type: MIME type of the file. Required for base64 data. + id: Content block identifier. Generated automatically if not provided. + index: Index of block in aggregate response. Used during streaming. + + Returns: + A properly formatted ``FileContentBlock``. + + Raises: + ValueError: If no file source is provided or if ``base64`` is used without + ``mime_type``. + + .. note:: + The ``id`` is generated automatically if not provided, using a UUID4 format + prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + + """ + if not any([url, base64, file_id]): + msg = "Must provide one of: url, base64, or file_id" + raise ValueError(msg) + + if base64 and not mime_type: + msg = "mime_type is required when using base64 data" + raise ValueError(msg) + + block = FileContentBlock(type="file", id=_ensure_id(id)) + + if url is not None: + block["url"] = url + if base64 is not None: + block["base64"] = base64 + if file_id is not None: + block["file_id"] = file_id + if mime_type is not None: + block["mime_type"] = mime_type + if index is not None: + block["index"] = index + + return block + + +def create_plaintext_block( + text: str, + *, + url: Optional[str] = None, + base64: Optional[str] = None, + file_id: Optional[str] = None, + title: Optional[str] = None, + context: Optional[str] = None, + id: Optional[str] = None, + index: Optional[int] = None, +) -> PlainTextContentBlock: + """Create a ``PlainTextContentBlock``. + + Args: + text: The plaintext content. + url: URL of the plaintext file. + base64: Base64-encoded plaintext data. + file_id: ID of the plaintext file from a file storage system. + title: Title of the text data. + context: Context or description of the text content. + id: Content block identifier. Generated automatically if not provided. + index: Index of block in aggregate response. Used during streaming. + + Returns: + A properly formatted ``PlainTextContentBlock``. + + .. note:: + The ``id`` is generated automatically if not provided, using a UUID4 format + prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. 
+ + """ + block = PlainTextContentBlock( + type="text-plain", + mime_type="text/plain", + text=text, + id=_ensure_id(id), + ) + + if url is not None: + block["url"] = url + if base64 is not None: + block["base64"] = base64 + if file_id is not None: + block["file_id"] = file_id + if title is not None: + block["title"] = title + if context is not None: + block["context"] = context + if index is not None: + block["index"] = index + + return block + + +def create_tool_call( + name: str, + args: dict[str, Any], + *, + id: Optional[str] = None, + index: Optional[int] = None, +) -> ToolCall: + """Create a ``ToolCall``. + + Args: + name: The name of the tool to be called. + args: The arguments to the tool call. + id: An identifier for the tool call. Generated automatically if not provided. + index: Index of block in aggregate response. Used during streaming. + + Returns: + A properly formatted ``ToolCall``. + + .. note:: + The ``id`` is generated automatically if not provided, using a UUID4 format + prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + + """ + block = ToolCall( + type="tool_call", + name=name, + args=args, + id=_ensure_id(id), + ) + + if index is not None: + block["index"] = index + + return block + + +def create_reasoning_block( + reasoning: Optional[str] = None, + id: Optional[str] = None, + index: Optional[int] = None, +) -> ReasoningContentBlock: + """Create a ``ReasoningContentBlock``. + + Args: + reasoning: The reasoning text or thought summary. + id: Content block identifier. Generated automatically if not provided. + index: Index of block in aggregate response. Used during streaming. + + Returns: + A properly formatted ``ReasoningContentBlock``. + + .. note:: + The ``id`` is generated automatically if not provided, using a UUID4 format + prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + + """ + block = ReasoningContentBlock( + type="reasoning", + reasoning=reasoning or "", + id=_ensure_id(id), + ) + + if index is not None: + block["index"] = index + + return block + + +def create_citation( + *, + url: Optional[str] = None, + title: Optional[str] = None, + start_index: Optional[int] = None, + end_index: Optional[int] = None, + cited_text: Optional[str] = None, + id: Optional[str] = None, +) -> Citation: + """Create a ``Citation``. + + Args: + url: URL of the document source. + title: Source document title. + start_index: Start index in the response text where citation applies. + end_index: End index in the response text where citation applies. + cited_text: Excerpt of source text being cited. + id: Content block identifier. Generated automatically if not provided. + + Returns: + A properly formatted ``Citation``. + + .. note:: + The ``id`` is generated automatically if not provided, using a UUID4 format + prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + + """ + block = Citation(type="citation", id=_ensure_id(id)) + + if url is not None: + block["url"] = url + if title is not None: + block["title"] = title + if start_index is not None: + block["start_index"] = start_index + if end_index is not None: + block["end_index"] = end_index + if cited_text is not None: + block["cited_text"] = cited_text + + return block + + +def create_non_standard_block( + value: dict[str, Any], + *, + id: Optional[str] = None, + index: Optional[int] = None, +) -> NonStandardContentBlock: + """Create a ``NonStandardContentBlock``. + + Args: + value: Provider-specific data. + id: Content block identifier. 
Generated automatically if not provided. + index: Index of block in aggregate response. Used during streaming. + + Returns: + A properly formatted ``NonStandardContentBlock``. + + .. note:: + The ``id`` is generated automatically if not provided, using a UUID4 format + prefixed with ``'lc_'`` to indicate it is a LangChain-generated ID. + + """ + block = NonStandardContentBlock( + type="non_standard", + value=value, + id=_ensure_id(id), + ) + + if index is not None: + block["index"] = index + + return block diff --git a/libs/core/langchain_core/messages/modifier.py b/libs/core/langchain_core/messages/modifier.py index 08b7e79b69c..5f1602a4908 100644 --- a/libs/core/langchain_core/messages/modifier.py +++ b/libs/core/langchain_core/messages/modifier.py @@ -13,7 +13,7 @@ class RemoveMessage(BaseMessage): def __init__( self, - id: str, # noqa: A002 + id: str, **kwargs: Any, ) -> None: """Create a RemoveMessage. diff --git a/libs/core/langchain_core/messages/tool.py b/libs/core/langchain_core/messages/tool.py index 1f8a519a7dc..181c80443d5 100644 --- a/libs/core/langchain_core/messages/tool.py +++ b/libs/core/langchain_core/messages/tool.py @@ -5,9 +5,12 @@ from typing import Any, Literal, Optional, Union from uuid import UUID from pydantic import Field, model_validator -from typing_extensions import NotRequired, TypedDict, override +from typing_extensions import override from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content +from langchain_core.messages.content_blocks import InvalidToolCall as InvalidToolCall +from langchain_core.messages.content_blocks import ToolCall as ToolCall +from langchain_core.messages.content_blocks import ToolCallChunk as ToolCallChunk from langchain_core.utils._merge import merge_dicts, merge_obj @@ -177,42 +180,11 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk): return super().__add__(other) -class ToolCall(TypedDict): - """Represents a request to call a tool. - - Example: - - .. code-block:: python - - { - "name": "foo", - "args": {"a": 1}, - "id": "123" - } - - This represents a request to call the tool named "foo" with arguments {"a": 1} - and an identifier of "123". - - """ - - name: str - """The name of the tool to be called.""" - args: dict[str, Any] - """The arguments to the tool call.""" - id: Optional[str] - """An identifier associated with the tool call. - - An identifier is needed to associate a tool call request with a tool - call result in events when multiple concurrent tool calls are made. - """ - type: NotRequired[Literal["tool_call"]] - - def tool_call( *, name: str, args: dict[str, Any], - id: Optional[str], # noqa: A002 + id: Optional[str], ) -> ToolCall: """Create a tool call. @@ -224,43 +196,11 @@ def tool_call( return ToolCall(name=name, args=args, id=id, type="tool_call") -class ToolCallChunk(TypedDict): - """A chunk of a tool call (e.g., as part of a stream). - - When merging ToolCallChunks (e.g., via AIMessageChunk.__add__), - all string attributes are concatenated. Chunks are only merged if their - values of `index` are equal and not None. - - Example: - - .. 
code-block:: python - - left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)] - right_chunks = [ToolCallChunk(name=None, args='1}', index=0)] - - ( - AIMessageChunk(content="", tool_call_chunks=left_chunks) - + AIMessageChunk(content="", tool_call_chunks=right_chunks) - ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)] - - """ - - name: Optional[str] - """The name of the tool to be called.""" - args: Optional[str] - """The arguments to the tool call.""" - id: Optional[str] - """An identifier associated with the tool call.""" - index: Optional[int] - """The index of the tool call in a sequence.""" - type: NotRequired[Literal["tool_call_chunk"]] - - def tool_call_chunk( *, name: Optional[str] = None, args: Optional[str] = None, - id: Optional[str] = None, # noqa: A002 + id: Optional[str] = None, index: Optional[int] = None, ) -> ToolCallChunk: """Create a tool call chunk. @@ -276,29 +216,11 @@ def tool_call_chunk( ) -class InvalidToolCall(TypedDict): - """Allowance for errors made by LLM. - - Here we add an `error` key to surface errors made during generation - (e.g., invalid JSON arguments.) - """ - - name: Optional[str] - """The name of the tool to be called.""" - args: Optional[str] - """The arguments to the tool call.""" - id: Optional[str] - """An identifier associated with the tool call.""" - error: Optional[str] - """An error message associated with the tool call.""" - type: NotRequired[Literal["invalid_tool_call"]] - - def invalid_tool_call( *, name: Optional[str] = None, args: Optional[str] = None, - id: Optional[str] = None, # noqa: A002 + id: Optional[str] = None, error: Optional[str] = None, ) -> InvalidToolCall: """Create an invalid tool call. diff --git a/libs/core/langchain_core/messages/utils.py b/libs/core/langchain_core/messages/utils.py index 11c044eb438..02d4679fe76 100644 --- a/libs/core/langchain_core/messages/utils.py +++ b/libs/core/langchain_core/messages/utils.py @@ -40,6 +40,12 @@ from langchain_core.messages.human import HumanMessage, HumanMessageChunk from langchain_core.messages.modifier import RemoveMessage from langchain_core.messages.system import SystemMessage, SystemMessageChunk from langchain_core.messages.tool import ToolCall, ToolMessage, ToolMessageChunk +from langchain_core.v1.messages import AIMessage as AIMessageV1 +from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1 +from langchain_core.v1.messages import HumanMessage as HumanMessageV1 +from langchain_core.v1.messages import MessageV1, MessageV1Types +from langchain_core.v1.messages import SystemMessage as SystemMessageV1 +from langchain_core.v1.messages import ToolMessage as ToolMessageV1 if TYPE_CHECKING: from langchain_text_splitters import TextSplitter @@ -203,7 +209,7 @@ def message_chunk_to_message(chunk: BaseMessageChunk) -> BaseMessage: MessageLikeRepresentation = Union[ - BaseMessage, list[str], tuple[str, str], str, dict[str, Any] + BaseMessage, list[str], tuple[str, str], str, dict[str, Any], MessageV1 ] @@ -213,7 +219,7 @@ def _create_message_from_message_type( name: Optional[str] = None, tool_call_id: Optional[str] = None, tool_calls: Optional[list[dict[str, Any]]] = None, - id: Optional[str] = None, # noqa: A002 + id: Optional[str] = None, **additional_kwargs: Any, ) -> BaseMessage: """Create a message from a message type and content string. 
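The merge semantics documented on the removed ``ToolCallChunk`` TypedDict still hold for the re-exported factory; a sketch, assuming ``tool_call_chunk`` keeps the signature shown above:

.. code-block:: python

    from langchain_core.messages import AIMessageChunk
    from langchain_core.messages.tool import tool_call_chunk

    # Chunks merge only when their `index` values are equal and not None;
    # string fields (name, args, id) are concatenated.
    left = AIMessageChunk(
        content="",
        tool_call_chunks=[tool_call_chunk(name="foo", args='{"a":', index=0)],
    )
    right = AIMessageChunk(
        content="",
        tool_call_chunks=[tool_call_chunk(args="1}", index=0)],
    )
    merged = left + right
    assert merged.tool_call_chunks[0]["args"] == '{"a":1}'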
@@ -294,6 +300,130 @@ def _create_message_from_message_type( return message +def _create_message_from_message_type_v1( + message_type: str, + content: str, + name: Optional[str] = None, + tool_call_id: Optional[str] = None, + tool_calls: Optional[list[dict[str, Any]]] = None, + id: Optional[str] = None, + **kwargs: Any, +) -> MessageV1: + """Create a message from a message type and content string. + + Args: + message_type: (str) the type of the message (e.g., "human", "ai", etc.). + content: (str) the content string. + name: (str) the name of the message. Default is None. + tool_call_id: (str) the tool call id. Default is None. + tool_calls: (list[dict[str, Any]]) the tool calls. Default is None. + id: (str) the id of the message. Default is None. + kwargs: (dict[str, Any]) additional keyword arguments. + + Returns: + a message of the appropriate type. + + Raises: + ValueError: if the message type is not one of "human", "user", "ai", + "assistant", "tool", "system", or "developer". + """ + if name is not None: + kwargs["name"] = name + if tool_call_id is not None: + kwargs["tool_call_id"] = tool_call_id + if kwargs and (response_metadata := kwargs.pop("response_metadata", None)): + kwargs["response_metadata"] = response_metadata + if id is not None: + kwargs["id"] = id + if tool_calls is not None: + kwargs["tool_calls"] = [] + for tool_call in tool_calls: + # Convert OpenAI-format tool call to LangChain format. + if "function" in tool_call: + args = tool_call["function"]["arguments"] + if isinstance(args, str): + args = json.loads(args, strict=False) + kwargs["tool_calls"].append( + { + "name": tool_call["function"]["name"], + "args": args, + "id": tool_call["id"], + "type": "tool_call", + } + ) + else: + kwargs["tool_calls"].append(tool_call) + if message_type in {"human", "user"}: + message: MessageV1 = HumanMessageV1(content=content, **kwargs) + elif message_type in {"ai", "assistant"}: + message = AIMessageV1(content=content, **kwargs) + elif message_type in {"system", "developer"}: + if message_type == "developer": + kwargs["custom_role"] = "developer" + message = SystemMessageV1(content=content, **kwargs) + elif message_type == "tool": + artifact = kwargs.pop("artifact", None) + message = ToolMessageV1(content=content, artifact=artifact, **kwargs) + else: + msg = ( + f"Unexpected message type: '{message_type}'. Use one of 'human'," + f" 'user', 'ai', 'assistant', 'tool', 'system', or 'developer'." + ) + msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE) + raise ValueError(msg) + return message + + +def convert_from_v1_message(message: MessageV1) -> BaseMessage: + """Compatibility layer to convert v1 messages to current messages. + + Args: + message: MessageV1 instance to convert. + + Returns: + BaseMessage: Converted message instance.
+ """ + content = cast("Union[str, list[str | dict]]", message.content) + if isinstance(message, AIMessageV1): + return AIMessage( + content=content, + id=message.id, + name=message.name, + tool_calls=message.tool_calls, + response_metadata=cast("dict", message.response_metadata), + ) + if isinstance(message, AIMessageChunkV1): + return AIMessageChunk( + content=content, + id=message.id, + name=message.name, + tool_call_chunks=message.tool_call_chunks, + response_metadata=cast("dict", message.response_metadata), + ) + if isinstance(message, HumanMessageV1): + return HumanMessage( + content=content, + id=message.id, + name=message.name, + ) + if isinstance(message, SystemMessageV1): + return SystemMessage( + content=content, + id=message.id, + ) + if isinstance(message, ToolMessageV1): + return ToolMessage( + content=content, + id=message.id, + tool_call_id=message.tool_call_id, + artifact=message.artifact, + name=message.name, + status=message.status, + ) + message = f"Unsupported message type: {type(message)}" + raise NotImplementedError(message) + + def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage: """Instantiate a message from a variety of message formats. @@ -341,6 +471,66 @@ def _convert_to_message(message: MessageLikeRepresentation) -> BaseMessage: message_ = _create_message_from_message_type( msg_type, msg_content, **msg_kwargs ) + elif isinstance(message, MessageV1Types): + message_ = convert_from_v1_message(message) + else: + msg = f"Unsupported message type: {type(message)}" + msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE) + raise NotImplementedError(msg) + + return message_ + + +def _convert_to_message_v1(message: MessageLikeRepresentation) -> MessageV1: + """Instantiate a message from a variety of message formats. + + The message format can be one of the following: + + - BaseMessagePromptTemplate + - BaseMessage + - 2-tuple of (role string, template); e.g., ("human", "{user_input}") + - dict: a message dict with role and content keys + - string: shorthand for ("human", template); e.g., "{user_input}" + + Args: + message: a representation of a message in one of the supported formats. + + Returns: + an instance of a message or a message template. + + Raises: + NotImplementedError: if the message type is not supported. + ValueError: if the message dict does not contain the required keys. 
+ """ + if isinstance(message, MessageV1Types): + if isinstance(message, AIMessageChunkV1): + message_: MessageV1 = message.to_message() + else: + message_ = message + elif isinstance(message, str): + message_ = _create_message_from_message_type_v1("human", message) + elif isinstance(message, Sequence) and len(message) == 2: + # mypy doesn't realise this can't be a string given the previous branch + message_type_str, template = message # type: ignore[misc] + message_ = _create_message_from_message_type_v1(message_type_str, template) + elif isinstance(message, dict): + msg_kwargs = message.copy() + try: + try: + msg_type = msg_kwargs.pop("role") + except KeyError: + msg_type = msg_kwargs.pop("type") + # None msg content is not allowed + msg_content = msg_kwargs.pop("content") or "" + except KeyError as e: + msg = f"Message dict must contain 'role' and 'content' keys, got {message}" + msg = create_message( + message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE + ) + raise ValueError(msg) from e + message_ = _create_message_from_message_type_v1( + msg_type, msg_content, **msg_kwargs + ) else: msg = f"Unsupported message type: {type(message)}" msg = create_message(message=msg, error_code=ErrorCode.MESSAGE_COERCION_FAILURE) @@ -368,6 +558,25 @@ def convert_to_messages( return [_convert_to_message(m) for m in messages] +def convert_to_messages_v1( + messages: Union[Iterable[MessageLikeRepresentation], PromptValue], +) -> list[MessageV1]: + """Convert a sequence of messages to a list of messages. + + Args: + messages: Sequence of messages to convert. + + Returns: + list of messages (BaseMessages). + """ + # Import here to avoid circular imports + from langchain_core.prompt_values import PromptValue + + if isinstance(messages, PromptValue): + return messages.to_messages(message_version="v1") + return [_convert_to_message_v1(m) for m in messages] + + def _runnable_support(func: Callable) -> Callable: @overload def wrapped( @@ -1007,10 +1216,11 @@ def convert_to_openai_messages( oai_messages: list = [] - if is_single := isinstance(messages, (BaseMessage, dict, str)): + if is_single := isinstance(messages, (BaseMessage, dict, str, MessageV1Types)): messages = [messages] - messages = convert_to_messages(messages) + # TODO: resolve type ignore here + messages = convert_to_messages(messages) # type: ignore[arg-type] for i, message in enumerate(messages): oai_msg: dict = {"role": _get_message_openai_role(message)} diff --git a/libs/core/langchain_core/output_parsers/base.py b/libs/core/langchain_core/output_parsers/base.py index a187efb4b23..af144af70de 100644 --- a/libs/core/langchain_core/output_parsers/base.py +++ b/libs/core/langchain_core/output_parsers/base.py @@ -11,6 +11,7 @@ from typing import ( Optional, TypeVar, Union, + cast, ) from typing_extensions import override @@ -20,19 +21,22 @@ from langchain_core.messages import AnyMessage, BaseMessage from langchain_core.outputs import ChatGeneration, Generation from langchain_core.runnables import Runnable, RunnableConfig, RunnableSerializable from langchain_core.runnables.config import run_in_executor +from langchain_core.v1.messages import AIMessage, MessageV1, MessageV1Types if TYPE_CHECKING: from langchain_core.prompt_values import PromptValue T = TypeVar("T") -OutputParserLike = Runnable[LanguageModelOutput, T] +OutputParserLike = Runnable[Union[LanguageModelOutput, AIMessage], T] class BaseLLMOutputParser(ABC, Generic[T]): """Abstract base class for parsing the outputs of a model.""" @abstractmethod - def parse_result(self, result: 
list[Generation], *, partial: bool = False) -> T: + def parse_result( + self, result: Union[list[Generation], AIMessage], *, partial: bool = False + ) -> T: """Parse a list of candidate model Generations into a specific format. Args: @@ -46,7 +50,7 @@ class BaseLLMOutputParser(ABC, Generic[T]): """ async def aparse_result( - self, result: list[Generation], *, partial: bool = False + self, result: Union[list[Generation], AIMessage], *, partial: bool = False ) -> T: """Async parse a list of candidate model Generations into a specific format. @@ -71,7 +75,7 @@ class BaseGenerationOutputParser( @override def InputType(self) -> Any: """Return the input type for the parser.""" - return Union[str, AnyMessage] + return Union[str, AnyMessage, MessageV1] @property @override @@ -84,7 +88,7 @@ class BaseGenerationOutputParser( @override def invoke( self, - input: Union[str, BaseMessage], + input: Union[str, BaseMessage, MessageV1], config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> T: @@ -97,9 +101,16 @@ class BaseGenerationOutputParser( config, run_type="parser", ) + if isinstance(input, MessageV1Types): + return self._call_with_config( + lambda inner_input: self.parse_result(inner_input), + input, + config, + run_type="parser", + ) return self._call_with_config( lambda inner_input: self.parse_result([Generation(text=inner_input)]), - input, + cast("str", input), config, run_type="parser", ) @@ -120,6 +131,13 @@ class BaseGenerationOutputParser( config, run_type="parser", ) + if isinstance(input, MessageV1Types): + return await self._acall_with_config( + lambda inner_input: self.aparse_result(inner_input), + input, + config, + run_type="parser", + ) return await self._acall_with_config( lambda inner_input: self.aparse_result([Generation(text=inner_input)]), input, @@ -129,7 +147,7 @@ class BaseGenerationOutputParser( class BaseOutputParser( - BaseLLMOutputParser, RunnableSerializable[LanguageModelOutput, T] + BaseLLMOutputParser, RunnableSerializable[Union[LanguageModelOutput, AIMessage], T] ): """Base class to parse the output of an LLM call. 
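With ``OutputParserLike`` widened to ``Union[LanguageModelOutput, AIMessage]``, a bare v1 message can be piped into a parser without wrapping it in a ``Generation``. A sketch, assuming the v1 ``AIMessage`` accepts plain-string content the way the coercion helpers in this patch do:

.. code-block:: python

    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.v1.messages import AIMessage

    parser = StrOutputParser()
    parser.invoke("plain text")  # str path, unchanged
    # v1 path: dispatched to parse_result(message) via the MessageV1Types branch
    parser.invoke(AIMessage(content="hello"))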
@@ -162,7 +180,7 @@ class BaseOutputParser( @override def InputType(self) -> Any: """Return the input type for the parser.""" - return Union[str, AnyMessage] + return Union[str, AnyMessage, MessageV1] @property @override @@ -189,7 +207,7 @@ class BaseOutputParser( @override def invoke( self, - input: Union[str, BaseMessage], + input: Union[str, BaseMessage, MessageV1], config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> T: @@ -202,9 +220,16 @@ class BaseOutputParser( config, run_type="parser", ) + if isinstance(input, MessageV1Types): + return self._call_with_config( + lambda inner_input: self.parse_result(inner_input), + input, + config, + run_type="parser", + ) return self._call_with_config( lambda inner_input: self.parse_result([Generation(text=inner_input)]), - input, + cast("str", input), config, run_type="parser", ) @@ -212,7 +237,7 @@ class BaseOutputParser( @override async def ainvoke( self, - input: Union[str, BaseMessage], + input: Union[str, BaseMessage, MessageV1], config: Optional[RunnableConfig] = None, **kwargs: Optional[Any], ) -> T: @@ -225,15 +250,24 @@ class BaseOutputParser( config, run_type="parser", ) + if isinstance(input, MessageV1Types): + return await self._acall_with_config( + lambda inner_input: self.aparse_result(inner_input), + input, + config, + run_type="parser", + ) return await self._acall_with_config( lambda inner_input: self.aparse_result([Generation(text=inner_input)]), - input, + cast("str", input), config, run_type="parser", ) @override - def parse_result(self, result: list[Generation], *, partial: bool = False) -> T: + def parse_result( + self, result: Union[list[Generation], AIMessage], *, partial: bool = False + ) -> T: """Parse a list of candidate model Generations into a specific format. The return value is parsed from only the first Generation in the result, which @@ -248,6 +282,8 @@ class BaseOutputParser( Returns: Structured output. """ + if isinstance(result, AIMessage): + return self.parse(result.text) return self.parse(result[0].text) @abstractmethod @@ -262,7 +298,7 @@ class BaseOutputParser( """ async def aparse_result( - self, result: list[Generation], *, partial: bool = False + self, result: Union[list[Generation], AIMessage], *, partial: bool = False ) -> T: """Async parse a list of candidate model Generations into a specific format. diff --git a/libs/core/langchain_core/output_parsers/json.py b/libs/core/langchain_core/output_parsers/json.py index 8d8e4d52765..54f1196b80e 100644 --- a/libs/core/langchain_core/output_parsers/json.py +++ b/libs/core/langchain_core/output_parsers/json.py @@ -21,6 +21,7 @@ from langchain_core.utils.json import ( parse_json_markdown, parse_partial_json, ) +from langchain_core.v1.messages import AIMessage # Union type needs to be last assignment to PydanticBaseModel to make mypy happy. PydanticBaseModel = Union[BaseModel, pydantic.BaseModel] @@ -53,7 +54,9 @@ class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]): return pydantic_object.schema() return None - def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + def parse_result( + self, result: Union[list[Generation], AIMessage], *, partial: bool = False + ) -> Any: """Parse the result of an LLM call to a JSON object. Args: @@ -70,7 +73,7 @@ class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]): Raises: OutputParserException: If the output is not valid JSON. 
""" - text = result[0].text + text = result.text if isinstance(result, AIMessage) else result[0].text text = text.strip() if partial: try: diff --git a/libs/core/langchain_core/output_parsers/list.py b/libs/core/langchain_core/output_parsers/list.py index d60afe3554d..08b8c65e750 100644 --- a/libs/core/langchain_core/output_parsers/list.py +++ b/libs/core/langchain_core/output_parsers/list.py @@ -13,6 +13,7 @@ from typing_extensions import override from langchain_core.messages import BaseMessage from langchain_core.output_parsers.transform import BaseTransformOutputParser +from langchain_core.v1.messages import AIMessage if TYPE_CHECKING: from collections.abc import AsyncIterator, Iterator @@ -71,7 +72,7 @@ class ListOutputParser(BaseTransformOutputParser[list[str]]): @override def _transform( - self, input: Iterator[Union[str, BaseMessage]] + self, input: Iterator[Union[str, BaseMessage, AIMessage]] ) -> Iterator[list[str]]: buffer = "" for chunk in input: @@ -81,6 +82,8 @@ class ListOutputParser(BaseTransformOutputParser[list[str]]): if not isinstance(chunk_content, str): continue buffer += chunk_content + elif isinstance(chunk, AIMessage): + buffer += chunk.text else: # add current chunk to buffer buffer += chunk @@ -105,7 +108,7 @@ class ListOutputParser(BaseTransformOutputParser[list[str]]): @override async def _atransform( - self, input: AsyncIterator[Union[str, BaseMessage]] + self, input: AsyncIterator[Union[str, BaseMessage, AIMessage]] ) -> AsyncIterator[list[str]]: buffer = "" async for chunk in input: @@ -115,6 +118,8 @@ class ListOutputParser(BaseTransformOutputParser[list[str]]): if not isinstance(chunk_content, str): continue buffer += chunk_content + elif isinstance(chunk, AIMessage): + buffer += chunk.text else: # add current chunk to buffer buffer += chunk diff --git a/libs/core/langchain_core/output_parsers/openai_functions.py b/libs/core/langchain_core/output_parsers/openai_functions.py index 129c9855061..6c333fc797e 100644 --- a/libs/core/langchain_core/output_parsers/openai_functions.py +++ b/libs/core/langchain_core/output_parsers/openai_functions.py @@ -17,6 +17,7 @@ from langchain_core.output_parsers import ( ) from langchain_core.output_parsers.json import parse_partial_json from langchain_core.outputs import ChatGeneration, Generation +from langchain_core.v1.messages import AIMessage class OutputFunctionsParser(BaseGenerationOutputParser[Any]): @@ -26,7 +27,9 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]): """Whether to only return the arguments to the function call.""" @override - def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + def parse_result( + self, result: Union[list[Generation], AIMessage], *, partial: bool = False + ) -> Any: """Parse the result of an LLM call to a JSON object. Args: @@ -39,6 +42,12 @@ class OutputFunctionsParser(BaseGenerationOutputParser[Any]): Raises: OutputParserException: If the output is not valid JSON. """ + if isinstance(result, AIMessage): + msg = ( + "This output parser does not support v1 AIMessages. Use " + "JsonOutputToolsParser instead." + ) + raise TypeError(msg) generation = result[0] if not isinstance(generation, ChatGeneration): msg = "This output parser can only be used with a chat generation." 
@@ -77,7 +86,9 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]): def _diff(self, prev: Optional[Any], next: Any) -> Any: return jsonpatch.make_patch(prev, next).patch - def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + def parse_result( + self, result: Union[list[Generation], AIMessage], *, partial: bool = False + ) -> Any: """Parse the result of an LLM call to a JSON object. Args: @@ -90,6 +101,12 @@ class JsonOutputFunctionsParser(BaseCumulativeTransformOutputParser[Any]): Raises: OutputParserException: If the output is not valid JSON. """ + if isinstance(result, AIMessage): + msg = ( + "This output parser does not support v1 AIMessages. Use " + "JsonOutputToolsParser instead." + ) + raise TypeError(msg) if len(result) != 1: msg = f"Expected exactly one result, but got {len(result)}" raise OutputParserException(msg) @@ -160,7 +177,9 @@ class JsonKeyOutputFunctionsParser(JsonOutputFunctionsParser): key_name: str """The name of the key to return.""" - def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + def parse_result( + self, result: Union[list[Generation], AIMessage], *, partial: bool = False + ) -> Any: """Parse the result of an LLM call to a JSON object. Args: @@ -254,7 +273,9 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser): return values @override - def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + def parse_result( + self, result: Union[list[Generation], AIMessage], *, partial: bool = False + ) -> Any: """Parse the result of an LLM call to a JSON object. Args: @@ -294,7 +315,9 @@ class PydanticAttrOutputFunctionsParser(PydanticOutputFunctionsParser): """The name of the attribute to return.""" @override - def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + def parse_result( + self, result: Union[list[Generation], AIMessage], *, partial: bool = False + ) -> Any: """Parse the result of an LLM call to a JSON object. Args: diff --git a/libs/core/langchain_core/output_parsers/openai_tools.py b/libs/core/langchain_core/output_parsers/openai_tools.py index 63495bc2d84..f4bf85b970e 100644 --- a/libs/core/langchain_core/output_parsers/openai_tools.py +++ b/libs/core/langchain_core/output_parsers/openai_tools.py @@ -4,7 +4,7 @@ import copy import json import logging from json import JSONDecodeError -from typing import Annotated, Any, Optional +from typing import Annotated, Any, Optional, Union from pydantic import SkipValidation, ValidationError @@ -16,6 +16,7 @@ from langchain_core.output_parsers.transform import BaseCumulativeTransformOutpu from langchain_core.outputs import ChatGeneration, Generation from langchain_core.utils.json import parse_partial_json from langchain_core.utils.pydantic import TypeBaseModel +from langchain_core.v1.messages import AIMessage as AIMessageV1 logger = logging.getLogger(__name__) @@ -156,7 +157,9 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]): If no tool calls are found, None will be returned. """ - def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + def parse_result( + self, result: Union[list[Generation], AIMessageV1], *, partial: bool = False + ) -> Any: """Parse the result of an LLM call to a list of tool calls. Args: @@ -173,31 +176,45 @@ class JsonOutputToolsParser(BaseCumulativeTransformOutputParser[Any]): Raises: OutputParserException: If the output is not valid JSON. 
""" - generation = result[0] - if not isinstance(generation, ChatGeneration): - msg = "This output parser can only be used with a chat generation." - raise OutputParserException(msg) - message = generation.message - if isinstance(message, AIMessage) and message.tool_calls: - tool_calls = [dict(tc) for tc in message.tool_calls] + if isinstance(result, list): + generation = result[0] + if not isinstance(generation, ChatGeneration): + msg = ( + "This output parser can only be used with a chat generation or " + "v1 AIMessage." + ) + raise OutputParserException(msg) + message = generation.message + if isinstance(message, AIMessage) and message.tool_calls: + tool_calls = [dict(tc) for tc in message.tool_calls] + for tool_call in tool_calls: + if not self.return_id: + _ = tool_call.pop("id") + else: + try: + raw_tool_calls = copy.deepcopy( + message.additional_kwargs["tool_calls"] + ) + except KeyError: + return [] + tool_calls = parse_tool_calls( + raw_tool_calls, + partial=partial, + strict=self.strict, + return_id=self.return_id, + ) + elif result.tool_calls: + # v1 message + tool_calls = [dict(tc) for tc in result.tool_calls] for tool_call in tool_calls: if not self.return_id: _ = tool_call.pop("id") else: - try: - raw_tool_calls = copy.deepcopy(message.additional_kwargs["tool_calls"]) - except KeyError: - return [] - tool_calls = parse_tool_calls( - raw_tool_calls, - partial=partial, - strict=self.strict, - return_id=self.return_id, - ) + return [] + # for backwards compatibility for tc in tool_calls: tc["type"] = tc.pop("name") - if self.first_tool_only: return tool_calls[0] if tool_calls else None return tool_calls @@ -220,7 +237,9 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser): key_name: str """The type of tools to return.""" - def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + def parse_result( + self, result: Union[list[Generation], AIMessageV1], *, partial: bool = False + ) -> Any: """Parse the result of an LLM call to a list of tool calls. Args: @@ -234,32 +253,47 @@ class JsonOutputKeyToolsParser(JsonOutputToolsParser): Returns: The parsed tool calls. """ - generation = result[0] - if not isinstance(generation, ChatGeneration): - msg = "This output parser can only be used with a chat generation." - raise OutputParserException(msg) - message = generation.message - if isinstance(message, AIMessage) and message.tool_calls: - parsed_tool_calls = [dict(tc) for tc in message.tool_calls] + if isinstance(result, list): + generation = result[0] + if not isinstance(generation, ChatGeneration): + msg = "This output parser can only be used with a chat generation." 
+ raise OutputParserException(msg) + message = generation.message + if isinstance(message, AIMessage) and message.tool_calls: + parsed_tool_calls = [dict(tc) for tc in message.tool_calls] + for tool_call in parsed_tool_calls: + if not self.return_id: + _ = tool_call.pop("id") + else: + try: + raw_tool_calls = copy.deepcopy( + message.additional_kwargs["tool_calls"] + ) + except KeyError: + if self.first_tool_only: + return None + return [] + parsed_tool_calls = parse_tool_calls( + raw_tool_calls, + partial=partial, + strict=self.strict, + return_id=self.return_id, + ) + elif result.tool_calls: + # v1 message + parsed_tool_calls = [dict(tc) for tc in result.tool_calls] for tool_call in parsed_tool_calls: if not self.return_id: _ = tool_call.pop("id") else: - try: - raw_tool_calls = copy.deepcopy(message.additional_kwargs["tool_calls"]) - except KeyError: - if self.first_tool_only: - return None - return [] - parsed_tool_calls = parse_tool_calls( - raw_tool_calls, - partial=partial, - strict=self.strict, - return_id=self.return_id, - ) + if self.first_tool_only: + return None + return [] + # For backwards compatibility for tc in parsed_tool_calls: tc["type"] = tc.pop("name") + if self.first_tool_only: parsed_result = list( filter(lambda x: x["type"] == self.key_name, parsed_tool_calls) @@ -299,7 +333,9 @@ class PydanticToolsParser(JsonOutputToolsParser): # TODO: Support more granular streaming of objects. Currently only streams once all # Pydantic object fields are present. - def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any: + def parse_result( + self, result: Union[list[Generation], AIMessageV1], *, partial: bool = False + ) -> Any: """Parse the result of an LLM call to a list of Pydantic objects. Args: @@ -337,12 +373,19 @@ class PydanticToolsParser(JsonOutputToolsParser): except (ValidationError, ValueError): if partial: continue - has_max_tokens_stop_reason = any( - generation.message.response_metadata.get("stop_reason") - == "max_tokens" - for generation in result - if isinstance(generation, ChatGeneration) - ) + has_max_tokens_stop_reason = False + if isinstance(result, list): + has_max_tokens_stop_reason = any( + generation.message.response_metadata.get("stop_reason") + == "max_tokens" + for generation in result + if isinstance(generation, ChatGeneration) + ) + else: + # v1 message + has_max_tokens_stop_reason = ( + result.response_metadata.get("stop_reason") == "max_tokens" + ) if has_max_tokens_stop_reason: logger.exception(_MAX_TOKENS_ERROR) raise diff --git a/libs/core/langchain_core/output_parsers/pydantic.py b/libs/core/langchain_core/output_parsers/pydantic.py index b543df55768..544e47f404f 100644 --- a/libs/core/langchain_core/output_parsers/pydantic.py +++ b/libs/core/langchain_core/output_parsers/pydantic.py @@ -1,7 +1,7 @@ """Output parsers using Pydantic.""" import json -from typing import Annotated, Generic, Optional +from typing import Annotated, Generic, Optional, Union import pydantic from pydantic import SkipValidation @@ -14,6 +14,7 @@ from langchain_core.utils.pydantic import ( PydanticBaseModel, TBaseModel, ) +from langchain_core.v1.messages import AIMessage class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]): @@ -43,7 +44,7 @@ class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]): return OutputParserException(msg, llm_output=json_string) def parse_result( - self, result: list[Generation], *, partial: bool = False + self, result: Union[list[Generation], AIMessage], *, partial: bool = False ) -> 
Optional[TBaseModel]: """Parse the result of an LLM call to a pydantic object. diff --git a/libs/core/langchain_core/output_parsers/transform.py b/libs/core/langchain_core/output_parsers/transform.py index 876e66b5556..b2eccc674be 100644 --- a/libs/core/langchain_core/output_parsers/transform.py +++ b/libs/core/langchain_core/output_parsers/transform.py @@ -20,6 +20,7 @@ from langchain_core.outputs import ( GenerationChunk, ) from langchain_core.runnables.config import run_in_executor +from langchain_core.v1.messages import AIMessage, AIMessageChunk if TYPE_CHECKING: from collections.abc import AsyncIterator, Iterator @@ -32,23 +33,27 @@ class BaseTransformOutputParser(BaseOutputParser[T]): def _transform( self, - input: Iterator[Union[str, BaseMessage]], # noqa: A002 + input: Iterator[Union[str, BaseMessage, AIMessage]], ) -> Iterator[T]: for chunk in input: if isinstance(chunk, BaseMessage): yield self.parse_result([ChatGeneration(message=chunk)]) + elif isinstance(chunk, AIMessage): + yield self.parse_result(chunk) else: yield self.parse_result([Generation(text=chunk)]) async def _atransform( self, - input: AsyncIterator[Union[str, BaseMessage]], # noqa: A002 + input: AsyncIterator[Union[str, BaseMessage, AIMessage]], ) -> AsyncIterator[T]: async for chunk in input: if isinstance(chunk, BaseMessage): yield await run_in_executor( None, self.parse_result, [ChatGeneration(message=chunk)] ) + elif isinstance(chunk, AIMessage): + yield await run_in_executor(None, self.parse_result, chunk) else: yield await run_in_executor( None, self.parse_result, [Generation(text=chunk)] @@ -57,7 +62,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]): @override def transform( self, - input: Iterator[Union[str, BaseMessage]], + input: Iterator[Union[str, BaseMessage, AIMessage]], config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> Iterator[T]: @@ -78,7 +83,7 @@ class BaseTransformOutputParser(BaseOutputParser[T]): @override async def atransform( self, - input: AsyncIterator[Union[str, BaseMessage]], + input: AsyncIterator[Union[str, BaseMessage, AIMessage]], config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> AsyncIterator[T]: @@ -125,23 +130,42 @@ class BaseCumulativeTransformOutputParser(BaseTransformOutputParser[T]): raise NotImplementedError @override - def _transform(self, input: Iterator[Union[str, BaseMessage]]) -> Iterator[Any]: + def _transform( + self, input: Iterator[Union[str, BaseMessage, AIMessage]] + ) -> Iterator[Any]: prev_parsed = None - acc_gen: Union[GenerationChunk, ChatGenerationChunk, None] = None + acc_gen: Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk, None] = ( + None + ) for chunk in input: - chunk_gen: Union[GenerationChunk, ChatGenerationChunk] + chunk_gen: Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk] if isinstance(chunk, BaseMessageChunk): chunk_gen = ChatGenerationChunk(message=chunk) elif isinstance(chunk, BaseMessage): chunk_gen = ChatGenerationChunk( message=BaseMessageChunk(**chunk.model_dump()) ) + elif isinstance(chunk, AIMessageChunk): + chunk_gen = chunk + elif isinstance(chunk, AIMessage): + chunk_gen = AIMessageChunk( + content=chunk.content, + id=chunk.id, + name=chunk.name, + lc_version=chunk.lc_version, + response_metadata=chunk.response_metadata, + usage_metadata=chunk.usage_metadata, + parsed=chunk.parsed, + ) else: chunk_gen = GenerationChunk(text=chunk) acc_gen = chunk_gen if acc_gen is None else acc_gen + chunk_gen # type: ignore[operator] - parsed = self.parse_result([acc_gen], partial=True) + if 
isinstance(acc_gen, AIMessageChunk): + parsed = self.parse_result(acc_gen, partial=True) + else: + parsed = self.parse_result([acc_gen], partial=True) if parsed is not None and parsed != prev_parsed: if self.diff: yield self._diff(prev_parsed, parsed) @@ -151,24 +175,41 @@ class BaseCumulativeTransformOutputParser(BaseTransformOutputParser[T]): @override async def _atransform( - self, input: AsyncIterator[Union[str, BaseMessage]] + self, input: AsyncIterator[Union[str, BaseMessage, AIMessage]] ) -> AsyncIterator[T]: prev_parsed = None - acc_gen: Union[GenerationChunk, ChatGenerationChunk, None] = None + acc_gen: Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk, None] = ( + None + ) async for chunk in input: - chunk_gen: Union[GenerationChunk, ChatGenerationChunk] + chunk_gen: Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk] if isinstance(chunk, BaseMessageChunk): chunk_gen = ChatGenerationChunk(message=chunk) elif isinstance(chunk, BaseMessage): chunk_gen = ChatGenerationChunk( message=BaseMessageChunk(**chunk.model_dump()) ) + elif isinstance(chunk, AIMessageChunk): + chunk_gen = chunk + elif isinstance(chunk, AIMessage): + chunk_gen = AIMessageChunk( + content=chunk.content, + id=chunk.id, + name=chunk.name, + lc_version=chunk.lc_version, + response_metadata=chunk.response_metadata, + usage_metadata=chunk.usage_metadata, + parsed=chunk.parsed, + ) else: chunk_gen = GenerationChunk(text=chunk) acc_gen = chunk_gen if acc_gen is None else acc_gen + chunk_gen # type: ignore[operator] - parsed = await self.aparse_result([acc_gen], partial=True) + if isinstance(acc_gen, AIMessageChunk): + parsed = await self.aparse_result(acc_gen, partial=True) + else: + parsed = await self.aparse_result([acc_gen], partial=True) if parsed is not None and parsed != prev_parsed: if self.diff: yield await run_in_executor(None, self._diff, prev_parsed, parsed) diff --git a/libs/core/langchain_core/output_parsers/xml.py b/libs/core/langchain_core/output_parsers/xml.py index c6a46f3de1a..318a783c6b2 100644 --- a/libs/core/langchain_core/output_parsers/xml.py +++ b/libs/core/langchain_core/output_parsers/xml.py @@ -12,8 +12,10 @@ from typing_extensions import override from langchain_core.exceptions import OutputParserException from langchain_core.messages import BaseMessage +from langchain_core.messages.utils import convert_from_v1_message from langchain_core.output_parsers.transform import BaseTransformOutputParser from langchain_core.runnables.utils import AddableDict +from langchain_core.v1.messages import AIMessage XML_FORMAT_INSTRUCTIONS = """The output should be formatted as a XML file. 1. Output should conform to the tags below. 
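The hunks below teach `XMLOutputParser` to accept v1 `AIMessage` chunks by converting them back to v0 `BaseMessage` objects via `convert_from_v1_message` before the streaming parser consumes them, so the existing XML parsing path is reused unchanged. A minimal sketch of the intended round trip (assuming the parser's `invoke` accepts v1 messages after this change; the XML payload is illustrative):

    from langchain_core.output_parsers.xml import XMLOutputParser
    from langchain_core.v1.messages import AIMessage as AIMessageV1

    parser = XMLOutputParser()
    # Internally the v1 message is converted to a v0 BaseMessage, then parsed.
    result = parser.invoke(AIMessageV1(content="<root><a>1</a></root>"))
    # Expected: {"root": [{"a": "1"}]}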
@@ -240,21 +242,28 @@ class XMLOutputParser(BaseTransformOutputParser):
 
     @override
     def _transform(
-        self, input: Iterator[Union[str, BaseMessage]]
+        self, input: Iterator[Union[str, BaseMessage, AIMessage]]
     ) -> Iterator[AddableDict]:
         streaming_parser = _StreamingParser(self.parser)
         for chunk in input:
-            yield from streaming_parser.parse(chunk)
+            if isinstance(chunk, AIMessage):
+                yield from streaming_parser.parse(convert_from_v1_message(chunk))
+            else:
+                yield from streaming_parser.parse(chunk)
         streaming_parser.close()
 
     @override
     async def _atransform(
-        self, input: AsyncIterator[Union[str, BaseMessage]]
+        self, input: AsyncIterator[Union[str, BaseMessage, AIMessage]]
    ) -> AsyncIterator[AddableDict]:
         streaming_parser = _StreamingParser(self.parser)
         async for chunk in input:
-            for output in streaming_parser.parse(chunk):
-                yield output
+            if isinstance(chunk, AIMessage):
+                for output in streaming_parser.parse(convert_from_v1_message(chunk)):
+                    yield output
+            else:
+                for output in streaming_parser.parse(chunk):
+                    yield output
         streaming_parser.close()
 
     def _root_to_dict(self, root: ET.Element) -> dict[str, Union[str, list[Any]]]:
diff --git a/libs/core/langchain_core/prompt_values.py b/libs/core/langchain_core/prompt_values.py
index 7652bd76e3c..f0069f47fa4 100644
--- a/libs/core/langchain_core/prompt_values.py
+++ b/libs/core/langchain_core/prompt_values.py
@@ -8,17 +8,65 @@ from __future__ import annotations
 
 from abc import ABC, abstractmethod
 from collections.abc import Sequence
-from typing import Literal, cast
+from typing import Literal, Union, cast
 
-from typing_extensions import TypedDict
+from typing_extensions import TypedDict, overload
 
 from langchain_core.load.serializable import Serializable
 from langchain_core.messages import (
+    AIMessage,
     AnyMessage,
     BaseMessage,
     HumanMessage,
+    SystemMessage,
+    ToolMessage,
     get_buffer_string,
 )
+from langchain_core.messages import content_blocks as types
+from langchain_core.v1.messages import AIMessage as AIMessageV1
+from langchain_core.v1.messages import HumanMessage as HumanMessageV1
+from langchain_core.v1.messages import MessageV1, ResponseMetadata
+from langchain_core.v1.messages import SystemMessage as SystemMessageV1
+from langchain_core.v1.messages import ToolMessage as ToolMessageV1
+
+
+def _convert_to_v1(message: BaseMessage) -> MessageV1:
+    """Best-effort conversion of a V0 message to V1."""
+    if isinstance(message.content, str):
+        content: list[types.ContentBlock] = []
+        if message.content:
+            content = [{"type": "text", "text": message.content}]
+    else:
+        content = []
+        for block in message.content:
+            if isinstance(block, str):
+                content.append({"type": "text", "text": block})
+            elif isinstance(block, dict):
+                content.append(cast("types.ContentBlock", block))
+            else:
+                pass
+
+    if isinstance(message, HumanMessage):
+        return HumanMessageV1(content=content)
+    if isinstance(message, AIMessage):
+        for tool_call in message.tool_calls:
+            content.append(tool_call)
+        return AIMessageV1(
+            content=content,
+            usage_metadata=message.usage_metadata,
+            response_metadata=cast("ResponseMetadata", message.response_metadata),
+            tool_calls=message.tool_calls,
+        )
+    if isinstance(message, SystemMessage):
+        return SystemMessageV1(content=content)
+    if isinstance(message, ToolMessage):
+        return ToolMessageV1(
+            tool_call_id=message.tool_call_id,
+            content=content,
+            artifact=message.artifact,
+        )
+    error_message = f"Unsupported message type: {type(message)}"
+    raise TypeError(error_message)
 
 
 class PromptValue(Serializable, ABC):
@@ -46,8 +94,18 @@ 
class PromptValue(Serializable, ABC): def to_string(self) -> str: """Return prompt value as string.""" + @overload + def to_messages( + self, message_version: Literal["v0"] = "v0" + ) -> list[BaseMessage]: ... + + @overload + def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ... + @abstractmethod - def to_messages(self) -> list[BaseMessage]: + def to_messages( + self, message_version: Literal["v0", "v1"] = "v0" + ) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]: """Return prompt as a list of Messages.""" @@ -71,8 +129,20 @@ class StringPromptValue(PromptValue): """Return prompt as string.""" return self.text - def to_messages(self) -> list[BaseMessage]: + @overload + def to_messages( + self, message_version: Literal["v0"] = "v0" + ) -> list[BaseMessage]: ... + + @overload + def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ... + + def to_messages( + self, message_version: Literal["v0", "v1"] = "v0" + ) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]: """Return prompt as messages.""" + if message_version == "v1": + return [HumanMessageV1(content=self.text)] return [HumanMessage(content=self.text)] @@ -89,8 +159,24 @@ class ChatPromptValue(PromptValue): """Return prompt as string.""" return get_buffer_string(self.messages) - def to_messages(self) -> list[BaseMessage]: - """Return prompt as a list of messages.""" + @overload + def to_messages( + self, message_version: Literal["v0"] = "v0" + ) -> list[BaseMessage]: ... + + @overload + def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ... + + def to_messages( + self, message_version: Literal["v0", "v1"] = "v0" + ) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]: + """Return prompt as a list of messages. + + Args: + message_version: The output version, either "v0" (default) or "v1". + """ + if message_version == "v1": + return [_convert_to_v1(m) for m in self.messages] return list(self.messages) @classmethod @@ -125,8 +211,26 @@ class ImagePromptValue(PromptValue): """Return prompt (image URL) as string.""" return self.image_url["url"] - def to_messages(self) -> list[BaseMessage]: + @overload + def to_messages( + self, message_version: Literal["v0"] = "v0" + ) -> list[BaseMessage]: ... + + @overload + def to_messages(self, message_version: Literal["v1"]) -> list[MessageV1]: ... 
+ + def to_messages( + self, message_version: Literal["v0", "v1"] = "v0" + ) -> Union[Sequence[BaseMessage], Sequence[MessageV1]]: """Return prompt (image URL) as messages.""" + if message_version == "v1": + block: types.ImageContentBlock = { + "type": "image", + "url": self.image_url["url"], + } + if "detail" in self.image_url: + block["detail"] = self.image_url["detail"] + return [HumanMessageV1(content=[block])] return [HumanMessage(content=[cast("dict", self.image_url)])] diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py index fd66cf6dad5..e1a2b3324c5 100644 --- a/libs/core/langchain_core/runnables/base.py +++ b/libs/core/langchain_core/runnables/base.py @@ -728,7 +728,7 @@ class Runnable(ABC, Generic[Input, Output]): @abstractmethod def invoke( self, - input: Input, # noqa: A002 + input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> Output: @@ -748,7 +748,7 @@ class Runnable(ABC, Generic[Input, Output]): async def ainvoke( self, - input: Input, # noqa: A002 + input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> Output: @@ -996,7 +996,7 @@ class Runnable(ABC, Generic[Input, Output]): def stream( self, - input: Input, # noqa: A002 + input: Input, config: Optional[RunnableConfig] = None, **kwargs: Optional[Any], ) -> Iterator[Output]: @@ -1016,7 +1016,7 @@ class Runnable(ABC, Generic[Input, Output]): async def astream( self, - input: Input, # noqa: A002 + input: Input, config: Optional[RunnableConfig] = None, **kwargs: Optional[Any], ) -> AsyncIterator[Output]: @@ -1070,7 +1070,7 @@ class Runnable(ABC, Generic[Input, Output]): async def astream_log( self, - input: Any, # noqa: A002 + input: Any, config: Optional[RunnableConfig] = None, *, diff: bool = True, @@ -1141,7 +1141,7 @@ class Runnable(ABC, Generic[Input, Output]): async def astream_events( self, - input: Any, # noqa: A002 + input: Any, config: Optional[RunnableConfig] = None, *, version: Literal["v1", "v2"] = "v2", @@ -1407,7 +1407,7 @@ class Runnable(ABC, Generic[Input, Output]): def transform( self, - input: Iterator[Input], # noqa: A002 + input: Iterator[Input], config: Optional[RunnableConfig] = None, **kwargs: Optional[Any], ) -> Iterator[Output]: @@ -1449,7 +1449,7 @@ class Runnable(ABC, Generic[Input, Output]): async def atransform( self, - input: AsyncIterator[Input], # noqa: A002 + input: AsyncIterator[Input], config: Optional[RunnableConfig] = None, **kwargs: Optional[Any], ) -> AsyncIterator[Output]: @@ -2361,6 +2361,7 @@ class Runnable(ABC, Generic[Input, Output]): name: Optional[str] = None, description: Optional[str] = None, arg_types: Optional[dict[str, type]] = None, + message_version: Literal["v0", "v1"] = "v0", ) -> BaseTool: """Create a BaseTool from a Runnable. @@ -2376,6 +2377,11 @@ class Runnable(ABC, Generic[Input, Output]): name: The name of the tool. Defaults to None. description: The description of the tool. Defaults to None. arg_types: A dictionary of argument names to types. Defaults to None. + message_version: Version of ToolMessage to return given + :class:`~langchain_core.messages.content_blocks.ToolCall` input. + + If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`. + If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`. Returns: A BaseTool instance. @@ -2451,7 +2457,7 @@ class Runnable(ABC, Generic[Input, Output]): .. 
versionadded:: 0.2.14 - """ + """ # noqa: E501 # Avoid circular import from langchain_core.tools import convert_runnable_to_tool @@ -2461,6 +2467,7 @@ class Runnable(ABC, Generic[Input, Output]): name=name, description=description, arg_types=arg_types, + message_version=message_version, ) diff --git a/libs/core/langchain_core/runnables/config.py b/libs/core/langchain_core/runnables/config.py index 4ac7bda7b46..cc36622b914 100644 --- a/libs/core/langchain_core/runnables/config.py +++ b/libs/core/langchain_core/runnables/config.py @@ -402,7 +402,7 @@ def call_func_with_variable_args( Callable[[Input, CallbackManagerForChainRun], Output], Callable[[Input, CallbackManagerForChainRun, RunnableConfig], Output], ], - input: Input, # noqa: A002 + input: Input, config: RunnableConfig, run_manager: Optional[CallbackManagerForChainRun] = None, **kwargs: Any, @@ -439,7 +439,7 @@ def acall_func_with_variable_args( Awaitable[Output], ], ], - input: Input, # noqa: A002 + input: Input, config: RunnableConfig, run_manager: Optional[AsyncCallbackManagerForChainRun] = None, **kwargs: Any, diff --git a/libs/core/langchain_core/runnables/graph.py b/libs/core/langchain_core/runnables/graph.py index 3e22494bad7..20a841d51a8 100644 --- a/libs/core/langchain_core/runnables/graph.py +++ b/libs/core/langchain_core/runnables/graph.py @@ -114,7 +114,7 @@ class Node(NamedTuple): def copy( self, *, - id: Optional[str] = None, # noqa: A002 + id: Optional[str] = None, name: Optional[str] = None, ) -> Node: """Return a copy of the node with optional new id and name. @@ -187,7 +187,7 @@ class MermaidDrawMethod(Enum): def node_data_str( - id: str, # noqa: A002 + id: str, data: Union[type[BaseModel], RunnableType, None], ) -> str: """Convert the data of a node to a string. @@ -328,7 +328,7 @@ class Graph: def add_node( self, data: Union[type[BaseModel], RunnableType, None], - id: Optional[str] = None, # noqa: A002 + id: Optional[str] = None, *, metadata: Optional[dict[str, Any]] = None, ) -> Node: diff --git a/libs/core/langchain_core/tools/base.py b/libs/core/langchain_core/tools/base.py index e54a09709d6..c4900723de2 100644 --- a/libs/core/langchain_core/tools/base.py +++ b/libs/core/langchain_core/tools/base.py @@ -68,6 +68,7 @@ from langchain_core.utils.pydantic import ( is_pydantic_v1_subclass, is_pydantic_v2_subclass, ) +from langchain_core.v1.messages import ToolMessage as ToolMessageV1 if TYPE_CHECKING: import uuid @@ -498,6 +499,14 @@ class ChildTool(BaseTool): two-tuple corresponding to the (content, artifact) of a ToolMessage. """ + message_version: Literal["v0", "v1"] = "v0" + """Version of ToolMessage to return given + :class:`~langchain_core.messages.content_blocks.ToolCall` input. + + If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`. + If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`. 
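+
+    For illustration, a minimal sketch (``my_tool`` is a hypothetical tool
+    created with the ``@tool`` decorator; the ToolCall dict follows the
+    standard ``name``/``args``/``id``/``type`` schema):
+
+    .. code-block:: python
+
+        tool_call = {
+            "name": "my_tool",
+            "args": {"x": 1},
+            "id": "call_1",
+            "type": "tool_call",
+        }
+        my_tool.invoke(tool_call)  # returns a v0 ToolMessage by default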
+ """ + def __init__(self, **kwargs: Any) -> None: """Initialize the tool.""" if ( @@ -835,7 +844,7 @@ class ChildTool(BaseTool): content = None artifact = None - status = "success" + status: Literal["success", "error"] = "success" error_to_raise: Union[Exception, KeyboardInterrupt, None] = None try: child_config = patch_config(config, callbacks=run_manager.get_child()) @@ -879,7 +888,14 @@ class ChildTool(BaseTool): if error_to_raise: run_manager.on_tool_error(error_to_raise) raise error_to_raise - output = _format_output(content, artifact, tool_call_id, self.name, status) + output = _format_output( + content, + artifact, + tool_call_id, + self.name, + status, + message_version=self.message_version, + ) run_manager.on_tool_end(output, color=color, name=self.name, **kwargs) return output @@ -945,7 +961,7 @@ class ChildTool(BaseTool): ) content = None artifact = None - status = "success" + status: Literal["success", "error"] = "success" error_to_raise: Optional[Union[Exception, KeyboardInterrupt]] = None try: tool_args, tool_kwargs = self._to_args_and_kwargs(tool_input, tool_call_id) @@ -993,7 +1009,14 @@ class ChildTool(BaseTool): await run_manager.on_tool_error(error_to_raise) raise error_to_raise - output = _format_output(content, artifact, tool_call_id, self.name, status) + output = _format_output( + content, + artifact, + tool_call_id, + self.name, + status, + message_version=self.message_version, + ) await run_manager.on_tool_end(output, color=color, name=self.name, **kwargs) return output @@ -1131,7 +1154,9 @@ def _format_output( artifact: Any, tool_call_id: Optional[str], name: str, - status: str, + status: Literal["success", "error"], + *, + message_version: Literal["v0", "v1"] = "v0", ) -> Union[ToolOutputMixin, Any]: """Format tool output as a ToolMessage if appropriate. @@ -1141,6 +1166,7 @@ def _format_output( tool_call_id: The ID of the tool call. name: The name of the tool. status: The execution status. + message_version: The version of the ToolMessage to return. Returns: The formatted output, either as a ToolMessage or the original content. @@ -1149,7 +1175,15 @@ def _format_output( return content if not _is_message_content_type(content): content = _stringify(content) - return ToolMessage( + if message_version == "v0": + return ToolMessage( + content, + artifact=artifact, + tool_call_id=tool_call_id, + name=name, + status=status, + ) + return ToolMessageV1( content, artifact=artifact, tool_call_id=tool_call_id, diff --git a/libs/core/langchain_core/tools/convert.py b/libs/core/langchain_core/tools/convert.py index 8b103fd54d6..976f23a5999 100644 --- a/libs/core/langchain_core/tools/convert.py +++ b/libs/core/langchain_core/tools/convert.py @@ -22,6 +22,7 @@ def tool( response_format: Literal["content", "content_and_artifact"] = "content", parse_docstring: bool = False, error_on_invalid_docstring: bool = True, + message_version: Literal["v0", "v1"] = "v0", ) -> Callable[[Union[Callable, Runnable]], BaseTool]: ... @@ -37,6 +38,7 @@ def tool( response_format: Literal["content", "content_and_artifact"] = "content", parse_docstring: bool = False, error_on_invalid_docstring: bool = True, + message_version: Literal["v0", "v1"] = "v0", ) -> BaseTool: ... @@ -51,6 +53,7 @@ def tool( response_format: Literal["content", "content_and_artifact"] = "content", parse_docstring: bool = False, error_on_invalid_docstring: bool = True, + message_version: Literal["v0", "v1"] = "v0", ) -> BaseTool: ... 
@@ -65,6 +68,7 @@ def tool( response_format: Literal["content", "content_and_artifact"] = "content", parse_docstring: bool = False, error_on_invalid_docstring: bool = True, + message_version: Literal["v0", "v1"] = "v0", ) -> Callable[[Union[Callable, Runnable]], BaseTool]: ... @@ -79,6 +83,7 @@ def tool( response_format: Literal["content", "content_and_artifact"] = "content", parse_docstring: bool = False, error_on_invalid_docstring: bool = True, + message_version: Literal["v0", "v1"] = "v0", ) -> Union[ BaseTool, Callable[[Union[Callable, Runnable]], BaseTool], @@ -118,6 +123,11 @@ def tool( error_on_invalid_docstring: if ``parse_docstring`` is provided, configure whether to raise ValueError on invalid Google Style docstrings. Defaults to True. + message_version: Version of ToolMessage to return given + :class:`~langchain_core.messages.content_blocks.ToolCall` input. + + If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`. + If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`. Returns: The tool. @@ -216,7 +226,7 @@ def tool( \"\"\" return bar - """ # noqa: D214, D410, D411 + """ # noqa: D214, D410, D411, E501 def _create_tool_factory( tool_name: str, @@ -274,6 +284,7 @@ def tool( response_format=response_format, parse_docstring=parse_docstring, error_on_invalid_docstring=error_on_invalid_docstring, + message_version=message_version, ) # If someone doesn't want a schema applied, we must treat it as # a simple string->string function @@ -290,6 +301,7 @@ def tool( return_direct=return_direct, coroutine=coroutine, response_format=response_format, + message_version=message_version, ) return _tool_factory @@ -383,6 +395,7 @@ def convert_runnable_to_tool( name: Optional[str] = None, description: Optional[str] = None, arg_types: Optional[dict[str, type]] = None, + message_version: Literal["v0", "v1"] = "v0", ) -> BaseTool: """Convert a Runnable into a BaseTool. @@ -392,10 +405,15 @@ def convert_runnable_to_tool( name: The name of the tool. Defaults to None. description: The description of the tool. Defaults to None. arg_types: The types of the arguments. Defaults to None. + message_version: Version of ToolMessage to return given + :class:`~langchain_core.messages.content_blocks.ToolCall` input. + + If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`. + If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`. Returns: The tool. - """ + """ # noqa: E501 if args_schema: runnable = runnable.with_types(input_type=args_schema) description = description or _get_description_from_runnable(runnable) @@ -408,6 +426,7 @@ def convert_runnable_to_tool( func=runnable.invoke, coroutine=runnable.ainvoke, description=description, + message_version=message_version, ) async def ainvoke_wrapper( @@ -435,4 +454,5 @@ def convert_runnable_to_tool( coroutine=ainvoke_wrapper, description=description, args_schema=args_schema, + message_version=message_version, ) diff --git a/libs/core/langchain_core/tools/retriever.py b/libs/core/langchain_core/tools/retriever.py index 002fa5e80d6..53de0b54a9a 100644 --- a/libs/core/langchain_core/tools/retriever.py +++ b/libs/core/langchain_core/tools/retriever.py @@ -72,6 +72,7 @@ def create_retriever_tool( document_prompt: Optional[BasePromptTemplate] = None, document_separator: str = "\n\n", response_format: Literal["content", "content_and_artifact"] = "content", + message_version: Literal["v0", "v1"] = "v1", ) -> Tool: r"""Create a tool to do retrieval of documents. 
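Note that, unlike the ``"v0"`` default used by ``tool()`` and ``StructuredTool`` elsewhere in this diff, ``create_retriever_tool`` defaults ``message_version`` to ``"v1"``. A minimal usage sketch (``retriever`` is assumed to be an existing ``BaseRetriever`` instance):

    from langchain_core.tools import create_retriever_tool

    retriever_tool = create_retriever_tool(
        retriever,
        "search_docs",
        "Search the project documentation.",
        message_version="v0",  # opt back into v0 ToolMessage output
    )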
@@ -88,10 +89,15 @@ def create_retriever_tool( "content_and_artifact" then the output is expected to be a two-tuple corresponding to the (content, artifact) of a ToolMessage (artifact being a list of documents in this case). Defaults to "content". + message_version: Version of ToolMessage to return given + :class:`~langchain_core.messages.content_blocks.ToolCall` input. + + If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`. + If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`. Returns: Tool class to pass to an agent. - """ + """ # noqa: E501 document_prompt = document_prompt or PromptTemplate.from_template("{page_content}") func = partial( _get_relevant_documents, @@ -114,4 +120,5 @@ def create_retriever_tool( coroutine=afunc, args_schema=RetrieverInput, response_format=response_format, + message_version=message_version, ) diff --git a/libs/core/langchain_core/tools/structured.py b/libs/core/langchain_core/tools/structured.py index a419a1ede62..a11ebb60c32 100644 --- a/libs/core/langchain_core/tools/structured.py +++ b/libs/core/langchain_core/tools/structured.py @@ -129,6 +129,7 @@ class StructuredTool(BaseTool): response_format: Literal["content", "content_and_artifact"] = "content", parse_docstring: bool = False, error_on_invalid_docstring: bool = False, + message_version: Literal["v0", "v1"] = "v0", **kwargs: Any, ) -> StructuredTool: """Create tool from a given function. @@ -157,6 +158,12 @@ class StructuredTool(BaseTool): error_on_invalid_docstring: if ``parse_docstring`` is provided, configure whether to raise ValueError on invalid Google Style docstrings. Defaults to False. + message_version: Version of ToolMessage to return given + :class:`~langchain_core.messages.content_blocks.ToolCall` input. + + If ``"v0"``, output will be a v0 :class:`~langchain_core.messages.tool.ToolMessage`. + If ``"v1"``, output will be a v1 :class:`~langchain_core.messages.v1.ToolMessage`. 
+ kwargs: Additional arguments to pass to the tool Returns: @@ -175,7 +182,7 @@ class StructuredTool(BaseTool): tool = StructuredTool.from_function(add) tool.run(1, 2) # 3 - """ + """ # noqa: E501 if func is not None: source_function = func elif coroutine is not None: @@ -232,6 +239,7 @@ class StructuredTool(BaseTool): description=description_, return_direct=return_direct, response_format=response_format, + message_version=message_version, **kwargs, ) diff --git a/libs/core/langchain_core/tracers/base.py b/libs/core/langchain_core/tracers/base.py index ee588606165..d8ef0480484 100644 --- a/libs/core/langchain_core/tracers/base.py +++ b/libs/core/langchain_core/tracers/base.py @@ -17,6 +17,7 @@ from typing_extensions import override from langchain_core.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler from langchain_core.exceptions import TracerException # noqa: F401 from langchain_core.tracers.core import _TracerCore +from langchain_core.v1.messages import AIMessage, AIMessageChunk, MessageV1 if TYPE_CHECKING: from collections.abc import Sequence @@ -54,7 +55,7 @@ class BaseTracer(_TracerCore, BaseCallbackHandler, ABC): def on_chat_model_start( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], + messages: Union[list[list[BaseMessage]], list[MessageV1]], *, run_id: UUID, tags: Optional[list[str]] = None, @@ -138,7 +139,9 @@ class BaseTracer(_TracerCore, BaseCallbackHandler, ABC): self, token: str, *, - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + chunk: Optional[ + Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk] + ] = None, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, @@ -190,7 +193,9 @@ class BaseTracer(_TracerCore, BaseCallbackHandler, ABC): ) @override - def on_llm_end(self, response: LLMResult, *, run_id: UUID, **kwargs: Any) -> Run: + def on_llm_end( + self, response: Union[LLMResult, AIMessage], *, run_id: UUID, **kwargs: Any + ) -> Run: """End a trace for an LLM run. 
Args: @@ -562,7 +567,7 @@ class AsyncBaseTracer(_TracerCore, AsyncCallbackHandler, ABC): async def on_chat_model_start( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], + messages: Union[list[list[BaseMessage]], list[MessageV1]], *, run_id: UUID, parent_run_id: Optional[UUID] = None, @@ -617,7 +622,9 @@ class AsyncBaseTracer(_TracerCore, AsyncCallbackHandler, ABC): self, token: str, *, - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + chunk: Optional[ + Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk] + ] = None, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, @@ -646,7 +653,7 @@ class AsyncBaseTracer(_TracerCore, AsyncCallbackHandler, ABC): @override async def on_llm_end( self, - response: LLMResult, + response: Union[LLMResult, AIMessage], *, run_id: UUID, parent_run_id: Optional[UUID] = None, @@ -882,7 +889,7 @@ class AsyncBaseTracer(_TracerCore, AsyncCallbackHandler, ABC): self, run: Run, token: str, - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]], + chunk: Optional[Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]], ) -> None: """Process new LLM token.""" diff --git a/libs/core/langchain_core/tracers/core.py b/libs/core/langchain_core/tracers/core.py index 0a10b06ecaf..c7074c9af2e 100644 --- a/libs/core/langchain_core/tracers/core.py +++ b/libs/core/langchain_core/tracers/core.py @@ -18,6 +18,7 @@ from typing import ( from langchain_core.exceptions import TracerException from langchain_core.load import dumpd +from langchain_core.messages.utils import convert_from_v1_message from langchain_core.outputs import ( ChatGeneration, ChatGenerationChunk, @@ -25,6 +26,12 @@ from langchain_core.outputs import ( LLMResult, ) from langchain_core.tracers.schemas import Run +from langchain_core.v1.messages import ( + AIMessage, + AIMessageChunk, + MessageV1, + MessageV1Types, +) if TYPE_CHECKING: from collections.abc import Coroutine, Sequence @@ -156,7 +163,7 @@ class _TracerCore(ABC): def _create_chat_model_run( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], + messages: Union[list[list[BaseMessage]], list[MessageV1]], run_id: UUID, tags: Optional[list[str]] = None, parent_run_id: Optional[UUID] = None, @@ -181,6 +188,12 @@ class _TracerCore(ABC): start_time = datetime.now(timezone.utc) if metadata: kwargs.update({"metadata": metadata}) + if isinstance(messages[0], MessageV1Types): + # Convert from v1 messages to BaseMessage + messages = [ + [convert_from_v1_message(msg) for msg in messages] # type: ignore[arg-type] + ] + messages = cast("list[list[BaseMessage]]", messages) return Run( id=run_id, parent_run_id=parent_run_id, @@ -230,7 +243,9 @@ class _TracerCore(ABC): self, token: str, run_id: UUID, - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + chunk: Optional[ + Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk] + ] = None, parent_run_id: Optional[UUID] = None, # noqa: ARG002 ) -> Run: """Append token event to LLM run and return the run.""" @@ -276,7 +291,15 @@ class _TracerCore(ABC): ) return llm_run - def _complete_llm_run(self, response: LLMResult, run_id: UUID) -> Run: + def _complete_llm_run( + self, response: Union[LLMResult, AIMessage], run_id: UUID + ) -> Run: + if isinstance(response, AIMessage): + response = LLMResult( + generations=[ + [ChatGeneration(message=convert_from_v1_message(response))] + ] + ) llm_run = self._get_run(run_id, run_type={"llm", "chat_model"}) if getattr(llm_run, "outputs", None) is None: llm_run.outputs = 
{} @@ -558,7 +581,7 @@ class _TracerCore(ABC): self, run: Run, # noqa: ARG002 token: str, # noqa: ARG002 - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]], # noqa: ARG002 + chunk: Optional[Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]], # noqa: ARG002 ) -> Union[None, Coroutine[Any, Any, None]]: """Process new LLM token.""" return None diff --git a/libs/core/langchain_core/tracers/event_stream.py b/libs/core/langchain_core/tracers/event_stream.py index e510356e6b6..32d3b399c7e 100644 --- a/libs/core/langchain_core/tracers/event_stream.py +++ b/libs/core/langchain_core/tracers/event_stream.py @@ -38,6 +38,7 @@ from langchain_core.runnables.utils import ( from langchain_core.tracers._streaming import _StreamingCallbackHandler from langchain_core.tracers.memory_stream import _MemoryStream from langchain_core.utils.aiter import aclosing, py_anext +from langchain_core.v1.messages import MessageV1 if TYPE_CHECKING: from collections.abc import AsyncIterator, Iterator, Sequence @@ -45,6 +46,8 @@ if TYPE_CHECKING: from langchain_core.documents import Document from langchain_core.runnables import Runnable, RunnableConfig from langchain_core.tracers.log_stream import LogEntry + from langchain_core.v1.messages import AIMessage as AIMessageV1 + from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1 logger = logging.getLogger(__name__) @@ -297,7 +300,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand async def on_chat_model_start( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], + messages: Union[list[list[BaseMessage]], list[MessageV1]], *, run_id: UUID, tags: Optional[list[str]] = None, @@ -307,6 +310,8 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand **kwargs: Any, ) -> None: """Start a trace for an LLM run.""" + # below cast is because type is converted in handle_event + messages = cast("list[list[BaseMessage]]", messages) name_ = _assign_name(name, serialized) run_type = "chat_model" @@ -407,13 +412,18 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand self, token: str, *, - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + chunk: Optional[ + Union[GenerationChunk, ChatGenerationChunk, AIMessageChunkV1] + ] = None, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any, ) -> None: """Run on new LLM token. 
Only available when streaming is enabled.""" run_info = self.run_map.get(run_id) + chunk = cast( + "Optional[Union[GenerationChunk, ChatGenerationChunk]]", chunk + ) # converted in handle_event chunk_: Union[GenerationChunk, BaseMessageChunk] if run_info is None: @@ -456,9 +466,10 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand @override async def on_llm_end( - self, response: LLMResult, *, run_id: UUID, **kwargs: Any + self, response: Union[LLMResult, AIMessageV1], *, run_id: UUID, **kwargs: Any ) -> None: """End a trace for an LLM run.""" + response = cast("LLMResult", response) # converted in handle_event run_info = self.run_map.pop(run_id) inputs_ = run_info["inputs"] diff --git a/libs/core/langchain_core/tracers/langchain.py b/libs/core/langchain_core/tracers/langchain.py index 4a6d0d82344..5d5f1e5ad80 100644 --- a/libs/core/langchain_core/tracers/langchain.py +++ b/libs/core/langchain_core/tracers/langchain.py @@ -5,7 +5,7 @@ from __future__ import annotations import logging from concurrent.futures import ThreadPoolExecutor from datetime import datetime, timezone -from typing import TYPE_CHECKING, Any, Optional, Union +from typing import TYPE_CHECKING, Any, Optional, Union, cast from uuid import UUID from langsmith import Client @@ -21,12 +21,15 @@ from typing_extensions import override from langchain_core.env import get_runtime_environment from langchain_core.load import dumpd +from langchain_core.messages.utils import convert_from_v1_message from langchain_core.tracers.base import BaseTracer from langchain_core.tracers.schemas import Run +from langchain_core.v1.messages import MessageV1Types if TYPE_CHECKING: from langchain_core.messages import BaseMessage from langchain_core.outputs import ChatGenerationChunk, GenerationChunk + from langchain_core.v1.messages import AIMessageChunk, MessageV1 logger = logging.getLogger(__name__) _LOGGED = set() @@ -113,7 +116,7 @@ class LangChainTracer(BaseTracer): def on_chat_model_start( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], + messages: Union[list[list[BaseMessage]], list[MessageV1]], *, run_id: UUID, tags: Optional[list[str]] = None, @@ -140,6 +143,12 @@ class LangChainTracer(BaseTracer): start_time = datetime.now(timezone.utc) if metadata: kwargs.update({"metadata": metadata}) + if isinstance(messages[0], MessageV1Types): + # Convert from v1 messages to BaseMessage + messages = [ + [convert_from_v1_message(msg) for msg in messages] # type: ignore[arg-type] + ] + messages = cast("list[list[BaseMessage]]", messages) chat_model_run = Run( id=run_id, parent_run_id=parent_run_id, @@ -232,7 +241,9 @@ class LangChainTracer(BaseTracer): self, token: str, run_id: UUID, - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + chunk: Optional[ + Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk] + ] = None, parent_run_id: Optional[UUID] = None, ) -> Run: """Append token event to LLM run and return the run.""" diff --git a/libs/core/langchain_core/tracers/log_stream.py b/libs/core/langchain_core/tracers/log_stream.py index 5246ea8456e..5ddc7e4ec69 100644 --- a/libs/core/langchain_core/tracers/log_stream.py +++ b/libs/core/langchain_core/tracers/log_stream.py @@ -34,6 +34,7 @@ if TYPE_CHECKING: from langchain_core.runnables.utils import Input, Output from langchain_core.tracers.schemas import Run + from langchain_core.v1.messages import AIMessageChunk class LogEntry(TypedDict): @@ -485,7 +486,7 @@ class LogStreamCallbackHandler(BaseTracer, 
_StreamingCallbackHandler): self, run: Run, token: str, - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]], + chunk: Optional[Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]], ) -> None: """Process new LLM token.""" index = self._key_map_by_run_id.get(run.id) diff --git a/libs/core/langchain_core/utils/function_calling.py b/libs/core/langchain_core/utils/function_calling.py index d7059fded47..b69e2c331fa 100644 --- a/libs/core/langchain_core/utils/function_calling.py +++ b/libs/core/langchain_core/utils/function_calling.py @@ -616,7 +616,7 @@ def convert_to_json_schema( @beta() def tool_example_to_messages( - input: str, # noqa: A002 + input: str, tool_calls: list[BaseModel], tool_outputs: Optional[list[str]] = None, *, diff --git a/libs/core/langchain_core/v1/__init__.py b/libs/core/langchain_core/v1/__init__.py new file mode 100644 index 00000000000..ad3d9e1cc1c --- /dev/null +++ b/libs/core/langchain_core/v1/__init__.py @@ -0,0 +1 @@ +"""LangChain v1.0 types.""" diff --git a/libs/core/langchain_core/v1/chat_models.py b/libs/core/langchain_core/v1/chat_models.py new file mode 100644 index 00000000000..710c90c9dfd --- /dev/null +++ b/libs/core/langchain_core/v1/chat_models.py @@ -0,0 +1,1047 @@ +"""Chat models for conversational AI.""" + +from __future__ import annotations + +import copy +import typing +import warnings +from abc import ABC, abstractmethod +from collections.abc import AsyncIterator, Iterator, Mapping, Sequence +from functools import cached_property +from operator import itemgetter +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + Optional, + Union, + cast, +) + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + field_validator, +) +from typing_extensions import TypeAlias, override + +from langchain_core.caches import BaseCache +from langchain_core.callbacks import ( + AsyncCallbackManager, + AsyncCallbackManagerForLLMRun, + CallbackManager, + CallbackManagerForLLMRun, + Callbacks, +) +from langchain_core.language_models._utils import _normalize_messages_v1 +from langchain_core.language_models.base import ( + LangSmithParams, + LanguageModelInput, + _get_token_ids_default_method, + _get_verbosity, +) +from langchain_core.load import dumpd +from langchain_core.messages import ( + convert_to_openai_image_block, + get_buffer_string, + is_data_content_block, +) +from langchain_core.messages.utils import ( + convert_from_v1_message, + convert_to_messages_v1, +) +from langchain_core.outputs import ( + ChatGeneration, + ChatGenerationChunk, +) +from langchain_core.prompt_values import PromptValue +from langchain_core.rate_limiters import BaseRateLimiter +from langchain_core.runnables import RunnableMap, RunnablePassthrough +from langchain_core.runnables.base import RunnableSerializable +from langchain_core.runnables.config import ensure_config, run_in_executor +from langchain_core.tracers._streaming import _StreamingCallbackHandler +from langchain_core.utils.function_calling import ( + convert_to_json_schema, + convert_to_openai_tool, +) +from langchain_core.utils.pydantic import TypeBaseModel, is_basemodel_subclass +from langchain_core.v1.messages import AIMessage as AIMessageV1 +from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1 +from langchain_core.v1.messages import HumanMessage as HumanMessageV1 +from langchain_core.v1.messages import MessageV1, add_ai_message_chunks + +if TYPE_CHECKING: + from langchain_core.output_parsers.base import OutputParserLike + from langchain_core.runnables import 
Runnable, RunnableConfig + from langchain_core.tools import BaseTool + + +def _generate_response_from_error(error: BaseException) -> list[AIMessageV1]: + if hasattr(error, "response"): + response = error.response + metadata: dict = {} + if hasattr(response, "headers"): + try: + metadata["headers"] = dict(response.headers) + except Exception: + metadata["headers"] = None + if hasattr(response, "status_code"): + metadata["status_code"] = response.status_code + if hasattr(error, "request_id"): + metadata["request_id"] = error.request_id + # Permit response_metadata without model_name, model_provider fields + generations = [AIMessageV1(content=[], response_metadata=metadata)] # type: ignore[arg-type] + else: + generations = [] + + return generations + + +def _format_for_tracing(messages: Sequence[MessageV1]) -> list[MessageV1]: + """Format messages for tracing in on_chat_model_start. + + - Update image content blocks to OpenAI Chat Completions format (backward + compatibility). + - Add "type" key to content blocks that have a single key. + + Args: + messages: List of messages to format. + + Returns: + List of messages formatted for tracing. + """ + messages_to_trace = [] + for message in messages: + message_to_trace = message + for idx, block in enumerate(message.content): + # Update image content blocks to OpenAI # Chat Completions format. + if ( + block["type"] == "image" + and is_data_content_block(block) # type: ignore[arg-type] # permit unnecessary runtime check + and block.get("source_type") != "id" + ): + if message_to_trace is message: + # Shallow copy + message_to_trace = copy.copy(message) + message_to_trace.content = list(message_to_trace.content) + + # TODO: for tracing purposes we store non-standard types (OpenAI format) + # in message content. Consider typing these block formats. + message_to_trace.content[idx] = convert_to_openai_image_block(block) # type: ignore[arg-type, call-overload] + else: + pass + messages_to_trace.append(message_to_trace) + + return messages_to_trace + + +def generate_from_stream(stream: Iterator[AIMessageChunkV1]) -> AIMessageV1: + """Generate from a stream. + + Args: + stream: Iterator of AIMessageChunkV1. + + Returns: + AIMessageV1: aggregated message. + """ + generation = next(stream, None) + if generation: + generation += list(stream) + if generation is None: + msg = "No generations found in stream." + raise ValueError(msg) + return generation.to_message() + + +async def agenerate_from_stream( + stream: AsyncIterator[AIMessageChunkV1], +) -> AIMessageV1: + """Async generate from a stream. + + Args: + stream: Iterator of AIMessageChunkV1. + + Returns: + AIMessageV1: aggregated message. + """ + chunks = [chunk async for chunk in stream] + return await run_in_executor(None, generate_from_stream, iter(chunks)) + + +def _format_ls_structured_output(ls_structured_output_format: Optional[dict]) -> dict: + if ls_structured_output_format: + try: + ls_structured_output_format_dict = { + "ls_structured_output_format": { + "kwargs": ls_structured_output_format.get("kwargs", {}), + "schema": convert_to_json_schema( + ls_structured_output_format["schema"] + ), + } + } + except ValueError: + ls_structured_output_format_dict = {} + else: + ls_structured_output_format_dict = {} + + return ls_structured_output_format_dict + + +class BaseChatModel(RunnableSerializable[LanguageModelInput, AIMessageV1], ABC): + """Base class for chat models. + + Key imperative methods: + Methods that actually call the underlying model. 
+ + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | Method | Input | Output | Description | + +===========================+================================================================+=====================================================================+==================================================================================================+ + | `invoke` | str | list[dict | tuple | BaseMessage] | PromptValue | BaseMessage | A single chat model call. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `ainvoke` | ''' | BaseMessage | Defaults to running invoke in an async executor. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `stream` | ''' | Iterator[BaseMessageChunk] | Defaults to yielding output of invoke. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `astream` | ''' | AsyncIterator[BaseMessageChunk] | Defaults to yielding output of ainvoke. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `astream_events` | ''' | AsyncIterator[StreamEvent] | Event types: 'on_chat_model_start', 'on_chat_model_stream', 'on_chat_model_end'. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `batch` | list['''] | list[BaseMessage] | Defaults to running invoke in concurrent threads. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `abatch` | list['''] | list[BaseMessage] | Defaults to running ainvoke in concurrent threads. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `batch_as_completed` | list['''] | Iterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running invoke in concurrent threads. 
| + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + | `abatch_as_completed` | list['''] | AsyncIterator[tuple[int, Union[BaseMessage, Exception]]] | Defaults to running ainvoke in concurrent threads. | + +---------------------------+----------------------------------------------------------------+---------------------------------------------------------------------+--------------------------------------------------------------------------------------------------+ + + This table provides a brief overview of the main imperative methods. Please see the base Runnable reference for full documentation. + + Key declarative methods: + Methods for creating another Runnable using the ChatModel. + + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | Method | Description | + +==================================+===========================================================================================================+ + | `bind_tools` | Create ChatModel that can call tools. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | `with_structured_output` | Create wrapper that structures model output using schema. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | `with_retry` | Create wrapper that retries model calls on failure. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | `with_fallbacks` | Create wrapper that falls back to other models on failure. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | `configurable_fields` | Specify init args of the model that can be configured at runtime via the RunnableConfig. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + | `configurable_alternatives` | Specify alternative models which can be swapped in at runtime via the RunnableConfig. | + +----------------------------------+-----------------------------------------------------------------------------------------------------------+ + + This table provides a brief overview of the main declarative methods. Please see the reference for each method for full documentation. + + Creating custom chat model: + Custom chat model implementations should inherit from this class. + Please reference the table below for information about which + methods and properties are required or optional for implementations. 
+
+    +----------------------------------+--------------------------------------------------------------------+-------------------+
+    | Method/Property                  | Description                                                        | Required/Optional |
+    +==================================+====================================================================+===================+
+    | `_generate`                      | Use to generate a chat result from a prompt                        | Required          |
+    +----------------------------------+--------------------------------------------------------------------+-------------------+
+    | `_llm_type` (property)           | Used to uniquely identify the type of the model. Used for logging. | Required          |
+    +----------------------------------+--------------------------------------------------------------------+-------------------+
+    | `_identifying_params` (property) | Represent model parameterization for tracing purposes.             | Optional          |
+    +----------------------------------+--------------------------------------------------------------------+-------------------+
+    | `_stream`                        | Use to implement streaming                                         | Optional          |
+    +----------------------------------+--------------------------------------------------------------------+-------------------+
+    | `_agenerate`                     | Use to implement a native async method                             | Optional          |
+    +----------------------------------+--------------------------------------------------------------------+-------------------+
+    | `_astream`                       | Use to implement async version of `_stream`                        | Optional          |
+    +----------------------------------+--------------------------------------------------------------------+-------------------+
+
+    Follow the guide for more information on how to implement a custom Chat Model:
+    [Guide](https://python.langchain.com/docs/how_to/custom_chat_model/).
+
+    """  # noqa: E501
+
+    rate_limiter: Optional[BaseRateLimiter] = Field(default=None, exclude=True)
+    "An optional rate limiter to use for limiting the number of requests."
+
+    disable_streaming: Union[bool, Literal["tool_calling"]] = False
+    """Whether to disable streaming for this model.
+
+    If streaming is bypassed, then ``stream()``/``astream()``/``astream_events()``
+    will defer to ``invoke()``/``ainvoke()``.
+
+    - If True, will always bypass streaming case.
+    - If ``'tool_calling'``, will bypass streaming case only when the model is called
+      with a ``tools`` keyword argument. In other words, LangChain will automatically
+      switch to non-streaming behavior (``invoke()``) only when the tools argument is
+      provided. This offers the best of both worlds.
+    - If False (default), will always use streaming case if available.
+
+    The main reason for this flag is that code might be written using ``.stream()``
+    and a user may want to swap out a given model for another model whose
+    implementation does not properly support streaming.
+    """
+
+    cache: Union[BaseCache, bool, None] = Field(default=None, exclude=True)
+    """Whether to cache the response.
+
+    * If true, will use the global cache.
+    * If false, will not use a cache.
+    * If None, will use the global cache if it's set, otherwise no cache.
+    * If instance of BaseCache, will use the provided cache.
+
+    Caching is not currently supported for streaming methods of models.
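+
+    For illustration, a minimal sketch (``MyChatModel`` is a hypothetical
+    subclass of this class):
+
+    .. code-block:: python
+
+        from langchain_core.caches import InMemoryCache
+
+        model = MyChatModel(cache=InMemoryCache())  # per-instance cache
+        model = MyChatModel(cache=True)  # opt in to the global cache, if set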
+ """ + verbose: bool = Field(default_factory=_get_verbosity, exclude=True, repr=False) + """Whether to print out response text.""" + callbacks: Callbacks = Field(default=None, exclude=True) + """Callbacks to add to the run trace.""" + tags: Optional[list[str]] = Field(default=None, exclude=True) + """Tags to add to the run trace.""" + metadata: Optional[dict[str, Any]] = Field(default=None, exclude=True) + """Metadata to add to the run trace.""" + custom_get_token_ids: Optional[Callable[[str], list[int]]] = Field( + default=None, exclude=True + ) + """Optional encoder to use for counting tokens.""" + + model_config = ConfigDict( + arbitrary_types_allowed=True, + ) + + @cached_property + def _serialized(self) -> dict[str, Any]: + return dumpd(self) + + # --- Runnable methods --- + + @field_validator("verbose", mode="before") + def set_verbose(cls, verbose: Optional[bool]) -> bool: # noqa: FBT001 + """If verbose is None, set it. + + This allows users to pass in None as verbose to access the global setting. + + Args: + verbose: The verbosity setting to use. + + Returns: + The verbosity setting to use. + """ + if verbose is None: + return _get_verbosity() + return verbose + + @property + @override + def InputType(self) -> TypeAlias: + """Get the input type for this runnable.""" + from langchain_core.prompt_values import ( + ChatPromptValueConcrete, + StringPromptValue, + ) + + # This is a version of LanguageModelInput which replaces the abstract + # base class BaseMessage with a union of its subclasses, which makes + # for a much better schema. + return Union[ + str, + Union[StringPromptValue, ChatPromptValueConcrete], + list[MessageV1], + ] + + @property + @override + def OutputType(self) -> Any: + """Get the output type for this runnable.""" + return AIMessageV1 + + def _convert_input(self, model_input: LanguageModelInput) -> list[MessageV1]: + if isinstance(model_input, PromptValue): + return model_input.to_messages(message_version="v1") + if isinstance(model_input, str): + return [HumanMessageV1(content=model_input)] + if isinstance(model_input, Sequence): + return convert_to_messages_v1(model_input) + msg = ( + f"Invalid input type {type(model_input)}. " + "Must be a PromptValue, str, or list of Messages." + ) + raise ValueError(msg) + + def _should_stream( + self, + *, + async_api: bool, + run_manager: Optional[ + Union[CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun] + ] = None, + **kwargs: Any, + ) -> bool: + """Determine if a given model call should hit the streaming API.""" + sync_not_implemented = type(self)._stream == BaseChatModel._stream # noqa: SLF001 + async_not_implemented = type(self)._astream == BaseChatModel._astream # noqa: SLF001 + + # Check if streaming is implemented. + if (not async_api) and sync_not_implemented: + return False + # Note, since async falls back to sync we check both here. + if async_api and async_not_implemented and sync_not_implemented: + return False + + # Check if streaming has been disabled on this instance. + if self.disable_streaming is True: + return False + # We assume tools are passed in via "tools" kwarg in all models. + if self.disable_streaming == "tool_calling" and kwargs.get("tools"): + return False + + # Check if a runtime streaming flag has been passed in. + if "stream" in kwargs: + return kwargs["stream"] + + # Check if any streaming callback handlers have been passed in. 
+ handlers = run_manager.handlers if run_manager else [] + return any(isinstance(h, _StreamingCallbackHandler) for h in handlers) + + @override + def invoke( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> AIMessageV1: + config = ensure_config(config) + messages = self._convert_input(input) + ls_structured_output_format = kwargs.pop( + "ls_structured_output_format", None + ) or kwargs.pop("structured_output_format", None) + ls_structured_output_format_dict = _format_ls_structured_output( + ls_structured_output_format + ) + + params = self._get_invocation_params(**kwargs) + options = {**kwargs, **ls_structured_output_format_dict} + inheritable_metadata = { + **(config.get("metadata") or {}), + **self._get_ls_params(**kwargs), + } + callback_manager = CallbackManager.configure( + config.get("callbacks"), + self.callbacks, + self.verbose, + config.get("tags"), + self.tags, + inheritable_metadata, + self.metadata, + ) + (run_manager,) = callback_manager.on_chat_model_start( + self._serialized, + _format_for_tracing(messages), + invocation_params=params, + options=options, + name=config.get("run_name"), + run_id=config.pop("run_id", None), + batch_size=1, + ) + + if self.rate_limiter: + self.rate_limiter.acquire(blocking=True) + + input_messages = _normalize_messages_v1(messages) + + if self._should_stream(async_api=False, **kwargs): + chunks: list[AIMessageChunkV1] = [] + try: + for msg in self._stream(input_messages, **kwargs): + run_manager.on_llm_new_token(msg.text) + chunks.append(msg) + except BaseException as e: + run_manager.on_llm_error(e, response=_generate_response_from_error(e)) + raise + full_message = add_ai_message_chunks(chunks[0], *chunks[1:]).to_message() + else: + try: + full_message = self._invoke(input_messages, **kwargs) + except BaseException as e: + run_manager.on_llm_error(e) + raise + + run_manager.on_llm_end(full_message) + return full_message + + @override + async def ainvoke( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> AIMessageV1: + config = ensure_config(config) + messages = self._convert_input(input) + ls_structured_output_format = kwargs.pop( + "ls_structured_output_format", None + ) or kwargs.pop("structured_output_format", None) + ls_structured_output_format_dict = _format_ls_structured_output( + ls_structured_output_format + ) + + params = self._get_invocation_params(**kwargs) + options = {**kwargs, **ls_structured_output_format_dict} + inheritable_metadata = { + **(config.get("metadata") or {}), + **self._get_ls_params(**kwargs), + } + callback_manager = AsyncCallbackManager.configure( + config.get("callbacks"), + self.callbacks, + self.verbose, + config.get("tags"), + self.tags, + inheritable_metadata, + self.metadata, + ) + (run_manager,) = await callback_manager.on_chat_model_start( + self._serialized, + _format_for_tracing(messages), + invocation_params=params, + options=options, + name=config.get("run_name"), + run_id=config.pop("run_id", None), + batch_size=1, + ) + + if self.rate_limiter: + await self.rate_limiter.aacquire(blocking=True) + + # TODO: type openai image, audio, file types and permit in MessageV1 + input_messages = _normalize_messages_v1(messages) + + if self._should_stream(async_api=True, **kwargs): + chunks: list[AIMessageChunkV1] = [] + try: + async for msg in self._astream(input_messages, **kwargs): + await run_manager.on_llm_new_token(msg.text) + chunks.append(msg) + except BaseException as e: + await 
run_manager.on_llm_error( + e, response=_generate_response_from_error(e) + ) + raise + full_message = add_ai_message_chunks(chunks[0], *chunks[1:]).to_message() + else: + try: + full_message = await self._ainvoke(input_messages, **kwargs) + except BaseException as e: + await run_manager.on_llm_error( + e, response=_generate_response_from_error(e) + ) + raise + + await run_manager.on_llm_end(full_message) + return full_message + + @override + def stream( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> Iterator[AIMessageChunkV1]: + if not self._should_stream(async_api=False, **{**kwargs, "stream": True}): + # model doesn't implement streaming, so use default implementation + yield cast( + "AIMessageChunkV1", + self.invoke(input, config=config, **kwargs), + ) + else: + config = ensure_config(config) + messages = self._convert_input(input) + ls_structured_output_format = kwargs.pop( + "ls_structured_output_format", None + ) or kwargs.pop("structured_output_format", None) + ls_structured_output_format_dict = _format_ls_structured_output( + ls_structured_output_format + ) + + params = self._get_invocation_params(**kwargs) + options = {**kwargs, **ls_structured_output_format_dict} + inheritable_metadata = { + **(config.get("metadata") or {}), + **self._get_ls_params(**kwargs), + } + callback_manager = CallbackManager.configure( + config.get("callbacks"), + self.callbacks, + self.verbose, + config.get("tags"), + self.tags, + inheritable_metadata, + self.metadata, + ) + (run_manager,) = callback_manager.on_chat_model_start( + self._serialized, + _format_for_tracing(messages), + invocation_params=params, + options=options, + name=config.get("run_name"), + run_id=config.pop("run_id", None), + batch_size=1, + ) + + chunks: list[AIMessageChunkV1] = [] + + if self.rate_limiter: + self.rate_limiter.acquire(blocking=True) + + try: + # TODO: replace this with something for new messages + input_messages = _normalize_messages_v1(messages) + for msg in self._stream(input_messages, **kwargs): + run_manager.on_llm_new_token(msg.text) + chunks.append(msg) + yield msg + + if msg.chunk_position != "last": + yield (AIMessageChunkV1([], chunk_position="last")) + except BaseException as e: + run_manager.on_llm_error(e, response=_generate_response_from_error(e)) + raise + + msg = add_ai_message_chunks(chunks[0], *chunks[1:]) + run_manager.on_llm_end(msg) + + @override + async def astream( + self, + input: LanguageModelInput, + config: Optional[RunnableConfig] = None, + **kwargs: Any, + ) -> AsyncIterator[AIMessageChunkV1]: + if not self._should_stream(async_api=True, **{**kwargs, "stream": True}): + # No async or sync stream is implemented, so fall back to ainvoke + yield cast( + "AIMessageChunkV1", + await self.ainvoke(input, config=config, **kwargs), + ) + return + + config = ensure_config(config) + messages = self._convert_input(input) + + ls_structured_output_format = kwargs.pop( + "ls_structured_output_format", None + ) or kwargs.pop("structured_output_format", None) + ls_structured_output_format_dict = _format_ls_structured_output( + ls_structured_output_format + ) + + params = self._get_invocation_params(**kwargs) + options = {**kwargs, **ls_structured_output_format_dict} + inheritable_metadata = { + **(config.get("metadata") or {}), + **self._get_ls_params(**kwargs), + } + callback_manager = AsyncCallbackManager.configure( + config.get("callbacks"), + self.callbacks, + self.verbose, + config.get("tags"), + self.tags, + inheritable_metadata, + 
self.metadata, + ) + (run_manager,) = await callback_manager.on_chat_model_start( + self._serialized, + _format_for_tracing(messages), + invocation_params=params, + options=options, + name=config.get("run_name"), + run_id=config.pop("run_id", None), + batch_size=1, + ) + + if self.rate_limiter: + await self.rate_limiter.aacquire(blocking=True) + + chunks: list[AIMessageChunkV1] = [] + + try: + input_messages = _normalize_messages_v1(messages) + async for msg in self._astream( + input_messages, + **kwargs, + ): + await run_manager.on_llm_new_token(msg.text) + chunks.append(msg) + yield msg + if msg.chunk_position != "last": + yield (AIMessageChunkV1([], chunk_position="last")) + except BaseException as e: + await run_manager.on_llm_error(e, response=_generate_response_from_error(e)) + raise + + msg = add_ai_message_chunks(chunks[0], *chunks[1:]) + await run_manager.on_llm_end(msg) + + # --- Custom methods --- + + def _combine_llm_outputs(self, llm_outputs: list[Optional[dict]]) -> dict: # noqa: ARG002 + return {} + + def _get_invocation_params( + self, + stop: Optional[list[str]] = None, + **kwargs: Any, + ) -> dict: + params = self.dump() + params["stop"] = stop + return {**params, **kwargs} + + def _get_ls_params( + self, + stop: Optional[list[str]] = None, + **kwargs: Any, + ) -> LangSmithParams: + """Get standard params for tracing.""" + # get default provider from class name + default_provider = self.__class__.__name__ + if default_provider.startswith("Chat"): + default_provider = default_provider[4:].lower() + elif default_provider.endswith("Chat"): + default_provider = default_provider[:-4] + default_provider = default_provider.lower() + + ls_params = LangSmithParams(ls_provider=default_provider, ls_model_type="chat") + if stop: + ls_params["ls_stop"] = stop + + # model + if hasattr(self, "model") and isinstance(self.model, str): + ls_params["ls_model_name"] = self.model + elif hasattr(self, "model_name") and isinstance(self.model_name, str): + ls_params["ls_model_name"] = self.model_name + + # temperature + if "temperature" in kwargs and isinstance(kwargs["temperature"], float): + ls_params["ls_temperature"] = kwargs["temperature"] + elif hasattr(self, "temperature") and isinstance(self.temperature, float): + ls_params["ls_temperature"] = self.temperature + + # max_tokens + if "max_tokens" in kwargs and isinstance(kwargs["max_tokens"], int): + ls_params["ls_max_tokens"] = kwargs["max_tokens"] + elif hasattr(self, "max_tokens") and isinstance(self.max_tokens, int): + ls_params["ls_max_tokens"] = self.max_tokens + + return ls_params + + def _get_llm_string(self, stop: Optional[list[str]] = None, **kwargs: Any) -> str: + params = self._get_invocation_params(stop=stop, **kwargs) + params = {**params, **kwargs} + return str(sorted(params.items())) + + def _invoke( + self, + messages: list[MessageV1], + **kwargs: Any, + ) -> AIMessageV1: + raise NotImplementedError + + async def _ainvoke( + self, + messages: list[MessageV1], + **kwargs: Any, + ) -> AIMessageV1: + return await run_in_executor( + None, + self._invoke, + messages, + **kwargs, + ) + + def _stream( + self, + messages: list[MessageV1], + **kwargs: Any, + ) -> Iterator[AIMessageChunkV1]: + raise NotImplementedError + + async def _astream( + self, + messages: list[MessageV1], + **kwargs: Any, + ) -> AsyncIterator[AIMessageChunkV1]: + iterator = await run_in_executor( + None, + self._stream, + messages, + **kwargs, + ) + done = object() + while True: + item = await run_in_executor( + None, + next, + iterator, + done, + ) + if item 
is done: + break + yield item # type: ignore[misc] + + @property + @abstractmethod + def _llm_type(self) -> str: + """Return type of chat model.""" + + def dump(self, **kwargs: Any) -> dict: # noqa: ARG002 + """Return a dictionary of the LLM.""" + starter_dict = dict(self._identifying_params) + starter_dict["_type"] = self._llm_type + return starter_dict + + def bind_tools( + self, + tools: Sequence[ + Union[typing.Dict[str, Any], type, Callable, BaseTool] # noqa: UP006 + ], + *, + tool_choice: Optional[Union[str]] = None, + **kwargs: Any, + ) -> Runnable[LanguageModelInput, AIMessageV1]: + """Bind tools to the model. + + Args: + tools: Sequence of tools to bind to the model. + tool_choice: The tool to use. If "any" then any tool can be used. + + Returns: + A Runnable that returns a message. + """ + raise NotImplementedError + + def with_structured_output( + self, + schema: Union[typing.Dict, type], # noqa: UP006 + *, + include_raw: bool = False, + **kwargs: Any, + ) -> Runnable[LanguageModelInput, Union[typing.Dict, BaseModel]]: # noqa: UP006 + """Model wrapper that returns outputs formatted to match the given schema. + + Args: + schema: + The output schema. Can be passed in as: + - an OpenAI function/tool schema, + - a JSON Schema, + - a TypedDict class, + - or a Pydantic class. + If ``schema`` is a Pydantic class then the model output will be a + Pydantic instance of that class, and the model-generated fields will be + validated by the Pydantic class. Otherwise the model output will be a + dict and will not be validated. See :meth:`langchain_core.utils.function_calling.convert_to_openai_tool` + for more on how to properly specify types and descriptions of + schema fields when specifying a Pydantic or TypedDict class. + + include_raw: + If False then only the parsed structured output is returned. If + an error occurs during model output parsing it will be raised. If True + then both the raw model response (a BaseMessage) and the parsed model + response will be returned. If an error occurs during output parsing it + will be caught and returned as well. The final output is always a dict + with keys "raw", "parsed", and "parsing_error". + + Returns: + A Runnable that takes same inputs as a :class:`langchain_core.language_models.chat.BaseChatModel`. + + If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs + an instance of ``schema`` (i.e., a Pydantic object). + + Otherwise, if ``include_raw`` is False then Runnable outputs a dict. + + If ``include_raw`` is True, then Runnable outputs a dict with keys: + - ``"raw"``: BaseMessage + - ``"parsed"``: None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. + - ``"parsing_error"``: Optional[BaseException] + + Example: Pydantic schema (include_raw=False): + .. code-block:: python + + from pydantic import BaseModel + + class AnswerWithJustification(BaseModel): + '''An answer to the user question along with justification for the answer.''' + answer: str + justification: str + + llm = ChatModel(model="model-name", temperature=0) + structured_llm = llm.with_structured_output(AnswerWithJustification) + + structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + + # -> AnswerWithJustification( + # answer='They weigh the same', + # justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.' + # ) + + Example: Pydantic schema (include_raw=True): + .. 
code-block:: python + + from pydantic import BaseModel + + class AnswerWithJustification(BaseModel): + '''An answer to the user question along with justification for the answer.''' + answer: str + justification: str + + llm = ChatModel(model="model-name", temperature=0) + structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True) + + structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + # -> { + # 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}), + # 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'), + # 'parsing_error': None + # } + + Example: Dict schema (include_raw=False): + .. code-block:: python + + from pydantic import BaseModel + from langchain_core.utils.function_calling import convert_to_openai_tool + + class AnswerWithJustification(BaseModel): + '''An answer to the user question along with justification for the answer.''' + answer: str + justification: str + + dict_schema = convert_to_openai_tool(AnswerWithJustification) + llm = ChatModel(model="model-name", temperature=0) + structured_llm = llm.with_structured_output(dict_schema) + + structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") + # -> { + # 'answer': 'They weigh the same', + # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.' + # } + + .. versionchanged:: 0.2.26 + + Added support for TypedDict class. + """ # noqa: E501 + _ = kwargs.pop("method", None) + _ = kwargs.pop("strict", None) + if kwargs: + msg = f"Received unsupported arguments {kwargs}" + raise ValueError(msg) + + from langchain_core.output_parsers.openai_tools import ( + JsonOutputKeyToolsParser, + PydanticToolsParser, + ) + + if type(self).bind_tools is BaseChatModel.bind_tools: + msg = "with_structured_output is not implemented for this model." 
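+            # The default structured-output path below is built on tool
+            # calling, so models must override bind_tools to use it.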
+ raise NotImplementedError(msg) + + llm = self.bind_tools( + [schema], + tool_choice="any", + ls_structured_output_format={ + "kwargs": {"method": "function_calling"}, + "schema": schema, + }, + ) + if isinstance(schema, type) and is_basemodel_subclass(schema): + output_parser: OutputParserLike = PydanticToolsParser( + tools=[cast("TypeBaseModel", schema)], first_tool_only=True + ) + else: + key_name = convert_to_openai_tool(schema)["function"]["name"] + output_parser = JsonOutputKeyToolsParser( + key_name=key_name, first_tool_only=True + ) + if include_raw: + parser_assign = RunnablePassthrough.assign( + parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None + ) + parser_none = RunnablePassthrough.assign(parsed=lambda _: None) + parser_with_fallback = parser_assign.with_fallbacks( + [parser_none], exception_key="parsing_error" + ) + return RunnableMap(raw=llm) | parser_with_fallback + return llm | output_parser + + @property + def _identifying_params(self) -> Mapping[str, Any]: + """Get the identifying parameters.""" + return self.lc_attributes + + def get_token_ids(self, text: str) -> list[int]: + """Return the ordered ids of the tokens in a text. + + Args: + text: The string input to tokenize. + + Returns: + A list of ids corresponding to the tokens in the text, in order they occur + in the text. + """ + if self.custom_get_token_ids is not None: + return self.custom_get_token_ids(text) + return _get_token_ids_default_method(text) + + def get_num_tokens(self, text: str) -> int: + """Get the number of tokens present in the text. + + Useful for checking if an input fits in a model's context window. + + Args: + text: The string input to tokenize. + + Returns: + The integer number of tokens in the text. + """ + return len(self.get_token_ids(text)) + + def get_num_tokens_from_messages( + self, + messages: list[MessageV1], + tools: Optional[Sequence] = None, + ) -> int: + """Get the number of tokens in the messages. + + Useful for checking if an input fits in a model's context window. + + **Note**: the base implementation of get_num_tokens_from_messages ignores + tool schemas. + + Args: + messages: The message inputs to tokenize. + tools: If provided, sequence of dict, BaseModel, function, or BaseTools + to be converted to tool schemas. + + Returns: + The sum of the number of tokens across the messages. + """ + messages_v0 = [convert_from_v1_message(message) for message in messages] + if tools is not None: + warnings.warn( + "Counting tokens in tool schemas is not yet supported. Ignoring tools.", + stacklevel=2, + ) + return sum(self.get_num_tokens(get_buffer_string([m])) for m in messages_v0) + + +def _gen_info_and_msg_metadata( + generation: Union[ChatGeneration, ChatGenerationChunk], +) -> dict: + return { + **(generation.generation_info or {}), + **generation.message.response_metadata, + } diff --git a/libs/core/langchain_core/v1/messages.py b/libs/core/langchain_core/v1/messages.py new file mode 100644 index 00000000000..159e2faacaf --- /dev/null +++ b/libs/core/langchain_core/v1/messages.py @@ -0,0 +1,755 @@ +"""LangChain v1.0.0 message format. + +Each message has content that may be comprised of content blocks, defined under +``langchain_core.messages.content_blocks``. 
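+
+Example of constructing and reading messages (a minimal sketch using the types
+defined in this module):
+
+.. code-block:: python
+
+    from langchain_core.v1.messages import AIMessage, HumanMessage
+
+    human = HumanMessage("What's the weather?")
+    ai = AIMessage("It's sunny.")
+
+    # String content is normalized to a list of content blocks.
+    assert human.content == [{"type": "text", "text": "What's the weather?"}]
+    assert ai.text == "It's sunny."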
+"""
+
+import uuid
+from dataclasses import dataclass, field
+from typing import Any, Literal, Optional, Union, cast, get_args
+
+from pydantic import BaseModel
+from typing_extensions import TypedDict
+
+import langchain_core.messages.content_blocks as types
+from langchain_core.messages.ai import (
+    _LC_AUTO_PREFIX,
+    _LC_ID_PREFIX,
+    UsageMetadata,
+    add_usage,
+)
+from langchain_core.messages.base import merge_content
+from langchain_core.messages.tool import ToolOutputMixin
+from langchain_core.messages.tool import invalid_tool_call as create_invalid_tool_call
+from langchain_core.messages.tool import tool_call as create_tool_call
+from langchain_core.utils._merge import merge_dicts
+from langchain_core.utils.json import parse_partial_json
+
+
+def _ensure_id(id_val: Optional[str]) -> str:
+    """Ensure the ID is a valid string, generating a new UUID if not provided.
+
+    Auto-generated UUIDs are prefixed by ``'lc_'`` to indicate they are
+    LangChain-generated IDs.
+
+    Args:
+        id_val: Optional string ID value to validate.
+
+    Returns:
+        A valid string ID, either the provided value or a newly generated,
+        prefixed UUID.
+    """
+    return id_val or f"{_LC_AUTO_PREFIX}{uuid.uuid4()}"
+
+
+class ResponseMetadata(TypedDict, total=False):
+    """Metadata about the response from the AI provider.
+
+    Contains additional information returned by the provider, such as
+    response headers, service tiers, log probabilities, system fingerprints, etc.
+
+    Because the class is declared with ``total=False``, keys beyond those typed
+    here are permitted, allowing provider-specific metadata to be included
+    without breaking the type definition.
+    """
+
+    model_provider: str
+    """Name of the provider that created the message (e.g., ``openai``)."""
+
+    model_name: str
+    """Name of the model that generated the message."""
+
+
+@dataclass
+class AIMessage:
+    """A message generated by an AI assistant.
+
+    Represents a response from an AI model, including text content, tool calls,
+    and metadata about the generation process.
+
+    Attributes:
+        id: Unique identifier for the message.
+        type: Message type identifier, always "ai".
+        name: Optional human-readable name for the message.
+        lc_version: Encoding version for the message.
+        content: List of content blocks containing the message data.
+        tool_calls: Optional list of tool calls made by the AI.
+        invalid_tool_calls: Optional list of tool calls that failed validation.
+        usage_metadata: Optional usage metadata, such as token counts.
+        response_metadata: Metadata about the response from the provider.
+        parsed: Auto-parsed message contents, if applicable.
+    """
+
+    type: Literal["ai"] = "ai"
+    """The type of the message. Must be a string that is unique to the message type.
+
+    The purpose of this field is to allow for easy identification of the message type
+    when deserializing messages.
+    """
+
+    name: Optional[str] = None
+    """An optional name for the message.
+
+    This can be used to provide a human-readable name for the message.
+
+    Usage of this field is optional, and whether it's used or not is up to the
+    model implementation.
+    """
+
+    id: Optional[str] = None
+    """Unique identifier for the message.
+
+    If the provider assigns a meaningful ID, it should be used here. Otherwise, a
+    LangChain-generated ID will be used.
+    """
+
+    lc_version: str = "v1"
+    """Encoding version for the message.
Used for serialization.""" + + content: list[types.ContentBlock] = field(default_factory=list) + """Message content as a list of content blocks.""" + + usage_metadata: Optional[UsageMetadata] = None + """If provided, usage metadata for a message, such as token counts.""" + + response_metadata: ResponseMetadata = field( + default_factory=lambda: ResponseMetadata() + ) + """Metadata about the response. + + This field should include non-standard data returned by the provider, such as + response headers, service tiers, or log probabilities. + """ + + parsed: Optional[Union[dict[str, Any], BaseModel]] = None + """Auto-parsed message contents, if applicable.""" + + def __init__( + self, + content: Union[str, list[types.ContentBlock]], + id: Optional[str] = None, + name: Optional[str] = None, + lc_version: str = "v1", + response_metadata: Optional[ResponseMetadata] = None, + usage_metadata: Optional[UsageMetadata] = None, + tool_calls: Optional[list[types.ToolCall]] = None, + invalid_tool_calls: Optional[list[types.InvalidToolCall]] = None, + parsed: Optional[Union[dict[str, Any], BaseModel]] = None, + ): + """Initialize an AI message. + + Args: + content: Message content as string or list of content blocks. + id: Optional unique identifier for the message. + name: Optional human-readable name for the message. + lc_version: Encoding version for the message. + response_metadata: Optional metadata about the response. + usage_metadata: Optional metadata about token usage. + tool_calls: Optional list of tool calls made by the AI. Tool calls should + generally be included in message content. If passed on init, they will + be added to the content list. + invalid_tool_calls: Optional list of tool calls that failed validation. + parsed: Optional auto-parsed message contents, if applicable. 
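+
+        Example (a minimal sketch; the tool call is merged into ``content``):
+
+        .. code-block:: python
+
+            msg = AIMessage(
+                "Let me look that up.",
+                tool_calls=[
+                    {
+                        "type": "tool_call",
+                        "name": "search",
+                        "args": {"query": "weather"},
+                        "id": "call_1",
+                    }
+                ],
+            )
+            assert msg.tool_calls[0]["name"] == "search"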
+ """ + if isinstance(content, str): + self.content = [{"type": "text", "text": content}] + else: + self.content = content + + self.id = _ensure_id(id) + self.name = name + self.lc_version = lc_version + self.usage_metadata = usage_metadata + self.parsed = parsed + if response_metadata is None: + self.response_metadata = {} + else: + self.response_metadata = response_metadata + + # Add tool calls to content if provided on init + if tool_calls: + content_tool_calls = { + block["id"] + for block in self.content + if block["type"] == "tool_call" and "id" in block + } + for tool_call in tool_calls: + if "id" in tool_call and tool_call["id"] in content_tool_calls: + continue + self.content.append(tool_call) + if invalid_tool_calls: + content_tool_calls = { + block["id"] + for block in self.content + if block["type"] == "invalid_tool_call" and "id" in block + } + for invalid_tool_call in invalid_tool_calls: + if ( + "id" in invalid_tool_call + and invalid_tool_call["id"] in content_tool_calls + ): + continue + self.content.append(invalid_tool_call) + self._tool_calls = [ + block for block in self.content if block["type"] == "tool_call" + ] + self._invalid_tool_calls = [ + block for block in self.content if block["type"] == "invalid_tool_call" + ] + + @property + def text(self) -> str: + """Extract all text content from the AI message as a string.""" + text_blocks = [block for block in self.content if block["type"] == "text"] + return "".join(block["text"] for block in text_blocks) + + @property + def tool_calls(self) -> list[types.ToolCall]: + """Get the tool calls made by the AI.""" + if not self._tool_calls: + self._tool_calls = [ + block for block in self.content if block["type"] == "tool_call" + ] + return self._tool_calls + + @tool_calls.setter + def tool_calls(self, value: list[types.ToolCall]) -> None: + """Set the tool calls for the AI message.""" + self._tool_calls = value + + @property + def invalid_tool_calls(self) -> list[types.InvalidToolCall]: + """Get the invalid tool calls made by the AI.""" + if not self._invalid_tool_calls: + self._invalid_tool_calls = [ + block for block in self.content if block["type"] == "invalid_tool_call" + ] + return self._invalid_tool_calls + + +@dataclass +class AIMessageChunk(AIMessage): + """A partial chunk of an AI message during streaming. + + Represents a portion of an AI response that is delivered incrementally + during streaming generation. Contains partial content and metadata. + + Attributes: + id: Unique identifier for the message chunk. + type: Message type identifier, always "ai_chunk". + name: Optional human-readable name for the message. + content: List of content blocks containing partial message data. + tool_call_chunks: Optional list of partial tool call data. + usage_metadata: Optional metadata about token usage and costs. + """ + + type: Literal["ai_chunk"] = "ai_chunk" # type: ignore[assignment] + """The type of the message. Must be a string that is unique to the message type. + + The purpose of this field is to allow for easy identification of the message type + when deserializing messages. 
+    """
+
+    def __init__(
+        self,
+        content: Union[str, list[types.ContentBlock]],
+        *,
+        id: Optional[str] = None,
+        name: Optional[str] = None,
+        lc_version: str = "v1",
+        response_metadata: Optional[ResponseMetadata] = None,
+        usage_metadata: Optional[UsageMetadata] = None,
+        tool_call_chunks: Optional[list[types.ToolCallChunk]] = None,
+        parsed: Optional[Union[dict[str, Any], BaseModel]] = None,
+        chunk_position: Optional[Literal["last"]] = None,
+    ):
+        """Initialize an AI message chunk.
+
+        Args:
+            content: Message content as string or list of content blocks.
+            id: Optional unique identifier for the message.
+            name: Optional human-readable name for the message.
+            lc_version: Encoding version for the message.
+            response_metadata: Optional metadata about the response.
+            usage_metadata: Optional metadata about token usage.
+            tool_call_chunks: Optional list of partial tool call data.
+            parsed: Optional auto-parsed message contents, if applicable.
+            chunk_position: Optional position of the chunk in the stream. If
+                "last", tool call chunks will be parsed into tool calls when
+                chunks are aggregated.
+        """
+        if isinstance(content, str):
+            self.content = [{"type": "text", "text": content, "index": 0}]
+        else:
+            self.content = content
+
+        self.id = _ensure_id(id)
+        self.name = name
+        self.lc_version = lc_version
+        self.usage_metadata = usage_metadata
+        self.parsed = parsed
+        self.chunk_position = chunk_position
+        if response_metadata is None:
+            self.response_metadata = {}
+        else:
+            self.response_metadata = response_metadata
+
+        if tool_call_chunks:
+            content_tool_call_chunks = {
+                block["id"]
+                for block in self.content
+                if block.get("type") == "tool_call_chunk" and "id" in block
+            }
+            for chunk in tool_call_chunks:
+                if "id" in chunk and chunk["id"] in content_tool_call_chunks:
+                    continue
+                self.content.append(chunk)
+        self._tool_call_chunks = [
+            block for block in self.content if block.get("type") == "tool_call_chunk"
+        ]
+
+        self._tool_calls: list[types.ToolCall] = []
+        self._invalid_tool_calls: list[types.InvalidToolCall] = []
+
+    @property
+    def tool_call_chunks(self) -> list[types.ToolCallChunk]:
+        """Get the tool call chunks present in the message content."""
+        if not self._tool_call_chunks:
+            self._tool_call_chunks = [
+                block
+                for block in self.content
+                if block.get("type") == "tool_call_chunk"
+            ]
+        return cast("list[types.ToolCallChunk]", self._tool_call_chunks)
+
+    @property
+    def tool_calls(self) -> list[types.ToolCall]:
+        """Get the tool calls made by the AI."""
+        if not self._tool_calls:
+            parsed_content = _init_tool_calls(self.content)
+            self._tool_calls = [
+                block for block in parsed_content if block["type"] == "tool_call"
+            ]
+            self._invalid_tool_calls = [
+                block
+                for block in parsed_content
+                if block["type"] == "invalid_tool_call"
+            ]
+        return self._tool_calls
+
+    @tool_calls.setter
+    def tool_calls(self, value: list[types.ToolCall]) -> None:
+        """Set the tool calls for the AI message."""
+        self._tool_calls = value
+
+    @property
+    def invalid_tool_calls(self) -> list[types.InvalidToolCall]:
+        """Get the invalid tool calls made by the AI."""
+        if not self._invalid_tool_calls:
+            parsed_content = _init_tool_calls(self.content)
+            self._tool_calls = [
+                block for block in parsed_content if block["type"] == "tool_call"
+            ]
+            self._invalid_tool_calls = [
+                block
+                for block in parsed_content
+                if block["type"] == "invalid_tool_call"
+            ]
+        return self._invalid_tool_calls
+
+    def __add__(self, other: Any) -> "AIMessageChunk":
+        """Add AIMessageChunk to this one."""
+        if isinstance(other, AIMessageChunk):
+            return
add_ai_message_chunks(self, other) + if isinstance(other, (list, tuple)) and all( + isinstance(o, AIMessageChunk) for o in other + ): + return add_ai_message_chunks(self, *other) + error_msg = "Can only add AIMessageChunk or sequence of AIMessageChunk." + raise NotImplementedError(error_msg) + + def to_message(self) -> "AIMessage": + """Convert this AIMessageChunk to an AIMessage.""" + return AIMessage( + content=_init_tool_calls(self.content), + id=self.id, + name=self.name, + lc_version=self.lc_version, + response_metadata=self.response_metadata, + usage_metadata=self.usage_metadata, + parsed=self.parsed, + ) + + +def _init_tool_calls(content: list[types.ContentBlock]) -> list[types.ContentBlock]: + """Parse tool call chunks in content into tool calls.""" + new_content = [] + for block in content: + if block.get("type") != "tool_call_chunk": + new_content.append(block) + continue + try: + args_ = ( + parse_partial_json(cast("str", block.get("args") or "")) + if block.get("args") + else {} + ) + if isinstance(args_, dict): + new_content.append( + create_tool_call( + name=cast("str", block.get("name") or ""), + args=args_, + id=cast("str", block.get("id", "")), + ) + ) + else: + new_content.append( + create_invalid_tool_call( + name=cast("str", block.get("name", "")), + args=cast("str", block.get("args", "")), + id=cast("str", block.get("id", "")), + error=None, + ) + ) + except Exception: + new_content.append( + create_invalid_tool_call( + name=cast("str", block.get("name", "")), + args=cast("str", block.get("args", "")), + id=cast("str", block.get("id", "")), + error=None, + ) + ) + return new_content + + +def add_ai_message_chunks( + left: AIMessageChunk, *others: AIMessageChunk +) -> AIMessageChunk: + """Add multiple AIMessageChunks together.""" + if not others: + return left + content = cast( + "list[types.ContentBlock]", + merge_content( + cast("list[str | dict[Any, Any]]", left.content), + *(cast("list[str | dict[Any, Any]]", o.content) for o in others), + ), + ) + response_metadata = merge_dicts( + cast("dict", left.response_metadata), + *(cast("dict", o.response_metadata) for o in others), + ) + + # Token usage + if left.usage_metadata or any(o.usage_metadata is not None for o in others): + usage_metadata: Optional[UsageMetadata] = left.usage_metadata + for other in others: + usage_metadata = add_usage(usage_metadata, other.usage_metadata) + else: + usage_metadata = None + + # Parsed + # 'parsed' always represents an aggregation not an incremental value, so the last + # non-null value is kept. 
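+    # (Later values supersede earlier ones rather than being merged.)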
+    parsed = None
+    for m in reversed([left, *others]):
+        if m.parsed is not None:
+            parsed = m.parsed
+            break
+
+    chunk_id = None
+    candidates = [left.id] + [o.id for o in others]
+    # first pass: pick the first provider-assigned id (non-`run-*` and non-`lc_*`)
+    for id_ in candidates:
+        if (
+            id_
+            and not id_.startswith(_LC_ID_PREFIX)
+            and not id_.startswith(_LC_AUTO_PREFIX)
+        ):
+            chunk_id = id_
+            break
+    else:
+        # second pass: prefer lc_* ids over run-* ids
+        for id_ in candidates:
+            if id_ and id_.startswith(_LC_AUTO_PREFIX):
+                chunk_id = id_
+                break
+        else:
+            # third pass: take any remaining id (run-* ids)
+            for id_ in candidates:
+                if id_:
+                    chunk_id = id_
+                    break
+
+    chunk_position: Optional[Literal["last"]] = (
+        "last" if any(x.chunk_position == "last" for x in [left, *others]) else None
+    )
+    if chunk_position == "last":
+        content = _init_tool_calls(content)
+
+    return left.__class__(
+        content=content,
+        response_metadata=cast("ResponseMetadata", response_metadata),
+        usage_metadata=usage_metadata,
+        parsed=parsed,
+        id=chunk_id,
+        chunk_position=chunk_position,
+    )
+
+
+@dataclass
+class HumanMessage:
+    """A message from a human user.
+
+    Represents input from a human user in a conversation, containing text
+    or other content types like images.
+
+    Attributes:
+        id: Unique identifier for the message.
+        content: List of content blocks containing the user's input.
+        name: Optional human-readable name for the message.
+        type: Message type identifier, always "human".
+    """
+
+    id: str
+    """Unique identifier for the message.
+
+    If the provider assigns a meaningful ID, it should be used here. Otherwise, a
+    LangChain-generated ID will be used.
+    """
+
+    content: list[types.ContentBlock]
+    """Message content as a list of content blocks."""
+
+    type: Literal["human"] = "human"
+    """The type of the message. Must be a string that is unique to the message type.
+
+    The purpose of this field is to allow for easy identification of the message type
+    when deserializing messages.
+    """
+
+    name: Optional[str] = None
+    """An optional name for the message.
+
+    This can be used to provide a human-readable name for the message.
+
+    Usage of this field is optional, and whether it's used or not is up to the
+    model implementation.
+    """
+
+    def __init__(
+        self,
+        content: Union[str, list[types.ContentBlock]],
+        *,
+        id: Optional[str] = None,
+        name: Optional[str] = None,
+    ):
+        """Initialize a human message.
+
+        Args:
+            content: Message content as string or list of content blocks.
+            id: Optional unique identifier for the message.
+            name: Optional human-readable name for the message.
+        """
+        self.id = _ensure_id(id)
+        if isinstance(content, str):
+            self.content = [{"type": "text", "text": content}]
+        else:
+            self.content = content
+        self.name = name
+
+    def text(self) -> str:
+        """Extract all text content from the message.
+
+        Returns:
+            Concatenated string of all text blocks in the message.
+        """
+        return "".join(
+            block["text"] for block in self.content if block["type"] == "text"
+        )
+
+
+@dataclass
+class SystemMessage:
+    """A system message containing instructions or context.
+
+    Represents system-level instructions or context that guides the AI's
+    behavior and understanding of the conversation.
+
+    Attributes:
+        id: Unique identifier for the message.
+        content: List of content blocks containing system instructions.
+        type: Message type identifier, always "system".
+    """
+
+    id: str
+    """Unique identifier for the message.
+
+    If the provider assigns a meaningful ID, it should be used here. Otherwise, a
+    LangChain-generated ID will be used.
+    """
+
+    content: list[types.ContentBlock]
+    """Message content as a list of content blocks."""
+
+    type: Literal["system"] = "system"
+    """The type of the message. Must be a string that is unique to the message type.
+
+    The purpose of this field is to allow for easy identification of the message type
+    when deserializing messages.
+    """
+
+    name: Optional[str] = None
+    """An optional name for the message.
+
+    This can be used to provide a human-readable name for the message.
+
+    Usage of this field is optional, and whether it's used or not is up to the
+    model implementation.
+    """
+
+    custom_role: Optional[str] = None
+    """If provided, a custom role for the system message.
+
+    Example: ``"developer"``.
+
+    Integration packages may use this field to assign the system message role if it
+    contains a recognized value.
+    """
+
+    def __init__(
+        self,
+        content: Union[str, list[types.ContentBlock]],
+        *,
+        id: Optional[str] = None,
+        custom_role: Optional[str] = None,
+        name: Optional[str] = None,
+    ):
+        """Initialize a system message.
+
+        Args:
+            content: Message content as string or list of content blocks.
+            id: Optional unique identifier for the message.
+            custom_role: If provided, a custom role for the system message.
+            name: Optional human-readable name for the message.
+        """
+        self.id = _ensure_id(id)
+        if isinstance(content, str):
+            self.content = [{"type": "text", "text": content}]
+        else:
+            self.content = content
+        self.custom_role = custom_role
+        self.name = name
+
+    def text(self) -> str:
+        """Extract all text content from the system message."""
+        return "".join(
+            block["text"] for block in self.content if block["type"] == "text"
+        )
+
+
+@dataclass
+class ToolMessage(ToolOutputMixin):
+    """A message containing the result of a tool execution.
+
+    Represents the output from executing a tool or function call,
+    including the result data and execution status.
+
+    Attributes:
+        id: Unique identifier for the message.
+        tool_call_id: ID of the tool call this message responds to.
+        content: The result content from tool execution.
+        artifact: Optional app-side payload not intended for the model.
+        status: Execution status ("success" or "error").
+        type: Message type identifier, always "tool".
+    """
+
+    id: str
+    """Unique identifier for the message."""
+
+    tool_call_id: str
+    """ID of the tool call this message responds to.
+
+    This should match the ID of the tool call that this message is responding to.
+    """
+
+    content: list[types.ContentBlock]
+    """Message content as a list of content blocks."""
+
+    type: Literal["tool"] = "tool"
+    """The type of the message. Must be a string that is unique to the message type.
+
+    The purpose of this field is to allow for easy identification of the message type
+    when deserializing messages.
+    """
+
+    artifact: Optional[Any] = None
+    """App-side payload not for the model."""
+
+    name: Optional[str] = None
+    """An optional name for the message.
+
+    This can be used to provide a human-readable name for the message.
+
+    Usage of this field is optional, and whether it's used or not is up to the
+    model implementation.
+    """
+
+    status: Literal["success", "error"] = "success"
+    """Execution status of the tool call.
+
+    Indicates whether the tool call was successful or encountered an error.
+    Defaults to "success".
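+
+    Example: a failed execution can be reported back to the model as
+    ``ToolMessage("Error: division by zero", tool_call_id="call_1", status="error")``.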
+    """
+
+    def __init__(
+        self,
+        content: Union[str, list[types.ContentBlock]],
+        tool_call_id: str,
+        *,
+        id: Optional[str] = None,
+        name: Optional[str] = None,
+        artifact: Optional[Any] = None,
+        status: Literal["success", "error"] = "success",
+    ):
+        """Initialize a tool message.
+
+        Args:
+            content: Message content as string or list of content blocks.
+            tool_call_id: ID of the tool call this message responds to.
+            id: Optional unique identifier for the message.
+            name: Optional human-readable name for the message.
+            artifact: Optional app-side payload not intended for the model.
+            status: Execution status ("success" or "error").
+        """
+        self.id = _ensure_id(id)
+        self.tool_call_id = tool_call_id
+        if isinstance(content, str):
+            self.content = [{"type": "text", "text": content}]
+        else:
+            self.content = content
+        self.name = name
+        self.artifact = artifact
+        self.status = status
+
+    @property
+    def text(self) -> str:
+        """Extract all text content from the tool message."""
+        return "".join(
+            block["text"] for block in self.content if block["type"] == "text"
+        )
+
+    def __post_init__(self) -> None:
+        """Initialize computed fields after dataclass creation.
+
+        Ensures the tool message has a valid ID.
+        """
+        self.id = _ensure_id(self.id)
+
+
+# Alias for a message type that can be any of the defined message types
+MessageV1 = Union[
+    AIMessage,
+    AIMessageChunk,
+    HumanMessage,
+    SystemMessage,
+    ToolMessage,
+]
+MessageV1Types = get_args(MessageV1)
diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml
index 9b1686659ef..e54a785bf5d 100644
--- a/libs/core/pyproject.toml
+++ b/libs/core/pyproject.toml
@@ -67,6 +67,7 @@ langchain-text-splitters = { path = "../text-splitters" }
strict = "True"
strict_bytes = "True"
enable_error_code = "deprecated"
+disable_error_code = ["typeddict-unknown-key"]
# TODO: activate for 'strict' checking
disallow_any_generics = "False"
@@ -86,6 +87,7 @@ ignore = [
"FIX002", # Line contains TODO
"ISC001", # Messes with the formatter
"PERF203", # Rarely useful
+"PLC0414", # Enable re-export
"PLR09", # Too many something (arg, statements, etc)
"RUF012", # Doesn't play well with Pydantic
"TC001", # Doesn't play well with Pydantic
@@ -105,6 +107,7 @@ unfixable = ["PLW1510",]
flake8-annotations.allow-star-arg-any = true
flake8-annotations.mypy-init-return = true
+flake8-builtins.ignorelist = ["id", "input", "type"]
flake8-type-checking.runtime-evaluated-base-classes = ["pydantic.BaseModel","langchain_core.load.serializable.Serializable","langchain_core.runnables.base.RunnableSerializable"]
pep8-naming.classmethod-decorators = [ "classmethod", "langchain_core.utils.pydantic.pre_init", "pydantic.field_validator", "pydantic.v1.root_validator",]
pydocstyle.convention = "google"
diff --git a/libs/core/tests/benchmarks/test_async_callbacks.py b/libs/core/tests/benchmarks/test_async_callbacks.py
index 5cb58f0210e..4b5938110c4 100644
--- a/libs/core/tests/benchmarks/test_async_callbacks.py
+++ b/libs/core/tests/benchmarks/test_async_callbacks.py
@@ -11,6 +11,8 @@ from langchain_core.callbacks.base import AsyncCallbackHandler
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
+from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1
+from langchain_core.v1.messages import MessageV1
class MyCustomAsyncHandler(AsyncCallbackHandler):
@@ -18,7 +20,7 @@ class MyCustomAsyncHandler(AsyncCallbackHandler):
    async
def on_chat_model_start( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], + messages: Union[list[list[BaseMessage]], list[MessageV1]], *, run_id: UUID, parent_run_id: Optional[UUID] = None, @@ -35,7 +37,9 @@ class MyCustomAsyncHandler(AsyncCallbackHandler): self, token: str, *, - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + chunk: Optional[ + Union[GenerationChunk, ChatGenerationChunk, AIMessageChunkV1] + ] = None, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[list[str]] = None, diff --git a/libs/core/tests/unit_tests/fake/callbacks.py b/libs/core/tests/unit_tests/fake/callbacks.py index b8ec1778b42..ecd849b8eff 100644 --- a/libs/core/tests/unit_tests/fake/callbacks.py +++ b/libs/core/tests/unit_tests/fake/callbacks.py @@ -9,6 +9,7 @@ from typing_extensions import override from langchain_core.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler from langchain_core.messages import BaseMessage +from langchain_core.v1.messages import MessageV1 class BaseFakeCallbackHandler(BaseModel): @@ -285,7 +286,7 @@ class FakeCallbackHandlerWithChatStart(FakeCallbackHandler): def on_chat_model_start( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], + messages: Union[list[list[BaseMessage]], list[MessageV1]], *, run_id: UUID, parent_run_id: Optional[UUID] = None, diff --git a/libs/core/tests/unit_tests/fake/test_fake_chat_model.py b/libs/core/tests/unit_tests/fake/test_fake_chat_model.py index ce262797535..4721877e5cc 100644 --- a/libs/core/tests/unit_tests/fake/test_fake_chat_model.py +++ b/libs/core/tests/unit_tests/fake/test_fake_chat_model.py @@ -16,6 +16,8 @@ from langchain_core.language_models import ( ) from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage, HumanMessage from langchain_core.outputs import ChatGenerationChunk, GenerationChunk +from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1 +from langchain_core.v1.messages import MessageV1 from tests.unit_tests.stubs import ( _any_id_ai_message, _any_id_ai_message_chunk, @@ -157,13 +159,13 @@ async def test_callback_handlers() -> None: """Verify that model is implemented correctly with handlers working.""" class MyCustomAsyncHandler(AsyncCallbackHandler): - def __init__(self, store: list[str]) -> None: + def __init__(self, store: list[Union[str, AIMessageChunkV1]]) -> None: self.store = store async def on_chat_model_start( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], + messages: Union[list[list[BaseMessage]], list[MessageV1]], *, run_id: UUID, parent_run_id: Optional[UUID] = None, @@ -178,9 +180,11 @@ async def test_callback_handlers() -> None: @override async def on_llm_new_token( self, - token: str, + token: Union[str, AIMessageChunkV1], *, - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + chunk: Optional[ + Union[GenerationChunk, ChatGenerationChunk, AIMessageChunkV1] + ] = None, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[list[str]] = None, @@ -194,7 +198,7 @@ async def test_callback_handlers() -> None: ] ) model = GenericFakeChatModel(messages=infinite_cycle) - tokens: list[str] = [] + tokens: list[Union[str, AIMessageChunkV1]] = [] # New model results = [ chunk diff --git a/libs/core/tests/unit_tests/language_models/chat_models/test_base.py b/libs/core/tests/unit_tests/language_models/chat_models/test_base.py index 37b05ed8255..5d1d4f581a2 100644 --- a/libs/core/tests/unit_tests/language_models/chat_models/test_base.py +++ 
b/libs/core/tests/unit_tests/language_models/chat_models/test_base.py @@ -14,7 +14,10 @@ from langchain_core.language_models import ( ParrotFakeChatModel, ) from langchain_core.language_models._utils import _normalize_messages -from langchain_core.language_models.fake_chat_models import FakeListChatModelError +from langchain_core.language_models.fake_chat_models import ( + FakeListChatModelError, + GenericFakeChatModelV1, +) from langchain_core.messages import ( AIMessage, AIMessageChunk, @@ -29,6 +32,7 @@ from langchain_core.tracers.base import BaseTracer from langchain_core.tracers.context import collect_runs from langchain_core.tracers.event_stream import _AstreamEventsCallbackHandler from langchain_core.tracers.schemas import Run +from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1 from tests.unit_tests.fake.callbacks import ( BaseFakeCallbackHandler, FakeAsyncCallbackHandler, @@ -654,3 +658,93 @@ def test_normalize_messages_edge_cases() -> None: ) ] assert messages == _normalize_messages(messages) + + +def test_streaming_v1() -> None: + chunks = [ + AIMessageChunkV1( + [ + { + "type": "reasoning", + "reasoning": "Let's call a tool.", + "index": 0, + } + ] + ), + AIMessageChunkV1( + [], + tool_call_chunks=[ + { + "type": "tool_call_chunk", + "args": "", + "name": "tool_name", + "id": "call_123", + "index": 1, + }, + ], + ), + AIMessageChunkV1( + [], + tool_call_chunks=[ + { + "type": "tool_call_chunk", + "args": '{"a', + "name": "", + "id": "", + "index": 1, + }, + ], + ), + AIMessageChunkV1( + [], + tool_call_chunks=[ + { + "type": "tool_call_chunk", + "args": '": 1}', + "name": "", + "id": "", + "index": 1, + }, + ], + ), + ] + full: Optional[AIMessageChunkV1] = None + for chunk in chunks: + full = chunk if full is None else full + chunk + + assert isinstance(full, AIMessageChunkV1) + assert full.content == [ + { + "type": "reasoning", + "reasoning": "Let's call a tool.", + "index": 0, + }, + { + "type": "tool_call_chunk", + "args": '{"a": 1}', + "name": "tool_name", + "id": "call_123", + "index": 1, + }, + ] + + llm = GenericFakeChatModelV1(message_chunks=chunks) + + full = None + for chunk in llm.stream("anything"): + full = chunk if full is None else full + chunk + + assert isinstance(full, AIMessageChunkV1) + assert full.content == [ + { + "type": "reasoning", + "reasoning": "Let's call a tool.", + "index": 0, + }, + { + "type": "tool_call", + "args": {"a": 1}, + "name": "tool_name", + "id": "call_123", + }, + ] diff --git a/libs/core/tests/unit_tests/messages/test_content_block_factories.py b/libs/core/tests/unit_tests/messages/test_content_block_factories.py new file mode 100644 index 00000000000..51b30f501eb --- /dev/null +++ b/libs/core/tests/unit_tests/messages/test_content_block_factories.py @@ -0,0 +1,974 @@ +"""Unit tests for ContentBlock factory functions.""" + +from uuid import UUID + +import pytest + +from langchain_core.messages.content_blocks import ( + CodeInterpreterCall, + CodeInterpreterOutput, + CodeInterpreterResult, + InvalidToolCall, + ToolCallChunk, + WebSearchCall, + WebSearchResult, + create_audio_block, + create_citation, + create_file_block, + create_image_block, + create_non_standard_block, + create_plaintext_block, + create_reasoning_block, + create_text_block, + create_tool_call, + create_video_block, +) + + +def _validate_lc_uuid(id_value: str) -> None: + """Validate that the ID has ``lc_`` prefix and valid UUID suffix. + + Args: + id_value: The ID string to validate. 
+ + Raises: + AssertionError: If the ID doesn't have ``lc_`` prefix or invalid UUID. + """ + assert id_value.startswith("lc_"), f"ID should start with 'lc_' but got: {id_value}" + # Validate the UUID part after the lc_ prefix + UUID(id_value[3:]) + + +class TestTextBlockFactory: + """Test create_text_block factory function.""" + + def test_basic_creation(self) -> None: + """Test basic text block creation.""" + block = create_text_block("Hello world") + + assert block["type"] == "text" + assert block.get("text") == "Hello world" + assert "id" in block + id_value = block.get("id") + assert id_value is not None, "block id is None" + _validate_lc_uuid(id_value) + + def test_with_custom_id(self) -> None: + """Test text block creation with custom ID.""" + custom_id = "custom-123" + block = create_text_block("Hello", id=custom_id) + + assert block.get("id") == custom_id + + def test_with_annotations(self) -> None: + """Test text block creation with annotations.""" + citation = create_citation(url="https://example.com", title="Example") + block = create_text_block("Hello", annotations=[citation]) + + assert block.get("annotations") == [citation] + + def test_with_index(self) -> None: + """Test text block creation with index.""" + block = create_text_block("Hello", index=42) + + assert block.get("index") == 42 + + def test_optional_fields_not_present_when_none(self) -> None: + """Test that optional fields are not included when None.""" + block = create_text_block("Hello") + + assert "annotations" not in block + assert "index" not in block + + +class TestImageBlockFactory: + """Test create_image_block factory function.""" + + def test_with_url(self) -> None: + """Test image block creation with URL.""" + block = create_image_block(url="https://example.com/image.jpg") + + assert block["type"] == "image" + assert block.get("url") == "https://example.com/image.jpg" + assert "id" in block + id_value = block.get("id") + assert id_value is not None, "block id is None" + _validate_lc_uuid(id_value) + + def test_with_base64(self) -> None: + """Test image block creation with base64 data.""" + block = create_image_block( + base64="iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJ", mime_type="image/png" + ) + + assert block.get("base64") == "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJ" + assert block.get("mime_type") == "image/png" + + def test_with_file_id(self) -> None: + """Test image block creation with file ID.""" + block = create_image_block(file_id="file-123") + + assert block.get("file_id") == "file-123" + + def test_no_source_raises_error(self) -> None: + """Test that missing all sources raises ValueError.""" + with pytest.raises( + ValueError, match="Must provide one of: url, base64, or file_id" + ): + create_image_block() + + def test_base64_without_mime_type_raises_error(self) -> None: + """Test that base64 without mime_type raises ValueError.""" + with pytest.raises( + ValueError, match="mime_type is required when using base64 data" + ): + create_image_block(base64="iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJ") + + def test_with_index(self) -> None: + """Test image block creation with index.""" + block = create_image_block(url="https://example.com/image.jpg", index=1) + + assert block.get("index") == 1 + + def test_optional_fields_not_present_when_not_provided(self) -> None: + """Test that optional fields are not included when not provided.""" + block = create_image_block(url="https://example.com/image.jpg") + + assert "base64" not in block + assert "file_id" not in block + assert "mime_type" not in 
block
+        assert "index" not in block
+
+
+class TestVideoBlockFactory:
+    """Test create_video_block factory function."""
+
+    def test_with_url(self) -> None:
+        """Test video block creation with URL."""
+        block = create_video_block(url="https://example.com/video.mp4")
+
+        assert block["type"] == "video"
+        assert block.get("url") == "https://example.com/video.mp4"
+
+    def test_with_base64(self) -> None:
+        """Test video block creation with base64 data."""
+        block = create_video_block(
+            base64="UklGRnoGAABXQVZFZm10IBAAAAABAAEA", mime_type="video/mp4"
+        )
+
+        assert block.get("base64") == "UklGRnoGAABXQVZFZm10IBAAAAABAAEA"
+        assert block.get("mime_type") == "video/mp4"
+
+    def test_no_source_raises_error(self) -> None:
+        """Test that missing all sources raises ValueError."""
+        with pytest.raises(
+            ValueError, match="Must provide one of: url, base64, or file_id"
+        ):
+            create_video_block()
+
+    def test_base64_without_mime_type_raises_error(self) -> None:
+        """Test that base64 without mime_type raises ValueError."""
+        with pytest.raises(
+            ValueError, match="mime_type is required when using base64 data"
+        ):
+            create_video_block(base64="UklGRnoGAABXQVZFZm10IBAAAAABAAEA")
+
+
+class TestAudioBlockFactory:
+    """Test create_audio_block factory function."""
+
+    def test_with_url(self) -> None:
+        """Test audio block creation with URL."""
+        block = create_audio_block(url="https://example.com/audio.mp3")
+
+        assert block["type"] == "audio"
+        assert block.get("url") == "https://example.com/audio.mp3"
+
+    def test_with_base64(self) -> None:
+        """Test audio block creation with base64 data."""
+        block = create_audio_block(
+            base64="UklGRnoGAABXQVZFZm10IBAAAAABAAEA", mime_type="audio/mp3"
+        )
+
+        assert block.get("base64") == "UklGRnoGAABXQVZFZm10IBAAAAABAAEA"
+        assert block.get("mime_type") == "audio/mp3"
+
+    def test_no_source_raises_error(self) -> None:
+        """Test that missing all sources raises ValueError."""
+        with pytest.raises(
+            ValueError, match="Must provide one of: url, base64, or file_id"
+        ):
+            create_audio_block()
+
+
+class TestFileBlockFactory:
+    """Test create_file_block factory function."""
+
+    def test_with_url(self) -> None:
+        """Test file block creation with URL."""
+        block = create_file_block(url="https://example.com/document.pdf")
+
+        assert block["type"] == "file"
+        assert block.get("url") == "https://example.com/document.pdf"
+
+    def test_with_base64(self) -> None:
+        """Test file block creation with base64 data."""
+        block = create_file_block(
+            base64="JVBERi0xLjQKJdPr6eEKMSAwIG9iago8PAovVHlwZSAvQ2F0YWxvZwo=",
+            mime_type="application/pdf",
+        )
+
+        assert (
+            block.get("base64")
+            == "JVBERi0xLjQKJdPr6eEKMSAwIG9iago8PAovVHlwZSAvQ2F0YWxvZwo="
+        )
+        assert block.get("mime_type") == "application/pdf"
+
+    def test_no_source_raises_error(self) -> None:
+        """Test that missing all sources raises ValueError."""
+        with pytest.raises(
+            ValueError, match="Must provide one of: url, base64, or file_id"
+        ):
+            create_file_block()
+
+
+class TestPlainTextBlockFactory:
+    """Test create_plaintext_block factory function."""
+
+    def test_basic_creation(self) -> None:
+        """Test basic plain text block creation."""
+        block = create_plaintext_block("This is plain text content.")
+
+        assert block["type"] == "text-plain"
+        assert block.get("mime_type") == "text/plain"
+        assert block.get("text") == "This is plain text content."
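+        # Generated IDs follow the shared "lc_" + UUID scheme (validated below).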
+ assert "id" in block + id_value = block.get("id") + assert id_value is not None, "block id is None" + _validate_lc_uuid(id_value) + + def test_with_title_and_context(self) -> None: + """Test plain text block creation with title and context.""" + block = create_plaintext_block( + "Document content here.", + title="Important Document", + context="This document contains important information.", + ) + + assert block.get("title") == "Important Document" + assert block.get("context") == "This document contains important information." + + def test_with_url(self) -> None: + """Test plain text block creation with URL.""" + block = create_plaintext_block( + "Content", url="https://example.com/document.txt" + ) + + assert block.get("url") == "https://example.com/document.txt" + + +class TestToolCallFactory: + """Test create_tool_call factory function.""" + + def test_basic_creation(self) -> None: + """Test basic tool call creation.""" + block = create_tool_call("search", {"query": "python"}) + + assert block["type"] == "tool_call" + assert block["name"] == "search" + assert block["args"] == {"query": "python"} + assert "id" in block + id_value = block.get("id") + assert id_value is not None, "block id is None" + _validate_lc_uuid(id_value) + + def test_with_custom_id(self) -> None: + """Test tool call creation with custom ID.""" + block = create_tool_call("search", {"query": "python"}, id="tool-123") + + assert block.get("id") == "tool-123" + + def test_with_index(self) -> None: + """Test tool call creation with index.""" + block = create_tool_call("search", {"query": "python"}, index=2) + + assert block.get("index") == 2 + + +class TestReasoningBlockFactory: + """Test create_reasoning_block factory function.""" + + def test_basic_creation(self) -> None: + """Test basic reasoning block creation.""" + block = create_reasoning_block("Let me think about this problem...") + + assert block["type"] == "reasoning" + assert block.get("reasoning") == "Let me think about this problem..." 
+ assert "id" in block + id_value = block.get("id") + assert id_value is not None, "block id is None" + _validate_lc_uuid(id_value) + + @pytest.mark.xfail(reason="Optional fields not implemented yet") + def test_with_signatures(self) -> None: + """Test reasoning block creation with signatures.""" + block = create_reasoning_block( + "Thinking...", + thought_signature="thought-sig-123", # type: ignore[call-arg] + signature="auth-sig-456", # type: ignore[call-arg, unused-ignore] + ) + + assert block.get("thought_signature") == "thought-sig-123" + assert block.get("signature") == "auth-sig-456" + + def test_with_index(self) -> None: + """Test reasoning block creation with index.""" + block = create_reasoning_block("Thinking...", index=3) + + assert block.get("index") == 3 + + +class TestCitationFactory: + """Test create_citation factory function.""" + + def test_basic_creation(self) -> None: + """Test basic citation creation.""" + block = create_citation() + + assert block["type"] == "citation" + assert "id" in block + id_value = block.get("id") + assert id_value is not None, "block id is None" + _validate_lc_uuid(id_value) + + def test_with_all_fields(self) -> None: + """Test citation creation with all fields.""" + block = create_citation( + url="https://example.com/source", + title="Source Document", + start_index=10, + end_index=50, + cited_text="This is the cited text.", + ) + + assert block.get("url") == "https://example.com/source" + assert block.get("title") == "Source Document" + assert block.get("start_index") == 10 + assert block.get("end_index") == 50 + assert block.get("cited_text") == "This is the cited text." + + def test_optional_fields_not_present_when_none(self) -> None: + """Test that optional fields are not included when None.""" + block = create_citation() + + assert "url" not in block + assert "title" not in block + assert "start_index" not in block + assert "end_index" not in block + assert "cited_text" not in block + + +class TestNonStandardBlockFactory: + """Test create_non_standard_block factory function.""" + + def test_basic_creation(self) -> None: + """Test basic non-standard block creation.""" + value = {"custom_field": "custom_value", "number": 42} + block = create_non_standard_block(value) + + assert block["type"] == "non_standard" + assert block["value"] == value + assert "id" in block + id_value = block.get("id") + assert id_value is not None, "block id is None" + _validate_lc_uuid(id_value) + + def test_with_index(self) -> None: + """Test non-standard block creation with index.""" + value = {"data": "test"} + block = create_non_standard_block(value, index=5) + + assert block.get("index") == 5 + + def test_optional_fields_not_present_when_none(self) -> None: + """Test that optional fields are not included when None.""" + value = {"data": "test"} + block = create_non_standard_block(value) + + assert "index" not in block + + +class TestUUIDValidation: + """Test UUID generation and validation behavior.""" + + def test_custom_id_bypasses_lc_prefix_requirement(self) -> None: + """Test that custom IDs can use any format (don't require lc_ prefix).""" + custom_id = "custom-123" + block = create_text_block("Hello", id=custom_id) + + assert block.get("id") == custom_id + # Custom IDs should not be validated with lc_ prefix requirement + + def test_generated_ids_are_unique(self) -> None: + """Test that multiple factory calls generate unique IDs.""" + blocks = [create_text_block("test") for _ in range(10)] + ids = [block.get("id") for block in blocks] + + # All IDs should 
be unique + assert len(set(ids)) == len(ids) + + # All generated IDs should have lc_ prefix + for id_value in ids: + _validate_lc_uuid(id_value or "") + + def test_empty_string_id_generates_new_uuid(self) -> None: + """Test that empty string ID generates new UUID with lc_ prefix.""" + block = create_text_block("Hello", id="") + + id_value: str = block.get("id", "") + assert id_value != "" + _validate_lc_uuid(id_value) + + def test_generated_id_length(self) -> None: + """Test that generated IDs have correct length (UUID4 + lc_ prefix).""" + block = create_text_block("Hello") + + id_value = block.get("id") + assert id_value is not None + + # UUID4 string length is 36 chars, plus 3 for "lc_" prefix = 39 total + expected_length = 36 + 3 + assert len(id_value) == expected_length, ( + f"Expected length {expected_length}, got {len(id_value)}" + ) + + # Validate it's properly formatted + _validate_lc_uuid(id_value) + + +class TestFactoryTypeConsistency: + """Test that factory functions return correctly typed objects.""" + + def test_factories_return_correct_types(self) -> None: + """Test that all factory functions return the expected TypedDict types.""" + text_block = create_text_block("test") + assert isinstance(text_block, dict) + assert text_block["type"] == "text" + + image_block = create_image_block(url="https://example.com/image.jpg") + assert isinstance(image_block, dict) + assert image_block["type"] == "image" + + video_block = create_video_block(url="https://example.com/video.mp4") + assert isinstance(video_block, dict) + assert video_block["type"] == "video" + + audio_block = create_audio_block(url="https://example.com/audio.mp3") + assert isinstance(audio_block, dict) + assert audio_block["type"] == "audio" + + file_block = create_file_block(url="https://example.com/file.pdf") + assert isinstance(file_block, dict) + assert file_block["type"] == "file" + + plain_text_block = create_plaintext_block("content") + assert isinstance(plain_text_block, dict) + assert plain_text_block["type"] == "text-plain" + + tool_call = create_tool_call("tool", {"arg": "value"}) + assert isinstance(tool_call, dict) + assert tool_call["type"] == "tool_call" + + reasoning_block = create_reasoning_block("reasoning") + assert isinstance(reasoning_block, dict) + assert reasoning_block["type"] == "reasoning" + + citation = create_citation() + assert isinstance(citation, dict) + assert citation["type"] == "citation" + + non_standard_block = create_non_standard_block({"data": "value"}) + assert isinstance(non_standard_block, dict) + assert non_standard_block["type"] == "non_standard" + + +class TestExtraItems: + """Test that content blocks support extra items via __extra_items__ field.""" + + def test_text_block_extra_items(self) -> None: + """Test that TextContentBlock can store extra provider-specific fields.""" + block = create_text_block("Hello world") + + block["openai_metadata"] = {"model": "gpt-4", "temperature": 0.7} # type: ignore[typeddict-unknown-key] + block["anthropic_usage"] = {"input_tokens": 10, "output_tokens": 20} # type: ignore[typeddict-unknown-key] + block["custom_field"] = "any value" # type: ignore[typeddict-unknown-key] + + assert block["type"] == "text" + assert block["text"] == "Hello world" + assert "id" in block + assert block.get("openai_metadata") == {"model": "gpt-4", "temperature": 0.7} + assert block.get("anthropic_usage") == {"input_tokens": 10, "output_tokens": 20} + assert block.get("custom_field") == "any value" + + def test_text_block_extras_field(self) -> None: + """Test that 
TextContentBlock properly supports the explicit extras field."""
+        block = create_text_block("Hello world")
+
+        # Test direct assignment to extras field
+        block["extras"] = {
+            "openai_metadata": {"model": "gpt-4", "temperature": 0.7},
+            "anthropic_usage": {"input_tokens": 10, "output_tokens": 20},
+            "custom_field": "any value",
+        }
+
+        assert block["type"] == "text"
+        assert block["text"] == "Hello world"
+        assert "id" in block
+        assert "extras" in block
+
+        extras = block.get("extras", {})
+        assert extras.get("openai_metadata") == {"model": "gpt-4", "temperature": 0.7}
+        expected_usage = {"input_tokens": 10, "output_tokens": 20}
+        assert extras.get("anthropic_usage") == expected_usage
+        assert extras.get("custom_field") == "any value"
+
+    def test_mixed_extra_items_types(self) -> None:
+        """Test that extra items can be various types (str, int, bool, dict, list)."""
+        block = create_text_block("Test content")
+
+        # Add various types of extra fields
+        block["string_field"] = "string value"  # type: ignore[typeddict-unknown-key]
+        block["int_field"] = 42  # type: ignore[typeddict-unknown-key]
+        block["float_field"] = 3.14  # type: ignore[typeddict-unknown-key]
+        block["bool_field"] = True  # type: ignore[typeddict-unknown-key]
+        block["list_field"] = ["item1", "item2", "item3"]  # type: ignore[typeddict-unknown-key]
+        block["dict_field"] = {"nested": {"deeply": "nested value"}}  # type: ignore[typeddict-unknown-key]
+        block["none_field"] = None  # type: ignore[typeddict-unknown-key]
+
+        # Verify all types are preserved
+        assert block.get("string_field") == "string value"
+        assert block.get("int_field") == 42
+        assert block.get("float_field") == 3.14
+        assert block.get("bool_field") is True
+        assert block.get("list_field") == ["item1", "item2", "item3"]
+        dict_field = block.get("dict_field", {})
+        assert isinstance(dict_field, dict)
+        nested = dict_field.get("nested", {})
+        assert isinstance(nested, dict)
+        assert nested.get("deeply") == "nested value"
+        assert block.get("none_field") is None
+
+    def test_extra_items_do_not_interfere_with_standard_fields(self) -> None:
+        """Test that extra items don't interfere with standard field access."""
+        block = create_text_block("Original text", index=1)
+
+        # Add many extra fields
+        for i in range(10):
+            block[f"extra_field_{i}"] = f"value_{i}"  # type: ignore[literal-required]
+
+        # Standard fields should still work correctly
+        assert block["type"] == "text"
+        assert block["text"] == "Original text"
+        assert block.get("index") == 1
+        assert "id" in block
+
+        # Extra fields should also be accessible
+        for i in range(10):
+            assert block.get(f"extra_field_{i}") == f"value_{i}"
+
+    def test_extra_items_can_be_modified(self) -> None:
+        """Test that extra items can be modified after creation."""
+        block = create_image_block(url="https://example.com/image.jpg")
+
+        # Add an extra field
+        block["status"] = "pending"  # type: ignore[typeddict-unknown-key]
+        assert block.get("status") == "pending"
+
+        # Modify the extra field
+        block["status"] = "processed"  # type: ignore[typeddict-unknown-key]
+        assert block.get("status") == "processed"
+
+        # Add more fields
+        block["metadata"] = {"version": 1}  # type: ignore[typeddict-unknown-key]
+        metadata = block.get("metadata", {})
+        assert isinstance(metadata, dict)
+        assert metadata.get("version") == 1
+
+        # Modify nested extra field
+        block["metadata"]["version"] = 2  # type: ignore[typeddict-item]
+        metadata = block.get("metadata", {})
+        assert isinstance(metadata, dict)
+        assert 
metadata.get("version") == 2 + + def test_all_content_blocks_support_extra_items(self) -> None: + """Test that all content block types support extra items.""" + # Test each content block type + text_block = create_text_block("test") + text_block["extra"] = "text_extra" # type: ignore[typeddict-unknown-key] + assert text_block.get("extra") == "text_extra" + + image_block = create_image_block(url="https://example.com/image.jpg") + image_block["extra"] = "image_extra" # type: ignore[typeddict-unknown-key] + assert image_block.get("extra") == "image_extra" + + video_block = create_video_block(url="https://example.com/video.mp4") + video_block["extra"] = "video_extra" # type: ignore[typeddict-unknown-key] + assert video_block.get("extra") == "video_extra" + + audio_block = create_audio_block(url="https://example.com/audio.mp3") + audio_block["extra"] = "audio_extra" # type: ignore[typeddict-unknown-key] + assert audio_block.get("extra") == "audio_extra" + + file_block = create_file_block(url="https://example.com/file.pdf") + file_block["extra"] = "file_extra" # type: ignore[typeddict-unknown-key] + assert file_block.get("extra") == "file_extra" + + plain_text_block = create_plaintext_block("content") + plain_text_block["extra"] = "plaintext_extra" # type: ignore[typeddict-unknown-key] + assert plain_text_block.get("extra") == "plaintext_extra" + + tool_call = create_tool_call("tool", {"arg": "value"}) + tool_call["extra"] = "tool_extra" # type: ignore[typeddict-unknown-key] + assert tool_call.get("extra") == "tool_extra" + + reasoning_block = create_reasoning_block("reasoning") + reasoning_block["extra"] = "reasoning_extra" # type: ignore[typeddict-unknown-key] + assert reasoning_block.get("extra") == "reasoning_extra" + + non_standard_block = create_non_standard_block({"data": "value"}) + non_standard_block["extra"] = "non_standard_extra" # type: ignore[typeddict-unknown-key] + assert non_standard_block.get("extra") == "non_standard_extra" + + +class TestExtrasField: + """Test the explicit extras field across all content block types.""" + + def test_all_content_blocks_support_extras_field(self) -> None: + """Test that all content block types support the explicit extras field.""" + provider_metadata = { + "provider": "openai", + "model": "gpt-4", + "temperature": 0.7, + "usage": {"input_tokens": 10, "output_tokens": 20}, + } + + # Test TextContentBlock + text_block = create_text_block("test") + text_block["extras"] = provider_metadata + assert text_block.get("extras") == provider_metadata + assert text_block["type"] == "text" + + # Test ImageContentBlock + image_block = create_image_block(url="https://example.com/image.jpg") + image_block["extras"] = provider_metadata + assert image_block.get("extras") == provider_metadata + assert image_block["type"] == "image" + + # Test VideoContentBlock + video_block = create_video_block(url="https://example.com/video.mp4") + video_block["extras"] = provider_metadata + assert video_block.get("extras") == provider_metadata + assert video_block["type"] == "video" + + # Test AudioContentBlock + audio_block = create_audio_block(url="https://example.com/audio.mp3") + audio_block["extras"] = provider_metadata + assert audio_block.get("extras") == provider_metadata + assert audio_block["type"] == "audio" + + # Test FileContentBlock + file_block = create_file_block(url="https://example.com/file.pdf") + file_block["extras"] = provider_metadata + assert file_block.get("extras") == provider_metadata + assert file_block["type"] == "file" + + # Test 
PlainTextContentBlock + plain_text_block = create_plaintext_block("content") + plain_text_block["extras"] = provider_metadata + assert plain_text_block.get("extras") == provider_metadata + assert plain_text_block["type"] == "text-plain" + + # Test ToolCall + tool_call = create_tool_call("tool", {"arg": "value"}) + tool_call["extras"] = provider_metadata + assert tool_call.get("extras") == provider_metadata + assert tool_call["type"] == "tool_call" + + # Test ReasoningContentBlock + reasoning_block = create_reasoning_block("reasoning") + reasoning_block["extras"] = provider_metadata + assert reasoning_block.get("extras") == provider_metadata + assert reasoning_block["type"] == "reasoning" + + # Test Citation + citation = create_citation() + citation["extras"] = provider_metadata + assert citation.get("extras") == provider_metadata + assert citation["type"] == "citation" + + def test_extras_field_is_optional(self) -> None: + """Test that the extras field is optional and blocks work without it.""" + # Create blocks without extras + text_block = create_text_block("test") + image_block = create_image_block(url="https://example.com/image.jpg") + tool_call = create_tool_call("tool", {"arg": "value"}) + reasoning_block = create_reasoning_block("reasoning") + citation = create_citation() + + # Verify blocks work correctly without extras + assert text_block["type"] == "text" + assert image_block["type"] == "image" + assert tool_call["type"] == "tool_call" + assert reasoning_block["type"] == "reasoning" + assert citation["type"] == "citation" + + # Verify extras field is not present when not set + assert "extras" not in text_block + assert "extras" not in image_block + assert "extras" not in tool_call + assert "extras" not in reasoning_block + assert "extras" not in citation + + def test_extras_field_can_be_modified(self) -> None: + """Test that the extras field can be modified after creation.""" + block = create_text_block("test") + + # Add extras + block["extras"] = {"initial": "value"} + assert block.get("extras") == {"initial": "value"} + + # Modify extras + block["extras"] = {"updated": "value", "count": 42} + extras = block.get("extras", {}) + assert extras.get("updated") == "value" + assert extras.get("count") == 42 + assert "initial" not in extras + + # Update nested values in extras + if "extras" in block: + block["extras"]["nested"] = {"deep": "value"} + extras = block.get("extras", {}) + nested = extras.get("nested", {}) + assert isinstance(nested, dict) + assert nested.get("deep") == "value" + + def test_extras_field_supports_various_data_types(self) -> None: + """Test that the extras field can store various data types.""" + block = create_text_block("test") + + complex_extras = { + "string_val": "test string", + "int_val": 42, + "float_val": 3.14, + "bool_val": True, + "none_val": None, + "list_val": ["item1", "item2", {"nested": "in_list"}], + "dict_val": {"nested": {"deeply": {"nested": "value"}}}, + } + + block["extras"] = complex_extras + + extras = block.get("extras", {}) + assert extras.get("string_val") == "test string" + assert extras.get("int_val") == 42 + assert extras.get("float_val") == 3.14 + assert extras.get("bool_val") is True + assert extras.get("none_val") is None + + list_val = extras.get("list_val", []) + assert isinstance(list_val, list) + assert len(list_val) == 3 + assert list_val[0] == "item1" + assert list_val[1] == "item2" + assert isinstance(list_val[2], dict) + assert list_val[2].get("nested") == "in_list" + + dict_val = extras.get("dict_val", {}) + assert 
isinstance(dict_val, dict) + nested = dict_val.get("nested", {}) + assert isinstance(nested, dict) + deeply = nested.get("deeply", {}) + assert isinstance(deeply, dict) + assert deeply.get("nested") == "value" + + def test_extras_field_does_not_interfere_with_standard_fields(self) -> None: + """Test that the extras field doesn't interfere with standard fields.""" + # Create a complex block with all standard fields + block = create_text_block( + "Test content", + annotations=[create_citation(url="https://example.com")], + index=42, + ) + + # Add extensive extras + large_extras = {f"field_{i}": f"value_{i}" for i in range(100)} + block["extras"] = large_extras + + # Verify all standard fields still work + assert block["type"] == "text" + assert block["text"] == "Test content" + assert block.get("index") == 42 + assert "id" in block + assert "annotations" in block + + annotations = block.get("annotations", []) + assert len(annotations) == 1 + assert annotations[0]["type"] == "citation" + + # Verify extras field works + extras = block.get("extras", {}) + assert len(extras) == 100 + for i in range(100): + assert extras.get(f"field_{i}") == f"value_{i}" + + def test_special_content_blocks_support_extras_field(self) -> None: + """Test that special content blocks support extras field.""" + provider_metadata = { + "provider": "openai", + "request_id": "req_12345", + "timing": {"start": 1234567890, "end": 1234567895}, + } + + # Test ToolCallChunk + tool_call_chunk: ToolCallChunk = { + "type": "tool_call_chunk", + "id": "tool_123", + "name": "search", + "args": '{"query": "test"}', + "index": 0, + "extras": provider_metadata, + } + assert tool_call_chunk.get("extras") == provider_metadata + assert tool_call_chunk["type"] == "tool_call_chunk" + + # Test InvalidToolCall + invalid_tool_call: InvalidToolCall = { + "type": "invalid_tool_call", + "id": "invalid_123", + "name": "bad_tool", + "args": "invalid json", + "error": "JSON parse error", + "extras": provider_metadata, + } + assert invalid_tool_call.get("extras") == provider_metadata + assert invalid_tool_call["type"] == "invalid_tool_call" + + # Test WebSearchCall + web_search_call: WebSearchCall = { + "type": "web_search_call", + "id": "search_123", + "query": "python langchain", + "index": 0, + "extras": provider_metadata, + } + assert web_search_call.get("extras") == provider_metadata + assert web_search_call["type"] == "web_search_call" + + # Test WebSearchResult + web_search_result: WebSearchResult = { + "type": "web_search_result", + "id": "result_123", + "urls": ["https://example.com", "https://test.com"], + "index": 0, + "extras": provider_metadata, + } + assert web_search_result.get("extras") == provider_metadata + assert web_search_result["type"] == "web_search_result" + + # Test CodeInterpreterCall + code_interpreter_call: CodeInterpreterCall = { + "type": "code_interpreter_call", + "id": "code_123", + "language": "python", + "code": "print('hello world')", + "index": 0, + "extras": provider_metadata, + } + assert code_interpreter_call.get("extras") == provider_metadata + assert code_interpreter_call["type"] == "code_interpreter_call" + + # Test CodeInterpreterOutput + code_interpreter_output: CodeInterpreterOutput = { + "type": "code_interpreter_output", + "id": "output_123", + "return_code": 0, + "stderr": "", + "stdout": "hello world\n", + "file_ids": ["file_123"], + "index": 0, + "extras": provider_metadata, + } + assert code_interpreter_output.get("extras") == provider_metadata + assert code_interpreter_output["type"] == 
"code_interpreter_output" + + # Test CodeInterpreterResult + code_interpreter_result: CodeInterpreterResult = { + "type": "code_interpreter_result", + "id": "result_123", + "output": [code_interpreter_output], + "index": 0, + "extras": provider_metadata, + } + assert code_interpreter_result.get("extras") == provider_metadata + assert code_interpreter_result["type"] == "code_interpreter_result" + + def test_extras_field_is_not_required_for_special_blocks(self) -> None: + """Test that extras field is optional for all special content blocks.""" + # Create blocks without extras field + tool_call_chunk: ToolCallChunk = { + "id": "tool_123", + "name": "search", + "args": '{"query": "test"}', + "index": 0, + } + + invalid_tool_call: InvalidToolCall = { + "type": "invalid_tool_call", + "id": "invalid_123", + "name": "bad_tool", + "args": "invalid json", + "error": "JSON parse error", + } + + web_search_call: WebSearchCall = { + "type": "web_search_call", + "query": "python langchain", + } + + web_search_result: WebSearchResult = { + "type": "web_search_result", + "urls": ["https://example.com"], + } + + code_interpreter_call: CodeInterpreterCall = { + "type": "code_interpreter_call", + "code": "print('hello')", + } + + code_interpreter_output: CodeInterpreterOutput = { + "type": "code_interpreter_output", + "stdout": "hello\n", + } + + code_interpreter_result: CodeInterpreterResult = { + "type": "code_interpreter_result", + "output": [code_interpreter_output], + } + + # Verify they work without extras + assert tool_call_chunk.get("name") == "search" + assert invalid_tool_call["type"] == "invalid_tool_call" + assert web_search_call["type"] == "web_search_call" + assert web_search_result["type"] == "web_search_result" + assert code_interpreter_call["type"] == "code_interpreter_call" + assert code_interpreter_output["type"] == "code_interpreter_output" + assert code_interpreter_result["type"] == "code_interpreter_result" + + # Verify extras field is not present + assert "extras" not in tool_call_chunk + assert "extras" not in invalid_tool_call + assert "extras" not in web_search_call + assert "extras" not in web_search_result + assert "extras" not in code_interpreter_call + assert "extras" not in code_interpreter_output + assert "extras" not in code_interpreter_result diff --git a/libs/core/tests/unit_tests/messages/test_imports.py b/libs/core/tests/unit_tests/messages/test_imports.py index ff9fbf92fc7..9fda5493244 100644 --- a/libs/core/tests/unit_tests/messages/test_imports.py +++ b/libs/core/tests/unit_tests/messages/test_imports.py @@ -5,22 +5,40 @@ EXPECTED_ALL = [ "_message_from_dict", "AIMessage", "AIMessageChunk", + "Annotation", "AnyMessage", + "AudioContentBlock", "BaseMessage", "BaseMessageChunk", + "ContentBlock", "ChatMessage", "ChatMessageChunk", + "Citation", + "CodeInterpreterCall", + "CodeInterpreterOutput", + "CodeInterpreterResult", + "DataContentBlock", + "FileContentBlock", "FunctionMessage", "FunctionMessageChunk", "HumanMessage", "HumanMessageChunk", + "ImageContentBlock", "InvalidToolCall", + "NonStandardAnnotation", + "NonStandardContentBlock", + "PlainTextContentBlock", "SystemMessage", "SystemMessageChunk", + "TextContentBlock", "ToolCall", "ToolCallChunk", "ToolMessage", "ToolMessageChunk", + "VideoContentBlock", + "WebSearchCall", + "WebSearchResult", + "ReasoningContentBlock", "RemoveMessage", "convert_to_messages", "get_buffer_string", diff --git a/libs/core/tests/unit_tests/messages/test_response_metadata.py 
b/libs/core/tests/unit_tests/messages/test_response_metadata.py new file mode 100644 index 00000000000..44951ba6672 --- /dev/null +++ b/libs/core/tests/unit_tests/messages/test_response_metadata.py @@ -0,0 +1,343 @@ +"""Unit tests for ResponseMetadata TypedDict.""" + +from langchain_core.v1.messages import AIMessage, AIMessageChunk, ResponseMetadata + + +class TestResponseMetadata: + """Test the ResponseMetadata TypedDict functionality.""" + + def test_response_metadata_basic_fields(self) -> None: + """Test ResponseMetadata with basic required fields.""" + metadata: ResponseMetadata = { + "model_provider": "openai", + "model_name": "gpt-4", + } + + assert metadata.get("model_provider") == "openai" + assert metadata.get("model_name") == "gpt-4" + + def test_response_metadata_is_optional(self) -> None: + """Test that ResponseMetadata fields are optional due to total=False.""" + # Should be able to create empty ResponseMetadata + metadata: ResponseMetadata = {} + assert metadata == {} + + # Should be able to create with just one field + metadata_partial: ResponseMetadata = {"model_provider": "anthropic"} + assert metadata_partial.get("model_provider") == "anthropic" + assert "model_name" not in metadata_partial + + def test_response_metadata_supports_extra_fields(self) -> None: + """Test that ResponseMetadata supports provider-specific extra fields.""" + metadata: ResponseMetadata = { + "model_provider": "openai", + "model_name": "gpt-4-turbo", + # Extra fields should be allowed + "system_fingerprint": "fp_12345", + "logprobs": None, + "finish_reason": "stop", + "request_id": "req_abc123", + } + + assert metadata.get("model_provider") == "openai" + assert metadata.get("model_name") == "gpt-4-turbo" + assert metadata.get("system_fingerprint") == "fp_12345" + assert metadata.get("logprobs") is None + assert metadata.get("finish_reason") == "stop" + assert metadata.get("request_id") == "req_abc123" + + def test_response_metadata_various_data_types(self) -> None: + """Test that ResponseMetadata can store various data types in extra fields.""" + metadata: ResponseMetadata = { + "model_provider": "anthropic", + "model_name": "claude-3-sonnet", + "string_field": "test_value", + "int_field": 42, + "float_field": 3.14, + "bool_field": True, + "none_field": None, + "list_field": [1, 2, 3, "test"], + "dict_field": {"nested": {"deeply": "nested_value"}}, + } + + assert metadata.get("string_field") == "test_value" + assert metadata.get("int_field") == 42 + assert metadata.get("float_field") == 3.14 + assert metadata.get("bool_field") is True + assert metadata.get("none_field") is None + + list_field = metadata.get("list_field") + assert isinstance(list_field, list) + assert list_field == [1, 2, 3, "test"] + + dict_field = metadata.get("dict_field") + assert isinstance(dict_field, dict) + nested = dict_field.get("nested") + assert isinstance(nested, dict) + assert nested.get("deeply") == "nested_value" + + def test_response_metadata_can_be_modified(self) -> None: + """Test that ResponseMetadata can be modified after creation.""" + metadata: ResponseMetadata = { + "model_provider": "openai", + "model_name": "gpt-3.5-turbo", + } + + # Modify existing fields + metadata["model_name"] = "gpt-4" + assert metadata.get("model_name") == "gpt-4" + + # Add new fields + metadata["request_id"] = "req_12345" + assert metadata.get("request_id") == "req_12345" + + # Modify nested structures + metadata["headers"] = {"x-request-id": "abc123"} + metadata["headers"]["x-rate-limit"] = "100" # type: ignore[typeddict-item] + + 
headers = metadata.get("headers") + assert isinstance(headers, dict) + assert headers.get("x-request-id") == "abc123" + assert headers.get("x-rate-limit") == "100" + + def test_response_metadata_provider_specific_examples(self) -> None: + """Test ResponseMetadata with realistic provider-specific examples.""" + # OpenAI-style metadata + openai_metadata: ResponseMetadata = { + "model_provider": "openai", + "model_name": "gpt-4-turbo-2024-04-09", + "system_fingerprint": "fp_abc123", + "created": 1234567890, + "logprobs": None, + "finish_reason": "stop", + } + + assert openai_metadata.get("model_provider") == "openai" + assert openai_metadata.get("system_fingerprint") == "fp_abc123" + + # Anthropic-style metadata + anthropic_metadata: ResponseMetadata = { + "model_provider": "anthropic", + "model_name": "claude-3-sonnet-20240229", + "stop_reason": "end_turn", + "stop_sequence": None, + } + + assert anthropic_metadata.get("model_provider") == "anthropic" + assert anthropic_metadata.get("stop_reason") == "end_turn" + + # Custom provider metadata + custom_metadata: ResponseMetadata = { + "model_provider": "custom_llm_service", + "model_name": "custom-model-v1", + "service_tier": "premium", + "rate_limit_info": { + "requests_remaining": 100, + "reset_time": "2024-01-01T00:00:00Z", + }, + "response_time_ms": 1250, + } + + assert custom_metadata.get("service_tier") == "premium" + rate_limit = custom_metadata.get("rate_limit_info") + assert isinstance(rate_limit, dict) + assert rate_limit.get("requests_remaining") == 100 + + +class TestResponseMetadataWithAIMessages: + """Test ResponseMetadata integration with AI message classes.""" + + def test_ai_message_with_response_metadata(self) -> None: + """Test AIMessage with ResponseMetadata.""" + metadata: ResponseMetadata = { + "model_provider": "openai", + "model_name": "gpt-4", + "system_fingerprint": "fp_xyz789", + } + + message = AIMessage(content="Hello, world!", response_metadata=metadata) + + assert message.response_metadata == metadata + assert message.response_metadata.get("model_provider") == "openai" + assert message.response_metadata.get("model_name") == "gpt-4" + assert message.response_metadata.get("system_fingerprint") == "fp_xyz789" + + def test_ai_message_chunk_with_response_metadata(self) -> None: + """Test AIMessageChunk with ResponseMetadata.""" + metadata: ResponseMetadata = { + "model_provider": "anthropic", + "model_name": "claude-3-sonnet", + "stream_id": "stream_12345", + } + + chunk = AIMessageChunk(content="Hello", response_metadata=metadata) + + assert chunk.response_metadata == metadata + assert chunk.response_metadata.get("stream_id") == "stream_12345" + + def test_ai_message_default_empty_response_metadata(self) -> None: + """Test that AIMessage creates empty ResponseMetadata by default.""" + message = AIMessage(content="Test message") + + # Should have empty dict as default + assert message.response_metadata == {} + assert isinstance(message.response_metadata, dict) + + def test_ai_message_chunk_default_empty_response_metadata(self) -> None: + """Test that AIMessageChunk creates empty ResponseMetadata by default.""" + chunk = AIMessageChunk(content="Test chunk") + + # Should have empty dict as default + assert chunk.response_metadata == {} + assert isinstance(chunk.response_metadata, dict) + + def test_response_metadata_merging_in_chunks(self) -> None: + """Test that ResponseMetadata is properly merged when adding AIMessageChunks.""" + metadata1: ResponseMetadata = { + "model_provider": "openai", + "model_name": "gpt-4", + 
"request_id": "req_123", + "system_fingerprint": "fp_abc", + } + + metadata2: ResponseMetadata = { + "stream_chunk": 1, + "finish_reason": "length", + } + + chunk1 = AIMessageChunk(content="Hello ", response_metadata=metadata1) + chunk2 = AIMessageChunk(content="world!", response_metadata=metadata2) + + merged = chunk1 + chunk2 + + # Should have merged response_metadata + assert merged.response_metadata.get("model_provider") == "openai" + assert merged.response_metadata.get("model_name") == "gpt-4" + assert merged.response_metadata.get("request_id") == "req_123" + assert merged.response_metadata.get("stream_chunk") == 1 + assert merged.response_metadata.get("system_fingerprint") == "fp_abc" + assert merged.response_metadata.get("finish_reason") == "length" + + def test_response_metadata_modification_after_message_creation(self) -> None: + """Test that ResponseMetadata can be modified after message creation.""" + message = AIMessage( + content="Initial message", + response_metadata={"model_provider": "openai", "model_name": "gpt-3.5"}, + ) + + # Modify existing field + message.response_metadata["model_name"] = "gpt-4" + assert message.response_metadata.get("model_name") == "gpt-4" + + # Add new field + message.response_metadata["finish_reason"] = "stop" + assert message.response_metadata.get("finish_reason") == "stop" + + def test_response_metadata_with_none_values(self) -> None: + """Test ResponseMetadata handling of None values.""" + metadata: ResponseMetadata = { + "model_provider": "openai", + "model_name": "gpt-4", + "system_fingerprint": None, + "logprobs": None, + } + + message = AIMessage(content="Test", response_metadata=metadata) + + assert message.response_metadata.get("system_fingerprint") is None + assert message.response_metadata.get("logprobs") is None + assert "system_fingerprint" in message.response_metadata + assert "logprobs" in message.response_metadata + + +class TestResponseMetadataEdgeCases: + """Test edge cases and error conditions for ResponseMetadata.""" + + def test_response_metadata_with_complex_nested_structures(self) -> None: + """Test ResponseMetadata with deeply nested and complex structures.""" + metadata: ResponseMetadata = { + "model_provider": "custom", + "model_name": "complex-model", + "complex_data": { + "level1": { + "level2": { + "level3": { + "deeply_nested": "value", + "array": [ + {"item": 1, "metadata": {"nested": True}}, + {"item": 2, "metadata": {"nested": False}}, + ], + } + } + } + }, + } + + complex_data = metadata.get("complex_data") + assert isinstance(complex_data, dict) + level1 = complex_data.get("level1") + assert isinstance(level1, dict) + level2 = level1.get("level2") + assert isinstance(level2, dict) + level3 = level2.get("level3") + assert isinstance(level3, dict) + + assert level3.get("deeply_nested") == "value" + array = level3.get("array") + assert isinstance(array, list) + assert len(array) == 2 + assert array[0]["item"] == 1 + assert array[0]["metadata"]["nested"] is True + + def test_response_metadata_large_data(self) -> None: + """Test ResponseMetadata with large amounts of data.""" + # Create metadata with many fields + large_metadata: ResponseMetadata = { + "model_provider": "test_provider", + "model_name": "test_model", + } + + # Add 100 extra fields + for i in range(100): + large_metadata[f"field_{i}"] = f"value_{i}" # type: ignore[literal-required] + + message = AIMessage(content="Test", response_metadata=large_metadata) + + # Verify all fields are accessible + assert message.response_metadata.get("model_provider") == 
"test_provider" + for i in range(100): + assert message.response_metadata.get(f"field_{i}") == f"value_{i}" + + def test_response_metadata_empty_vs_none(self) -> None: + """Test the difference between empty ResponseMetadata and None.""" + # Message with empty metadata + message_empty = AIMessage(content="Test", response_metadata={}) + assert message_empty.response_metadata == {} + assert isinstance(message_empty.response_metadata, dict) + + # Message with None metadata (should become empty dict) + message_none = AIMessage(content="Test", response_metadata=None) + assert message_none.response_metadata == {} + assert isinstance(message_none.response_metadata, dict) + + # Default message (no metadata specified) + message_default = AIMessage(content="Test") + assert message_default.response_metadata == {} + assert isinstance(message_default.response_metadata, dict) + + def test_response_metadata_preserves_original_dict_type(self) -> None: + """Test that ResponseMetadata preserves the original dict when passed.""" + original_dict: ResponseMetadata = { + "model_provider": "openai", + "model_name": "gpt-4", + "custom_field": "custom_value", + } + + message = AIMessage(content="Test", response_metadata=original_dict) + + # Should be the same dict object + assert message.response_metadata is original_dict + + # Modifications to the message's response_metadata should affect original + message.response_metadata["new_field"] = "new_value" + assert original_dict.get("new_field") == "new_value" diff --git a/libs/core/tests/unit_tests/messages/test_response_metadata.py.bak b/libs/core/tests/unit_tests/messages/test_response_metadata.py.bak new file mode 100644 index 00000000000..c8cc7d79f70 --- /dev/null +++ b/libs/core/tests/unit_tests/messages/test_response_metadata.py.bak @@ -0,0 +1,361 @@ +"""Unit tests for ResponseMetadata TypedDict.""" + +from langchain_core.messages.v1 import AIMessage, AIMessageChunk, ResponseMetadata + + +class TestResponseMetadata: + """Test the ResponseMetadata TypedDict functionality.""" + + def test_response_metadata_basic_fields(self) -> None: + """Test ResponseMetadata with basic required fields.""" + metadata: ResponseMetadata = { + "model_provider": "openai", + "model_name": "gpt-4", + } + + assert metadata.get("model_provider") == "openai" + assert metadata.get("model_name") == "gpt-4" + + def test_response_metadata_is_optional(self) -> None: + """Test that ResponseMetadata fields are optional due to total=False.""" + # Should be able to create empty ResponseMetadata + metadata: ResponseMetadata = {} + assert metadata == {} + + # Should be able to create with just one field + metadata_partial: ResponseMetadata = {"model_provider": "anthropic"} + assert metadata_partial.get("model_provider") == "anthropic" + assert "model_name" not in metadata_partial + + def test_response_metadata_supports_extra_fields(self) -> None: + """Test that ResponseMetadata supports provider-specific extra fields.""" + metadata: ResponseMetadata = { + "model_provider": "openai", + "model_name": "gpt-4-turbo", + # Extra fields should be allowed + "usage": {"input_tokens": 100, "output_tokens": 50}, + "system_fingerprint": "fp_12345", + "logprobs": None, + "finish_reason": "stop", + } + + assert metadata.get("model_provider") == "openai" + assert metadata.get("model_name") == "gpt-4-turbo" + assert metadata.get("usage") == {"input_tokens": 100, "output_tokens": 50} + assert metadata.get("system_fingerprint") == "fp_12345" + assert metadata.get("logprobs") is None + assert 
metadata.get("finish_reason") == "stop" + + def test_response_metadata_various_data_types(self) -> None: + """Test that ResponseMetadata can store various data types in extra fields.""" + metadata: ResponseMetadata = { + "model_provider": "anthropic", + "model_name": "claude-3-sonnet", + "string_field": "test_value", # type: ignore[typeddict-unknown-key] + "int_field": 42, # type: ignore[typeddict-unknown-key] + "float_field": 3.14, # type: ignore[typeddict-unknown-key] + "bool_field": True, # type: ignore[typeddict-unknown-key] + "none_field": None, # type: ignore[typeddict-unknown-key] + "list_field": [1, 2, 3, "test"], # type: ignore[typeddict-unknown-key] + "dict_field": { # type: ignore[typeddict-unknown-key] + "nested": {"deeply": "nested_value"} + }, + } + + assert metadata.get("string_field") == "test_value" # type: ignore[typeddict-item] + assert metadata.get("int_field") == 42 # type: ignore[typeddict-item] + assert metadata.get("float_field") == 3.14 # type: ignore[typeddict-item] + assert metadata.get("bool_field") is True # type: ignore[typeddict-item] + assert metadata.get("none_field") is None # type: ignore[typeddict-item] + + list_field = metadata.get("list_field") # type: ignore[typeddict-item] + assert isinstance(list_field, list) + assert list_field == [1, 2, 3, "test"] + + dict_field = metadata.get("dict_field") # type: ignore[typeddict-item] + assert isinstance(dict_field, dict) + nested = dict_field.get("nested") # type: ignore[union-attr] + assert isinstance(nested, dict) + assert nested.get("deeply") == "nested_value" # type: ignore[union-attr] + + def test_response_metadata_can_be_modified(self) -> None: + """Test that ResponseMetadata can be modified after creation.""" + metadata: ResponseMetadata = { + "model_provider": "openai", + "model_name": "gpt-3.5-turbo", + } + + # Modify existing fields + metadata["model_name"] = "gpt-4" + assert metadata.get("model_name") == "gpt-4" + + # Add new fields + metadata["request_id"] = "req_12345" # type: ignore[typeddict-unknown-key] + assert metadata.get("request_id") == "req_12345" # type: ignore[typeddict-item] + + # Modify nested structures + metadata["usage"] = {"input_tokens": 10} # type: ignore[typeddict-unknown-key] + metadata["usage"]["output_tokens"] = 20 # type: ignore[typeddict-item] + + usage = metadata.get("usage") # type: ignore[typeddict-item] + assert isinstance(usage, dict) + assert usage.get("input_tokens") == 10 # type: ignore[union-attr] + assert usage.get("output_tokens") == 20 # type: ignore[union-attr] + + def test_response_metadata_provider_specific_examples(self) -> None: + """Test ResponseMetadata with realistic provider-specific examples.""" + # OpenAI-style metadata + openai_metadata: ResponseMetadata = { + "model_provider": "openai", + "model_name": "gpt-4-turbo-2024-04-09", + "usage": { # type: ignore[typeddict-unknown-key] + "prompt_tokens": 50, + "completion_tokens": 25, + "total_tokens": 75, + }, + "system_fingerprint": "fp_abc123", # type: ignore[typeddict-unknown-key] + "created": 1234567890, # type: ignore[typeddict-unknown-key] + "logprobs": None, # type: ignore[typeddict-unknown-key] + "finish_reason": "stop", # type: ignore[typeddict-unknown-key] + } + + assert openai_metadata.get("model_provider") == "openai" + assert openai_metadata.get("system_fingerprint") == "fp_abc123" # type: ignore[typeddict-item] + + # Anthropic-style metadata + anthropic_metadata: ResponseMetadata = { + "model_provider": "anthropic", + "model_name": "claude-3-sonnet-20240229", + "usage": { # type: 
ignore[typeddict-unknown-key] + "input_tokens": 75, + "output_tokens": 30, + }, + "stop_reason": "end_turn", # type: ignore[typeddict-unknown-key] + "stop_sequence": None, # type: ignore[typeddict-unknown-key] + } + + assert anthropic_metadata.get("model_provider") == "anthropic" + assert anthropic_metadata.get("stop_reason") == "end_turn" # type: ignore[typeddict-item] + + # Custom provider metadata + custom_metadata: ResponseMetadata = { + "model_provider": "custom_llm_service", + "model_name": "custom-model-v1", + "service_tier": "premium", # type: ignore[typeddict-unknown-key] + "rate_limit_info": { # type: ignore[typeddict-unknown-key] + "requests_remaining": 100, + "reset_time": "2024-01-01T00:00:00Z", + }, + "response_time_ms": 1250, # type: ignore[typeddict-unknown-key] + } + + assert custom_metadata.get("service_tier") == "premium" # type: ignore[typeddict-item] + rate_limit = custom_metadata.get("rate_limit_info") # type: ignore[typeddict-item] + assert isinstance(rate_limit, dict) + assert rate_limit.get("requests_remaining") == 100 # type: ignore[union-attr] + + +class TestResponseMetadataWithAIMessages: + """Test ResponseMetadata integration with AI message classes.""" + + def test_ai_message_with_response_metadata(self) -> None: + """Test AIMessage with ResponseMetadata.""" + metadata: ResponseMetadata = { + "model_provider": "openai", + "model_name": "gpt-4", + "usage": {"input_tokens": 10, "output_tokens": 5}, # type: ignore[typeddict-unknown-key] + } + + message = AIMessage(content="Hello, world!", response_metadata=metadata) + + assert message.response_metadata == metadata + assert message.response_metadata.get("model_provider") == "openai" + assert message.response_metadata.get("model_name") == "gpt-4" + + usage = message.response_metadata.get("usage") # type: ignore[typeddict-item] + assert isinstance(usage, dict) + assert usage.get("input_tokens") == 10 # type: ignore[union-attr] + + def test_ai_message_chunk_with_response_metadata(self) -> None: + """Test AIMessageChunk with ResponseMetadata.""" + metadata: ResponseMetadata = { + "model_provider": "anthropic", + "model_name": "claude-3-sonnet", + "stream_id": "stream_12345", # type: ignore[typeddict-unknown-key] + } + + chunk = AIMessageChunk(content="Hello", response_metadata=metadata) + + assert chunk.response_metadata == metadata + assert chunk.response_metadata.get("stream_id") == "stream_12345" # type: ignore[typeddict-item] + + def test_ai_message_default_empty_response_metadata(self) -> None: + """Test that AIMessage creates empty ResponseMetadata by default.""" + message = AIMessage(content="Test message") + + # Should have empty dict as default + assert message.response_metadata == {} + assert isinstance(message.response_metadata, dict) + + def test_ai_message_chunk_default_empty_response_metadata(self) -> None: + """Test that AIMessageChunk creates empty ResponseMetadata by default.""" + chunk = AIMessageChunk(content="Test chunk") + + # Should have empty dict as default + assert chunk.response_metadata == {} + assert isinstance(chunk.response_metadata, dict) + + def test_response_metadata_merging_in_chunks(self) -> None: + """Test that ResponseMetadata is properly merged when adding AIMessageChunks.""" + metadata1: ResponseMetadata = { + "model_provider": "openai", + "model_name": "gpt-4", + "request_id": "req_123", # type: ignore[typeddict-unknown-key] + "usage": {"input_tokens": 10}, # type: ignore[typeddict-unknown-key] + } + + metadata2: ResponseMetadata = { + "stream_chunk": 1, # type: 
ignore[typeddict-unknown-key] + "usage": {"output_tokens": 5}, # type: ignore[typeddict-unknown-key] + } + + chunk1 = AIMessageChunk(content="Hello ", response_metadata=metadata1) + chunk2 = AIMessageChunk(content="world!", response_metadata=metadata2) + + merged = chunk1 + chunk2 + + # Should have merged response_metadata + assert merged.response_metadata.get("model_provider") == "openai" + assert merged.response_metadata.get("model_name") == "gpt-4" + assert merged.response_metadata.get("request_id") == "req_123" # type: ignore[typeddict-item] + assert merged.response_metadata.get("stream_chunk") == 1 # type: ignore[typeddict-item] + + # Usage should be merged (from merge_dicts behavior) + merged_usage = merged.response_metadata.get("usage") # type: ignore[typeddict-item] + assert isinstance(merged_usage, dict) + assert merged_usage.get("input_tokens") == 10 # type: ignore[union-attr] + assert merged_usage.get("output_tokens") == 5 # type: ignore[union-attr] + + def test_response_metadata_modification_after_message_creation(self) -> None: + """Test that ResponseMetadata can be modified after message creation.""" + message = AIMessage( + content="Initial message", + response_metadata={"model_provider": "openai", "model_name": "gpt-3.5"}, + ) + + # Modify existing field + message.response_metadata["model_name"] = "gpt-4" + assert message.response_metadata.get("model_name") == "gpt-4" + + # Add new field + message.response_metadata["finish_reason"] = "stop" # type: ignore[typeddict-unknown-key] + assert message.response_metadata.get("finish_reason") == "stop" # type: ignore[typeddict-item] + + def test_response_metadata_with_none_values(self) -> None: + """Test ResponseMetadata handling of None values.""" + metadata: ResponseMetadata = { + "model_provider": "openai", + "model_name": "gpt-4", + "system_fingerprint": None, # type: ignore[typeddict-unknown-key] + "logprobs": None, # type: ignore[typeddict-unknown-key] + } + + message = AIMessage(content="Test", response_metadata=metadata) + + assert message.response_metadata.get("system_fingerprint") is None # type: ignore[typeddict-item] + assert message.response_metadata.get("logprobs") is None # type: ignore[typeddict-item] + assert "system_fingerprint" in message.response_metadata + assert "logprobs" in message.response_metadata + + +class TestResponseMetadataEdgeCases: + """Test edge cases and error conditions for ResponseMetadata.""" + + def test_response_metadata_with_complex_nested_structures(self) -> None: + """Test ResponseMetadata with deeply nested and complex structures.""" + metadata: ResponseMetadata = { + "model_provider": "custom", + "model_name": "complex-model", + "complex_data": { # type: ignore[typeddict-unknown-key] + "level1": { + "level2": { + "level3": { + "deeply_nested": "value", + "array": [ + {"item": 1, "metadata": {"nested": True}}, + {"item": 2, "metadata": {"nested": False}}, + ], + } + } + } + }, + } + + complex_data = metadata.get("complex_data") # type: ignore[typeddict-item] + assert isinstance(complex_data, dict) + level1 = complex_data.get("level1") # type: ignore[union-attr] + assert isinstance(level1, dict) + level2 = level1.get("level2") # type: ignore[union-attr] + assert isinstance(level2, dict) + level3 = level2.get("level3") # type: ignore[union-attr] + assert isinstance(level3, dict) + + assert level3.get("deeply_nested") == "value" # type: ignore[union-attr] + array = level3.get("array") # type: ignore[union-attr] + assert isinstance(array, list) + assert len(array) == 2 # type: ignore[arg-type] 
+ assert array[0]["item"] == 1 # type: ignore[index, typeddict-item] + assert array[0]["metadata"]["nested"] is True # type: ignore[index, typeddict-item] + + def test_response_metadata_large_data(self) -> None: + """Test ResponseMetadata with large amounts of data.""" + # Create metadata with many fields + large_metadata: ResponseMetadata = { + "model_provider": "test_provider", + "model_name": "test_model", + } + + # Add 100 extra fields + for i in range(100): + large_metadata[f"field_{i}"] = f"value_{i}" # type: ignore[literal-required] + + message = AIMessage(content="Test", response_metadata=large_metadata) + + # Verify all fields are accessible + assert message.response_metadata.get("model_provider") == "test_provider" + for i in range(100): + assert message.response_metadata.get(f"field_{i}") == f"value_{i}" # type: ignore[typeddict-item] + + def test_response_metadata_empty_vs_none(self) -> None: + """Test the difference between empty ResponseMetadata and None.""" + # Message with empty metadata + message_empty = AIMessage(content="Test", response_metadata={}) + assert message_empty.response_metadata == {} + assert isinstance(message_empty.response_metadata, dict) + + # Message with None metadata (should become empty dict) + message_none = AIMessage(content="Test", response_metadata=None) + assert message_none.response_metadata == {} + assert isinstance(message_none.response_metadata, dict) + + # Default message (no metadata specified) + message_default = AIMessage(content="Test") + assert message_default.response_metadata == {} + assert isinstance(message_default.response_metadata, dict) + + def test_response_metadata_preserves_original_dict_type(self) -> None: + """Test that ResponseMetadata preserves the original dict when passed.""" + original_dict = { + "model_provider": "openai", + "model_name": "gpt-4", + "custom_field": "custom_value", + } + + message = AIMessage(content="Test", response_metadata=original_dict) + + # Should be the same dict object + assert message.response_metadata is original_dict + + # Modifications to the message's response_metadata should affect original + message.response_metadata["new_field"] = "new_value" # type: ignore[typeddict-unknown-key] + assert original_dict.get("new_field") == "new_value" # type: ignore[typeddict-item] diff --git a/libs/core/tests/unit_tests/messages/test_utils.py b/libs/core/tests/unit_tests/messages/test_utils.py index bedd518589e..f9f1c9c9ff0 100644 --- a/libs/core/tests/unit_tests/messages/test_utils.py +++ b/libs/core/tests/unit_tests/messages/test_utils.py @@ -1221,15 +1221,30 @@ def test_convert_to_openai_messages_multimodal() -> None: {"type": "text", "text": "Text message"}, { "type": "image", - "source_type": "url", "url": "https://example.com/test.png", }, + { + "type": "image", + "source_type": "url", # backward compatibility + "url": "https://example.com/test.png", + }, + { + "type": "image", + "base64": "", + "mime_type": "image/png", + }, { "type": "image", "source_type": "base64", "data": "", "mime_type": "image/png", }, + { + "type": "file", + "base64": "", + "mime_type": "application/pdf", + "filename": "test.pdf", + }, { "type": "file", "source_type": "base64", @@ -1244,11 +1259,20 @@ def test_convert_to_openai_messages_multimodal() -> None: "file_data": "data:application/pdf;base64,", }, }, + { + "type": "file", + "file_id": "file-abc123", + }, { "type": "file", "source_type": "id", "id": "file-abc123", }, + { + "type": "audio", + "base64": "", + "mime_type": "audio/wav", + }, { "type": "audio", 
"source_type": "base64", @@ -1268,7 +1292,7 @@ def test_convert_to_openai_messages_multimodal() -> None: result = convert_to_openai_messages(messages, text_format="block") assert len(result) == 1 message = result[0] - assert len(message["content"]) == 8 + assert len(message["content"]) == 13 # Test adding filename messages = [ @@ -1276,8 +1300,7 @@ def test_convert_to_openai_messages_multimodal() -> None: content=[ { "type": "file", - "source_type": "base64", - "data": "", + "base64": "", "mime_type": "application/pdf", }, ] diff --git a/libs/core/tests/unit_tests/output_parsers/test_base_parsers.py b/libs/core/tests/unit_tests/output_parsers/test_base_parsers.py index fa5e9c9c9c0..aec5bc29c5e 100644 --- a/libs/core/tests/unit_tests/output_parsers/test_base_parsers.py +++ b/libs/core/tests/unit_tests/output_parsers/test_base_parsers.py @@ -1,15 +1,19 @@ """Module to test base parser implementations.""" +from typing import Union + from typing_extensions import override from langchain_core.exceptions import OutputParserException from langchain_core.language_models import GenericFakeChatModel +from langchain_core.language_models.fake_chat_models import GenericFakeChatModelV1 from langchain_core.messages import AIMessage from langchain_core.output_parsers import ( BaseGenerationOutputParser, BaseTransformOutputParser, ) from langchain_core.outputs import ChatGeneration, Generation +from langchain_core.v1.messages import AIMessage as AIMessageV1 def test_base_generation_parser() -> None: @@ -20,7 +24,7 @@ def test_base_generation_parser() -> None: @override def parse_result( - self, result: list[Generation], *, partial: bool = False + self, result: Union[list[Generation], AIMessageV1], *, partial: bool = False ) -> str: """Parse a list of model Generations into a specific format. @@ -32,16 +36,22 @@ def test_base_generation_parser() -> None: partial: Whether to allow partial results. This is used for parsers that support streaming """ - if len(result) != 1: - msg = "This output parser can only be used with a single generation." - raise NotImplementedError(msg) - generation = result[0] - if not isinstance(generation, ChatGeneration): - # Say that this one only works with chat generations - msg = "This output parser can only be used with a chat generation." - raise OutputParserException(msg) + if isinstance(result, AIMessageV1): + content = result.text + else: + if len(result) != 1: + msg = ( + "This output parser can only be used with a single generation." + ) + raise NotImplementedError(msg) + generation = result[0] + if not isinstance(generation, ChatGeneration): + # Say that this one only works with chat generations + msg = "This output parser can only be used with a chat generation." 
+ raise OutputParserException(msg) + assert isinstance(generation.message.content, str) + content = generation.message.content - content = generation.message.content assert isinstance(content, str) return content.swapcase() @@ -49,6 +59,10 @@ def test_base_generation_parser() -> None: chain = model | StrInvertCase() assert chain.invoke("") == "HeLLO" + model_v1 = GenericFakeChatModelV1(messages=iter([AIMessageV1("hEllo")])) + chain_v1 = model_v1 | StrInvertCase() + assert chain_v1.invoke("") == "HeLLO" + def test_base_transform_output_parser() -> None: """Test base transform output parser.""" @@ -62,7 +76,7 @@ def test_base_transform_output_parser() -> None: @override def parse_result( - self, result: list[Generation], *, partial: bool = False + self, result: Union[list[Generation], AIMessageV1], *, partial: bool = False ) -> str: """Parse a list of model Generations into a specific format. @@ -74,15 +88,22 @@ def test_base_transform_output_parser() -> None: partial: Whether to allow partial results. This is used for parsers that support streaming """ - if len(result) != 1: - msg = "This output parser can only be used with a single generation." - raise NotImplementedError(msg) - generation = result[0] - if not isinstance(generation, ChatGeneration): - # Say that this one only works with chat generations - msg = "This output parser can only be used with a chat generation." - raise OutputParserException(msg) - content = generation.message.content + if isinstance(result, AIMessageV1): + content = result.text + else: + if len(result) != 1: + msg = ( + "This output parser can only be used with a single generation." + ) + raise NotImplementedError(msg) + generation = result[0] + if not isinstance(generation, ChatGeneration): + # Say that this one only works with chat generations + msg = "This output parser can only be used with a chat generation." 
+ raise OutputParserException(msg) + assert isinstance(generation.message.content, str) + content = generation.message.content + assert isinstance(content, str) return content.swapcase() @@ -91,3 +112,8 @@ def test_base_transform_output_parser() -> None: # inputs to models are ignored, response is hard-coded in model definition chunks = list(chain.stream("")) assert chunks == ["HELLO", " ", "WORLD"] + + model_v1 = GenericFakeChatModelV1(message_chunks=["hello", " ", "world"]) + chain_v1 = model_v1 | StrInvertCase() + chunks = list(chain_v1.stream("")) + assert chunks == ["HELLO", " ", "WORLD", ""] diff --git a/libs/core/tests/unit_tests/output_parsers/test_openai_tools.py b/libs/core/tests/unit_tests/output_parsers/test_openai_tools.py index 74862a8386a..28b7f92adf1 100644 --- a/libs/core/tests/unit_tests/output_parsers/test_openai_tools.py +++ b/libs/core/tests/unit_tests/output_parsers/test_openai_tools.py @@ -16,6 +16,8 @@ from langchain_core.output_parsers.openai_tools import ( PydanticToolsParser, ) from langchain_core.outputs import ChatGeneration +from langchain_core.v1.messages import AIMessage as AIMessageV1 +from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1 STREAMED_MESSAGES: list = [ AIMessageChunk(content=""), @@ -331,6 +333,14 @@ for message in STREAMED_MESSAGES: STREAMED_MESSAGES_WITH_TOOL_CALLS.append(message) +STREAMED_MESSAGES_V1 = [ + AIMessageChunkV1( + content=[], + tool_call_chunks=chunk.tool_call_chunks, + ) + for chunk in STREAMED_MESSAGES_WITH_TOOL_CALLS +] + EXPECTED_STREAMED_JSON = [ {}, {"names": ["suz"]}, @@ -398,6 +408,19 @@ def test_partial_json_output_parser(*, use_tool_calls: bool) -> None: assert actual == expected +def test_partial_json_output_parser_v1() -> None: + def input_iter(_: Any) -> Iterator[AIMessageChunkV1]: + yield from STREAMED_MESSAGES_V1 + + chain = input_iter | JsonOutputToolsParser() + + actual = list(chain.stream(None)) + expected: list = [[]] + [ + [{"type": "NameCollector", "args": chunk}] for chunk in EXPECTED_STREAMED_JSON + ] + assert actual == expected + + @pytest.mark.parametrize("use_tool_calls", [False, True]) async def test_partial_json_output_parser_async(*, use_tool_calls: bool) -> None: input_iter = _get_aiter(use_tool_calls=use_tool_calls) @@ -410,6 +433,20 @@ async def test_partial_json_output_parser_async(*, use_tool_calls: bool) -> None assert actual == expected +async def test_partial_json_output_parser_async_v1() -> None: + async def input_iter(_: Any) -> AsyncIterator[AIMessageChunkV1]: + for msg in STREAMED_MESSAGES_V1: + yield msg + + chain = input_iter | JsonOutputToolsParser() + + actual = [p async for p in chain.astream(None)] + expected: list = [[]] + [ + [{"type": "NameCollector", "args": chunk}] for chunk in EXPECTED_STREAMED_JSON + ] + assert actual == expected + + @pytest.mark.parametrize("use_tool_calls", [False, True]) def test_partial_json_output_parser_return_id(*, use_tool_calls: bool) -> None: input_iter = _get_iter(use_tool_calls=use_tool_calls) @@ -429,6 +466,26 @@ def test_partial_json_output_parser_return_id(*, use_tool_calls: bool) -> None: assert actual == expected +def test_partial_json_output_parser_return_id_v1() -> None: + def input_iter(_: Any) -> Iterator[AIMessageChunkV1]: + yield from STREAMED_MESSAGES_V1 + + chain = input_iter | JsonOutputToolsParser(return_id=True) + + actual = list(chain.stream(None)) + expected: list = [[]] + [ + [ + { + "type": "NameCollector", + "args": chunk, + "id": "call_OwL7f5PEPJTYzw9sQlNJtCZl", + } + ] + for chunk in 
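The streaming tests above lean on two things: piping a plain generator function into the parser (LangChain coerces it to a runnable), and per-``index`` merging of ``tool_call_chunks``. A condensed sketch with made-up chunk contents:

.. code-block:: python

    from typing import Any, Iterator

    from langchain_core.output_parsers.openai_tools import JsonOutputToolsParser
    from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1

    def fake_stream(_: Any) -> Iterator[AIMessageChunkV1]:
        # One tool call whose JSON args are split across two chunks.
        yield AIMessageChunkV1(
            content=[],
            tool_call_chunks=[
                {
                    "type": "tool_call_chunk",
                    "name": "NameCollector",
                    "args": '{"names": ["s',
                    "id": "call_0",
                    "index": 0,
                }
            ],
        )
        yield AIMessageChunkV1(
            content=[],
            tool_call_chunks=[
                {"type": "tool_call_chunk", "name": None, "args": 'uz"]}', "id": None, "index": 0}
            ],
        )

    chain = fake_stream | JsonOutputToolsParser()
    for parsed in chain.stream(None):
        print(parsed)  # progressively, e.g. [{"type": "NameCollector", "args": {"names": ["suz"]}}]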
EXPECTED_STREAMED_JSON + ] + assert actual == expected + + @pytest.mark.parametrize("use_tool_calls", [False, True]) def test_partial_json_output_key_parser(*, use_tool_calls: bool) -> None: input_iter = _get_iter(use_tool_calls=use_tool_calls) @@ -439,6 +496,17 @@ def test_partial_json_output_key_parser(*, use_tool_calls: bool) -> None: assert actual == expected +def test_partial_json_output_key_parser_v1() -> None: + def input_iter(_: Any) -> Iterator[AIMessageChunkV1]: + yield from STREAMED_MESSAGES_V1 + + chain = input_iter | JsonOutputKeyToolsParser(key_name="NameCollector") + + actual = list(chain.stream(None)) + expected: list = [[]] + [[chunk] for chunk in EXPECTED_STREAMED_JSON] + assert actual == expected + + @pytest.mark.parametrize("use_tool_calls", [False, True]) async def test_partial_json_output_parser_key_async(*, use_tool_calls: bool) -> None: input_iter = _get_aiter(use_tool_calls=use_tool_calls) @@ -450,6 +518,18 @@ async def test_partial_json_output_parser_key_async(*, use_tool_calls: bool) -> assert actual == expected +async def test_partial_json_output_parser_key_async_v1() -> None: + async def input_iter(_: Any) -> AsyncIterator[AIMessageChunkV1]: + for msg in STREAMED_MESSAGES_V1: + yield msg + + chain = input_iter | JsonOutputKeyToolsParser(key_name="NameCollector") + + actual = [p async for p in chain.astream(None)] + expected: list = [[]] + [[chunk] for chunk in EXPECTED_STREAMED_JSON] + assert actual == expected + + @pytest.mark.parametrize("use_tool_calls", [False, True]) def test_partial_json_output_key_parser_first_only(*, use_tool_calls: bool) -> None: input_iter = _get_iter(use_tool_calls=use_tool_calls) @@ -461,6 +541,17 @@ def test_partial_json_output_key_parser_first_only(*, use_tool_calls: bool) -> N assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON +def test_partial_json_output_key_parser_first_only_v1() -> None: + def input_iter(_: Any) -> Iterator[AIMessageChunkV1]: + yield from STREAMED_MESSAGES_V1 + + chain = input_iter | JsonOutputKeyToolsParser( + key_name="NameCollector", first_tool_only=True + ) + + assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON + + @pytest.mark.parametrize("use_tool_calls", [False, True]) async def test_partial_json_output_parser_key_async_first_only( *, @@ -475,6 +566,18 @@ async def test_partial_json_output_parser_key_async_first_only( assert [p async for p in chain.astream(None)] == EXPECTED_STREAMED_JSON +async def test_partial_json_output_parser_key_async_first_only_v1() -> None: + async def input_iter(_: Any) -> AsyncIterator[AIMessageChunkV1]: + for msg in STREAMED_MESSAGES_V1: + yield msg + + chain = input_iter | JsonOutputKeyToolsParser( + key_name="NameCollector", first_tool_only=True + ) + + assert [p async for p in chain.astream(None)] == EXPECTED_STREAMED_JSON + + @pytest.mark.parametrize("use_tool_calls", [False, True]) def test_json_output_key_tools_parser_multiple_tools_first_only( *, use_tool_calls: bool @@ -531,6 +634,42 @@ def test_json_output_key_tools_parser_multiple_tools_first_only( assert output_no_id == {"a": 1} +def test_json_output_key_tools_parser_multiple_tools_first_only_v1() -> None: + message = AIMessageV1( + content=[], + tool_calls=[ + { + "type": "tool_call", + "id": "call_other", + "name": "other", + "args": {"b": 2}, + }, + {"type": "tool_call", "id": "call_func", "name": "func", "args": {"a": 1}}, + ], + ) + + # Test with return_id=True + parser = JsonOutputKeyToolsParser( + key_name="func", first_tool_only=True, return_id=True + ) + output = parser.parse_result(message) 
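The ``return_id`` flag decides whether the matched call keeps its identifier. Distilled from the assertions above:

.. code-block:: python

    from langchain_core.output_parsers.openai_tools import JsonOutputKeyToolsParser
    from langchain_core.v1.messages import AIMessage as AIMessageV1

    message = AIMessageV1(
        content=[],
        tool_calls=[
            {"type": "tool_call", "id": "call_func", "name": "func", "args": {"a": 1}},
        ],
    )

    with_id = JsonOutputKeyToolsParser(
        key_name="func", first_tool_only=True, return_id=True
    ).parse_result(message)
    # with_id["type"] == "func", with_id["args"] == {"a": 1}, and "id" is present

    bare = JsonOutputKeyToolsParser(
        key_name="func", first_tool_only=True, return_id=False
    ).parse_result(message)
    # bare == {"a": 1}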
+ + # Should return the func tool call, not None + assert output is not None + assert output["type"] == "func" + assert output["args"] == {"a": 1} + assert "id" in output + + # Test with return_id=False + parser_no_id = JsonOutputKeyToolsParser( + key_name="func", first_tool_only=True, return_id=False + ) + output_no_id = parser_no_id.parse_result(message) + + # Should return just the args + assert output_no_id == {"a": 1} + + @pytest.mark.parametrize("use_tool_calls", [False, True]) def test_json_output_key_tools_parser_multiple_tools_no_match( *, use_tool_calls: bool @@ -583,6 +722,44 @@ def test_json_output_key_tools_parser_multiple_tools_no_match( assert output_no_id is None +def test_json_output_key_tools_parser_multiple_tools_no_match_v1() -> None: + message = AIMessageV1( + content=[], + tool_calls=[ + { + "type": "tool_call", + "id": "call_other", + "name": "other", + "args": {"b": 2}, + }, + { + "type": "tool_call", + "id": "call_another", + "name": "another", + "args": {"c": 3}, + }, + ], + ) + + # Test with return_id=True, first_tool_only=True + parser = JsonOutputKeyToolsParser( + key_name="nonexistent", first_tool_only=True, return_id=True + ) + output = parser.parse_result(message) + + # Should return None when no matches + assert output is None + + # Test with return_id=False, first_tool_only=True + parser_no_id = JsonOutputKeyToolsParser( + key_name="nonexistent", first_tool_only=True, return_id=False + ) + output_no_id = parser_no_id.parse_result(message) + + # Should return None when no matches + assert output_no_id is None + + @pytest.mark.parametrize("use_tool_calls", [False, True]) def test_json_output_key_tools_parser_multiple_matching_tools( *, use_tool_calls: bool @@ -643,6 +820,42 @@ def test_json_output_key_tools_parser_multiple_matching_tools( assert output_all[1]["args"] == {"a": 3} +def test_json_output_key_tools_parser_multiple_matching_tools_v1() -> None: + message = AIMessageV1( + content=[], + tool_calls=[ + {"type": "tool_call", "id": "call_func1", "name": "func", "args": {"a": 1}}, + { + "type": "tool_call", + "id": "call_other", + "name": "other", + "args": {"b": 2}, + }, + {"type": "tool_call", "id": "call_func2", "name": "func", "args": {"a": 3}}, + ], + ) + + # Test with first_tool_only=True - should return first matching + parser = JsonOutputKeyToolsParser( + key_name="func", first_tool_only=True, return_id=True + ) + output = parser.parse_result(message) + + assert output is not None + assert output["type"] == "func" + assert output["args"] == {"a": 1} # First matching tool call + + # Test with first_tool_only=False - should return all matching + parser_all = JsonOutputKeyToolsParser( + key_name="func", first_tool_only=False, return_id=True + ) + output_all = parser_all.parse_result(message) + + assert len(output_all) == 2 + assert output_all[0]["args"] == {"a": 1} + assert output_all[1]["args"] == {"a": 3} + + @pytest.mark.parametrize("use_tool_calls", [False, True]) def test_json_output_key_tools_parser_empty_results(*, use_tool_calls: bool) -> None: def create_message() -> AIMessage: @@ -671,6 +884,35 @@ def test_json_output_key_tools_parser_empty_results(*, use_tool_calls: bool) -> assert output_all == [] +@pytest.mark.parametrize( + "empty_message", + [ + AIMessageV1(content=[], tool_calls=[]), + AIMessageV1(content="", tool_calls=[]), + ], +) +def test_json_output_key_tools_parser_empty_results_v1( + empty_message: AIMessageV1, +) -> None: + # Test with first_tool_only=True + parser = JsonOutputKeyToolsParser( + key_name="func", 
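When ``key_name`` matches nothing, the scalar and list variants degrade differently, which the tests above pin down. A minimal sketch (the list-variant result for non-matching calls is inferred from the empty-results case):

.. code-block:: python

    from langchain_core.output_parsers.openai_tools import JsonOutputKeyToolsParser
    from langchain_core.v1.messages import AIMessage as AIMessageV1

    message = AIMessageV1(
        content=[],
        tool_calls=[
            {"type": "tool_call", "id": "call_other", "name": "other", "args": {"b": 2}},
        ],
    )

    JsonOutputKeyToolsParser(key_name="func", first_tool_only=True).parse_result(message)
    # -> None
    JsonOutputKeyToolsParser(key_name="func", first_tool_only=False).parse_result(message)
    # -> []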
first_tool_only=True, return_id=True + ) + output = parser.parse_result(empty_message) + + # Should return None for empty results + assert output is None + + # Test with first_tool_only=False + parser_all = JsonOutputKeyToolsParser( + key_name="func", first_tool_only=False, return_id=True + ) + output_all = parser_all.parse_result(empty_message) + + # Should return empty list for empty results + assert output_all == [] + + @pytest.mark.parametrize("use_tool_calls", [False, True]) def test_json_output_key_tools_parser_parameter_combinations( *, use_tool_calls: bool @@ -746,6 +988,56 @@ def test_json_output_key_tools_parser_parameter_combinations( assert output4 == [{"a": 1}, {"a": 3}] +def test_json_output_key_tools_parser_parameter_combinations_v1() -> None: + """Test all parameter combinations of JsonOutputKeyToolsParser.""" + result = AIMessageV1( + content=[], + tool_calls=[ + { + "type": "tool_call", + "id": "call_other", + "name": "other", + "args": {"b": 2}, + }, + {"type": "tool_call", "id": "call_func1", "name": "func", "args": {"a": 1}}, + {"type": "tool_call", "id": "call_func2", "name": "func", "args": {"a": 3}}, + ], + ) + + # Test: first_tool_only=True, return_id=True + parser1 = JsonOutputKeyToolsParser( + key_name="func", first_tool_only=True, return_id=True + ) + output1 = parser1.parse_result(result) + assert output1["type"] == "func" + assert output1["args"] == {"a": 1} + assert "id" in output1 + + # Test: first_tool_only=True, return_id=False + parser2 = JsonOutputKeyToolsParser( + key_name="func", first_tool_only=True, return_id=False + ) + output2 = parser2.parse_result(result) + assert output2 == {"a": 1} + + # Test: first_tool_only=False, return_id=True + parser3 = JsonOutputKeyToolsParser( + key_name="func", first_tool_only=False, return_id=True + ) + output3 = parser3.parse_result(result) + assert len(output3) == 2 + assert all("id" in item for item in output3) + assert output3[0]["args"] == {"a": 1} + assert output3[1]["args"] == {"a": 3} + + # Test: first_tool_only=False, return_id=False + parser4 = JsonOutputKeyToolsParser( + key_name="func", first_tool_only=False, return_id=False + ) + output4 = parser4.parse_result(result) + assert output4 == [{"a": 1}, {"a": 3}] + + class Person(BaseModel): age: int hair_color: str @@ -788,6 +1080,18 @@ def test_partial_pydantic_output_parser() -> None: assert actual == EXPECTED_STREAMED_PYDANTIC +def test_partial_pydantic_output_parser_v1() -> None: + def input_iter(_: Any) -> Iterator[AIMessageChunkV1]: + yield from STREAMED_MESSAGES_V1 + + chain = input_iter | PydanticToolsParser( + tools=[NameCollector], first_tool_only=True + ) + + actual = list(chain.stream(None)) + assert actual == EXPECTED_STREAMED_PYDANTIC + + async def test_partial_pydantic_output_parser_async() -> None: for use_tool_calls in [False, True]: input_iter = _get_aiter(use_tool_calls=use_tool_calls) @@ -800,6 +1104,19 @@ async def test_partial_pydantic_output_parser_async() -> None: assert actual == EXPECTED_STREAMED_PYDANTIC +async def test_partial_pydantic_output_parser_async_v1() -> None: + async def input_iter(_: Any) -> AsyncIterator[AIMessageChunkV1]: + for msg in STREAMED_MESSAGES_V1: + yield msg + + chain = input_iter | PydanticToolsParser( + tools=[NameCollector], first_tool_only=True + ) + + actual = [p async for p in chain.astream(None)] + assert actual == EXPECTED_STREAMED_PYDANTIC + + def test_parse_with_different_pydantic_2_v1() -> None: """Test with pydantic.v1.BaseModel from pydantic 2.""" import pydantic @@ -870,20 +1187,22 @@ def 
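``PydanticToolsParser`` layers schema validation on top of the same matching rules; with ``first_tool_only=True`` it returns a validated model instance. A simplified sketch (this ``NameCollector`` is a stand-in for the richer fixture in the test module):

.. code-block:: python

    from pydantic import BaseModel

    from langchain_core.output_parsers.openai_tools import PydanticToolsParser
    from langchain_core.v1.messages import AIMessage as AIMessageV1

    class NameCollector(BaseModel):
        names: list[str]

    parser = PydanticToolsParser(tools=[NameCollector], first_tool_only=True)
    message = AIMessageV1(
        content=[],
        tool_calls=[
            {
                "type": "tool_call",
                "id": "call_0",
                "name": "NameCollector",
                "args": {"names": ["suz", "jerm"]},
            },
        ],
    )
    parser.parse_result(message)  # -> NameCollector(names=["suz", "jerm"])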
test_parse_with_different_pydantic_2_proper() -> None: def test_max_tokens_error(caplog: Any) -> None: parser = PydanticToolsParser(tools=[NameCollector], first_tool_only=True) - message = AIMessage( - content="", - tool_calls=[ - { - "id": "call_OwL7f5PE", - "name": "NameCollector", - "args": {"names": ["suz", "jerm"]}, - } - ], - response_metadata={"stop_reason": "max_tokens"}, - ) - with pytest.raises(ValidationError): - _ = parser.invoke(message) - assert any( - "`max_tokens` stop reason" in msg and record.levelname == "ERROR" - for record, msg in zip(caplog.records, caplog.messages) - ) + for msg_class in [AIMessage, AIMessageV1]: + message = msg_class( + content="", + tool_calls=[ + { + "type": "tool_call", + "id": "call_OwL7f5PE", + "name": "NameCollector", + "args": {"names": ["suz", "jerm"]}, + } + ], + response_metadata={"stop_reason": "max_tokens"}, + ) + with pytest.raises(ValidationError): + _ = parser.invoke(message) + assert any( + "`max_tokens` stop reason" in msg and record.levelname == "ERROR" + for record, msg in zip(caplog.records, caplog.messages) + ) diff --git a/libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr b/libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr index 7c07416fe5d..44175f5b93d 100644 --- a/libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr +++ b/libs/core/tests/unit_tests/prompts/__snapshots__/test_chat.ambr @@ -726,7 +726,7 @@ 'description': ''' Allowance for errors made by LLM. - Here we add an `error` key to surface errors made during generation + Here we add an ``error`` key to surface errors made during generation (e.g., invalid JSON arguments.) ''', 'properties': dict({ @@ -752,6 +752,10 @@ ]), 'title': 'Error', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -763,6 +767,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'anyOf': list([ dict({ @@ -781,9 +789,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', 'error', ]), 'title': 'InvalidToolCall', @@ -998,12 +1007,23 @@ This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". + + .. note:: + ``create_tool_call`` may also be used as a factory to create a + ``ToolCall``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time ''', 'properties': dict({ 'args': dict({ 'title': 'Args', 'type': 'object', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -1015,6 +1035,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'title': 'Name', 'type': 'string', @@ -1026,9 +1050,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', ]), 'title': 'ToolCall', 'type': 'object', @@ -1037,9 +1062,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via AIMessageChunk.__add__), + When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their - values of `index` are equal and not None. + values of ``index`` are equal and not ``None``. 
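The ``ToolCallChunk`` merge semantics described in this snapshot (string fields concatenate when ``index`` values are equal and not ``None``) can be seen directly on v0 chunks; this mirrors the docstring's own example:

.. code-block:: python

    from langchain_core.messages import AIMessageChunk

    left = AIMessageChunk(
        content="",
        tool_call_chunks=[{"name": "foo", "args": '{"a":', "id": "call_0", "index": 0}],
    )
    right = AIMessageChunk(
        content="",
        tool_call_chunks=[{"name": None, "args": "1}", "id": None, "index": 0}],
    )
    merged = left + right
    # merged.tool_call_chunks[0]["args"] == '{"a":1}'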
Example: @@ -1065,6 +1090,10 @@ ]), 'title': 'Args', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -1105,9 +1134,9 @@ }), }), 'required': list([ + 'id', 'name', 'args', - 'id', 'index', ]), 'title': 'ToolCallChunk', @@ -2158,7 +2187,7 @@ 'description': ''' Allowance for errors made by LLM. - Here we add an `error` key to surface errors made during generation + Here we add an ``error`` key to surface errors made during generation (e.g., invalid JSON arguments.) ''', 'properties': dict({ @@ -2184,6 +2213,10 @@ ]), 'title': 'Error', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -2195,6 +2228,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'anyOf': list([ dict({ @@ -2213,9 +2250,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', 'error', ]), 'title': 'InvalidToolCall', @@ -2430,12 +2468,23 @@ This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". + + .. note:: + ``create_tool_call`` may also be used as a factory to create a + ``ToolCall``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time ''', 'properties': dict({ 'args': dict({ 'title': 'Args', 'type': 'object', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -2447,6 +2496,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'title': 'Name', 'type': 'string', @@ -2458,9 +2511,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', ]), 'title': 'ToolCall', 'type': 'object', @@ -2469,9 +2523,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via AIMessageChunk.__add__), + When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their - values of `index` are equal and not None. + values of ``index`` are equal and not ``None``. Example: @@ -2497,6 +2551,10 @@ ]), 'title': 'Args', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -2537,9 +2595,9 @@ }), }), 'required': list([ + 'id', 'name', 'args', - 'id', 'index', ]), 'title': 'ToolCallChunk', diff --git a/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr b/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr index a788c425fce..c014396ecab 100644 --- a/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr +++ b/libs/core/tests/unit_tests/runnables/__snapshots__/test_graph.ambr @@ -405,229 +405,62 @@ dict({ 'data': dict({ '$defs': dict({ - 'AIMessage': dict({ - 'additionalProperties': True, + 'AudioContentBlock': dict({ 'description': ''' - Message from an AI. + Audio data. - AIMessage is returned from a chat model as a response to a prompt. + .. note:: + ``create_audio_block`` may also be used as a factory to create an + ``AudioContentBlock``. Benefits include: - This message represents the output of the model and consists of both - the raw output as returned by the model together standardized fields - (e.g., tool calls, usage metadata) added by the LangChain framework. 
+ * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time ''', 'properties': dict({ - 'additional_kwargs': dict({ - 'title': 'Additional Kwargs', + 'base64': dict({ + 'title': 'Base64', + 'type': 'string', + }), + 'extras': dict({ + 'title': 'Extras', 'type': 'object', }), - 'content': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'items': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'object', - }), - ]), - }), - 'type': 'array', - }), - ]), - 'title': 'Content', - }), - 'example': dict({ - 'default': False, - 'title': 'Example', - 'type': 'boolean', + 'file_id': dict({ + 'title': 'File Id', + 'type': 'string', }), 'id': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, 'title': 'Id', + 'type': 'string', }), - 'invalid_tool_calls': dict({ - 'default': list([ - ]), - 'items': dict({ - '$ref': '#/$defs/InvalidToolCall', - }), - 'title': 'Invalid Tool Calls', - 'type': 'array', + 'index': dict({ + 'title': 'Index', + 'type': 'integer', }), - 'name': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Name', - }), - 'response_metadata': dict({ - 'title': 'Response Metadata', - 'type': 'object', - }), - 'tool_calls': dict({ - 'default': list([ - ]), - 'items': dict({ - '$ref': '#/$defs/ToolCall', - }), - 'title': 'Tool Calls', - 'type': 'array', + 'mime_type': dict({ + 'title': 'Mime Type', + 'type': 'string', }), 'type': dict({ - 'const': 'ai', - 'default': 'ai', + 'const': 'audio', 'title': 'Type', 'type': 'string', }), - 'usage_metadata': dict({ - 'anyOf': list([ - dict({ - '$ref': '#/$defs/UsageMetadata', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, + 'url': dict({ + 'title': 'Url', + 'type': 'string', }), }), 'required': list([ - 'content', + 'type', ]), - 'title': 'AIMessage', + 'title': 'AudioContentBlock', 'type': 'object', }), - 'AIMessageChunk': dict({ - 'additionalProperties': True, - 'description': 'Message chunk from an AI.', + 'BaseModel': dict({ 'properties': dict({ - 'additional_kwargs': dict({ - 'title': 'Additional Kwargs', - 'type': 'object', - }), - 'content': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'items': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'object', - }), - ]), - }), - 'type': 'array', - }), - ]), - 'title': 'Content', - }), - 'example': dict({ - 'default': False, - 'title': 'Example', - 'type': 'boolean', - }), - 'id': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Id', - }), - 'invalid_tool_calls': dict({ - 'default': list([ - ]), - 'items': dict({ - '$ref': '#/$defs/InvalidToolCall', - }), - 'title': 'Invalid Tool Calls', - 'type': 'array', - }), - 'name': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Name', - }), - 'response_metadata': dict({ - 'title': 'Response Metadata', - 'type': 'object', - }), - 'tool_call_chunks': dict({ - 'default': list([ - ]), - 'items': dict({ - '$ref': '#/$defs/ToolCallChunk', - }), - 'title': 'Tool Call Chunks', - 'type': 'array', - }), - 'tool_calls': dict({ - 'default': list([ - ]), - 'items': dict({ - '$ref': '#/$defs/ToolCall', - }), - 'title': 'Tool Calls', - 'type': 'array', - }), - 'type': dict({ - 'const': 'AIMessageChunk', - 
'default': 'AIMessageChunk', - 'title': 'Type', - 'type': 'string', - }), - 'usage_metadata': dict({ - 'anyOf': list([ - dict({ - '$ref': '#/$defs/UsageMetadata', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - }), }), - 'required': list([ - 'content', - ]), - 'title': 'AIMessageChunk', + 'title': 'BaseModel', 'type': 'object', }), 'ChatMessage': dict({ @@ -780,6 +613,243 @@ 'title': 'ChatMessageChunk', 'type': 'object', }), + 'Citation': dict({ + 'description': ''' + Annotation for citing data from a document. + + .. note:: + ``start/end`` indices refer to the **response text**, + not the source text. This means that the indices are relative to the model's + response, not the original document (as specified in the ``url``). + + .. note:: + ``create_citation`` may also be used as a factory to create a ``Citation``. + Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + ''', + 'properties': dict({ + 'cited_text': dict({ + 'title': 'Cited Text', + 'type': 'string', + }), + 'end_index': dict({ + 'title': 'End Index', + 'type': 'integer', + }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'start_index': dict({ + 'title': 'Start Index', + 'type': 'integer', + }), + 'title': dict({ + 'title': 'Title', + 'type': 'string', + }), + 'type': dict({ + 'const': 'citation', + 'title': 'Type', + 'type': 'string', + }), + 'url': dict({ + 'title': 'Url', + 'type': 'string', + }), + }), + 'required': list([ + 'type', + ]), + 'title': 'Citation', + 'type': 'object', + }), + 'CodeInterpreterCall': dict({ + 'description': 'Built-in code interpreter tool call.', + 'properties': dict({ + 'code': dict({ + 'title': 'Code', + 'type': 'string', + }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'language': dict({ + 'title': 'Language', + 'type': 'string', + }), + 'type': dict({ + 'const': 'code_interpreter_call', + 'title': 'Type', + 'type': 'string', + }), + }), + 'required': list([ + 'type', + ]), + 'title': 'CodeInterpreterCall', + 'type': 'object', + }), + 'CodeInterpreterOutput': dict({ + 'description': ''' + Output of a singular code interpreter tool call. + + Full output of a code interpreter tool call is represented by + ``CodeInterpreterResult`` which is a list of these blocks. 
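Going by the schema captured here, a code-interpreter exchange is a ``code_interpreter_call`` block followed by a ``code_interpreter_result`` wrapping one or more output blocks; only ``type`` (plus ``output`` for the result) is required. Illustrative dicts, values invented:

.. code-block:: python

    call = {
        "type": "code_interpreter_call",
        "language": "python",
        "code": "print(2 + 2)",
    }
    result = {
        "type": "code_interpreter_result",
        "output": [
            {"type": "code_interpreter_output", "stdout": "4\n", "return_code": 0},
        ],
    }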
+ ''', + 'properties': dict({ + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'file_ids': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'File Ids', + 'type': 'array', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'return_code': dict({ + 'title': 'Return Code', + 'type': 'integer', + }), + 'stderr': dict({ + 'title': 'Stderr', + 'type': 'string', + }), + 'stdout': dict({ + 'title': 'Stdout', + 'type': 'string', + }), + 'type': dict({ + 'const': 'code_interpreter_output', + 'title': 'Type', + 'type': 'string', + }), + }), + 'required': list([ + 'type', + ]), + 'title': 'CodeInterpreterOutput', + 'type': 'object', + }), + 'CodeInterpreterResult': dict({ + 'description': 'Result of a code interpreter tool call.', + 'properties': dict({ + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'output': dict({ + 'items': dict({ + '$ref': '#/$defs/CodeInterpreterOutput', + }), + 'title': 'Output', + 'type': 'array', + }), + 'type': dict({ + 'const': 'code_interpreter_result', + 'title': 'Type', + 'type': 'string', + }), + }), + 'required': list([ + 'type', + 'output', + ]), + 'title': 'CodeInterpreterResult', + 'type': 'object', + }), + 'FileContentBlock': dict({ + 'description': ''' + File data that doesn't fit into other multimodal blocks. + + This block is intended for files that are not images, audio, or plaintext. For + example, it can be used for PDFs, Word documents, etc. + + If the file is an image, audio, or plaintext, you should use the corresponding + content block type (e.g., ``ImageContentBlock``, ``AudioContentBlock``, + ``PlainTextContentBlock``). + + .. note:: + ``create_file_block`` may also be used as a factory to create a + ``FileContentBlock``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + ''', + 'properties': dict({ + 'base64': dict({ + 'title': 'Base64', + 'type': 'string', + }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'file_id': dict({ + 'title': 'File Id', + 'type': 'string', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'mime_type': dict({ + 'title': 'Mime Type', + 'type': 'string', + }), + 'type': dict({ + 'const': 'file', + 'title': 'Type', + 'type': 'string', + }), + 'url': dict({ + 'title': 'Url', + 'type': 'string', + }), + }), + 'required': list([ + 'type', + ]), + 'title': 'FileContentBlock', + 'type': 'object', + }), 'FunctionMessage': dict({ 'additionalProperties': True, 'description': ''' @@ -915,104 +985,6 @@ 'title': 'FunctionMessageChunk', 'type': 'object', }), - 'HumanMessage': dict({ - 'additionalProperties': True, - 'description': ''' - Message from a human. - - HumanMessages are messages that are passed in from a human to the model. - - Example: - - .. code-block:: python - - from langchain_core.messages import HumanMessage, SystemMessage - - messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) - ] - - # Instantiate a chat model and invoke it with the messages - model = ... 
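Each multimodal block above advertises a ``create_*_block`` factory. The factory signatures are not shown in this diff, so the following sketch assumes they are exported from ``langchain_core.messages.content_blocks`` and accept the block's schema fields as keyword arguments:

.. code-block:: python

    from langchain_core.messages import content_blocks as cb

    # Hypothetical keyword arguments mirroring the schema fields above.
    image = cb.create_image_block(url="https://example.com/cat.png", mime_type="image/png")
    pdf = cb.create_file_block(base64="JVBERi0x...", mime_type="application/pdf")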
- print(model.invoke(messages)) - ''', - 'properties': dict({ - 'additional_kwargs': dict({ - 'title': 'Additional Kwargs', - 'type': 'object', - }), - 'content': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'items': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'object', - }), - ]), - }), - 'type': 'array', - }), - ]), - 'title': 'Content', - }), - 'example': dict({ - 'default': False, - 'title': 'Example', - 'type': 'boolean', - }), - 'id': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Id', - }), - 'name': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Name', - }), - 'response_metadata': dict({ - 'title': 'Response Metadata', - 'type': 'object', - }), - 'type': dict({ - 'const': 'human', - 'default': 'human', - 'title': 'Type', - 'type': 'string', - }), - }), - 'required': list([ - 'content', - ]), - 'title': 'HumanMessage', - 'type': 'object', - }), 'HumanMessageChunk': dict({ 'additionalProperties': True, 'description': 'Human Message chunk.', @@ -1088,6 +1060,58 @@ 'title': 'HumanMessageChunk', 'type': 'object', }), + 'ImageContentBlock': dict({ + 'description': ''' + Image data. + + .. note:: + ``create_image_block`` may also be used as a factory to create a + ``ImageContentBlock``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + ''', + 'properties': dict({ + 'base64': dict({ + 'title': 'Base64', + 'type': 'string', + }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'file_id': dict({ + 'title': 'File Id', + 'type': 'string', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'mime_type': dict({ + 'title': 'Mime Type', + 'type': 'string', + }), + 'type': dict({ + 'const': 'image', + 'title': 'Type', + 'type': 'string', + }), + 'url': dict({ + 'title': 'Url', + 'type': 'string', + }), + }), + 'required': list([ + 'type', + ]), + 'title': 'ImageContentBlock', + 'type': 'object', + }), 'InputTokenDetails': dict({ 'description': ''' Breakdown of input token counts. @@ -1129,7 +1153,7 @@ 'description': ''' Allowance for errors made by LLM. - Here we add an `error` key to surface errors made during generation + Here we add an ``error`` key to surface errors made during generation (e.g., invalid JSON arguments.) 
''', 'properties': dict({ @@ -1155,6 +1179,10 @@ ]), 'title': 'Error', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -1166,6 +1194,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'anyOf': list([ dict({ @@ -1184,14 +1216,83 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', 'error', ]), 'title': 'InvalidToolCall', 'type': 'object', }), + 'NonStandardAnnotation': dict({ + 'description': 'Provider-specific annotation format.', + 'properties': dict({ + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'type': dict({ + 'const': 'non_standard_annotation', + 'title': 'Type', + 'type': 'string', + }), + 'value': dict({ + 'title': 'Value', + 'type': 'object', + }), + }), + 'required': list([ + 'type', + 'value', + ]), + 'title': 'NonStandardAnnotation', + 'type': 'object', + }), + 'NonStandardContentBlock': dict({ + 'description': ''' + Provider-specific data. + + This block contains data for which there is not yet a standard type. + + The purpose of this block should be to simply hold a provider-specific payload. + If a provider's non-standard output includes reasoning and tool calls, it should be + the adapter's job to parse that payload and emit the corresponding standard + ReasoningContentBlock and ToolCallContentBlocks. + + .. note:: + ``create_non_standard_block`` may also be used as a factory to create a + ``NonStandardContentBlock``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + ''', + 'properties': dict({ + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'type': dict({ + 'const': 'non_standard', + 'title': 'Type', + 'type': 'string', + }), + 'value': dict({ + 'title': 'Value', + 'type': 'object', + }), + }), + 'required': list([ + 'type', + 'value', + ]), + 'title': 'NonStandardContentBlock', + 'type': 'object', + }), 'OutputTokenDetails': dict({ 'description': ''' Breakdown of output token counts. @@ -1222,97 +1323,138 @@ 'title': 'OutputTokenDetails', 'type': 'object', }), - 'SystemMessage': dict({ - 'additionalProperties': True, + 'PlainTextContentBlock': dict({ 'description': ''' - Message for priming AI behavior. + Plaintext data (e.g., from a document). - The system message is usually passed in as the first of a sequence - of input messages. + .. note:: + Title and context are optional fields that may be passed to the model. See + Anthropic `example `__. - Example: + .. note:: + ``create_plaintext_block`` may also be used as a factory to create a + ``PlainTextContentBlock``. Benefits include: - .. code-block:: python - - from langchain_core.messages import HumanMessage, SystemMessage - - messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" 
- ) - ] - - # Define a chat model and invoke it with the messages - print(model.invoke(messages)) + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time ''', 'properties': dict({ - 'additional_kwargs': dict({ - 'title': 'Additional Kwargs', + 'base64': dict({ + 'title': 'Base64', + 'type': 'string', + }), + 'context': dict({ + 'title': 'Context', + 'type': 'string', + }), + 'extras': dict({ + 'title': 'Extras', 'type': 'object', }), - 'content': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'items': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'object', - }), - ]), - }), - 'type': 'array', - }), - ]), - 'title': 'Content', + 'file_id': dict({ + 'title': 'File Id', + 'type': 'string', }), 'id': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, 'title': 'Id', + 'type': 'string', }), - 'name': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Name', + 'index': dict({ + 'title': 'Index', + 'type': 'integer', }), - 'response_metadata': dict({ - 'title': 'Response Metadata', - 'type': 'object', + 'mime_type': dict({ + 'const': 'text/plain', + 'title': 'Mime Type', + 'type': 'string', + }), + 'text': dict({ + 'title': 'Text', + 'type': 'string', + }), + 'title': dict({ + 'title': 'Title', + 'type': 'string', }), 'type': dict({ - 'const': 'system', - 'default': 'system', + 'const': 'text-plain', + 'title': 'Type', + 'type': 'string', + }), + 'url': dict({ + 'title': 'Url', + 'type': 'string', + }), + }), + 'required': list([ + 'type', + 'mime_type', + ]), + 'title': 'PlainTextContentBlock', + 'type': 'object', + }), + 'ReasoningContentBlock': dict({ + 'description': ''' + Reasoning output from a LLM. + + .. note:: + ``create_reasoning_block`` may also be used as a factory to create a + ``ReasoningContentBlock``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + ''', + 'properties': dict({ + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'reasoning': dict({ + 'title': 'Reasoning', + 'type': 'string', + }), + 'type': dict({ + 'const': 'reasoning', 'title': 'Type', 'type': 'string', }), }), 'required': list([ - 'content', + 'type', ]), - 'title': 'SystemMessage', + 'title': 'ReasoningContentBlock', + 'type': 'object', + }), + 'ResponseMetadata': dict({ + 'description': ''' + Metadata about the response from the AI provider. + + Contains additional information returned by the provider, such as + response headers, service tiers, log probabilities, system fingerprints, etc. + + Extra keys are permitted from what is typed here (via `total=False`), allowing + for provider-specific metadata to be included without breaking the type + definition. + ''', + 'properties': dict({ + 'model_name': dict({ + 'title': 'Model Name', + 'type': 'string', + }), + 'model_provider': dict({ + 'title': 'Model Provider', + 'type': 'string', + }), + }), + 'title': 'ResponseMetadata', 'type': 'object', }), 'SystemMessageChunk': dict({ @@ -1385,6 +1527,64 @@ 'title': 'SystemMessageChunk', 'type': 'object', }), + 'TextContentBlock': dict({ + 'description': ''' + Text output from a LLM. 
+ + This typically represents the main text content of a message, such as the response + from a language model or the text of a user message. + + .. note:: + ``create_text_block`` may also be used as a factory to create a + ``TextContentBlock``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + ''', + 'properties': dict({ + 'annotations': dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/$defs/Citation', + }), + dict({ + '$ref': '#/$defs/NonStandardAnnotation', + }), + ]), + }), + 'title': 'Annotations', + 'type': 'array', + }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'text': dict({ + 'title': 'Text', + 'type': 'string', + }), + 'type': dict({ + 'const': 'text', + 'title': 'Type', + 'type': 'string', + }), + }), + 'required': list([ + 'type', + 'text', + ]), + 'title': 'TextContentBlock', + 'type': 'object', + }), 'ToolCall': dict({ 'description': ''' Represents a request to call a tool. @@ -1401,12 +1601,23 @@ This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". + + .. note:: + ``create_tool_call`` may also be used as a factory to create a + ``ToolCall``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time ''', 'properties': dict({ 'args': dict({ 'title': 'Args', 'type': 'object', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -1418,6 +1629,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'title': 'Name', 'type': 'string', @@ -1429,9 +1644,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', ]), 'title': 'ToolCall', 'type': 'object', @@ -1440,9 +1656,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via AIMessageChunk.__add__), + When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their - values of `index` are equal and not None. + values of ``index`` are equal and not ``None``. Example: @@ -1468,6 +1684,10 @@ ]), 'title': 'Args', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -1508,135 +1728,14 @@ }), }), 'required': list([ + 'id', 'name', 'args', - 'id', 'index', ]), 'title': 'ToolCallChunk', 'type': 'object', }), - 'ToolMessage': dict({ - 'additionalProperties': True, - 'description': ''' - Message for passing the result of executing a tool back to a model. - - ToolMessages contain the result of a tool invocation. Typically, the result - is encoded inside the `content` field. - - Example: A ToolMessage representing a result of 42 from a tool call with id - - .. code-block:: python - - from langchain_core.messages import ToolMessage - - ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') - - - Example: A ToolMessage where only part of the tool output is sent to the model - and the full output is passed in to artifact. - - .. versionadded:: 0.2.17 - - .. 
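Since content blocks are typed dicts, plain dicts matching the schema work as well. A text block carrying one citation, per the schema above (note the indices point into the response text, not the cited source; end-exclusive indexing is assumed here):

.. code-block:: python

    text_block = {
        "type": "text",
        "text": "LangChain standardizes message content.",
        "annotations": [
            {
                "type": "citation",
                "cited_text": "LangChain standardizes message content.",
                "start_index": 0,
                "end_index": 39,  # offsets into the response text
                "url": "https://example.com/doc",
            }
        ],
    }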
code-block:: python - - from langchain_core.messages import ToolMessage - - tool_output = { - "stdout": "From the graph we can see that the correlation between x and y is ...", - "stderr": None, - "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, - } - - ToolMessage( - content=tool_output["stdout"], - artifact=tool_output, - tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', - ) - - The tool_call_id field is used to associate the tool call request with the - tool call response. This is useful in situations where a chat model is able - to request multiple tool calls in parallel. - ''', - 'properties': dict({ - 'additional_kwargs': dict({ - 'title': 'Additional Kwargs', - 'type': 'object', - }), - 'artifact': dict({ - 'title': 'Artifact', - }), - 'content': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'items': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'object', - }), - ]), - }), - 'type': 'array', - }), - ]), - 'title': 'Content', - }), - 'id': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Id', - }), - 'name': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Name', - }), - 'response_metadata': dict({ - 'title': 'Response Metadata', - 'type': 'object', - }), - 'status': dict({ - 'default': 'success', - 'title': 'Status', - }), - 'tool_call_id': dict({ - 'title': 'Tool Call Id', - 'type': 'string', - }), - 'type': dict({ - 'const': 'tool', - 'default': 'tool', - 'title': 'Type', - 'type': 'string', - }), - }), - 'required': list([ - 'content', - 'tool_call_id', - ]), - 'title': 'ToolMessage', - 'type': 'object', - }), 'ToolMessageChunk': dict({ 'additionalProperties': True, 'description': 'Tool Message chunk.', @@ -1776,6 +1875,1208 @@ 'title': 'UsageMetadata', 'type': 'object', }), + 'VideoContentBlock': dict({ + 'description': ''' + Video data. + + .. note:: + ``create_video_block`` may also be used as a factory to create a + ``VideoContentBlock``. 
Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + ''', + 'properties': dict({ + 'base64': dict({ + 'title': 'Base64', + 'type': 'string', + }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'file_id': dict({ + 'title': 'File Id', + 'type': 'string', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'mime_type': dict({ + 'title': 'Mime Type', + 'type': 'string', + }), + 'type': dict({ + 'const': 'video', + 'title': 'Type', + 'type': 'string', + }), + 'url': dict({ + 'title': 'Url', + 'type': 'string', + }), + }), + 'required': list([ + 'type', + ]), + 'title': 'VideoContentBlock', + 'type': 'object', + }), + 'WebSearchCall': dict({ + 'description': 'Built-in web search tool call.', + 'properties': dict({ + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + 'type': dict({ + 'const': 'web_search_call', + 'title': 'Type', + 'type': 'string', + }), + }), + 'required': list([ + 'type', + ]), + 'title': 'WebSearchCall', + 'type': 'object', + }), + 'WebSearchResult': dict({ + 'description': 'Result of a built-in web search tool call.', + 'properties': dict({ + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'type': dict({ + 'const': 'web_search_result', + 'title': 'Type', + 'type': 'string', + }), + 'urls': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Urls', + 'type': 'array', + }), + }), + 'required': list([ + 'type', + ]), + 'title': 'WebSearchResult', + 'type': 'object', + }), + 'langchain_core__messages__ai__AIMessage': dict({ + 'additionalProperties': True, + 'description': ''' + Message from an AI. + + AIMessage is returned from a chat model as a response to a prompt. + + This message represents the output of the model and consists of both + the raw output as returned by the model together standardized fields + (e.g., tool calls, usage metadata) added by the LangChain framework. 
+ ''', + 'properties': dict({ + 'additional_kwargs': dict({ + 'title': 'Additional Kwargs', + 'type': 'object', + }), + 'content': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'object', + }), + ]), + }), + 'type': 'array', + }), + ]), + 'title': 'Content', + }), + 'example': dict({ + 'default': False, + 'title': 'Example', + 'type': 'boolean', + }), + 'id': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Id', + }), + 'invalid_tool_calls': dict({ + 'default': list([ + ]), + 'items': dict({ + '$ref': '#/$defs/InvalidToolCall', + }), + 'title': 'Invalid Tool Calls', + 'type': 'array', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'response_metadata': dict({ + 'title': 'Response Metadata', + 'type': 'object', + }), + 'tool_calls': dict({ + 'default': list([ + ]), + 'items': dict({ + '$ref': '#/$defs/ToolCall', + }), + 'title': 'Tool Calls', + 'type': 'array', + }), + 'type': dict({ + 'const': 'ai', + 'default': 'ai', + 'title': 'Type', + 'type': 'string', + }), + 'usage_metadata': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/$defs/UsageMetadata', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + }), + }), + 'required': list([ + 'content', + ]), + 'title': 'AIMessage', + 'type': 'object', + }), + 'langchain_core__messages__ai__AIMessageChunk': dict({ + 'additionalProperties': True, + 'description': 'Message chunk from an AI.', + 'properties': dict({ + 'additional_kwargs': dict({ + 'title': 'Additional Kwargs', + 'type': 'object', + }), + 'content': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'object', + }), + ]), + }), + 'type': 'array', + }), + ]), + 'title': 'Content', + }), + 'example': dict({ + 'default': False, + 'title': 'Example', + 'type': 'boolean', + }), + 'id': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Id', + }), + 'invalid_tool_calls': dict({ + 'default': list([ + ]), + 'items': dict({ + '$ref': '#/$defs/InvalidToolCall', + }), + 'title': 'Invalid Tool Calls', + 'type': 'array', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'response_metadata': dict({ + 'title': 'Response Metadata', + 'type': 'object', + }), + 'tool_call_chunks': dict({ + 'default': list([ + ]), + 'items': dict({ + '$ref': '#/$defs/ToolCallChunk', + }), + 'title': 'Tool Call Chunks', + 'type': 'array', + }), + 'tool_calls': dict({ + 'default': list([ + ]), + 'items': dict({ + '$ref': '#/$defs/ToolCall', + }), + 'title': 'Tool Calls', + 'type': 'array', + }), + 'type': dict({ + 'const': 'AIMessageChunk', + 'default': 'AIMessageChunk', + 'title': 'Type', + 'type': 'string', + }), + 'usage_metadata': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/$defs/UsageMetadata', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + }), + }), + 'required': list([ + 'content', + ]), + 'title': 'AIMessageChunk', + 'type': 'object', + }), + 'langchain_core__messages__human__HumanMessage': dict({ + 'additionalProperties': True, + 'description': ''' + Message from a human. 
+ + HumanMessages are messages that are passed in from a human to the model. + + Example: + + .. code-block:: python + + from langchain_core.messages import HumanMessage, SystemMessage + + messages = [ + SystemMessage( + content="You are a helpful assistant! Your name is Bob." + ), + HumanMessage( + content="What is your name?" + ) + ] + + # Instantiate a chat model and invoke it with the messages + model = ... + print(model.invoke(messages)) + ''', + 'properties': dict({ + 'additional_kwargs': dict({ + 'title': 'Additional Kwargs', + 'type': 'object', + }), + 'content': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'object', + }), + ]), + }), + 'type': 'array', + }), + ]), + 'title': 'Content', + }), + 'example': dict({ + 'default': False, + 'title': 'Example', + 'type': 'boolean', + }), + 'id': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Id', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'response_metadata': dict({ + 'title': 'Response Metadata', + 'type': 'object', + }), + 'type': dict({ + 'const': 'human', + 'default': 'human', + 'title': 'Type', + 'type': 'string', + }), + }), + 'required': list([ + 'content', + ]), + 'title': 'HumanMessage', + 'type': 'object', + }), + 'langchain_core__messages__system__SystemMessage': dict({ + 'additionalProperties': True, + 'description': ''' + Message for priming AI behavior. + + The system message is usually passed in as the first of a sequence + of input messages. + + Example: + + .. code-block:: python + + from langchain_core.messages import HumanMessage, SystemMessage + + messages = [ + SystemMessage( + content="You are a helpful assistant! Your name is Bob." + ), + HumanMessage( + content="What is your name?" + ) + ] + + # Define a chat model and invoke it with the messages + print(model.invoke(messages)) + ''', + 'properties': dict({ + 'additional_kwargs': dict({ + 'title': 'Additional Kwargs', + 'type': 'object', + }), + 'content': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'object', + }), + ]), + }), + 'type': 'array', + }), + ]), + 'title': 'Content', + }), + 'id': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Id', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'response_metadata': dict({ + 'title': 'Response Metadata', + 'type': 'object', + }), + 'type': dict({ + 'const': 'system', + 'default': 'system', + 'title': 'Type', + 'type': 'string', + }), + }), + 'required': list([ + 'content', + ]), + 'title': 'SystemMessage', + 'type': 'object', + }), + 'langchain_core__messages__tool__ToolMessage': dict({ + 'additionalProperties': True, + 'description': ''' + Message for passing the result of executing a tool back to a model. + + ToolMessages contain the result of a tool invocation. Typically, the result + is encoded inside the `content` field. + + Example: A ToolMessage representing a result of 42 from a tool call with id + + .. 
code-block:: python + + from langchain_core.messages import ToolMessage + + ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') + + + Example: A ToolMessage where only part of the tool output is sent to the model + and the full output is passed in to artifact. + + .. versionadded:: 0.2.17 + + .. code-block:: python + + from langchain_core.messages import ToolMessage + + tool_output = { + "stdout": "From the graph we can see that the correlation between x and y is ...", + "stderr": None, + "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, + } + + ToolMessage( + content=tool_output["stdout"], + artifact=tool_output, + tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', + ) + + The tool_call_id field is used to associate the tool call request with the + tool call response. This is useful in situations where a chat model is able + to request multiple tool calls in parallel. + ''', + 'properties': dict({ + 'additional_kwargs': dict({ + 'title': 'Additional Kwargs', + 'type': 'object', + }), + 'artifact': dict({ + 'title': 'Artifact', + }), + 'content': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'object', + }), + ]), + }), + 'type': 'array', + }), + ]), + 'title': 'Content', + }), + 'id': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Id', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'response_metadata': dict({ + 'title': 'Response Metadata', + 'type': 'object', + }), + 'status': dict({ + 'default': 'success', + 'title': 'Status', + }), + 'tool_call_id': dict({ + 'title': 'Tool Call Id', + 'type': 'string', + }), + 'type': dict({ + 'const': 'tool', + 'default': 'tool', + 'title': 'Type', + 'type': 'string', + }), + }), + 'required': list([ + 'content', + 'tool_call_id', + ]), + 'title': 'ToolMessage', + 'type': 'object', + }), + 'langchain_core__v1__messages__AIMessage': dict({ + 'properties': dict({ + 'content': dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/$defs/TextContentBlock', + }), + dict({ + '$ref': '#/$defs/ToolCall', + }), + dict({ + '$ref': '#/$defs/InvalidToolCall', + }), + dict({ + '$ref': '#/$defs/ToolCallChunk', + }), + dict({ + '$ref': '#/$defs/ReasoningContentBlock', + }), + dict({ + '$ref': '#/$defs/NonStandardContentBlock', + }), + dict({ + '$ref': '#/$defs/ImageContentBlock', + }), + dict({ + '$ref': '#/$defs/VideoContentBlock', + }), + dict({ + '$ref': '#/$defs/AudioContentBlock', + }), + dict({ + '$ref': '#/$defs/PlainTextContentBlock', + }), + dict({ + '$ref': '#/$defs/FileContentBlock', + }), + dict({ + '$ref': '#/$defs/CodeInterpreterCall', + }), + dict({ + '$ref': '#/$defs/CodeInterpreterOutput', + }), + dict({ + '$ref': '#/$defs/CodeInterpreterResult', + }), + dict({ + '$ref': '#/$defs/WebSearchCall', + }), + dict({ + '$ref': '#/$defs/WebSearchResult', + }), + ]), + }), + 'title': 'Content', + 'type': 'array', + }), + 'id': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Id', + }), + 'lc_version': dict({ + 'default': 'v1', + 'title': 'Lc Version', + 'type': 'string', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 
'parsed': dict({ + 'anyOf': list([ + dict({ + 'type': 'object', + }), + dict({ + '$ref': '#/$defs/BaseModel', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Parsed', + }), + 'response_metadata': dict({ + '$ref': '#/$defs/ResponseMetadata', + }), + 'type': dict({ + 'const': 'ai', + 'default': 'ai', + 'title': 'Type', + 'type': 'string', + }), + 'usage_metadata': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/$defs/UsageMetadata', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + }), + }), + 'title': 'AIMessage', + 'type': 'object', + }), + 'langchain_core__v1__messages__AIMessageChunk': dict({ + 'properties': dict({ + 'content': dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/$defs/TextContentBlock', + }), + dict({ + '$ref': '#/$defs/ToolCall', + }), + dict({ + '$ref': '#/$defs/InvalidToolCall', + }), + dict({ + '$ref': '#/$defs/ToolCallChunk', + }), + dict({ + '$ref': '#/$defs/ReasoningContentBlock', + }), + dict({ + '$ref': '#/$defs/NonStandardContentBlock', + }), + dict({ + '$ref': '#/$defs/ImageContentBlock', + }), + dict({ + '$ref': '#/$defs/VideoContentBlock', + }), + dict({ + '$ref': '#/$defs/AudioContentBlock', + }), + dict({ + '$ref': '#/$defs/PlainTextContentBlock', + }), + dict({ + '$ref': '#/$defs/FileContentBlock', + }), + dict({ + '$ref': '#/$defs/CodeInterpreterCall', + }), + dict({ + '$ref': '#/$defs/CodeInterpreterOutput', + }), + dict({ + '$ref': '#/$defs/CodeInterpreterResult', + }), + dict({ + '$ref': '#/$defs/WebSearchCall', + }), + dict({ + '$ref': '#/$defs/WebSearchResult', + }), + ]), + }), + 'title': 'Content', + 'type': 'array', + }), + 'id': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Id', + }), + 'lc_version': dict({ + 'default': 'v1', + 'title': 'Lc Version', + 'type': 'string', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'parsed': dict({ + 'anyOf': list([ + dict({ + 'type': 'object', + }), + dict({ + '$ref': '#/$defs/BaseModel', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Parsed', + }), + 'response_metadata': dict({ + '$ref': '#/$defs/ResponseMetadata', + }), + 'type': dict({ + 'const': 'ai_chunk', + 'default': 'ai_chunk', + 'title': 'Type', + 'type': 'string', + }), + 'usage_metadata': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/$defs/UsageMetadata', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + }), + }), + 'title': 'AIMessageChunk', + 'type': 'object', + }), + 'langchain_core__v1__messages__HumanMessage': dict({ + 'properties': dict({ + 'content': dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/$defs/TextContentBlock', + }), + dict({ + '$ref': '#/$defs/ToolCall', + }), + dict({ + '$ref': '#/$defs/InvalidToolCall', + }), + dict({ + '$ref': '#/$defs/ToolCallChunk', + }), + dict({ + '$ref': '#/$defs/ReasoningContentBlock', + }), + dict({ + '$ref': '#/$defs/NonStandardContentBlock', + }), + dict({ + '$ref': '#/$defs/ImageContentBlock', + }), + dict({ + '$ref': '#/$defs/VideoContentBlock', + }), + dict({ + '$ref': '#/$defs/AudioContentBlock', + }), + dict({ + '$ref': '#/$defs/PlainTextContentBlock', + }), + dict({ + '$ref': '#/$defs/FileContentBlock', + }), + dict({ + '$ref': '#/$defs/CodeInterpreterCall', + }), + dict({ + '$ref': '#/$defs/CodeInterpreterOutput', + }), + dict({ + '$ref': '#/$defs/CodeInterpreterResult', + }), + 
dict({ + '$ref': '#/$defs/WebSearchCall', + }), + dict({ + '$ref': '#/$defs/WebSearchResult', + }), + ]), + }), + 'title': 'Content', + 'type': 'array', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'type': dict({ + 'const': 'human', + 'default': 'human', + 'title': 'Type', + 'type': 'string', + }), + }), + 'required': list([ + 'id', + 'content', + ]), + 'title': 'HumanMessage', + 'type': 'object', + }), + 'langchain_core__v1__messages__SystemMessage': dict({ + 'properties': dict({ + 'content': dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/$defs/TextContentBlock', + }), + dict({ + '$ref': '#/$defs/ToolCall', + }), + dict({ + '$ref': '#/$defs/InvalidToolCall', + }), + dict({ + '$ref': '#/$defs/ToolCallChunk', + }), + dict({ + '$ref': '#/$defs/ReasoningContentBlock', + }), + dict({ + '$ref': '#/$defs/NonStandardContentBlock', + }), + dict({ + '$ref': '#/$defs/ImageContentBlock', + }), + dict({ + '$ref': '#/$defs/VideoContentBlock', + }), + dict({ + '$ref': '#/$defs/AudioContentBlock', + }), + dict({ + '$ref': '#/$defs/PlainTextContentBlock', + }), + dict({ + '$ref': '#/$defs/FileContentBlock', + }), + dict({ + '$ref': '#/$defs/CodeInterpreterCall', + }), + dict({ + '$ref': '#/$defs/CodeInterpreterOutput', + }), + dict({ + '$ref': '#/$defs/CodeInterpreterResult', + }), + dict({ + '$ref': '#/$defs/WebSearchCall', + }), + dict({ + '$ref': '#/$defs/WebSearchResult', + }), + ]), + }), + 'title': 'Content', + 'type': 'array', + }), + 'custom_role': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Custom Role', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'type': dict({ + 'const': 'system', + 'default': 'system', + 'title': 'Type', + 'type': 'string', + }), + }), + 'required': list([ + 'id', + 'content', + ]), + 'title': 'SystemMessage', + 'type': 'object', + }), + 'langchain_core__v1__messages__ToolMessage': dict({ + 'properties': dict({ + 'artifact': dict({ + 'anyOf': list([ + dict({ + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Artifact', + }), + 'content': dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/$defs/TextContentBlock', + }), + dict({ + '$ref': '#/$defs/ToolCall', + }), + dict({ + '$ref': '#/$defs/InvalidToolCall', + }), + dict({ + '$ref': '#/$defs/ToolCallChunk', + }), + dict({ + '$ref': '#/$defs/ReasoningContentBlock', + }), + dict({ + '$ref': '#/$defs/NonStandardContentBlock', + }), + dict({ + '$ref': '#/$defs/ImageContentBlock', + }), + dict({ + '$ref': '#/$defs/VideoContentBlock', + }), + dict({ + '$ref': '#/$defs/AudioContentBlock', + }), + dict({ + '$ref': '#/$defs/PlainTextContentBlock', + }), + dict({ + '$ref': '#/$defs/FileContentBlock', + }), + dict({ + '$ref': '#/$defs/CodeInterpreterCall', + }), + dict({ + '$ref': '#/$defs/CodeInterpreterOutput', + }), + dict({ + '$ref': '#/$defs/CodeInterpreterResult', + }), + dict({ + '$ref': '#/$defs/WebSearchCall', + }), + dict({ + '$ref': '#/$defs/WebSearchResult', + }), + ]), + }), + 'title': 'Content', + 'type': 'array', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + 
}), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'status': dict({ + 'default': 'success', + 'title': 'Status', + }), + 'tool_call_id': dict({ + 'title': 'Tool Call Id', + 'type': 'string', + }), + 'type': dict({ + 'const': 'tool', + 'default': 'tool', + 'title': 'Type', + 'type': 'string', + }), + }), + 'required': list([ + 'id', + 'tool_call_id', + 'content', + ]), + 'title': 'ToolMessage', + 'type': 'object', + }), }), 'anyOf': list([ dict({ @@ -1784,25 +3085,25 @@ dict({ 'oneOf': list([ dict({ - '$ref': '#/$defs/AIMessage', + '$ref': '#/$defs/langchain_core__messages__ai__AIMessage', }), dict({ - '$ref': '#/$defs/HumanMessage', + '$ref': '#/$defs/langchain_core__messages__human__HumanMessage', }), dict({ '$ref': '#/$defs/ChatMessage', }), dict({ - '$ref': '#/$defs/SystemMessage', + '$ref': '#/$defs/langchain_core__messages__system__SystemMessage', }), dict({ '$ref': '#/$defs/FunctionMessage', }), dict({ - '$ref': '#/$defs/ToolMessage', + '$ref': '#/$defs/langchain_core__messages__tool__ToolMessage', }), dict({ - '$ref': '#/$defs/AIMessageChunk', + '$ref': '#/$defs/langchain_core__messages__ai__AIMessageChunk', }), dict({ '$ref': '#/$defs/HumanMessageChunk', @@ -1821,6 +3122,21 @@ }), ]), }), + dict({ + '$ref': '#/$defs/langchain_core__v1__messages__AIMessage', + }), + dict({ + '$ref': '#/$defs/langchain_core__v1__messages__AIMessageChunk', + }), + dict({ + '$ref': '#/$defs/langchain_core__v1__messages__HumanMessage', + }), + dict({ + '$ref': '#/$defs/langchain_core__v1__messages__SystemMessage', + }), + dict({ + '$ref': '#/$defs/langchain_core__v1__messages__ToolMessage', + }), ]), 'title': 'RunnableParallelInput', }), diff --git a/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr b/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr index 079e4909061..0d4407d8ca4 100644 --- a/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr +++ b/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr @@ -2674,7 +2674,7 @@ 'description': ''' Allowance for errors made by LLM. - Here we add an `error` key to surface errors made during generation + Here we add an ``error`` key to surface errors made during generation (e.g., invalid JSON arguments.) ''', 'properties': dict({ @@ -2700,6 +2700,10 @@ ]), 'title': 'Error', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -2711,6 +2715,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'anyOf': list([ dict({ @@ -2728,9 +2736,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', 'error', ]), 'title': 'InvalidToolCall', @@ -2943,12 +2952,23 @@ This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". + + .. note:: + ``create_tool_call`` may also be used as a factory to create a + ``ToolCall``. 
Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time ''', 'properties': dict({ 'args': dict({ 'title': 'Args', 'type': 'object', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -2960,6 +2980,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'title': 'Name', 'type': 'string', @@ -2970,9 +2994,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', ]), 'title': 'ToolCall', 'type': 'object', @@ -2981,9 +3006,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via AIMessageChunk.__add__), + When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their - values of `index` are equal and not None. + values of ``index`` are equal and not ``None``. Example: @@ -3009,6 +3034,10 @@ ]), 'title': 'Args', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -3048,9 +3077,9 @@ }), }), 'required': list([ + 'id', 'name', 'args', - 'id', 'index', ]), 'title': 'ToolCallChunk', @@ -4150,7 +4179,7 @@ 'description': ''' Allowance for errors made by LLM. - Here we add an `error` key to surface errors made during generation + Here we add an ``error`` key to surface errors made during generation (e.g., invalid JSON arguments.) ''', 'properties': dict({ @@ -4176,6 +4205,10 @@ ]), 'title': 'Error', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -4187,6 +4220,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'anyOf': list([ dict({ @@ -4204,9 +4241,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', 'error', ]), 'title': 'InvalidToolCall', @@ -4438,12 +4476,23 @@ This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". + + .. note:: + ``create_tool_call`` may also be used as a factory to create a + ``ToolCall``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time ''', 'properties': dict({ 'args': dict({ 'title': 'Args', 'type': 'object', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -4455,6 +4504,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'title': 'Name', 'type': 'string', @@ -4465,9 +4518,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', ]), 'title': 'ToolCall', 'type': 'object', @@ -4476,9 +4530,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via AIMessageChunk.__add__), + When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their - values of `index` are equal and not None. + values of ``index`` are equal and not ``None``. 
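To make the merging rule just described concrete, here is a minimal sketch of combining two streamed fragments of one tool call (assuming the v0 ``AIMessageChunk`` from ``langchain_core.messages``; the chunk payloads are illustrative):

.. code-block:: python

    from langchain_core.messages import AIMessageChunk

    # Two fragments of the same tool call: they merge because their
    # ``index`` values are equal and not None.
    left = AIMessageChunk(
        content="",
        tool_call_chunks=[
            {"name": "foo", "args": '{"a":', "id": "call_123", "index": 0}
        ],
    )
    right = AIMessageChunk(
        content="",
        tool_call_chunks=[
            {"name": None, "args": ' 1}', "id": None, "index": 0}
        ],
    )

    merged = left + right
    # String attributes are concatenated; None values defer to the other side.
    assert merged.tool_call_chunks[0]["args"] == '{"a": 1}'

Fragments whose ``index`` values differ (or are ``None``) are appended side by side rather than merged.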
Example: @@ -4504,6 +4558,10 @@ ]), 'title': 'Args', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -4543,9 +4601,9 @@ }), }), 'required': list([ + 'id', 'name', 'args', - 'id', 'index', ]), 'title': 'ToolCallChunk', @@ -5657,7 +5715,7 @@ 'description': ''' Allowance for errors made by LLM. - Here we add an `error` key to surface errors made during generation + Here we add an ``error`` key to surface errors made during generation (e.g., invalid JSON arguments.) ''', 'properties': dict({ @@ -5683,6 +5741,10 @@ ]), 'title': 'Error', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -5694,6 +5756,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'anyOf': list([ dict({ @@ -5711,9 +5777,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', 'error', ]), 'title': 'InvalidToolCall', @@ -5945,12 +6012,23 @@ This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". + + .. note:: + ``create_tool_call`` may also be used as a factory to create a + ``ToolCall``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time ''', 'properties': dict({ 'args': dict({ 'title': 'Args', 'type': 'object', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -5962,6 +6040,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'title': 'Name', 'type': 'string', @@ -5972,9 +6054,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', ]), 'title': 'ToolCall', 'type': 'object', @@ -5983,9 +6066,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via AIMessageChunk.__add__), + When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their - values of `index` are equal and not None. + values of ``index`` are equal and not ``None``. Example: @@ -6011,6 +6094,10 @@ ]), 'title': 'Args', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -6050,9 +6137,9 @@ }), }), 'required': list([ + 'id', 'name', 'args', - 'id', 'index', ]), 'title': 'ToolCallChunk', @@ -7039,7 +7126,7 @@ 'description': ''' Allowance for errors made by LLM. - Here we add an `error` key to surface errors made during generation + Here we add an ``error`` key to surface errors made during generation (e.g., invalid JSON arguments.) ''', 'properties': dict({ @@ -7065,6 +7152,10 @@ ]), 'title': 'Error', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -7076,6 +7167,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'anyOf': list([ dict({ @@ -7093,9 +7188,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', 'error', ]), 'title': 'InvalidToolCall', @@ -7308,12 +7404,23 @@ This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". + + .. note:: + ``create_tool_call`` may also be used as a factory to create a + ``ToolCall``. 
Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time ''', 'properties': dict({ 'args': dict({ 'title': 'Args', 'type': 'object', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -7325,6 +7432,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'title': 'Name', 'type': 'string', @@ -7335,9 +7446,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', ]), 'title': 'ToolCall', 'type': 'object', @@ -7346,9 +7458,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via AIMessageChunk.__add__), + When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their - values of `index` are equal and not None. + values of ``index`` are equal and not ``None``. Example: @@ -7374,6 +7486,10 @@ ]), 'title': 'Args', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -7413,9 +7529,9 @@ }), }), 'required': list([ + 'id', 'name', 'args', - 'id', 'index', ]), 'title': 'ToolCallChunk', @@ -8557,7 +8673,7 @@ 'description': ''' Allowance for errors made by LLM. - Here we add an `error` key to surface errors made during generation + Here we add an ``error`` key to surface errors made during generation (e.g., invalid JSON arguments.) ''', 'properties': dict({ @@ -8583,6 +8699,10 @@ ]), 'title': 'Error', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -8594,6 +8714,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'anyOf': list([ dict({ @@ -8611,9 +8735,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', 'error', ]), 'title': 'InvalidToolCall', @@ -8845,12 +8970,23 @@ This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". + + .. note:: + ``create_tool_call`` may also be used as a factory to create a + ``ToolCall``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time ''', 'properties': dict({ 'args': dict({ 'title': 'Args', 'type': 'object', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -8862,6 +8998,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'title': 'Name', 'type': 'string', @@ -8872,9 +9012,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', ]), 'title': 'ToolCall', 'type': 'object', @@ -8883,9 +9024,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via AIMessageChunk.__add__), + When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their - values of `index` are equal and not None. + values of ``index`` are equal and not ``None``. 
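The ``create_tool_call`` factory named in the note above can be used instead of building the dict by hand. A short sketch, with the caveat that the import path and keyword signature here are assumptions inferred from the note rather than confirmed API:

.. code-block:: python

    from langchain_core.messages import create_tool_call  # assumed export location

    # ID supplied explicitly.
    call = create_tool_call(name="foo", args={"a": 1}, id="123")

    # ID omitted: per the note, the factory generates one automatically and
    # validates the required arguments at creation time.
    auto = create_tool_call(name="foo", args={"a": 1})
    assert auto["id"] is not None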
Example: @@ -8911,6 +9052,10 @@ ]), 'title': 'Args', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -8950,9 +9095,9 @@ }), }), 'required': list([ + 'id', 'name', 'args', - 'id', 'index', ]), 'title': 'ToolCallChunk', @@ -9229,25 +9374,25 @@ dict({ 'oneOf': list([ dict({ - '$ref': '#/definitions/AIMessage', + '$ref': '#/definitions/langchain_core__messages__ai__AIMessage', }), dict({ - '$ref': '#/definitions/HumanMessage', + '$ref': '#/definitions/langchain_core__messages__human__HumanMessage', }), dict({ '$ref': '#/definitions/ChatMessage', }), dict({ - '$ref': '#/definitions/SystemMessage', + '$ref': '#/definitions/langchain_core__messages__system__SystemMessage', }), dict({ '$ref': '#/definitions/FunctionMessage', }), dict({ - '$ref': '#/definitions/ToolMessage', + '$ref': '#/definitions/langchain_core__messages__tool__ToolMessage', }), dict({ - '$ref': '#/definitions/AIMessageChunk', + '$ref': '#/definitions/langchain_core__messages__ai__AIMessageChunk', }), dict({ '$ref': '#/definitions/HumanMessageChunk', @@ -9266,229 +9411,78 @@ }), ]), }), + dict({ + '$ref': '#/definitions/langchain_core__v1__messages__AIMessage', + }), + dict({ + '$ref': '#/definitions/langchain_core__v1__messages__AIMessageChunk', + }), + dict({ + '$ref': '#/definitions/langchain_core__v1__messages__HumanMessage', + }), + dict({ + '$ref': '#/definitions/langchain_core__v1__messages__SystemMessage', + }), + dict({ + '$ref': '#/definitions/langchain_core__v1__messages__ToolMessage', + }), ]), 'definitions': dict({ - 'AIMessage': dict({ - 'additionalProperties': True, + 'AudioContentBlock': dict({ 'description': ''' - Message from an AI. + Audio data. - AIMessage is returned from a chat model as a response to a prompt. + .. note:: + ``create_audio_block`` may also be used as a factory to create an + ``AudioContentBlock``. Benefits include: - This message represents the output of the model and consists of both - the raw output as returned by the model together standardized fields - (e.g., tool calls, usage metadata) added by the LangChain framework. 
+ * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time ''', 'properties': dict({ - 'additional_kwargs': dict({ - 'title': 'Additional Kwargs', + 'base64': dict({ + 'title': 'Base64', + 'type': 'string', + }), + 'extras': dict({ + 'title': 'Extras', 'type': 'object', }), - 'content': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'items': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'object', - }), - ]), - }), - 'type': 'array', - }), - ]), - 'title': 'Content', - }), - 'example': dict({ - 'default': False, - 'title': 'Example', - 'type': 'boolean', + 'file_id': dict({ + 'title': 'File Id', + 'type': 'string', }), 'id': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, 'title': 'Id', + 'type': 'string', }), - 'invalid_tool_calls': dict({ - 'default': list([ - ]), - 'items': dict({ - '$ref': '#/definitions/InvalidToolCall', - }), - 'title': 'Invalid Tool Calls', - 'type': 'array', + 'index': dict({ + 'title': 'Index', + 'type': 'integer', }), - 'name': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Name', - }), - 'response_metadata': dict({ - 'title': 'Response Metadata', - 'type': 'object', - }), - 'tool_calls': dict({ - 'default': list([ - ]), - 'items': dict({ - '$ref': '#/definitions/ToolCall', - }), - 'title': 'Tool Calls', - 'type': 'array', + 'mime_type': dict({ + 'title': 'Mime Type', + 'type': 'string', }), 'type': dict({ - 'const': 'ai', - 'default': 'ai', + 'const': 'audio', 'title': 'Type', }), - 'usage_metadata': dict({ - 'anyOf': list([ - dict({ - '$ref': '#/definitions/UsageMetadata', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, + 'url': dict({ + 'title': 'Url', + 'type': 'string', }), }), 'required': list([ - 'content', + 'type', ]), - 'title': 'AIMessage', + 'title': 'AudioContentBlock', 'type': 'object', }), - 'AIMessageChunk': dict({ - 'additionalProperties': True, - 'description': 'Message chunk from an AI.', + 'BaseModel': dict({ 'properties': dict({ - 'additional_kwargs': dict({ - 'title': 'Additional Kwargs', - 'type': 'object', - }), - 'content': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'items': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'object', - }), - ]), - }), - 'type': 'array', - }), - ]), - 'title': 'Content', - }), - 'example': dict({ - 'default': False, - 'title': 'Example', - 'type': 'boolean', - }), - 'id': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Id', - }), - 'invalid_tool_calls': dict({ - 'default': list([ - ]), - 'items': dict({ - '$ref': '#/definitions/InvalidToolCall', - }), - 'title': 'Invalid Tool Calls', - 'type': 'array', - }), - 'name': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Name', - }), - 'response_metadata': dict({ - 'title': 'Response Metadata', - 'type': 'object', - }), - 'tool_call_chunks': dict({ - 'default': list([ - ]), - 'items': dict({ - '$ref': '#/definitions/ToolCallChunk', - }), - 'title': 'Tool Call Chunks', - 'type': 'array', - }), - 'tool_calls': dict({ - 'default': list([ - ]), - 'items': dict({ - '$ref': '#/definitions/ToolCall', - }), - 'title': 'Tool Calls', - 'type': 'array', - }), - 'type': dict({ - 'const': 
'AIMessageChunk', - 'default': 'AIMessageChunk', - 'title': 'Type', - }), - 'usage_metadata': dict({ - 'anyOf': list([ - dict({ - '$ref': '#/definitions/UsageMetadata', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - }), }), - 'required': list([ - 'content', - ]), - 'title': 'AIMessageChunk', + 'title': 'BaseModel', 'type': 'object', }), 'ChatMessage': dict({ @@ -9639,6 +9633,238 @@ 'title': 'ChatMessageChunk', 'type': 'object', }), + 'Citation': dict({ + 'description': ''' + Annotation for citing data from a document. + + .. note:: + ``start/end`` indices refer to the **response text**, + not the source text. This means that the indices are relative to the model's + response, not the original document (as specified in the ``url``). + + .. note:: + ``create_citation`` may also be used as a factory to create a ``Citation``. + Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + ''', + 'properties': dict({ + 'cited_text': dict({ + 'title': 'Cited Text', + 'type': 'string', + }), + 'end_index': dict({ + 'title': 'End Index', + 'type': 'integer', + }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'start_index': dict({ + 'title': 'Start Index', + 'type': 'integer', + }), + 'title': dict({ + 'title': 'Title', + 'type': 'string', + }), + 'type': dict({ + 'const': 'citation', + 'title': 'Type', + }), + 'url': dict({ + 'title': 'Url', + 'type': 'string', + }), + }), + 'required': list([ + 'type', + ]), + 'title': 'Citation', + 'type': 'object', + }), + 'CodeInterpreterCall': dict({ + 'description': 'Built-in code interpreter tool call.', + 'properties': dict({ + 'code': dict({ + 'title': 'Code', + 'type': 'string', + }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'language': dict({ + 'title': 'Language', + 'type': 'string', + }), + 'type': dict({ + 'const': 'code_interpreter_call', + 'title': 'Type', + }), + }), + 'required': list([ + 'type', + ]), + 'title': 'CodeInterpreterCall', + 'type': 'object', + }), + 'CodeInterpreterOutput': dict({ + 'description': ''' + Output of a singular code interpreter tool call. + + Full output of a code interpreter tool call is represented by + ``CodeInterpreterResult`` which is a list of these blocks. 
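Since every content block in these schemas is a plain mapping, the relationship just described can be shown with ordinary dicts. A hypothetical example shaped to satisfy the two schemas (all values are illustrative, not real interpreter output):

.. code-block:: python

    # One unit of interpreter output, per the ``CodeInterpreterOutput`` schema.
    output_block = {
        "type": "code_interpreter_output",
        "stdout": "4\n",
        "stderr": "",
        "return_code": 0,
        "file_ids": [],
    }

    # The full result groups the output blocks, per ``CodeInterpreterResult``;
    # ``output`` is the only required field besides ``type``.
    result_block = {
        "type": "code_interpreter_result",
        "output": [output_block],
    }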
+ ''', + 'properties': dict({ + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'file_ids': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'File Ids', + 'type': 'array', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'return_code': dict({ + 'title': 'Return Code', + 'type': 'integer', + }), + 'stderr': dict({ + 'title': 'Stderr', + 'type': 'string', + }), + 'stdout': dict({ + 'title': 'Stdout', + 'type': 'string', + }), + 'type': dict({ + 'const': 'code_interpreter_output', + 'title': 'Type', + }), + }), + 'required': list([ + 'type', + ]), + 'title': 'CodeInterpreterOutput', + 'type': 'object', + }), + 'CodeInterpreterResult': dict({ + 'description': 'Result of a code interpreter tool call.', + 'properties': dict({ + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'output': dict({ + 'items': dict({ + '$ref': '#/definitions/CodeInterpreterOutput', + }), + 'title': 'Output', + 'type': 'array', + }), + 'type': dict({ + 'const': 'code_interpreter_result', + 'title': 'Type', + }), + }), + 'required': list([ + 'type', + 'output', + ]), + 'title': 'CodeInterpreterResult', + 'type': 'object', + }), + 'FileContentBlock': dict({ + 'description': ''' + File data that doesn't fit into other multimodal blocks. + + This block is intended for files that are not images, audio, or plaintext. For + example, it can be used for PDFs, Word documents, etc. + + If the file is an image, audio, or plaintext, you should use the corresponding + content block type (e.g., ``ImageContentBlock``, ``AudioContentBlock``, + ``PlainTextContentBlock``). + + .. note:: + ``create_file_block`` may also be used as a factory to create a + ``FileContentBlock``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + ''', + 'properties': dict({ + 'base64': dict({ + 'title': 'Base64', + 'type': 'string', + }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'file_id': dict({ + 'title': 'File Id', + 'type': 'string', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'mime_type': dict({ + 'title': 'Mime Type', + 'type': 'string', + }), + 'type': dict({ + 'const': 'file', + 'title': 'Type', + }), + 'url': dict({ + 'title': 'Url', + 'type': 'string', + }), + }), + 'required': list([ + 'type', + ]), + 'title': 'FileContentBlock', + 'type': 'object', + }), 'FunctionMessage': dict({ 'additionalProperties': True, 'description': ''' @@ -9772,103 +9998,6 @@ 'title': 'FunctionMessageChunk', 'type': 'object', }), - 'HumanMessage': dict({ - 'additionalProperties': True, - 'description': ''' - Message from a human. - - HumanMessages are messages that are passed in from a human to the model. - - Example: - - .. code-block:: python - - from langchain_core.messages import HumanMessage, SystemMessage - - messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" - ) - ] - - # Instantiate a chat model and invoke it with the messages - model = ... 
- print(model.invoke(messages)) - ''', - 'properties': dict({ - 'additional_kwargs': dict({ - 'title': 'Additional Kwargs', - 'type': 'object', - }), - 'content': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'items': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'object', - }), - ]), - }), - 'type': 'array', - }), - ]), - 'title': 'Content', - }), - 'example': dict({ - 'default': False, - 'title': 'Example', - 'type': 'boolean', - }), - 'id': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Id', - }), - 'name': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Name', - }), - 'response_metadata': dict({ - 'title': 'Response Metadata', - 'type': 'object', - }), - 'type': dict({ - 'const': 'human', - 'default': 'human', - 'title': 'Type', - }), - }), - 'required': list([ - 'content', - ]), - 'title': 'HumanMessage', - 'type': 'object', - }), 'HumanMessageChunk': dict({ 'additionalProperties': True, 'description': 'Human Message chunk.', @@ -9943,6 +10072,57 @@ 'title': 'HumanMessageChunk', 'type': 'object', }), + 'ImageContentBlock': dict({ + 'description': ''' + Image data. + + .. note:: + ``create_image_block`` may also be used as a factory to create a + ``ImageContentBlock``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + ''', + 'properties': dict({ + 'base64': dict({ + 'title': 'Base64', + 'type': 'string', + }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'file_id': dict({ + 'title': 'File Id', + 'type': 'string', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'mime_type': dict({ + 'title': 'Mime Type', + 'type': 'string', + }), + 'type': dict({ + 'const': 'image', + 'title': 'Type', + }), + 'url': dict({ + 'title': 'Url', + 'type': 'string', + }), + }), + 'required': list([ + 'type', + ]), + 'title': 'ImageContentBlock', + 'type': 'object', + }), 'InputTokenDetails': dict({ 'description': ''' Breakdown of input token counts. @@ -9984,7 +10164,7 @@ 'description': ''' Allowance for errors made by LLM. - Here we add an `error` key to surface errors made during generation + Here we add an ``error`` key to surface errors made during generation (e.g., invalid JSON arguments.) ''', 'properties': dict({ @@ -10010,6 +10190,10 @@ ]), 'title': 'Error', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -10021,6 +10205,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'anyOf': list([ dict({ @@ -10038,14 +10226,81 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', 'error', ]), 'title': 'InvalidToolCall', 'type': 'object', }), + 'NonStandardAnnotation': dict({ + 'description': 'Provider-specific annotation format.', + 'properties': dict({ + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'type': dict({ + 'const': 'non_standard_annotation', + 'title': 'Type', + }), + 'value': dict({ + 'title': 'Value', + 'type': 'object', + }), + }), + 'required': list([ + 'type', + 'value', + ]), + 'title': 'NonStandardAnnotation', + 'type': 'object', + }), + 'NonStandardContentBlock': dict({ + 'description': ''' + Provider-specific data. 
+ + This block contains data for which there is not yet a standard type. + + The purpose of this block should be to simply hold a provider-specific payload. + If a provider's non-standard output includes reasoning and tool calls, it should be + the adapter's job to parse that payload and emit the corresponding standard + ReasoningContentBlock and ToolCallContentBlocks. + + .. note:: + ``create_non_standard_block`` may also be used as a factory to create a + ``NonStandardContentBlock``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + ''', + 'properties': dict({ + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'type': dict({ + 'const': 'non_standard', + 'title': 'Type', + }), + 'value': dict({ + 'title': 'Value', + 'type': 'object', + }), + }), + 'required': list([ + 'type', + 'value', + ]), + 'title': 'NonStandardContentBlock', + 'type': 'object', + }), 'OutputTokenDetails': dict({ 'description': ''' Breakdown of output token counts. @@ -10076,96 +10331,135 @@ 'title': 'OutputTokenDetails', 'type': 'object', }), - 'SystemMessage': dict({ - 'additionalProperties': True, + 'PlainTextContentBlock': dict({ 'description': ''' - Message for priming AI behavior. + Plaintext data (e.g., from a document). - The system message is usually passed in as the first of a sequence - of input messages. + .. note:: + Title and context are optional fields that may be passed to the model. See + Anthropic `example `__. - Example: + .. note:: + ``create_plaintext_block`` may also be used as a factory to create a + ``PlainTextContentBlock``. Benefits include: - .. code-block:: python - - from langchain_core.messages import HumanMessage, SystemMessage - - messages = [ - SystemMessage( - content="You are a helpful assistant! Your name is Bob." - ), - HumanMessage( - content="What is your name?" 
- ) - ] - - # Define a chat model and invoke it with the messages - print(model.invoke(messages)) + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time ''', 'properties': dict({ - 'additional_kwargs': dict({ - 'title': 'Additional Kwargs', + 'base64': dict({ + 'title': 'Base64', + 'type': 'string', + }), + 'context': dict({ + 'title': 'Context', + 'type': 'string', + }), + 'extras': dict({ + 'title': 'Extras', 'type': 'object', }), - 'content': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'items': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'object', - }), - ]), - }), - 'type': 'array', - }), - ]), - 'title': 'Content', + 'file_id': dict({ + 'title': 'File Id', + 'type': 'string', }), 'id': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, 'title': 'Id', + 'type': 'string', }), - 'name': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Name', + 'index': dict({ + 'title': 'Index', + 'type': 'integer', }), - 'response_metadata': dict({ - 'title': 'Response Metadata', - 'type': 'object', + 'mime_type': dict({ + 'const': 'text/plain', + 'title': 'Mime Type', + }), + 'text': dict({ + 'title': 'Text', + 'type': 'string', + }), + 'title': dict({ + 'title': 'Title', + 'type': 'string', }), 'type': dict({ - 'const': 'system', - 'default': 'system', + 'const': 'text-plain', + 'title': 'Type', + }), + 'url': dict({ + 'title': 'Url', + 'type': 'string', + }), + }), + 'required': list([ + 'type', + 'mime_type', + ]), + 'title': 'PlainTextContentBlock', + 'type': 'object', + }), + 'ReasoningContentBlock': dict({ + 'description': ''' + Reasoning output from a LLM. + + .. note:: + ``create_reasoning_block`` may also be used as a factory to create a + ``ReasoningContentBlock``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + ''', + 'properties': dict({ + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'reasoning': dict({ + 'title': 'Reasoning', + 'type': 'string', + }), + 'type': dict({ + 'const': 'reasoning', 'title': 'Type', }), }), 'required': list([ - 'content', + 'type', ]), - 'title': 'SystemMessage', + 'title': 'ReasoningContentBlock', + 'type': 'object', + }), + 'ResponseMetadata': dict({ + 'description': ''' + Metadata about the response from the AI provider. + + Contains additional information returned by the provider, such as + response headers, service tiers, log probabilities, system fingerprints, etc. + + Extra keys are permitted from what is typed here (via `total=False`), allowing + for provider-specific metadata to be included without breaking the type + definition. + ''', + 'properties': dict({ + 'model_name': dict({ + 'title': 'Model Name', + 'type': 'string', + }), + 'model_provider': dict({ + 'title': 'Model Provider', + 'type': 'string', + }), + }), + 'title': 'ResponseMetadata', 'type': 'object', }), 'SystemMessageChunk': dict({ @@ -10237,6 +10531,63 @@ 'title': 'SystemMessageChunk', 'type': 'object', }), + 'TextContentBlock': dict({ + 'description': ''' + Text output from a LLM. 
+ + This typically represents the main text content of a message, such as the response + from a language model or the text of a user message. + + .. note:: + ``create_text_block`` may also be used as a factory to create a + ``TextContentBlock``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + ''', + 'properties': dict({ + 'annotations': dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/definitions/Citation', + }), + dict({ + '$ref': '#/definitions/NonStandardAnnotation', + }), + ]), + }), + 'title': 'Annotations', + 'type': 'array', + }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'text': dict({ + 'title': 'Text', + 'type': 'string', + }), + 'type': dict({ + 'const': 'text', + 'title': 'Type', + }), + }), + 'required': list([ + 'type', + 'text', + ]), + 'title': 'TextContentBlock', + 'type': 'object', + }), 'ToolCall': dict({ 'description': ''' Represents a request to call a tool. @@ -10253,12 +10604,23 @@ This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". + + .. note:: + ``create_tool_call`` may also be used as a factory to create a + ``ToolCall``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time ''', 'properties': dict({ 'args': dict({ 'title': 'Args', 'type': 'object', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -10270,6 +10632,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'title': 'Name', 'type': 'string', @@ -10280,9 +10646,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', ]), 'title': 'ToolCall', 'type': 'object', @@ -10291,9 +10658,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via AIMessageChunk.__add__), + When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their - values of `index` are equal and not None. + values of ``index`` are equal and not ``None``. Example: @@ -10319,6 +10686,10 @@ ]), 'title': 'Args', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -10358,134 +10729,14 @@ }), }), 'required': list([ + 'id', 'name', 'args', - 'id', 'index', ]), 'title': 'ToolCallChunk', 'type': 'object', }), - 'ToolMessage': dict({ - 'additionalProperties': True, - 'description': ''' - Message for passing the result of executing a tool back to a model. - - ToolMessages contain the result of a tool invocation. Typically, the result - is encoded inside the `content` field. - - Example: A ToolMessage representing a result of 42 from a tool call with id - - .. code-block:: python - - from langchain_core.messages import ToolMessage - - ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') - - - Example: A ToolMessage where only part of the tool output is sent to the model - and the full output is passed in to artifact. - - .. versionadded:: 0.2.17 - - .. 
code-block:: python - - from langchain_core.messages import ToolMessage - - tool_output = { - "stdout": "From the graph we can see that the correlation between x and y is ...", - "stderr": None, - "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, - } - - ToolMessage( - content=tool_output["stdout"], - artifact=tool_output, - tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', - ) - - The tool_call_id field is used to associate the tool call request with the - tool call response. This is useful in situations where a chat model is able - to request multiple tool calls in parallel. - ''', - 'properties': dict({ - 'additional_kwargs': dict({ - 'title': 'Additional Kwargs', - 'type': 'object', - }), - 'artifact': dict({ - 'title': 'Artifact', - }), - 'content': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'items': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'object', - }), - ]), - }), - 'type': 'array', - }), - ]), - 'title': 'Content', - }), - 'id': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Id', - }), - 'name': dict({ - 'anyOf': list([ - dict({ - 'type': 'string', - }), - dict({ - 'type': 'null', - }), - ]), - 'default': None, - 'title': 'Name', - }), - 'response_metadata': dict({ - 'title': 'Response Metadata', - 'type': 'object', - }), - 'status': dict({ - 'default': 'success', - 'title': 'Status', - }), - 'tool_call_id': dict({ - 'title': 'Tool Call Id', - 'type': 'string', - }), - 'type': dict({ - 'const': 'tool', - 'default': 'tool', - 'title': 'Type', - }), - }), - 'required': list([ - 'content', - 'tool_call_id', - ]), - 'title': 'ToolMessage', - 'type': 'object', - }), 'ToolMessageChunk': dict({ 'additionalProperties': True, 'description': 'Tool Message chunk.', @@ -10624,6 +10875,1195 @@ 'title': 'UsageMetadata', 'type': 'object', }), + 'VideoContentBlock': dict({ + 'description': ''' + Video data. + + .. note:: + ``create_video_block`` may also be used as a factory to create a + ``VideoContentBlock``. 
Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time + ''', + 'properties': dict({ + 'base64': dict({ + 'title': 'Base64', + 'type': 'string', + }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'file_id': dict({ + 'title': 'File Id', + 'type': 'string', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'mime_type': dict({ + 'title': 'Mime Type', + 'type': 'string', + }), + 'type': dict({ + 'const': 'video', + 'title': 'Type', + }), + 'url': dict({ + 'title': 'Url', + 'type': 'string', + }), + }), + 'required': list([ + 'type', + ]), + 'title': 'VideoContentBlock', + 'type': 'object', + }), + 'WebSearchCall': dict({ + 'description': 'Built-in web search tool call.', + 'properties': dict({ + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'query': dict({ + 'title': 'Query', + 'type': 'string', + }), + 'type': dict({ + 'const': 'web_search_call', + 'title': 'Type', + }), + }), + 'required': list([ + 'type', + ]), + 'title': 'WebSearchCall', + 'type': 'object', + }), + 'WebSearchResult': dict({ + 'description': 'Result of a built-in web search tool call.', + 'properties': dict({ + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), + 'type': dict({ + 'const': 'web_search_result', + 'title': 'Type', + }), + 'urls': dict({ + 'items': dict({ + 'type': 'string', + }), + 'title': 'Urls', + 'type': 'array', + }), + }), + 'required': list([ + 'type', + ]), + 'title': 'WebSearchResult', + 'type': 'object', + }), + 'langchain_core__messages__ai__AIMessage': dict({ + 'additionalProperties': True, + 'description': ''' + Message from an AI. + + AIMessage is returned from a chat model as a response to a prompt. + + This message represents the output of the model and consists of both + the raw output as returned by the model together standardized fields + (e.g., tool calls, usage metadata) added by the LangChain framework. 
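The description above captures the crux of the v0 ``AIMessage``: raw provider output plus standardized fields layered on top by the framework. A minimal sketch of constructing one with those fields populated (the dict shapes follow the ``ToolCall`` and ``UsageMetadata`` schemas in this snapshot):

.. code-block:: python

    from langchain_core.messages import AIMessage

    msg = AIMessage(
        content="",
        tool_calls=[
            {"type": "tool_call", "name": "add", "args": {"a": 2, "b": 2}, "id": "call_1"}
        ],
        usage_metadata={"input_tokens": 10, "output_tokens": 5, "total_tokens": 15},
    )

    # The standardized fields read the same way regardless of provider.
    assert msg.tool_calls[0]["name"] == "add"
    assert msg.usage_metadata["total_tokens"] == 15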
+ ''', + 'properties': dict({ + 'additional_kwargs': dict({ + 'title': 'Additional Kwargs', + 'type': 'object', + }), + 'content': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'object', + }), + ]), + }), + 'type': 'array', + }), + ]), + 'title': 'Content', + }), + 'example': dict({ + 'default': False, + 'title': 'Example', + 'type': 'boolean', + }), + 'id': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Id', + }), + 'invalid_tool_calls': dict({ + 'default': list([ + ]), + 'items': dict({ + '$ref': '#/definitions/InvalidToolCall', + }), + 'title': 'Invalid Tool Calls', + 'type': 'array', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'response_metadata': dict({ + 'title': 'Response Metadata', + 'type': 'object', + }), + 'tool_calls': dict({ + 'default': list([ + ]), + 'items': dict({ + '$ref': '#/definitions/ToolCall', + }), + 'title': 'Tool Calls', + 'type': 'array', + }), + 'type': dict({ + 'const': 'ai', + 'default': 'ai', + 'title': 'Type', + }), + 'usage_metadata': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/definitions/UsageMetadata', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + }), + }), + 'required': list([ + 'content', + ]), + 'title': 'AIMessage', + 'type': 'object', + }), + 'langchain_core__messages__ai__AIMessageChunk': dict({ + 'additionalProperties': True, + 'description': 'Message chunk from an AI.', + 'properties': dict({ + 'additional_kwargs': dict({ + 'title': 'Additional Kwargs', + 'type': 'object', + }), + 'content': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'object', + }), + ]), + }), + 'type': 'array', + }), + ]), + 'title': 'Content', + }), + 'example': dict({ + 'default': False, + 'title': 'Example', + 'type': 'boolean', + }), + 'id': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Id', + }), + 'invalid_tool_calls': dict({ + 'default': list([ + ]), + 'items': dict({ + '$ref': '#/definitions/InvalidToolCall', + }), + 'title': 'Invalid Tool Calls', + 'type': 'array', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'response_metadata': dict({ + 'title': 'Response Metadata', + 'type': 'object', + }), + 'tool_call_chunks': dict({ + 'default': list([ + ]), + 'items': dict({ + '$ref': '#/definitions/ToolCallChunk', + }), + 'title': 'Tool Call Chunks', + 'type': 'array', + }), + 'tool_calls': dict({ + 'default': list([ + ]), + 'items': dict({ + '$ref': '#/definitions/ToolCall', + }), + 'title': 'Tool Calls', + 'type': 'array', + }), + 'type': dict({ + 'const': 'AIMessageChunk', + 'default': 'AIMessageChunk', + 'title': 'Type', + }), + 'usage_metadata': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/definitions/UsageMetadata', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + }), + }), + 'required': list([ + 'content', + ]), + 'title': 'AIMessageChunk', + 'type': 'object', + }), + 'langchain_core__messages__human__HumanMessage': dict({ + 'additionalProperties': True, + 'description': ''' + Message from a human. 
+ + HumanMessages are messages that are passed in from a human to the model. + + Example: + + .. code-block:: python + + from langchain_core.messages import HumanMessage, SystemMessage + + messages = [ + SystemMessage( + content="You are a helpful assistant! Your name is Bob." + ), + HumanMessage( + content="What is your name?" + ) + ] + + # Instantiate a chat model and invoke it with the messages + model = ... + print(model.invoke(messages)) + ''', + 'properties': dict({ + 'additional_kwargs': dict({ + 'title': 'Additional Kwargs', + 'type': 'object', + }), + 'content': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'object', + }), + ]), + }), + 'type': 'array', + }), + ]), + 'title': 'Content', + }), + 'example': dict({ + 'default': False, + 'title': 'Example', + 'type': 'boolean', + }), + 'id': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Id', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'response_metadata': dict({ + 'title': 'Response Metadata', + 'type': 'object', + }), + 'type': dict({ + 'const': 'human', + 'default': 'human', + 'title': 'Type', + }), + }), + 'required': list([ + 'content', + ]), + 'title': 'HumanMessage', + 'type': 'object', + }), + 'langchain_core__messages__system__SystemMessage': dict({ + 'additionalProperties': True, + 'description': ''' + Message for priming AI behavior. + + The system message is usually passed in as the first of a sequence + of input messages. + + Example: + + .. code-block:: python + + from langchain_core.messages import HumanMessage, SystemMessage + + messages = [ + SystemMessage( + content="You are a helpful assistant! Your name is Bob." + ), + HumanMessage( + content="What is your name?" + ) + ] + + # Define a chat model and invoke it with the messages + print(model.invoke(messages)) + ''', + 'properties': dict({ + 'additional_kwargs': dict({ + 'title': 'Additional Kwargs', + 'type': 'object', + }), + 'content': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'object', + }), + ]), + }), + 'type': 'array', + }), + ]), + 'title': 'Content', + }), + 'id': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Id', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'response_metadata': dict({ + 'title': 'Response Metadata', + 'type': 'object', + }), + 'type': dict({ + 'const': 'system', + 'default': 'system', + 'title': 'Type', + }), + }), + 'required': list([ + 'content', + ]), + 'title': 'SystemMessage', + 'type': 'object', + }), + 'langchain_core__messages__tool__ToolMessage': dict({ + 'additionalProperties': True, + 'description': ''' + Message for passing the result of executing a tool back to a model. + + ToolMessages contain the result of a tool invocation. Typically, the result + is encoded inside the `content` field. + + Example: A ToolMessage representing a result of 42 from a tool call with id + + .. 
code-block:: python + + from langchain_core.messages import ToolMessage + + ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL') + + + Example: A ToolMessage where only part of the tool output is sent to the model + and the full output is passed in to artifact. + + .. versionadded:: 0.2.17 + + .. code-block:: python + + from langchain_core.messages import ToolMessage + + tool_output = { + "stdout": "From the graph we can see that the correlation between x and y is ...", + "stderr": None, + "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."}, + } + + ToolMessage( + content=tool_output["stdout"], + artifact=tool_output, + tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL', + ) + + The tool_call_id field is used to associate the tool call request with the + tool call response. This is useful in situations where a chat model is able + to request multiple tool calls in parallel. + ''', + 'properties': dict({ + 'additional_kwargs': dict({ + 'title': 'Additional Kwargs', + 'type': 'object', + }), + 'artifact': dict({ + 'title': 'Artifact', + }), + 'content': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'object', + }), + ]), + }), + 'type': 'array', + }), + ]), + 'title': 'Content', + }), + 'id': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Id', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'response_metadata': dict({ + 'title': 'Response Metadata', + 'type': 'object', + }), + 'status': dict({ + 'default': 'success', + 'title': 'Status', + }), + 'tool_call_id': dict({ + 'title': 'Tool Call Id', + 'type': 'string', + }), + 'type': dict({ + 'const': 'tool', + 'default': 'tool', + 'title': 'Type', + }), + }), + 'required': list([ + 'content', + 'tool_call_id', + ]), + 'title': 'ToolMessage', + 'type': 'object', + }), + 'langchain_core__v1__messages__AIMessage': dict({ + 'properties': dict({ + 'content': dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/definitions/TextContentBlock', + }), + dict({ + '$ref': '#/definitions/ToolCall', + }), + dict({ + '$ref': '#/definitions/InvalidToolCall', + }), + dict({ + '$ref': '#/definitions/ToolCallChunk', + }), + dict({ + '$ref': '#/definitions/ReasoningContentBlock', + }), + dict({ + '$ref': '#/definitions/NonStandardContentBlock', + }), + dict({ + '$ref': '#/definitions/ImageContentBlock', + }), + dict({ + '$ref': '#/definitions/VideoContentBlock', + }), + dict({ + '$ref': '#/definitions/AudioContentBlock', + }), + dict({ + '$ref': '#/definitions/PlainTextContentBlock', + }), + dict({ + '$ref': '#/definitions/FileContentBlock', + }), + dict({ + '$ref': '#/definitions/CodeInterpreterCall', + }), + dict({ + '$ref': '#/definitions/CodeInterpreterOutput', + }), + dict({ + '$ref': '#/definitions/CodeInterpreterResult', + }), + dict({ + '$ref': '#/definitions/WebSearchCall', + }), + dict({ + '$ref': '#/definitions/WebSearchResult', + }), + ]), + }), + 'title': 'Content', + 'type': 'array', + }), + 'id': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Id', + }), + 'lc_version': dict({ + 'default': 'v1', + 'title': 'Lc Version', + 'type': 'string', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 
'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'parsed': dict({ + 'anyOf': list([ + dict({ + 'type': 'object', + }), + dict({ + '$ref': '#/definitions/BaseModel', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Parsed', + }), + 'response_metadata': dict({ + '$ref': '#/definitions/ResponseMetadata', + }), + 'type': dict({ + 'const': 'ai', + 'default': 'ai', + 'title': 'Type', + }), + 'usage_metadata': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/definitions/UsageMetadata', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + }), + }), + 'title': 'AIMessage', + 'type': 'object', + }), + 'langchain_core__v1__messages__AIMessageChunk': dict({ + 'properties': dict({ + 'content': dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/definitions/TextContentBlock', + }), + dict({ + '$ref': '#/definitions/ToolCall', + }), + dict({ + '$ref': '#/definitions/InvalidToolCall', + }), + dict({ + '$ref': '#/definitions/ToolCallChunk', + }), + dict({ + '$ref': '#/definitions/ReasoningContentBlock', + }), + dict({ + '$ref': '#/definitions/NonStandardContentBlock', + }), + dict({ + '$ref': '#/definitions/ImageContentBlock', + }), + dict({ + '$ref': '#/definitions/VideoContentBlock', + }), + dict({ + '$ref': '#/definitions/AudioContentBlock', + }), + dict({ + '$ref': '#/definitions/PlainTextContentBlock', + }), + dict({ + '$ref': '#/definitions/FileContentBlock', + }), + dict({ + '$ref': '#/definitions/CodeInterpreterCall', + }), + dict({ + '$ref': '#/definitions/CodeInterpreterOutput', + }), + dict({ + '$ref': '#/definitions/CodeInterpreterResult', + }), + dict({ + '$ref': '#/definitions/WebSearchCall', + }), + dict({ + '$ref': '#/definitions/WebSearchResult', + }), + ]), + }), + 'title': 'Content', + 'type': 'array', + }), + 'id': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Id', + }), + 'lc_version': dict({ + 'default': 'v1', + 'title': 'Lc Version', + 'type': 'string', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'parsed': dict({ + 'anyOf': list([ + dict({ + 'type': 'object', + }), + dict({ + '$ref': '#/definitions/BaseModel', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Parsed', + }), + 'response_metadata': dict({ + '$ref': '#/definitions/ResponseMetadata', + }), + 'type': dict({ + 'const': 'ai_chunk', + 'default': 'ai_chunk', + 'title': 'Type', + }), + 'usage_metadata': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/definitions/UsageMetadata', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + }), + }), + 'title': 'AIMessageChunk', + 'type': 'object', + }), + 'langchain_core__v1__messages__HumanMessage': dict({ + 'properties': dict({ + 'content': dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/definitions/TextContentBlock', + }), + dict({ + '$ref': '#/definitions/ToolCall', + }), + dict({ + '$ref': '#/definitions/InvalidToolCall', + }), + dict({ + '$ref': '#/definitions/ToolCallChunk', + }), + dict({ + '$ref': '#/definitions/ReasoningContentBlock', + }), + dict({ + '$ref': '#/definitions/NonStandardContentBlock', + }), + dict({ + '$ref': '#/definitions/ImageContentBlock', + }), + dict({ + '$ref': '#/definitions/VideoContentBlock', + }), + dict({ + '$ref': '#/definitions/AudioContentBlock', + }), + dict({ + '$ref': '#/definitions/PlainTextContentBlock', + }), + 
dict({ + '$ref': '#/definitions/FileContentBlock', + }), + dict({ + '$ref': '#/definitions/CodeInterpreterCall', + }), + dict({ + '$ref': '#/definitions/CodeInterpreterOutput', + }), + dict({ + '$ref': '#/definitions/CodeInterpreterResult', + }), + dict({ + '$ref': '#/definitions/WebSearchCall', + }), + dict({ + '$ref': '#/definitions/WebSearchResult', + }), + ]), + }), + 'title': 'Content', + 'type': 'array', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'type': dict({ + 'const': 'human', + 'default': 'human', + 'title': 'Type', + }), + }), + 'required': list([ + 'id', + 'content', + ]), + 'title': 'HumanMessage', + 'type': 'object', + }), + 'langchain_core__v1__messages__SystemMessage': dict({ + 'properties': dict({ + 'content': dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/definitions/TextContentBlock', + }), + dict({ + '$ref': '#/definitions/ToolCall', + }), + dict({ + '$ref': '#/definitions/InvalidToolCall', + }), + dict({ + '$ref': '#/definitions/ToolCallChunk', + }), + dict({ + '$ref': '#/definitions/ReasoningContentBlock', + }), + dict({ + '$ref': '#/definitions/NonStandardContentBlock', + }), + dict({ + '$ref': '#/definitions/ImageContentBlock', + }), + dict({ + '$ref': '#/definitions/VideoContentBlock', + }), + dict({ + '$ref': '#/definitions/AudioContentBlock', + }), + dict({ + '$ref': '#/definitions/PlainTextContentBlock', + }), + dict({ + '$ref': '#/definitions/FileContentBlock', + }), + dict({ + '$ref': '#/definitions/CodeInterpreterCall', + }), + dict({ + '$ref': '#/definitions/CodeInterpreterOutput', + }), + dict({ + '$ref': '#/definitions/CodeInterpreterResult', + }), + dict({ + '$ref': '#/definitions/WebSearchCall', + }), + dict({ + '$ref': '#/definitions/WebSearchResult', + }), + ]), + }), + 'title': 'Content', + 'type': 'array', + }), + 'custom_role': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Custom Role', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'type': dict({ + 'const': 'system', + 'default': 'system', + 'title': 'Type', + }), + }), + 'required': list([ + 'id', + 'content', + ]), + 'title': 'SystemMessage', + 'type': 'object', + }), + 'langchain_core__v1__messages__ToolMessage': dict({ + 'properties': dict({ + 'artifact': dict({ + 'anyOf': list([ + dict({ + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Artifact', + }), + 'content': dict({ + 'items': dict({ + 'anyOf': list([ + dict({ + '$ref': '#/definitions/TextContentBlock', + }), + dict({ + '$ref': '#/definitions/ToolCall', + }), + dict({ + '$ref': '#/definitions/InvalidToolCall', + }), + dict({ + '$ref': '#/definitions/ToolCallChunk', + }), + dict({ + '$ref': '#/definitions/ReasoningContentBlock', + }), + dict({ + '$ref': '#/definitions/NonStandardContentBlock', + }), + dict({ + '$ref': '#/definitions/ImageContentBlock', + }), + dict({ + '$ref': '#/definitions/VideoContentBlock', + }), + dict({ + '$ref': '#/definitions/AudioContentBlock', + }), + dict({ + '$ref': '#/definitions/PlainTextContentBlock', + }), + dict({ + '$ref': '#/definitions/FileContentBlock', + }), + dict({ + '$ref': '#/definitions/CodeInterpreterCall', + }), + dict({ + 
'$ref': '#/definitions/CodeInterpreterOutput', + }), + dict({ + '$ref': '#/definitions/CodeInterpreterResult', + }), + dict({ + '$ref': '#/definitions/WebSearchCall', + }), + dict({ + '$ref': '#/definitions/WebSearchResult', + }), + ]), + }), + 'title': 'Content', + 'type': 'array', + }), + 'id': dict({ + 'title': 'Id', + 'type': 'string', + }), + 'name': dict({ + 'anyOf': list([ + dict({ + 'type': 'string', + }), + dict({ + 'type': 'null', + }), + ]), + 'default': None, + 'title': 'Name', + }), + 'status': dict({ + 'default': 'success', + 'title': 'Status', + }), + 'tool_call_id': dict({ + 'title': 'Tool Call Id', + 'type': 'string', + }), + 'type': dict({ + 'const': 'tool', + 'default': 'tool', + 'title': 'Type', + }), + }), + 'required': list([ + 'id', + 'tool_call_id', + 'content', + ]), + 'title': 'ToolMessage', + 'type': 'object', + }), }), 'title': 'CommaSeparatedListOutputParserInput', }) @@ -11410,7 +12850,7 @@ 'description': ''' Allowance for errors made by LLM. - Here we add an `error` key to surface errors made during generation + Here we add an ``error`` key to surface errors made during generation (e.g., invalid JSON arguments.) ''', 'properties': dict({ @@ -11436,6 +12876,10 @@ ]), 'title': 'Error', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -11447,6 +12891,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'anyOf': list([ dict({ @@ -11464,9 +12912,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', 'error', ]), 'title': 'InvalidToolCall', @@ -11709,12 +13158,23 @@ This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". + + .. note:: + ``create_tool_call`` may also be used as a factory to create a + ``ToolCall``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time ''', 'properties': dict({ 'args': dict({ 'title': 'Args', 'type': 'object', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -11726,6 +13186,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'title': 'Name', 'type': 'string', @@ -11736,9 +13200,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', ]), 'title': 'ToolCall', 'type': 'object', @@ -11747,9 +13212,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via AIMessageChunk.__add__), + When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their - values of `index` are equal and not None. + values of ``index`` are equal and not ``None``. Example: @@ -11775,6 +13240,10 @@ ]), 'title': 'Args', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -11814,9 +13283,9 @@ }), }), 'required': list([ + 'id', 'name', 'args', - 'id', 'index', ]), 'title': 'ToolCallChunk', @@ -12878,7 +14347,7 @@ 'description': ''' Allowance for errors made by LLM. - Here we add an `error` key to surface errors made during generation + Here we add an ``error`` key to surface errors made during generation (e.g., invalid JSON arguments.) 
''', 'properties': dict({ @@ -12904,6 +14373,10 @@ ]), 'title': 'Error', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -12915,6 +14388,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'anyOf': list([ dict({ @@ -12932,9 +14409,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', 'error', ]), 'title': 'InvalidToolCall', @@ -13166,12 +14644,23 @@ This represents a request to call the tool named "foo" with arguments {"a": 1} and an identifier of "123". + + .. note:: + ``create_tool_call`` may also be used as a factory to create a + ``ToolCall``. Benefits include: + + * Automatic ID generation (when not provided) + * Required arguments strictly validated at creation time ''', 'properties': dict({ 'args': dict({ 'title': 'Args', 'type': 'object', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -13183,6 +14672,10 @@ ]), 'title': 'Id', }), + 'index': dict({ + 'title': 'Index', + 'type': 'integer', + }), 'name': dict({ 'title': 'Name', 'type': 'string', @@ -13193,9 +14686,10 @@ }), }), 'required': list([ + 'type', + 'id', 'name', 'args', - 'id', ]), 'title': 'ToolCall', 'type': 'object', @@ -13204,9 +14698,9 @@ 'description': ''' A chunk of a tool call (e.g., as part of a stream). - When merging ToolCallChunks (e.g., via AIMessageChunk.__add__), + When merging ToolCallChunks (e.g., via ``AIMessageChunk.__add__``), all string attributes are concatenated. Chunks are only merged if their - values of `index` are equal and not None. + values of ``index`` are equal and not ``None``. Example: @@ -13232,6 +14726,10 @@ ]), 'title': 'Args', }), + 'extras': dict({ + 'title': 'Extras', + 'type': 'object', + }), 'id': dict({ 'anyOf': list([ dict({ @@ -13271,9 +14769,9 @@ }), }), 'required': list([ + 'id', 'name', 'args', - 'id', 'index', ]), 'title': 'ToolCallChunk', diff --git a/libs/core/tests/unit_tests/runnables/test_graph.py b/libs/core/tests/unit_tests/runnables/test_graph.py index 7944d0b1da4..fd9ff2f813e 100644 --- a/libs/core/tests/unit_tests/runnables/test_graph.py +++ b/libs/core/tests/unit_tests/runnables/test_graph.py @@ -14,9 +14,7 @@ from langchain_core.runnables import RunnableConfig from langchain_core.runnables.base import Runnable from langchain_core.runnables.graph import Edge, Graph, Node from langchain_core.runnables.graph_mermaid import _escape_node_label -from langchain_core.utils.pydantic import ( - PYDANTIC_VERSION, -) +from langchain_core.utils.pydantic import PYDANTIC_VERSION from tests.unit_tests.pydantic_utils import _normalize_schema diff --git a/libs/core/tests/unit_tests/runnables/test_runnable.py b/libs/core/tests/unit_tests/runnables/test_runnable.py index 4b63fb50ae2..d9a9db349e8 100644 --- a/libs/core/tests/unit_tests/runnables/test_runnable.py +++ b/libs/core/tests/unit_tests/runnables/test_runnable.py @@ -6,13 +6,7 @@ import warnings from collections.abc import AsyncIterator, Awaitable, Iterator, Sequence from functools import partial from operator import itemgetter -from typing import ( - Any, - Callable, - Optional, - Union, - cast, -) +from typing import Any, Callable, Optional, Union, cast from uuid import UUID import pytest @@ -37,11 +31,7 @@ from langchain_core.language_models import ( ) from langchain_core.load import dumpd, dumps from langchain_core.load.load import loads -from langchain_core.messages import ( - AIMessageChunk, - HumanMessage, - SystemMessage, -) 
+from langchain_core.messages import AIMessageChunk, HumanMessage, SystemMessage from langchain_core.messages.base import BaseMessage from langchain_core.output_parsers import ( BaseOutputParser, @@ -90,9 +80,7 @@ from langchain_core.tracers import ( RunLogPatch, ) from langchain_core.tracers.context import collect_runs -from langchain_core.utils.pydantic import ( - PYDANTIC_VERSION, -) +from langchain_core.utils.pydantic import PYDANTIC_VERSION from tests.unit_tests.pydantic_utils import _normalize_schema, _schema from tests.unit_tests.stubs import AnyStr, _any_id_ai_message, _any_id_ai_message_chunk @@ -243,7 +231,11 @@ def test_schemas(snapshot: SnapshotAssertion) -> None: } assert fake.get_config_jsonschema(include=["tags", "metadata", "run_name"]) == { "properties": { - "metadata": {"default": None, "title": "Metadata", "type": "object"}, + "metadata": { + "default": None, + "title": "Metadata", + "type": "object", + }, "run_name": {"default": None, "title": "Run Name", "type": "string"}, "tags": { "default": None, diff --git a/libs/core/tests/unit_tests/test_messages.py b/libs/core/tests/unit_tests/test_messages.py index 58822433d7a..d59ebeee2a2 100644 --- a/libs/core/tests/unit_tests/test_messages.py +++ b/libs/core/tests/unit_tests/test_messages.py @@ -3,6 +3,7 @@ import uuid from typing import Optional, Union import pytest +from typing_extensions import get_args from langchain_core.documents import Document from langchain_core.load import dumpd, load @@ -30,10 +31,13 @@ from langchain_core.messages import ( messages_from_dict, messages_to_dict, ) +from langchain_core.messages.content_blocks import KNOWN_BLOCK_TYPES, ContentBlock from langchain_core.messages.tool import invalid_tool_call as create_invalid_tool_call from langchain_core.messages.tool import tool_call as create_tool_call from langchain_core.messages.tool import tool_call_chunk as create_tool_call_chunk from langchain_core.utils._merge import merge_lists +from langchain_core.v1.messages import AIMessage as AIMessageV1 +from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1 def test_message_init() -> None: @@ -195,6 +199,156 @@ def test_message_chunks() -> None: assert (meaningful_id + default_id).id == "msg_def456" +def test_message_chunks_v1() -> None: + left = AIMessageChunkV1("foo ", id="abc") + right = AIMessageChunkV1("bar") + expected = AIMessageChunkV1("foo bar", id="abc") + assert left + right == expected + + # Test tool calls + one = AIMessageChunkV1( + [], + tool_call_chunks=[ + create_tool_call_chunk(name="tool1", args="", id="1", index=0) + ], + ) + two = AIMessageChunkV1( + [], + tool_call_chunks=[ + create_tool_call_chunk(name=None, args='{"arg1": "val', id=None, index=0) + ], + ) + three = AIMessageChunkV1( + [], + tool_call_chunks=[ + create_tool_call_chunk(name=None, args='ue}"', id=None, index=0) + ], + ) + result = one + two + three + expected = AIMessageChunkV1( + [], + tool_call_chunks=[ + create_tool_call_chunk( + name="tool1", args='{"arg1": "value}"', id="1", index=0 + ) + ], + id=result.id, # Use the same ID as the result + ) + assert result == expected + + converted_message = result.to_message() + assert converted_message == AIMessageV1( + content=[ + { + "name": "tool1", + "args": {"arg1": "value}"}, + "id": "1", + "type": "tool_call", + } + ], + id=converted_message.id, # Use the same ID as the converted message + ) + + chunk1 = AIMessageChunkV1( + [], + tool_call_chunks=[ + create_tool_call_chunk(name="tool1", args="", id="1", index=0) + ], + ) + chunk2 = 
AIMessageChunkV1( + [], + tool_call_chunks=[ + create_tool_call_chunk(name="tool1", args="a", id=None, index=1) + ], + ) + # Don't merge if `index` field does not match. + merge_result = chunk1 + chunk2 + assert merge_result == AIMessageChunkV1( + [], + tool_call_chunks=[ + create_tool_call_chunk(name="tool1", args="", id="1", index=0), + create_tool_call_chunk(name="tool1", args="a", id=None, index=1), + ], + id=merge_result.id, # Use the same ID as the merge result + ) + + ai_msg_chunk = AIMessageChunkV1([]) + tool_calls_msg_chunk = AIMessageChunkV1( + [], + tool_call_chunks=[ + create_tool_call_chunk(name="tool1", args="a", id=None, index=1) + ], + ) + # These assertions test that adding empty chunks preserves the non-empty chunk + result1 = ai_msg_chunk + tool_calls_msg_chunk + assert result1.tool_call_chunks == tool_calls_msg_chunk.tool_call_chunks + assert result1.content == tool_calls_msg_chunk.content + + result2 = tool_calls_msg_chunk + ai_msg_chunk + assert result2.tool_call_chunks == tool_calls_msg_chunk.tool_call_chunks + assert result2.content == tool_calls_msg_chunk.content + + ai_msg_chunk = AIMessageChunkV1( + [], + tool_call_chunks=[ + create_tool_call_chunk(name="tool1", args="", id="1", index=0) + ], + ) + assert ai_msg_chunk.tool_calls == [create_tool_call(name="tool1", args={}, id="1")] + + # Test token usage + left = AIMessageChunkV1( + [], + usage_metadata={"input_tokens": 1, "output_tokens": 2, "total_tokens": 3}, + ) + right = AIMessageChunkV1( + [], + usage_metadata={"input_tokens": 4, "output_tokens": 5, "total_tokens": 9}, + ) + usage_result = left + right + expected_usage = AIMessageChunkV1( + content=[], + usage_metadata={"input_tokens": 5, "output_tokens": 7, "total_tokens": 12}, + id=usage_result.id, # Use the same ID as the result + ) + assert usage_result == expected_usage + + # Test adding empty chunks preserves the original + left_result = AIMessageChunkV1(content=[]) + left + assert left_result.usage_metadata == left.usage_metadata + assert left_result.content == left.content + + right_result = right + AIMessageChunkV1(content=[]) + assert right_result.usage_metadata == right.usage_metadata + assert right_result.content == right.content + + # Test ID order of precedence + # Note: AIMessageChunkV1 always generates an ID if none provided + auto_id = AIMessageChunkV1(content=[]) # Gets auto-generated lc_* ID + default_id = AIMessageChunkV1( + content=[], id="run-abc123" + ) # LangChain-assigned run ID + meaningful_id = AIMessageChunkV1( + content=[], id="msg_def456" + ) # provider-assigned ID + + # Provider-assigned IDs (non-run-* and non-lc_*) have highest precedence + # Provider-assigned IDs always win over LangChain-generated IDs + assert (auto_id + meaningful_id).id == "msg_def456" # provider-assigned wins + assert (meaningful_id + auto_id).id == "msg_def456" # provider-assigned wins + + assert ( + default_id + meaningful_id + ).id == "msg_def456" # meaningful_id is provider-assigned + assert ( + meaningful_id + default_id + ).id == "msg_def456" # meaningful_id is provider-assigned + + # Between auto-generated and run-* IDs, auto-generated wins (since lc_ != run-) + assert (auto_id + default_id).id == auto_id.id + assert (default_id + auto_id).id == auto_id.id + + def test_chat_message_chunks() -> None: assert ChatMessageChunk(role="User", content="I am", id="ai4") + ChatMessageChunk( role="User", content=" indeed." 
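(Editor's note: the new ``test_message_chunks_v1`` hunk above pins down the v1
chunk-merging semantics: tool call chunk strings concatenate only when their
``index`` values match, usage metadata is summed, and provider-assigned IDs win
over auto-generated ``lc_*`` ones. A minimal sketch of the same behavior outside
the test harness, assuming the ``langchain_core.v1`` module added by this patch
is importable; names like ``first``/``second`` are illustrative only:

.. code-block:: python

    from langchain_core.messages.tool import tool_call_chunk
    from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1

    # A provider streams one tool call in two pieces; the args arrive as
    # JSON string fragments tagged with the same index.
    first = AIMessageChunkV1(
        [],
        tool_call_chunks=[tool_call_chunk(name="add", args='{"a": ', id="1", index=0)],
    )
    second = AIMessageChunkV1(
        [],
        tool_call_chunks=[tool_call_chunk(name=None, args="1}", id=None, index=0)],
    )

    merged = first + second
    # Matching indices merge into a single, parseable tool call.
    assert merged.tool_calls[0]["name"] == "add"
    assert merged.tool_calls[0]["args"] == {"a": 1}

Chunks with differing indices would instead be kept side by side, as the
hunk's ``chunk1 + chunk2`` assertion shows.)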
@@ -207,7 +361,7 @@ def test_chat_message_chunks() -> None: ): ChatMessageChunk(role="User", content="I am") + ChatMessageChunk( role="Assistant", content=" indeed." - ) + ) # type: ignore[reportUnusedExpression, unused-ignore] assert ChatMessageChunk(role="User", content="I am") + AIMessageChunk( content=" indeed." @@ -316,7 +470,7 @@ def test_function_message_chunks() -> None: ): FunctionMessageChunk(name="hello", content="I am") + FunctionMessageChunk( name="bye", content=" indeed." - ) + ) # type: ignore[reportUnusedExpression, unused-ignore] def test_ai_message_chunks() -> None: @@ -332,7 +486,7 @@ def test_ai_message_chunks() -> None: ): AIMessageChunk(example=True, content="I am") + AIMessageChunk( example=False, content=" indeed." - ) + ) # type: ignore[reportUnusedExpression, unused-ignore] class TestGetBufferString(unittest.TestCase): @@ -1111,23 +1265,20 @@ def test_is_data_content_block() -> None: assert is_data_content_block( { "type": "image", - "source_type": "url", "url": "https://...", } ) assert is_data_content_block( { "type": "image", - "source_type": "base64", - "data": "", + "base64": "", "mime_type": "image/jpeg", } ) assert is_data_content_block( { "type": "image", - "source_type": "base64", - "data": "", + "base64": "", "mime_type": "image/jpeg", "cache_control": {"type": "ephemeral"}, } @@ -1135,13 +1286,17 @@ def test_is_data_content_block() -> None: assert is_data_content_block( { "type": "image", - "source_type": "base64", - "data": "", + "base64": "", "mime_type": "image/jpeg", "metadata": {"cache_control": {"type": "ephemeral"}}, } ) - + assert is_data_content_block( + { + "type": "image", + "source_type": "base64", # backward compatibility + } + ) assert not is_data_content_block( { "type": "text", @@ -1154,12 +1309,6 @@ def test_is_data_content_block() -> None: "image_url": {"url": "https://..."}, } ) - assert not is_data_content_block( - { - "type": "image", - "source_type": "base64", - } - ) assert not is_data_content_block( { "type": "image", @@ -1169,31 +1318,65 @@ def test_is_data_content_block() -> None: def test_convert_to_openai_image_block() -> None: - input_block = { - "type": "image", - "source_type": "url", - "url": "https://...", - "cache_control": {"type": "ephemeral"}, - } - expected = { - "type": "image_url", - "image_url": {"url": "https://..."}, - } - result = convert_to_openai_image_block(input_block) - assert result == expected - - input_block = { - "type": "image", - "source_type": "base64", - "data": "", - "mime_type": "image/jpeg", - "cache_control": {"type": "ephemeral"}, - } - expected = { - "type": "image_url", - "image_url": { - "url": "data:image/jpeg;base64,", + for input_block in [ + { + "type": "image", + "url": "https://...", + "cache_control": {"type": "ephemeral"}, }, + { + "type": "image", + "source_type": "url", + "url": "https://...", + "cache_control": {"type": "ephemeral"}, + }, + ]: + expected = { + "type": "image_url", + "image_url": {"url": "https://..."}, + } + result = convert_to_openai_image_block(input_block) + assert result == expected + + for input_block in [ + { + "type": "image", + "base64": "", + "mime_type": "image/jpeg", + "cache_control": {"type": "ephemeral"}, + }, + { + "type": "image", + "source_type": "base64", + "data": "", + "mime_type": "image/jpeg", + "cache_control": {"type": "ephemeral"}, + }, + ]: + expected = { + "type": "image_url", + "image_url": { + "url": "data:image/jpeg;base64,", + }, + } + result = convert_to_openai_image_block(input_block) + assert result == expected + + +def 
test_known_block_types() -> None: + expected = { + bt + for bt in get_args(ContentBlock) + for bt in get_args(bt.__annotations__["type"]) } - result = convert_to_openai_image_block(input_block) - assert result == expected + # Normalize any Literal[...] types in block types to their string values. + # This ensures all entries are plain strings, not Literal objects. + expected = { + t + if isinstance(t, str) + else t.__args__[0] + if hasattr(t, "__args__") and len(t.__args__) == 1 + else t + for t in expected + } + assert expected == KNOWN_BLOCK_TYPES diff --git a/libs/core/tests/unit_tests/test_tools.py b/libs/core/tests/unit_tests/test_tools.py index 57b4573d70d..e7df9ab0ca7 100644 --- a/libs/core/tests/unit_tests/test_tools.py +++ b/libs/core/tests/unit_tests/test_tools.py @@ -68,8 +68,10 @@ from langchain_core.utils.pydantic import ( _create_subset_model, create_model_v2, ) +from langchain_core.v1.messages import ToolMessage as ToolMessageV1 from tests.unit_tests.fake.callbacks import FakeCallbackHandler from tests.unit_tests.pydantic_utils import _schema +from tests.unit_tests.stubs import AnyStr def _get_tool_call_json_schema(tool: BaseTool) -> dict: @@ -1379,17 +1381,28 @@ def test_tool_annotated_descriptions() -> None: } -def test_tool_call_input_tool_message_output() -> None: +@pytest.mark.parametrize("message_version", ["v0", "v1"]) +def test_tool_call_input_tool_message(message_version: Literal["v0", "v1"]) -> None: tool_call = { "name": "structured_api", "args": {"arg1": 1, "arg2": True, "arg3": {"img": "base64string..."}}, "id": "123", "type": "tool_call", } - tool = _MockStructuredTool() - expected = ToolMessage( - "1 True {'img': 'base64string...'}", tool_call_id="123", name="structured_api" - ) + tool = _MockStructuredTool(message_version=message_version) + if message_version == "v0": + expected: Union[ToolMessage, ToolMessageV1] = ToolMessage( + "1 True {'img': 'base64string...'}", + tool_call_id="123", + name="structured_api", + ) + else: + expected = ToolMessageV1( + "1 True {'img': 'base64string...'}", + tool_call_id="123", + name="structured_api", + id=AnyStr("lc_abc123"), + ) actual = tool.invoke(tool_call) assert actual == expected @@ -1421,6 +1434,14 @@ def _mock_structured_tool_with_artifact( return f"{arg1} {arg2}", {"arg1": arg1, "arg2": arg2, "arg3": arg3} +@tool("structured_api", response_format="content_and_artifact", message_version="v1") +def _mock_structured_tool_with_artifact_v1( + *, arg1: int, arg2: bool, arg3: Optional[dict] = None +) -> tuple[str, dict]: + """A Structured Tool.""" + return f"{arg1} {arg2}", {"arg1": arg1, "arg2": arg2, "arg3": arg3} + + @pytest.mark.parametrize( "tool", [_MockStructuredToolWithRawOutput(), _mock_structured_tool_with_artifact] ) @@ -1445,6 +1466,38 @@ def test_tool_call_input_tool_message_with_artifact(tool: BaseTool) -> None: assert actual_content == expected.content +@pytest.mark.parametrize( + "tool", + [ + _MockStructuredToolWithRawOutput(message_version="v1"), + _mock_structured_tool_with_artifact_v1, + ], +) +def test_tool_call_input_tool_message_with_artifact_v1(tool: BaseTool) -> None: + tool_call: dict = { + "name": "structured_api", + "args": {"arg1": 1, "arg2": True, "arg3": {"img": "base64string..."}}, + "id": "123", + "type": "tool_call", + } + expected = ToolMessageV1( + "1 True", + artifact=tool_call["args"], + tool_call_id="123", + name="structured_api", + id=AnyStr("lc_abc123"), + ) + actual = tool.invoke(tool_call) + assert actual == expected + + tool_call.pop("type") + with 
pytest.raises(ValidationError): + tool.invoke(tool_call) + + actual_content = tool.invoke(tool_call["args"]) + assert actual_content == expected.text + + def test_convert_from_runnable_dict() -> None: # Test with typed dict input class Args(TypedDict): @@ -1550,6 +1603,17 @@ def injected_tool(x: int, y: Annotated[str, InjectedToolArg]) -> str: return y +@tool("foo", parse_docstring=True, message_version="v1") +def injected_tool_v1(x: int, y: Annotated[str, InjectedToolArg]) -> str: + """Foo. + + Args: + x: abc + y: 123 + """ + return y + + class InjectedTool(BaseTool): name: str = "foo" description: str = "foo." @@ -1587,7 +1651,12 @@ def injected_tool_with_schema(x: int, y: str) -> str: return y -@pytest.mark.parametrize("tool_", [InjectedTool()]) +@tool("foo", args_schema=fooSchema, message_version="v1") +def injected_tool_with_schema_v1(x: int, y: str) -> str: + return y + + +@pytest.mark.parametrize("tool_", [InjectedTool(), InjectedTool(message_version="v1")]) def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None: assert _schema(tool_.get_input_schema()) == { "title": "foo", @@ -1607,14 +1676,25 @@ def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None: "required": ["x"], } assert tool_.invoke({"x": 5, "y": "bar"}) == "bar" - assert tool_.invoke( - { - "name": "foo", - "args": {"x": 5, "y": "bar"}, - "id": "123", - "type": "tool_call", - } - ) == ToolMessage("bar", tool_call_id="123", name="foo") + if tool_.message_version == "v0": + expected: Union[ToolMessage, ToolMessageV1] = ToolMessage( + "bar", tool_call_id="123", name="foo" + ) + else: + expected = ToolMessageV1( + "bar", tool_call_id="123", name="foo", id=AnyStr("lc_abc123") + ) + assert ( + tool_.invoke( + { + "name": "foo", + "args": {"x": 5, "y": "bar"}, + "id": "123", + "type": "tool_call", + } + ) + == expected + ) expected_error = ( ValidationError if not isinstance(tool_, InjectedTool) else TypeError ) @@ -1634,7 +1714,12 @@ def test_tool_injected_arg_without_schema(tool_: BaseTool) -> None: @pytest.mark.parametrize( "tool_", - [injected_tool_with_schema, InjectedToolWithSchema()], + [ + injected_tool_with_schema, + InjectedToolWithSchema(), + injected_tool_with_schema_v1, + InjectedToolWithSchema(message_version="v1"), + ], ) def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None: assert _schema(tool_.get_input_schema()) == { @@ -1655,14 +1740,25 @@ def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None: "required": ["x"], } assert tool_.invoke({"x": 5, "y": "bar"}) == "bar" - assert tool_.invoke( - { - "name": "foo", - "args": {"x": 5, "y": "bar"}, - "id": "123", - "type": "tool_call", - } - ) == ToolMessage("bar", tool_call_id="123", name="foo") + if tool_.message_version == "v0": + expected: Union[ToolMessage, ToolMessageV1] = ToolMessage( + "bar", tool_call_id="123", name="foo" + ) + else: + expected = ToolMessageV1( + "bar", tool_call_id="123", name="foo", id=AnyStr("lc_abc123") + ) + assert ( + tool_.invoke( + { + "name": "foo", + "args": {"x": 5, "y": "bar"}, + "id": "123", + "type": "tool_call", + } + ) + == expected + ) expected_error = ( ValidationError if not isinstance(tool_, InjectedTool) else TypeError ) @@ -1680,8 +1776,9 @@ def test_tool_injected_arg_with_schema(tool_: BaseTool) -> None: } -def test_tool_injected_arg() -> None: - tool_ = injected_tool +@pytest.mark.parametrize("message_version", ["v0", "v1"]) +def test_tool_injected_arg(message_version: Literal["v0", "v1"]) -> None: + tool_ = injected_tool if message_version == "v0" else injected_tool_v1 assert 
_schema(tool_.get_input_schema()) == { "title": "foo", "description": "Foo.", @@ -1700,14 +1797,25 @@ def test_tool_injected_arg() -> None: "required": ["x"], } assert tool_.invoke({"x": 5, "y": "bar"}) == "bar" - assert tool_.invoke( - { - "name": "foo", - "args": {"x": 5, "y": "bar"}, - "id": "123", - "type": "tool_call", - } - ) == ToolMessage("bar", tool_call_id="123", name="foo") + if message_version == "v0": + expected: Union[ToolMessage, ToolMessageV1] = ToolMessage( + "bar", tool_call_id="123", name="foo" + ) + else: + expected = ToolMessageV1( + "bar", tool_call_id="123", name="foo", id=AnyStr("lc_abc123") + ) + assert ( + tool_.invoke( + { + "name": "foo", + "args": {"x": 5, "y": "bar"}, + "id": "123", + "type": "tool_call", + } + ) + == expected + ) expected_error = ( ValidationError if not isinstance(tool_, InjectedTool) else TypeError ) @@ -1725,7 +1833,8 @@ def test_tool_injected_arg() -> None: } -def test_tool_inherited_injected_arg() -> None: +@pytest.mark.parametrize("message_version", ["v0", "v1"]) +def test_tool_inherited_injected_arg(message_version: Literal["v0", "v1"]) -> None: class BarSchema(BaseModel): """bar.""" @@ -1746,7 +1855,7 @@ def test_tool_inherited_injected_arg() -> None: def _run(self, x: int, y: str) -> Any: return y - tool_ = InheritedInjectedArgTool() + tool_ = InheritedInjectedArgTool(message_version=message_version) assert tool_.get_input_schema().model_json_schema() == { "title": "FooSchema", # Matches the title from the provided schema "description": "foo.", @@ -1766,14 +1875,25 @@ def test_tool_inherited_injected_arg() -> None: "required": ["x"], } assert tool_.invoke({"x": 5, "y": "bar"}) == "bar" - assert tool_.invoke( - { - "name": "foo", - "args": {"x": 5, "y": "bar"}, - "id": "123", - "type": "tool_call", - } - ) == ToolMessage("bar", tool_call_id="123", name="foo") + if message_version == "v0": + expected: Union[ToolMessage, ToolMessageV1] = ToolMessage( + "bar", tool_call_id="123", name="foo" + ) + else: + expected = ToolMessageV1( + "bar", tool_call_id="123", name="foo", id=AnyStr("lc_abc123") + ) + assert ( + tool_.invoke( + { + "name": "foo", + "args": {"x": 5, "y": "bar"}, + "id": "123", + "type": "tool_call", + } + ) + == expected + ) expected_error = ( ValidationError if not isinstance(tool_, InjectedTool) else TypeError ) @@ -2133,7 +2253,8 @@ def test_tool_annotations_preserved() -> None: assert schema.__annotations__ == expected_type_hints -def test_create_retriever_tool() -> None: +@pytest.mark.parametrize("message_version", ["v0", "v1"]) +def test_create_retriever_tool(message_version: Literal["v0", "v1"]) -> None: class MyRetriever(BaseRetriever): def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun @@ -2142,21 +2263,36 @@ def test_create_retriever_tool() -> None: retriever = MyRetriever() retriever_tool = tools.create_retriever_tool( - retriever, "retriever_tool_content", "Retriever Tool Content" + retriever, + "retriever_tool_content", + "Retriever Tool Content", + message_version=message_version, ) assert isinstance(retriever_tool, BaseTool) assert retriever_tool.name == "retriever_tool_content" assert retriever_tool.description == "Retriever Tool Content" assert retriever_tool.invoke("bar") == "foo bar\n\nbar" - assert retriever_tool.invoke( - ToolCall( - name="retriever_tool_content", - args={"query": "bar"}, - id="123", - type="tool_call", + if message_version == "v0": + expected: Union[ToolMessage, ToolMessageV1] = ToolMessage( + "foo bar\n\nbar", tool_call_id="123", 
name="retriever_tool_content" ) - ) == ToolMessage( - "foo bar\n\nbar", tool_call_id="123", name="retriever_tool_content" + else: + expected = ToolMessageV1( + "foo bar\n\nbar", + tool_call_id="123", + name="retriever_tool_content", + id=AnyStr("lc_abc123"), + ) + assert ( + retriever_tool.invoke( + ToolCall( + name="retriever_tool_content", + args={"query": "bar"}, + id="123", + type="tool_call", + ) + ) + == expected ) retriever_tool_artifact = tools.create_retriever_tool( @@ -2164,23 +2300,37 @@ def test_create_retriever_tool() -> None: "retriever_tool_artifact", "Retriever Tool Artifact", response_format="content_and_artifact", + message_version=message_version, ) assert isinstance(retriever_tool_artifact, BaseTool) assert retriever_tool_artifact.name == "retriever_tool_artifact" assert retriever_tool_artifact.description == "Retriever Tool Artifact" assert retriever_tool_artifact.invoke("bar") == "foo bar\n\nbar" - assert retriever_tool_artifact.invoke( - ToolCall( + if message_version == "v0": + expected = ToolMessage( + "foo bar\n\nbar", + artifact=[Document(page_content="foo bar"), Document(page_content="bar")], + tool_call_id="123", name="retriever_tool_artifact", - args={"query": "bar"}, - id="123", - type="tool_call", ) - ) == ToolMessage( - "foo bar\n\nbar", - artifact=[Document(page_content="foo bar"), Document(page_content="bar")], - tool_call_id="123", - name="retriever_tool_artifact", + else: + expected = ToolMessageV1( + "foo bar\n\nbar", + artifact=[Document(page_content="foo bar"), Document(page_content="bar")], + tool_call_id="123", + name="retriever_tool_artifact", + id=AnyStr("lc_abc123"), + ) + assert ( + retriever_tool_artifact.invoke( + ToolCall( + name="retriever_tool_artifact", + args={"query": "bar"}, + id="123", + type="tool_call", + ) + ) + == expected ) @@ -2313,6 +2463,45 @@ def test_tool_injected_tool_call_id() -> None: ) == ToolMessage(0, tool_call_id="bar") # type: ignore[arg-type] +def test_tool_injected_tool_call_id_v1() -> None: + @tool + def foo(x: int, tool_call_id: Annotated[str, InjectedToolCallId]) -> ToolMessageV1: + """Foo.""" + return ToolMessageV1(str(x), tool_call_id=tool_call_id) + + assert foo.invoke( + { + "type": "tool_call", + "args": {"x": 0}, + "name": "foo", + "id": "bar", + } + ) == ToolMessageV1("0", tool_call_id="bar", id=AnyStr("lc_abc123")) + + with pytest.raises( + ValueError, + match="When tool includes an InjectedToolCallId argument, " + "tool must always be invoked with a full model ToolCall", + ): + assert foo.invoke({"x": 0}) + + @tool + def foo2( + x: int, tool_call_id: Annotated[str, InjectedToolCallId()] + ) -> ToolMessageV1: + """Foo.""" + return ToolMessageV1(str(x), tool_call_id=tool_call_id) + + assert foo2.invoke( + { + "type": "tool_call", + "args": {"x": 0}, + "name": "foo", + "id": "bar", + } + ) == ToolMessageV1("0", tool_call_id="bar", id=AnyStr("lc_abc123")) + + def test_tool_uninjected_tool_call_id() -> None: @tool def foo(x: int, tool_call_id: str) -> ToolMessage: @@ -2332,6 +2521,25 @@ def test_tool_uninjected_tool_call_id() -> None: ) == ToolMessage(0, tool_call_id="zap") # type: ignore[arg-type] +def test_tool_uninjected_tool_call_id_v1() -> None: + @tool + def foo(x: int, tool_call_id: str) -> ToolMessageV1: + """Foo.""" + return ToolMessageV1(str(x), tool_call_id=tool_call_id) + + with pytest.raises(ValueError, match="1 validation error for foo"): + foo.invoke({"type": "tool_call", "args": {"x": 0}, "name": "foo", "id": "bar"}) + + assert foo.invoke( + { + "type": "tool_call", + "args": {"x": 0, 
"tool_call_id": "zap"}, + "name": "foo", + "id": "bar", + } + ) == ToolMessageV1("0", tool_call_id="zap", id=AnyStr("lc_abc123")) + + def test_tool_return_output_mixin() -> None: class Bar(ToolOutputMixin): def __init__(self, x: int) -> None: @@ -2457,6 +2665,19 @@ def test_empty_string_tool_call_id() -> None: ) +def test_empty_string_tool_call_id_v1() -> None: + @tool(message_version="v1") + def foo(x: int) -> str: + """Foo.""" + return "hi" + + assert foo.invoke( + {"type": "tool_call", "args": {"x": 0}, "id": ""} + ) == ToolMessageV1( + content="hi", name="foo", tool_call_id="", id=AnyStr("lc_abc123") + ) + + def test_tool_decorator_description() -> None: # test basic tool @tool diff --git a/libs/core/tests/unit_tests/tracers/test_async_base_tracer.py b/libs/core/tests/unit_tests/tracers/test_async_base_tracer.py index 1b243c03816..98c465f900c 100644 --- a/libs/core/tests/unit_tests/tracers/test_async_base_tracer.py +++ b/libs/core/tests/unit_tests/tracers/test_async_base_tracer.py @@ -3,7 +3,7 @@ from __future__ import annotations from datetime import datetime, timezone -from typing import Any +from typing import TYPE_CHECKING, Any from uuid import uuid4 import pytest @@ -15,6 +15,11 @@ from langchain_core.messages import HumanMessage from langchain_core.outputs import LLMResult from langchain_core.tracers.base import AsyncBaseTracer from langchain_core.tracers.schemas import Run +from langchain_core.v1.messages import HumanMessage as HumanMessageV1 +from langchain_core.v1.messages import MessageV1 + +if TYPE_CHECKING: + from langchain_core.messages import BaseMessage SERIALIZED = {"id": ["llm"]} SERIALIZED_CHAT = {"id": ["chat_model"]} @@ -84,8 +89,41 @@ async def test_tracer_chat_model_run() -> None: """Test tracer on a Chat Model run.""" tracer = FakeAsyncTracer() manager = AsyncCallbackManager(handlers=[tracer]) + messages: list[list[BaseMessage]] = [[HumanMessage(content="")]] run_managers = await manager.on_chat_model_start( - serialized=SERIALIZED_CHAT, messages=[[HumanMessage(content="")]] + serialized=SERIALIZED_CHAT, messages=messages + ) + compare_run = Run( + id=str(run_managers[0].run_id), # type: ignore[arg-type] + name="chat_model", + start_time=datetime.now(timezone.utc), + end_time=datetime.now(timezone.utc), + events=[ + {"name": "start", "time": datetime.now(timezone.utc)}, + {"name": "end", "time": datetime.now(timezone.utc)}, + ], + extra={}, + serialized=SERIALIZED_CHAT, + inputs={"prompts": ["Human: "]}, + outputs=LLMResult(generations=[[]]), # type: ignore[arg-type] + error=None, + run_type="llm", + trace_id=run_managers[0].run_id, + dotted_order=f"20230101T000000000000Z{run_managers[0].run_id}", + ) + for run_manager in run_managers: + await run_manager.on_llm_end(response=LLMResult(generations=[[]])) + assert tracer.runs == [compare_run] + + +@freeze_time("2023-01-01") +async def test_tracer_chat_model_run_v1() -> None: + """Test tracer on a Chat Model run.""" + tracer = FakeAsyncTracer() + manager = AsyncCallbackManager(handlers=[tracer]) + messages: list[MessageV1] = [HumanMessageV1("")] + run_managers = await manager.on_chat_model_start( + serialized=SERIALIZED_CHAT, messages=messages ) compare_run = Run( id=str(run_managers[0].run_id), # type: ignore[arg-type] diff --git a/libs/core/tests/unit_tests/tracers/test_base_tracer.py b/libs/core/tests/unit_tests/tracers/test_base_tracer.py index aaa34a662f2..f4964e15ff8 100644 --- a/libs/core/tests/unit_tests/tracers/test_base_tracer.py +++ b/libs/core/tests/unit_tests/tracers/test_base_tracer.py @@ -3,7 +3,7 @@ 
from __future__ import annotations from datetime import datetime, timezone -from typing import Any +from typing import TYPE_CHECKING, Any from unittest.mock import MagicMock from uuid import uuid4 @@ -19,6 +19,11 @@ from langchain_core.outputs import LLMResult from langchain_core.runnables import chain as as_runnable from langchain_core.tracers.base import BaseTracer from langchain_core.tracers.schemas import Run +from langchain_core.v1.messages import HumanMessage as HumanMessageV1 +from langchain_core.v1.messages import MessageV1 + +if TYPE_CHECKING: + from langchain_core.messages import BaseMessage SERIALIZED = {"id": ["llm"]} SERIALIZED_CHAT = {"id": ["chat_model"]} @@ -89,8 +94,42 @@ def test_tracer_chat_model_run() -> None: """Test tracer on a Chat Model run.""" tracer = FakeTracer() manager = CallbackManager(handlers=[tracer]) + # TODO: why is this annotation needed + messages: list[list[BaseMessage]] = [[HumanMessage(content="")]] run_managers = manager.on_chat_model_start( - serialized=SERIALIZED_CHAT, messages=[[HumanMessage(content="")]] + serialized=SERIALIZED_CHAT, messages=messages + ) + compare_run = Run( + id=str(run_managers[0].run_id), # type: ignore[arg-type] + name="chat_model", + start_time=datetime.now(timezone.utc), + end_time=datetime.now(timezone.utc), + events=[ + {"name": "start", "time": datetime.now(timezone.utc)}, + {"name": "end", "time": datetime.now(timezone.utc)}, + ], + extra={}, + serialized=SERIALIZED_CHAT, + inputs={"prompts": ["Human: "]}, + outputs=LLMResult(generations=[[]]), # type: ignore[arg-type] + error=None, + run_type="llm", + trace_id=run_managers[0].run_id, + dotted_order=f"20230101T000000000000Z{run_managers[0].run_id}", + ) + for run_manager in run_managers: + run_manager.on_llm_end(response=LLMResult(generations=[[]])) + assert tracer.runs == [compare_run] + + +@freeze_time("2023-01-01") +def test_tracer_chat_model_run_v1() -> None: + """Test tracer on a Chat Model run.""" + tracer = FakeTracer() + manager = CallbackManager(handlers=[tracer]) + messages: list[MessageV1] = [HumanMessageV1("")] + run_managers = manager.on_chat_model_start( + serialized=SERIALIZED_CHAT, messages=messages ) compare_run = Run( id=str(run_managers[0].run_id), # type: ignore[arg-type] diff --git a/libs/langchain/langchain/agents/output_parsers/openai_functions.py b/libs/langchain/langchain/agents/output_parsers/openai_functions.py index e452d8d604d..a42e237b9af 100644 --- a/libs/langchain/langchain/agents/output_parsers/openai_functions.py +++ b/libs/langchain/langchain/agents/output_parsers/openai_functions.py @@ -8,7 +8,9 @@ from langchain_core.messages import ( AIMessage, BaseMessage, ) +from langchain_core.messages.utils import convert_from_v1_message from langchain_core.outputs import ChatGeneration, Generation +from langchain_core.v1.messages import AIMessage as AIMessageV1 from typing_extensions import override from langchain.agents.agent import AgentOutputParser @@ -83,10 +85,12 @@ class OpenAIFunctionsAgentOutputParser(AgentOutputParser): @override def parse_result( self, - result: list[Generation], + result: Union[list[Generation], AIMessageV1], *, partial: bool = False, ) -> Union[AgentAction, AgentFinish]: + if isinstance(result, AIMessageV1): + result = [ChatGeneration(message=convert_from_v1_message(result))] if not isinstance(result[0], ChatGeneration): msg = "This output parser only works on ChatGeneration output" raise ValueError(msg) # noqa: TRY004 diff --git a/libs/langchain/langchain/agents/output_parsers/openai_tools.py 
b/libs/langchain/langchain/agents/output_parsers/openai_tools.py index 8db0ceb663c..eca9a186173 100644 --- a/libs/langchain/langchain/agents/output_parsers/openai_tools.py +++ b/libs/langchain/langchain/agents/output_parsers/openai_tools.py @@ -2,7 +2,9 @@ from typing import Union from langchain_core.agents import AgentAction, AgentFinish from langchain_core.messages import BaseMessage +from langchain_core.messages.utils import convert_from_v1_message from langchain_core.outputs import ChatGeneration, Generation +from langchain_core.v1.messages import AIMessage as AIMessageV1 from typing_extensions import override from langchain.agents.agent import MultiActionAgentOutputParser @@ -57,10 +59,12 @@ class OpenAIToolsAgentOutputParser(MultiActionAgentOutputParser): @override def parse_result( self, - result: list[Generation], + result: Union[list[Generation], AIMessageV1], *, partial: bool = False, ) -> Union[list[AgentAction], AgentFinish]: + if isinstance(result, AIMessageV1): + result = [ChatGeneration(message=convert_from_v1_message(result))] if not isinstance(result[0], ChatGeneration): msg = "This output parser only works on ChatGeneration output" raise ValueError(msg) # noqa: TRY004 diff --git a/libs/langchain/langchain/agents/output_parsers/tools.py b/libs/langchain/langchain/agents/output_parsers/tools.py index 10ebd3c3dfc..4df63ad2010 100644 --- a/libs/langchain/langchain/agents/output_parsers/tools.py +++ b/libs/langchain/langchain/agents/output_parsers/tools.py @@ -9,7 +9,9 @@ from langchain_core.messages import ( BaseMessage, ToolCall, ) +from langchain_core.messages.utils import convert_from_v1_message from langchain_core.outputs import ChatGeneration, Generation +from langchain_core.v1.messages import AIMessage as AIMessageV1 from typing_extensions import override from langchain.agents.agent import MultiActionAgentOutputParser @@ -47,7 +49,12 @@ def parse_ai_message_to_tool_action( try: args = json.loads(function["arguments"] or "{}") tool_calls.append( - ToolCall(name=function_name, args=args, id=tool_call["id"]), + ToolCall( + name=function_name, + args=args, + id=tool_call["id"], + type="tool_call", + ) ) except JSONDecodeError as e: msg = ( @@ -96,10 +103,12 @@ class ToolsAgentOutputParser(MultiActionAgentOutputParser): @override def parse_result( self, - result: list[Generation], + result: Union[list[Generation], AIMessageV1], *, partial: bool = False, ) -> Union[list[AgentAction], AgentFinish]: + if isinstance(result, AIMessageV1): + result = [ChatGeneration(message=convert_from_v1_message(result))] if not isinstance(result[0], ChatGeneration): msg = "This output parser only works on ChatGeneration output" raise ValueError(msg) # noqa: TRY004 diff --git a/libs/langchain/langchain/callbacks/streaming_aiter.py b/libs/langchain/langchain/callbacks/streaming_aiter.py index 96cf78fd83a..cb2bb823a7d 100644 --- a/libs/langchain/langchain/callbacks/streaming_aiter.py +++ b/libs/langchain/langchain/callbacks/streaming_aiter.py @@ -6,6 +6,7 @@ from typing import Any, Literal, Union, cast from langchain_core.callbacks import AsyncCallbackHandler from langchain_core.outputs import LLMResult +from langchain_core.v1.messages import AIMessage from typing_extensions import override # TODO If used by two LLM runs in parallel this won't work as expected @@ -44,7 +45,9 @@ class AsyncIteratorCallbackHandler(AsyncCallbackHandler): self.queue.put_nowait(token) @override - async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: + async def on_llm_end( + self, response: 
Union[LLMResult, AIMessage], **kwargs: Any + ) -> None: self.done.set() @override diff --git a/libs/langchain/langchain/callbacks/streaming_aiter_final_only.py b/libs/langchain/langchain/callbacks/streaming_aiter_final_only.py index dbf125bd175..4d9b755e69a 100644 --- a/libs/langchain/langchain/callbacks/streaming_aiter_final_only.py +++ b/libs/langchain/langchain/callbacks/streaming_aiter_final_only.py @@ -1,8 +1,9 @@ from __future__ import annotations -from typing import Any, Optional +from typing import Any, Optional, Union from langchain_core.outputs import LLMResult +from langchain_core.v1.messages import AIMessage from typing_extensions import override from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler @@ -75,7 +76,9 @@ class AsyncFinalIteratorCallbackHandler(AsyncIteratorCallbackHandler): self.answer_reached = False @override - async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: + async def on_llm_end( + self, response: Union[LLMResult, AIMessage], **kwargs: Any + ) -> None: if self.answer_reached: self.done.set() diff --git a/libs/langchain/langchain/chat_models/base.py b/libs/langchain/langchain/chat_models/base.py index 2f8b46bcb59..f5a8a588b80 100644 --- a/libs/langchain/langchain/chat_models/base.py +++ b/libs/langchain/langchain/chat_models/base.py @@ -19,6 +19,7 @@ from langchain_core.runnables import Runnable, RunnableConfig, ensure_config from langchain_core.runnables.schema import StreamEvent from langchain_core.tools import BaseTool from langchain_core.tracers import RunLog, RunLogPatch +from langchain_core.v1.chat_models import BaseChatModel as BaseChatModelV1 from pydantic import BaseModel from typing_extensions import TypeAlias, override @@ -39,10 +40,23 @@ def init_chat_model( model_provider: Optional[str] = None, configurable_fields: Literal[None] = None, config_prefix: Optional[str] = None, + message_version: Literal["v0"] = "v0", **kwargs: Any, ) -> BaseChatModel: ... +@overload +def init_chat_model( + model: str, + *, + model_provider: Optional[str] = None, + configurable_fields: Literal[None] = None, + config_prefix: Optional[str] = None, + message_version: Literal["v1"] = "v1", + **kwargs: Any, +) -> BaseChatModelV1: ... + + @overload def init_chat_model( model: Literal[None] = None, @@ -50,6 +64,7 @@ def init_chat_model( model_provider: Optional[str] = None, configurable_fields: Literal[None] = None, config_prefix: Optional[str] = None, + message_version: Literal["v0", "v1"] = "v0", **kwargs: Any, ) -> _ConfigurableModel: ... @@ -61,6 +76,7 @@ def init_chat_model( model_provider: Optional[str] = None, configurable_fields: Union[Literal["any"], list[str], tuple[str, ...]] = ..., config_prefix: Optional[str] = None, + message_version: Literal["v0", "v1"] = "v0", **kwargs: Any, ) -> _ConfigurableModel: ... @@ -76,8 +92,9 @@ def init_chat_model( Union[Literal["any"], list[str], tuple[str, ...]] ] = None, config_prefix: Optional[str] = None, + message_version: Literal["v0", "v1"] = "v0", **kwargs: Any, -) -> Union[BaseChatModel, _ConfigurableModel]: +) -> Union[BaseChatModel, BaseChatModelV1, _ConfigurableModel]: """Initialize a ChatModel in a single line using the model's name and provider. .. note:: @@ -128,6 +145,20 @@ def init_chat_model( - ``deepseek...`` -> ``deepseek`` - ``grok...`` -> ``xai`` - ``sonar...`` -> ``perplexity`` + + message_version: The version of the BaseChatModel to return. 
Either ``"v0"`` for + a v0 :class:`~langchain_core.language_models.chat_models.BaseChatModel` or + ``"v1"`` for a v1 :class:`~langchain_core.v1.chat_models.BaseChatModel`. The + output version determines what type of message objects the model will + generate. + + .. note:: + Currently supported for these providers: + + - ``openai`` + + .. versionadded:: 0.4.0 + configurable_fields: Which model parameters are configurable: - None: No configurable fields. @@ -316,6 +347,7 @@ def init_chat_model( return _init_chat_model_helper( cast("str", model), model_provider=model_provider, + message_version=message_version, **kwargs, ) if model: @@ -333,14 +365,27 @@ def _init_chat_model_helper( model: str, *, model_provider: Optional[str] = None, + message_version: Literal["v0", "v1"] = "v0", **kwargs: Any, -) -> BaseChatModel: +) -> Union[BaseChatModel, BaseChatModelV1]: model, model_provider = _parse_model(model, model_provider) + if message_version != "v0" and model_provider not in ("openai",): + warnings.warn( + f"Model provider {model_provider} does not support " + f"message_version={message_version}. Defaulting to v0.", + stacklevel=2, + ) if model_provider == "openai": _check_pkg("langchain_openai") - from langchain_openai import ChatOpenAI + if message_version == "v0": + from langchain_openai import ChatOpenAI + + return ChatOpenAI(model=model, **kwargs) + # v1 + from langchain_openai.v1 import ChatOpenAI as ChatOpenAIV1 + + return ChatOpenAIV1(model=model, **kwargs) - return ChatOpenAI(model=model, **kwargs) if model_provider == "anthropic": _check_pkg("langchain_anthropic") from langchain_anthropic import ChatAnthropic diff --git a/libs/langchain/langchain/smith/evaluation/progress.py b/libs/langchain/langchain/smith/evaluation/progress.py index 4282f9c76ae..a5242e1f8e0 100644 --- a/libs/langchain/langchain/smith/evaluation/progress.py +++ b/libs/langchain/langchain/smith/evaluation/progress.py @@ -2,12 +2,13 @@ import threading from collections.abc import Sequence -from typing import Any, Optional +from typing import Any, Optional, Union from uuid import UUID from langchain_core.callbacks import base as base_callbacks from langchain_core.documents import Document from langchain_core.outputs import LLMResult +from langchain_core.v1.messages import AIMessage from typing_extensions import override @@ -111,7 +112,7 @@ class ProgressBarCallback(base_callbacks.BaseCallbackHandler): @override def on_llm_end( self, - response: LLMResult, + response: Union[LLMResult, AIMessage], *, run_id: UUID, parent_run_id: Optional[UUID] = None, diff --git a/libs/langchain/tests/unit_tests/agents/format_scratchpad/test_openai_tools.py b/libs/langchain/tests/unit_tests/agents/format_scratchpad/test_openai_tools.py index 04d33d12f4e..089f31f47f7 100644 --- a/libs/langchain/tests/unit_tests/agents/format_scratchpad/test_openai_tools.py +++ b/libs/langchain/tests/unit_tests/agents/format_scratchpad/test_openai_tools.py @@ -53,7 +53,12 @@ def test_calls_convert_agent_action_to_messages() -> None: message4 = AIMessage( content="", tool_calls=[ - ToolCall(name="exponentiate", args={"a": 3, "b": 5}, id="call_abc02468"), + ToolCall( + name="exponentiate", + args={"a": 3, "b": 5}, + id="call_abc02468", + type="tool_call", + ), ], ) actions4 = parse_ai_message_to_openai_tool_action(message4) diff --git a/libs/langchain/tests/unit_tests/agents/test_agent.py b/libs/langchain/tests/unit_tests/agents/test_agent.py index ea6c455db69..d1bc7e6c2a0 100644 --- a/libs/langchain/tests/unit_tests/agents/test_agent.py +++ 
b/libs/langchain/tests/unit_tests/agents/test_agent.py @@ -1008,7 +1008,7 @@ def _make_tools_invocation(name_to_arguments: dict[str, dict[str, Any]]) -> AIMe for idx, (name, arguments) in enumerate(name_to_arguments.items()) ] tool_calls = [ - ToolCall(name=name, args=args, id=str(idx)) + ToolCall(name=name, args=args, id=str(idx), type="tool_call") for idx, (name, args) in enumerate(name_to_arguments.items()) ] return AIMessage( diff --git a/libs/langchain/tests/unit_tests/callbacks/fake_callback_handler.py b/libs/langchain/tests/unit_tests/callbacks/fake_callback_handler.py index 8b97e226ec7..cfc58c19789 100644 --- a/libs/langchain/tests/unit_tests/callbacks/fake_callback_handler.py +++ b/libs/langchain/tests/unit_tests/callbacks/fake_callback_handler.py @@ -6,6 +6,7 @@ from uuid import UUID from langchain_core.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler from langchain_core.messages import BaseMessage +from langchain_core.v1.messages import MessageV1 from pydantic import BaseModel from typing_extensions import override @@ -281,7 +282,7 @@ class FakeCallbackHandlerWithChatStart(FakeCallbackHandler): def on_chat_model_start( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], + messages: Union[list[list[BaseMessage]], list[MessageV1]], *, run_id: UUID, parent_run_id: Optional[UUID] = None, diff --git a/libs/langchain/tests/unit_tests/chat_models/test_base.py b/libs/langchain/tests/unit_tests/chat_models/test_base.py index 8cd5e0631b8..e59ba54ff68 100644 --- a/libs/langchain/tests/unit_tests/chat_models/test_base.py +++ b/libs/langchain/tests/unit_tests/chat_models/test_base.py @@ -6,6 +6,7 @@ import pytest from langchain_core.language_models import BaseChatModel from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import RunnableConfig, RunnableSequence +from langchain_core.v1.chat_models import BaseChatModel as BaseChatModelV1 from pydantic import SecretStr from langchain.chat_models.base import __all__, init_chat_model @@ -51,6 +52,22 @@ def test_init_chat_model(model_name: str, model_provider: Optional[str]) -> None assert llm1.dict() == llm2.dict() +@pytest.mark.requires("langchain_openai") +def test_message_version() -> None: + model = init_chat_model("openai:gpt-4.1", api_key="foo") + assert isinstance(model, BaseChatModel) + + model_v1 = init_chat_model("openai:gpt-4.1", api_key="foo", message_version="v1") + assert isinstance(model_v1, BaseChatModelV1) + + # Test we emit a warning for unsupported providers + with ( + pytest.warns(match="Model provider bar does not support message_version=v1"), + pytest.raises(ValueError, match="Unsupported model_provider='bar'."), + ): + init_chat_model("foo", model_provider="bar", message_version="v1") + + def test_init_missing_dep() -> None: with pytest.raises(ImportError): init_chat_model("mixtral-8x7b-32768", model_provider="groq") diff --git a/libs/langchain/tests/unit_tests/llms/test_fake_chat_model.py b/libs/langchain/tests/unit_tests/llms/test_fake_chat_model.py index e5e8de87f0f..700e4538e2b 100644 --- a/libs/langchain/tests/unit_tests/llms/test_fake_chat_model.py +++ b/libs/langchain/tests/unit_tests/llms/test_fake_chat_model.py @@ -7,6 +7,8 @@ from uuid import UUID from langchain_core.callbacks.base import AsyncCallbackHandler from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage from langchain_core.outputs import ChatGenerationChunk, GenerationChunk +from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1 +from 
langchain_core.v1.messages import MessageV1 from typing_extensions import override from tests.unit_tests.llms.fake_chat_model import GenericFakeChatModel @@ -155,7 +157,7 @@ async def test_callback_handlers() -> None: async def on_chat_model_start( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], + messages: Union[list[list[BaseMessage]], list[MessageV1]], *, run_id: UUID, parent_run_id: Optional[UUID] = None, @@ -172,7 +174,9 @@ async def test_callback_handlers() -> None: self, token: str, *, - chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None, + chunk: Optional[ + Union[GenerationChunk, ChatGenerationChunk, AIMessageChunkV1] + ] = None, run_id: UUID, parent_run_id: Optional[UUID] = None, tags: Optional[list[str]] = None, diff --git a/libs/partners/openai/langchain_openai/chat_models/_compat.py b/libs/partners/openai/langchain_openai/chat_models/_compat.py index 25ff3eb607c..00f3b365c9b 100644 --- a/libs/partners/openai/langchain_openai/chat_models/_compat.py +++ b/libs/partners/openai/langchain_openai/chat_models/_compat.py @@ -1,7 +1,10 @@ """ -This module converts between AIMessage output formats for the Responses API. +This module converts between AIMessage output formats, which are governed by the +``output_version`` attribute on ChatOpenAI. Supported values are ``"v0"`` and +``"responses/v1"``. -ChatOpenAI v0.3 stores reasoning and tool outputs in AIMessage.additional_kwargs: +``"v0"`` corresponds to the format as of ChatOpenAI v0.3. For the Responses API, it +stores reasoning and tool outputs in AIMessage.additional_kwargs: .. code-block:: python @@ -28,8 +31,9 @@ ChatOpenAI v0.3 stores reasoning and tool outputs in AIMessage.additional_kwargs id="msg_123", ) -To retain information about response item sequencing (and to accommodate multiple -reasoning items), ChatOpenAI now stores these items in the content sequence: +``"responses/v1"`` is only applicable to the Responses API. It retains information +about response item sequencing and accommodates multiple reasoning items by +representing these items in the content sequence: .. code-block:: python @@ -57,18 +61,22 @@ There are other, small improvements as well-- e.g., we store message IDs on text content blocks, rather than on the AIMessage.id, which now stores the response ID. For backwards compatibility, this module provides functions to convert between the -old and new formats. The functions are used internally by ChatOpenAI. - +formats. The functions are used internally by ChatOpenAI. 
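+
+As a rough sketch (assuming an ``AIMessage`` named ``message`` in the
+``"responses/v1"`` shape shown above), the two directions look like:
+
+.. code-block:: python
+
+    # "responses/v1" content -> v0 additional_kwargs representation
+    v03_message = _convert_to_v03_ai_message(message)
+
+    # and back again (approximate, not a byte-for-byte round trip)
+    new_message = _convert_from_v03_ai_message(v03_message)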
""" # noqa: E501 +import copy import json -from typing import Union +from collections.abc import Iterable, Iterator +from typing import Any, Literal, Optional, Union, cast -from langchain_core.messages import AIMessage +from langchain_core.messages import AIMessage, is_data_content_block +from langchain_core.messages import content_blocks as types +from langchain_core.v1.messages import AIMessage as AIMessageV1 _FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__" +# v0.3 / Responses def _convert_to_v03_ai_message( message: AIMessage, has_reasoning: bool = False ) -> AIMessage: @@ -253,3 +261,447 @@ def _convert_from_v03_ai_message(message: AIMessage) -> AIMessage: }, deep=False, ) + + +# v1 / Chat Completions +def _convert_from_v1_to_chat_completions(message: AIMessageV1) -> AIMessageV1: + """Convert a v1 message to the Chat Completions format.""" + new_content: list[types.ContentBlock] = [] + for block in message.content: + if block["type"] == "text": + # Strip annotations + new_content.append({"type": "text", "text": block["text"]}) + elif block["type"] in ("reasoning", "tool_call"): + pass + else: + new_content.append(block) + new_message = copy.copy(message) + new_message.content = new_content + + return new_message + + +# v1 / Responses +def _convert_annotation_to_v1(annotation: dict[str, Any]) -> types.Annotation: + annotation_type = annotation.get("type") + + if annotation_type == "url_citation": + known_fields = { + "type", + "url", + "title", + "cited_text", + "start_index", + "end_index", + } + url_citation = cast(types.Citation, {}) + for field in ("end_index", "start_index", "title"): + if field in annotation: + url_citation[field] = annotation[field] + url_citation["type"] = "citation" + url_citation["url"] = annotation["url"] + for field in annotation: + if field not in known_fields: + if "extras" not in url_citation: + url_citation["extras"] = {} + url_citation["extras"][field] = annotation[field] + return url_citation + + elif annotation_type == "file_citation": + known_fields = {"type", "title", "cited_text", "start_index", "end_index"} + document_citation: types.Citation = {"type": "citation"} + if "filename" in annotation: + document_citation["title"] = annotation.pop("filename") + for field in annotation: + if field not in known_fields: + if "extras" not in document_citation: + document_citation["extras"] = {} + document_citation["extras"][field] = annotation[field] + + return document_citation + + # TODO: standardise container_file_citation? 
+ else: + non_standard_annotation: types.NonStandardAnnotation = { + "type": "non_standard_annotation", + "value": annotation, + } + return non_standard_annotation + + +def _explode_reasoning(block: dict[str, Any]) -> Iterable[types.ReasoningContentBlock]: + if "summary" not in block: + yield cast(types.ReasoningContentBlock, block) + return + + known_fields = {"type", "reasoning", "id", "index"} + unknown_fields = [ + field for field in block if field != "summary" and field not in known_fields + ] + if unknown_fields: + block["extras"] = {} + for field in unknown_fields: + block["extras"][field] = block.pop(field) + + if not block["summary"]: + _ = block.pop("summary", None) + yield cast(types.ReasoningContentBlock, block) + return + + # Common part for every exploded line, except 'summary' + common = {k: v for k, v in block.items() if k in known_fields} + + # Optional keys that must appear only in the first exploded item + first_only = block.pop("extras", None) + + for idx, part in enumerate(block["summary"]): + new_block = dict(common) + new_block["reasoning"] = part.get("text", "") + if idx == 0 and first_only: + new_block.update(first_only) + yield cast(types.ReasoningContentBlock, new_block) + + +def _convert_to_v1_from_responses( + content: list[dict[str, Any]], + tool_calls: Optional[list[types.ToolCall]] = None, + invalid_tool_calls: Optional[list[types.InvalidToolCall]] = None, +) -> list[types.ContentBlock]: + """Mutate a Responses message to v1 format.""" + + def _iter_blocks() -> Iterable[types.ContentBlock]: + for block in content: + if not isinstance(block, dict): + continue + block_type = block.get("type") + + if block_type == "text": + if "annotations" in block: + block["annotations"] = [ + _convert_annotation_to_v1(a) for a in block["annotations"] + ] + yield cast(types.TextContentBlock, block) + + elif block_type == "reasoning": + yield from _explode_reasoning(block) + + elif block_type == "image_generation_call" and ( + result := block.get("result") + ): + new_block = {"type": "image", "base64": result} + if output_format := block.get("output_format"): + new_block["mime_type"] = f"image/{output_format}" + if "id" in block: + new_block["id"] = block["id"] + if "index" in block: + new_block["index"] = block["index"] + for extra_key in ( + "status", + "background", + "output_format", + "quality", + "revised_prompt", + "size", + ): + if extra_key in block: + new_block[extra_key] = block[extra_key] + yield cast(types.ImageContentBlock, new_block) + + elif block_type == "function_call": + tool_call_block: Optional[types.ContentBlock] = None + call_id = block.get("call_id", "") + if call_id: + for tool_call in tool_calls or []: + if tool_call.get("id") == call_id: + tool_call_block = cast(types.ToolCall, tool_call.copy()) + break + else: + for invalid_tool_call in invalid_tool_calls or []: + if invalid_tool_call.get("id") == call_id: + tool_call_block = cast( + types.InvalidToolCall, invalid_tool_call.copy() + ) + break + if tool_call_block: + if "id" in block: + if "extras" not in tool_call_block: + tool_call_block["extras"] = {} + tool_call_block["extras"]["item_id"] = block["id"] # type: ignore[typeddict-item] + if "index" in block: + tool_call_block["index"] = block["index"] + yield tool_call_block + + elif block_type == "web_search_call": + web_search_call = {"type": "web_search_call", "id": block["id"]} + if "index" in block: + web_search_call["index"] = block["index"] + if ( + "action" in block + and isinstance(block["action"], dict) + and block["action"].get("type") == 
"search" + and "query" in block["action"] + ): + web_search_call["query"] = block["action"]["query"] + for key in block: + if key not in ("type", "id"): + web_search_call[key] = block[key] + + web_search_result = {"type": "web_search_result", "id": block["id"]} + if "index" in block: + web_search_result["index"] = block["index"] + 1 + yield cast(types.WebSearchCall, web_search_call) + yield cast(types.WebSearchResult, web_search_result) + + elif block_type == "code_interpreter_call": + code_interpreter_call = { + "type": "code_interpreter_call", + "id": block["id"], + } + if "code" in block: + code_interpreter_call["code"] = block["code"] + if "container_id" in block: + code_interpreter_call["container_id"] = block["container_id"] + if "index" in block: + code_interpreter_call["index"] = block["index"] + + code_interpreter_result = { + "type": "code_interpreter_result", + "id": block["id"], + } + if "outputs" in block: + code_interpreter_result["outputs"] = block["outputs"] + for output in block["outputs"]: + if ( + isinstance(output, dict) + and (output_type := output.get("type")) + and output_type == "logs" + ): + if "output" not in code_interpreter_result: + code_interpreter_result["output"] = [] + code_interpreter_result["output"].append( + { + "type": "code_interpreter_output", + "stdout": output.get("logs", ""), + } + ) + + if "status" in block: + code_interpreter_result["status"] = block["status"] + if "index" in block: + code_interpreter_result["index"] = block["index"] + 1 + + yield cast(types.CodeInterpreterCall, code_interpreter_call) + yield cast(types.CodeInterpreterResult, code_interpreter_result) + + else: + new_block = {"type": "non_standard", "value": block} + if "index" in new_block["value"]: + new_block["index"] = new_block["value"].pop("index") + yield cast(types.NonStandardContentBlock, new_block) + + return list(_iter_blocks()) + + +def _convert_annotation_from_v1(annotation: types.Annotation) -> dict[str, Any]: + if annotation["type"] == "citation": + new_ann: dict[str, Any] = {} + for field in ("end_index", "start_index"): + if field in annotation: + new_ann[field] = annotation[field] + + if "url" in annotation: + # URL citation + if "title" in annotation: + new_ann["title"] = annotation["title"] + new_ann["type"] = "url_citation" + new_ann["url"] = annotation["url"] + else: + # Document citation + new_ann["type"] = "file_citation" + if "title" in annotation: + new_ann["filename"] = annotation["title"] + + if extra_fields := annotation.get("extras"): + for field, value in extra_fields.items(): + new_ann[field] = value + + return new_ann + + elif annotation["type"] == "non_standard_annotation": + return annotation["value"] + + else: + return dict(annotation) + + +def _implode_reasoning_blocks(blocks: list[dict[str, Any]]) -> Iterable[dict[str, Any]]: + i = 0 + n = len(blocks) + + while i < n: + block = blocks[i] + + # Skip non-reasoning blocks or blocks already in Responses format + if block.get("type") != "reasoning" or "summary" in block: + yield dict(block) + i += 1 + continue + elif "reasoning" not in block and "summary" not in block: + # {"type": "reasoning", "id": "rs_..."} + oai_format = {**block, "summary": []} + if "extras" in oai_format: + oai_format.update(oai_format.pop("extras")) + oai_format["type"] = oai_format.pop("type", "reasoning") + if "encrypted_content" in oai_format: + oai_format["encrypted_content"] = oai_format.pop("encrypted_content") + yield oai_format + i += 1 + continue + else: + pass + + summary: list[dict[str, str]] = [ + {"type": 
"summary_text", "text": block.get("reasoning", "")} + ] + # 'common' is every field except the exploded 'reasoning' + common = {k: v for k, v in block.items() if k != "reasoning"} + if "extras" in common: + common.update(common.pop("extras")) + + i += 1 + while i < n: + next_ = blocks[i] + if next_.get("type") == "reasoning" and "reasoning" in next_: + summary.append( + {"type": "summary_text", "text": next_.get("reasoning", "")} + ) + i += 1 + else: + break + + merged = dict(common) + merged["summary"] = summary + merged["type"] = merged.pop("type", "reasoning") + yield merged + + +def _consolidate_calls( + items: Iterable[dict[str, Any]], + call_name: Literal["web_search_call", "code_interpreter_call"], + result_name: Literal["web_search_result", "code_interpreter_result"], +) -> Iterator[dict[str, Any]]: + """ + Generator that walks through *items* and, whenever it meets the pair + + {"type": "web_search_call", "id": X, ...} + {"type": "web_search_result", "id": X} + + merges them into + + {"id": X, + "action": …, + "status": …, + "type": "web_search_call"} + + keeping every other element untouched. + """ + items = iter(items) # make sure we have a true iterator + for current in items: + # Only a call can start a pair worth collapsing + if current.get("type") != call_name: + yield current + continue + + try: + nxt = next(items) # look-ahead one element + except StopIteration: # no “result” – just yield the call back + yield current + break + + # If this really is the matching “result” – collapse + if nxt.get("type") == result_name and nxt.get("id") == current.get("id"): + if call_name == "web_search_call": + collapsed = {"id": current["id"]} + if "action" in current: + collapsed["action"] = current["action"] + collapsed["status"] = current["status"] + collapsed["type"] = "web_search_call" + + if call_name == "code_interpreter_call": + collapsed = {"id": current["id"]} + for key in ("code", "container_id"): + if key in current: + collapsed[key] = current[key] + + for key in ("outputs", "status"): + if key in nxt: + collapsed[key] = nxt[key] + collapsed["type"] = "code_interpreter_call" + + yield collapsed + + else: + # Not a matching pair – emit both, in original order + yield current + yield nxt + + +def _convert_from_v1_to_responses( + content: list[types.ContentBlock], tool_calls: list[types.ToolCall] +) -> list[dict[str, Any]]: + new_content: list = [] + for block in content: + if block["type"] == "text" and "annotations" in block: + # Need a copy because we’re changing the annotations list + new_block = dict(block) + new_block["annotations"] = [ + _convert_annotation_from_v1(a) for a in block["annotations"] + ] + new_content.append(new_block) + elif block["type"] == "tool_call": + new_block = {"type": "function_call", "call_id": block["id"]} + if "extras" in block and "item_id" in block["extras"]: + new_block["id"] = block["extras"]["item_id"] + if "name" in block: + new_block["name"] = block["name"] + if "extras" in block and "arguments" in block["extras"]: + new_block["arguments"] = block["extras"]["arguments"] + if any(key not in block for key in ("name", "arguments")): + matching_tool_calls = [ + call for call in tool_calls if call["id"] == block["id"] + ] + if matching_tool_calls: + tool_call = matching_tool_calls[0] + if "name" not in block: + new_block["name"] = tool_call["name"] + if "arguments" not in block: + new_block["arguments"] = json.dumps(tool_call["args"]) + new_content.append(new_block) + elif ( + is_data_content_block(cast(dict, block)) + and block["type"] == 
"image" + and "base64" in block + and isinstance(block.get("id"), str) + and block["id"].startswith("ig_") + ): + new_block = {"type": "image_generation_call", "result": block["base64"]} + for extra_key in ("id", "status"): + if extra_key in block: + new_block[extra_key] = block[extra_key] # type: ignore[typeddict-item] + new_content.append(new_block) + elif block["type"] == "non_standard" and "value" in block: + new_content.append(block["value"]) + else: + new_content.append(block) + + new_content = list(_implode_reasoning_blocks(new_content)) + new_content = list( + _consolidate_calls(new_content, "web_search_call", "web_search_result") + ) + new_content = list( + _consolidate_calls( + new_content, "code_interpreter_call", "code_interpreter_result" + ) + ) + + return new_content diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py index 1bc9b66d880..826e1605f2e 100644 --- a/libs/partners/openai/langchain_openai/chat_models/base.py +++ b/libs/partners/openai/langchain_openai/chat_models/base.py @@ -458,7 +458,7 @@ class BaseChatOpenAI(BaseChatModel): alias="api_key", default_factory=secret_from_env("OPENAI_API_KEY", default=None) ) openai_api_base: Optional[str] = Field(default=None, alias="base_url") - """Base URL path for API requests, leave blank if not using a proxy or service + """Base URL path for API requests, leave blank if not using a proxy or service emulator.""" openai_organization: Optional[str] = Field(default=None, alias="organization") """Automatically inferred from env var ``OPENAI_ORG_ID`` if not provided.""" @@ -489,7 +489,7 @@ class BaseChatOpenAI(BaseChatModel): """Whether to return logprobs.""" top_logprobs: Optional[int] = None """Number of most likely tokens to return at each token position, each with - an associated log probability. `logprobs` must be set to true + an associated log probability. `logprobs` must be set to true if this parameter is used.""" logit_bias: Optional[dict[int, int]] = None """Modify the likelihood of specified tokens appearing in the completion.""" @@ -507,7 +507,7 @@ class BaseChatOpenAI(BaseChatModel): Reasoning models only, like OpenAI o1, o3, and o4-mini. - Currently supported values are low, medium, and high. Reducing reasoning effort + Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. .. versionadded:: 0.2.14 @@ -529,26 +529,26 @@ class BaseChatOpenAI(BaseChatModel): """ tiktoken_model_name: Optional[str] = None - """The model name to pass to tiktoken when using this class. - Tiktoken is used to count the number of tokens in documents to constrain - them to be under a certain limit. By default, when set to None, this will - be the same as the embedding model name. However, there are some cases - where you may want to use this Embedding class with a model name not - supported by tiktoken. This can include when using Azure embeddings or - when using one of the many model providers that expose an OpenAI-like - API but with different models. In those cases, in order to avoid erroring + """The model name to pass to tiktoken when using this class. + Tiktoken is used to count the number of tokens in documents to constrain + them to be under a certain limit. By default, when set to None, this will + be the same as the embedding model name. 
However, there are some cases + where you may want to use this Embedding class with a model name not + supported by tiktoken. This can include when using Azure embeddings or + when using one of the many model providers that expose an OpenAI-like + API but with different models. In those cases, in order to avoid erroring when tiktoken is called, you can specify a model name to use here.""" default_headers: Union[Mapping[str, str], None] = None default_query: Union[Mapping[str, object], None] = None # Configure a custom httpx client. See the # [httpx documentation](https://www.python-httpx.org/api/#client) for more details. http_client: Union[Any, None] = Field(default=None, exclude=True) - """Optional ``httpx.Client``. Only used for sync invocations. Must specify + """Optional ``httpx.Client``. Only used for sync invocations. Must specify ``http_async_client`` as well if you'd like a custom client for async invocations. """ http_async_client: Union[Any, None] = Field(default=None, exclude=True) - """Optional ``httpx.AsyncClient``. Only used for async invocations. Must specify + """Optional ``httpx.AsyncClient``. Only used for async invocations. Must specify ``http_client`` as well if you'd like a custom client for sync invocations.""" stop: Optional[Union[list[str], str]] = Field(default=None, alias="stop_sequences") """Default stop sequences.""" @@ -556,40 +556,40 @@ class BaseChatOpenAI(BaseChatModel): """Optional additional JSON properties to include in the request parameters when making requests to OpenAI compatible APIs, such as vLLM, LM Studio, or other providers. - + This is the recommended way to pass custom parameters that are specific to your OpenAI-compatible API provider but not part of the standard OpenAI API. - + Examples: - LM Studio TTL parameter: ``extra_body={"ttl": 300}`` - vLLM custom parameters: ``extra_body={"use_beam_search": True}`` - Any other provider-specific parameters - + .. note:: - + Do NOT use ``model_kwargs`` for custom parameters that are not part of the - standard OpenAI API, as this will cause errors when making API calls. Use + standard OpenAI API, as this will cause errors when making API calls. Use ``extra_body`` instead. """ include_response_headers: bool = False """Whether to include response headers in the output message ``response_metadata``.""" # noqa: E501 disabled_params: Optional[dict[str, Any]] = Field(default=None) - """Parameters of the OpenAI client or chat.completions endpoint that should be + """Parameters of the OpenAI client or chat.completions endpoint that should be disabled for the given model. - - Should be specified as ``{"param": None | ['val1', 'val2']}`` where the key is the + + Should be specified as ``{"param": None | ['val1', 'val2']}`` where the key is the parameter and the value is either None, meaning that parameter should never be used, or it's a list of disabled values for the parameter. - + For example, older models may not support the ``'parallel_tool_calls'`` parameter at - all, in which case ``disabled_params={"parallel_tool_calls": None}`` can be passed + all, in which case ``disabled_params={"parallel_tool_calls": None}`` can be passed in. - + If a parameter is disabled then it will not be used by default in any methods, e.g. in :meth:`~langchain_openai.chat_models.base.ChatOpenAI.with_structured_output`. However this does not prevent a user from directly passed in the parameter during - invocation. + invocation. 
""" include: Optional[list[str]] = None diff --git a/libs/partners/openai/langchain_openai/v1/__init__.py b/libs/partners/openai/langchain_openai/v1/__init__.py new file mode 100644 index 00000000000..2d6bafdd6d0 --- /dev/null +++ b/libs/partners/openai/langchain_openai/v1/__init__.py @@ -0,0 +1,3 @@ +from langchain_openai.v1.chat_models import ChatOpenAI + +__all__ = ["ChatOpenAI"] diff --git a/libs/partners/openai/langchain_openai/v1/chat_models/__init__.py b/libs/partners/openai/langchain_openai/v1/chat_models/__init__.py new file mode 100644 index 00000000000..0e63b2a441b --- /dev/null +++ b/libs/partners/openai/langchain_openai/v1/chat_models/__init__.py @@ -0,0 +1,3 @@ +from langchain_openai.v1.chat_models.base import ChatOpenAI + +__all__ = ["ChatOpenAI"] diff --git a/libs/partners/openai/langchain_openai/v1/chat_models/base.py b/libs/partners/openai/langchain_openai/v1/chat_models/base.py new file mode 100644 index 00000000000..0015989dbbc --- /dev/null +++ b/libs/partners/openai/langchain_openai/v1/chat_models/base.py @@ -0,0 +1,3762 @@ +"""OpenAI chat wrapper.""" + +from __future__ import annotations + +import base64 +import json +import logging +import os +import re +import ssl +import sys +import warnings +from collections.abc import AsyncIterator, Iterator, Mapping, Sequence +from functools import partial +from io import BytesIO +from json import JSONDecodeError +from math import ceil +from operator import itemgetter +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + Optional, + TypedDict, + TypeVar, + Union, + cast, +) +from urllib.parse import urlparse + +import certifi +import openai +import tiktoken +from langchain_core.callbacks import ( + AsyncCallbackManagerForLLMRun, + CallbackManagerForLLMRun, +) +from langchain_core.language_models import LanguageModelInput +from langchain_core.language_models.chat_models import LangSmithParams +from langchain_core.messages import ( + InvalidToolCall, + ToolCall, + convert_to_openai_data_block, + is_data_content_block, +) +from langchain_core.messages.ai import ( + InputTokenDetails, + OutputTokenDetails, + UsageMetadata, +) +from langchain_core.messages.tool import tool_call_chunk +from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser +from langchain_core.output_parsers.openai_tools import ( + JsonOutputKeyToolsParser, + PydanticToolsParser, + make_invalid_tool_call, + parse_tool_call, +) +from langchain_core.runnables import ( + Runnable, + RunnableLambda, + RunnableMap, + RunnablePassthrough, +) +from langchain_core.runnables.config import run_in_executor +from langchain_core.tools import BaseTool +from langchain_core.tools.base import _stringify +from langchain_core.utils import get_pydantic_field_names +from langchain_core.utils.function_calling import ( + convert_to_openai_function, + convert_to_openai_tool, +) +from langchain_core.utils.pydantic import ( + PydanticBaseModel, + TypeBaseModel, + is_basemodel_subclass, +) +from langchain_core.utils.utils import _build_model_kwargs, from_env, secret_from_env +from langchain_core.v1.chat_models import ( + BaseChatModel, + agenerate_from_stream, + generate_from_stream, +) +from langchain_core.v1.messages import AIMessage as AIMessageV1 +from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1 +from langchain_core.v1.messages import HumanMessage as HumanMessageV1 +from langchain_core.v1.messages import MessageV1, ResponseMetadata +from langchain_core.v1.messages import SystemMessage as SystemMessageV1 +from 
langchain_core.v1.messages import ToolMessage as ToolMessageV1
+from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
+from pydantic.v1 import BaseModel as BaseModelV1
+from typing_extensions import Self
+
+from langchain_openai.chat_models._client_utils import (
+    _get_default_async_httpx_client,
+    _get_default_httpx_client,
+)
+from langchain_openai.chat_models._compat import (
+    _convert_from_v1_to_chat_completions,
+    _convert_from_v1_to_responses,
+    _convert_to_v1_from_responses,
+)
+
+if TYPE_CHECKING:
+    from langchain_core.messages import content_blocks as types
+    from openai.types.responses import Response
+
+logger = logging.getLogger(__name__)
+
+# This SSL context is equivalent to the default `verify=True`.
+# https://www.python-httpx.org/advanced/ssl/#configuring-client-instances
+global_ssl_context = ssl.create_default_context(cafile=certifi.where())
+
+WellKnownTools = (
+    "file_search",
+    "web_search_preview",
+    "computer_use_preview",
+    "code_interpreter",
+    "mcp",
+    "image_generation",
+)
+
+
+def _convert_dict_to_message(_dict: Mapping[str, Any]) -> MessageV1:
+    """Convert a dictionary to a LangChain message.
+
+    Args:
+        _dict: The dictionary.
+
+    Returns:
+        The LangChain message.
+    """
+    role = _dict.get("role")
+    name = _dict.get("name")
+    id_ = _dict.get("id")
+    if role == "user":
+        return HumanMessageV1(content=_dict.get("content", ""), id=id_, name=name)
+    elif role == "assistant":
+        # Fix for Azure
+        # Also OpenAI returns None for tool invocations
+        content: list[types.ContentBlock] = []
+        if (oai_content := _dict.get("content")) and isinstance(oai_content, str):
+            content.append({"type": "text", "text": oai_content})
+        tool_calls = []
+        invalid_tool_calls = []
+        if raw_tool_calls := _dict.get("tool_calls"):
+            for raw_tool_call in raw_tool_calls:
+                try:
+                    tool_call = parse_tool_call(raw_tool_call, return_id=True)
+                    if tool_call:
+                        tool_calls.append(tool_call)
+                except Exception as e:
+                    invalid_tool_calls.append(
+                        make_invalid_tool_call(raw_tool_call, str(e))
+                    )
+            content.extend(cast(list[ToolCall], tool_calls))
+        if audio := _dict.get("audio"):
+            # TODO: populate standard fields
+            content.append(
+                cast(types.AudioContentBlock, {"type": "audio", "audio": audio})
+            )
+        return AIMessageV1(
+            content=content,
+            name=name,
+            id=id_,
+            tool_calls=cast(list[ToolCall], tool_calls),
+            invalid_tool_calls=cast(list[InvalidToolCall], invalid_tool_calls),
+        )
+    elif role in ("system", "developer"):
+        return SystemMessageV1(
+            content=_dict.get("content", ""),
+            name=name,
+            id=id_,
+            custom_role=role if role == "developer" else None,
+        )
+    elif role == "tool":
+        return ToolMessageV1(
+            content=_dict.get("content", ""),
+            tool_call_id=cast(str, _dict.get("tool_call_id")),
+            name=name,
+            id=id_,
+        )
+    else:
+        error_message = f"Unexpected role {role} in message."
+        raise ValueError(error_message)
+
+
+def _format_message_content(content: Any, responses_api: bool = False) -> Any:
+    """Format message content."""
+    if content and isinstance(content, list):
+        formatted_content = []
+        for block in content:
+            # Remove unexpected block types
+            if (
+                isinstance(block, dict)
+                and "type" in block
+                and block["type"] in ("tool_use", "thinking", "reasoning_content")
+            ):
+                continue
+            elif (
+                isinstance(block, dict)
+                and is_data_content_block(block)
+                and not responses_api
+            ):
+                formatted_content.append(convert_to_openai_data_block(block))
+            # Anthropic image blocks
+            elif (
+                isinstance(block, dict)
+                and block.get("type") == "image"
+                and (source := block.get("source"))
+                and isinstance(source, dict)
+            ):
+                if source.get("type") == "base64" and (
+                    (media_type := source.get("media_type"))
+                    and (data := source.get("data"))
+                ):
+                    formatted_content.append(
+                        {
+                            "type": "image_url",
+                            "image_url": {"url": f"data:{media_type};base64,{data}"},
+                        }
+                    )
+                elif source.get("type") == "url" and (url := source.get("url")):
+                    formatted_content.append(
+                        {"type": "image_url", "image_url": {"url": url}}
+                    )
+                else:
+                    continue
+            else:
+                formatted_content.append(block)
+    else:
+        formatted_content = content
+
+    return formatted_content
+
+
+def _convert_message_to_dict(message: MessageV1, responses_api: bool = False) -> dict:
+    """Convert a LangChain message to a dictionary.
+
+    Args:
+        message: The LangChain message.
+
+    Returns:
+        The dictionary.
+    """
+    message_dict: dict[str, Any] = {
+        "content": _format_message_content(message.content, responses_api=responses_api)
+    }
+    if name := message.name:
+        message_dict["name"] = name
+
+    # populate role and additional message data
+    if isinstance(message, HumanMessageV1):
+        message_dict["role"] = "user"
+    elif isinstance(message, AIMessageV1):
+        message_dict["role"] = "assistant"
+        if message.tool_calls or message.invalid_tool_calls:
+            message_dict["tool_calls"] = [
+                _lc_tool_call_to_openai_tool_call(tc) for tc in message.tool_calls
+            ] + [
+                _lc_invalid_tool_call_to_openai_tool_call(tc)
+                for tc in message.invalid_tool_calls
+            ]
+        else:
+            pass
+        # If tool calls are present, an empty content value should be serialized
+        # as None rather than an empty string.
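+        # e.g. {"role": "assistant", "content": None, "tool_calls": [...]}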
+ if "tool_calls" in message_dict: + message_dict["content"] = message_dict["content"] or None + + audio: Optional[dict[str, Any]] = None + for block in message.content: + if ( + block.get("type") == "audio" + and (id_ := block.get("id")) + and not responses_api + ): + # openai doesn't support passing the data back - only the id + # https://platform.openai.com/docs/guides/audio/multi-turn-conversations + audio = {"id": id_} + if audio: + message_dict["audio"] = audio + elif isinstance(message, SystemMessageV1): + if message.custom_role == "developer": + message_dict["role"] = "developer" + else: + message_dict["role"] = "system" + elif isinstance(message, ToolMessageV1): + message_dict["role"] = "tool" + message_dict["tool_call_id"] = message.tool_call_id + + supported_props = {"content", "role", "tool_call_id"} + message_dict = {k: v for k, v in message_dict.items() if k in supported_props} + else: + raise TypeError(f"Got unknown type {message}") + return message_dict + + +def _convert_delta_to_message_chunk(_dict: Mapping[str, Any]) -> AIMessageChunkV1: + id_ = _dict.get("id") + content = cast(str, _dict.get("content") or "") + tool_call_chunks = [] + if raw_tool_calls := _dict.get("tool_calls"): + try: + tool_call_chunks = [ + tool_call_chunk( + name=rtc["function"].get("name"), + args=rtc["function"].get("arguments"), + id=rtc.get("id"), + index=rtc["index"], + ) + for rtc in raw_tool_calls + ] + except KeyError: + pass + + return AIMessageChunkV1( + content=content or [], id=id_, tool_call_chunks=tool_call_chunks + ) + + +def _update_token_usage( + overall_token_usage: Union[int, dict], new_usage: Union[int, dict] +) -> Union[int, dict]: + # Token usage is either ints or dictionaries + # `reasoning_tokens` is nested inside `completion_tokens_details` + if isinstance(new_usage, int): + if not isinstance(overall_token_usage, int): + raise ValueError( + f"Got different types for token usage: " + f"{type(new_usage)} and {type(overall_token_usage)}" + ) + return new_usage + overall_token_usage + elif isinstance(new_usage, dict): + if not isinstance(overall_token_usage, dict): + raise ValueError( + f"Got different types for token usage: " + f"{type(new_usage)} and {type(overall_token_usage)}" + ) + return { + k: _update_token_usage(overall_token_usage.get(k, 0), v) + for k, v in new_usage.items() + } + else: + warnings.warn(f"Unexpected type for token usage: {type(new_usage)}") + return new_usage + + +def _handle_openai_bad_request(e: openai.BadRequestError) -> None: + if ( + "'response_format' of type 'json_schema' is not supported with this model" + ) in e.message: + message = ( + "This model does not support OpenAI's structured output feature, which " + "is the default method for `with_structured_output` as of " + "langchain-openai==0.3. To use `with_structured_output` with this model, " + 'specify `method="function_calling"`.' + ) + warnings.warn(message) + raise e + elif "Invalid schema for response_format" in e.message: + message = ( + "Invalid schema for OpenAI's structured output feature, which is the " + "default method for `with_structured_output` as of langchain-openai==0.3. " + 'Specify `method="function_calling"` instead or update your schema. 
' + "See supported schemas: " + "https://platform.openai.com/docs/guides/structured-outputs#supported-schemas" # noqa: E501 + ) + warnings.warn(message) + raise e + else: + raise + + +class _FunctionCall(TypedDict): + name: str + + +_BM = TypeVar("_BM", bound=BaseModel) +_DictOrPydanticClass = Union[dict[str, Any], type[_BM], type] +_DictOrPydantic = Union[dict, _BM] + + +class _AllReturnType(TypedDict): + raw: AIMessageV1 + parsed: Optional[_DictOrPydantic] + parsing_error: Optional[BaseException] + + +class BaseChatOpenAI(BaseChatModel): + client: Any = Field(default=None, exclude=True) #: :meta private: + async_client: Any = Field(default=None, exclude=True) #: :meta private: + root_client: Any = Field(default=None, exclude=True) #: :meta private: + root_async_client: Any = Field(default=None, exclude=True) #: :meta private: + model_name: str = Field(default="gpt-3.5-turbo", alias="model") + """Model name to use.""" + temperature: Optional[float] = None + """What sampling temperature to use.""" + model_kwargs: dict[str, Any] = Field(default_factory=dict) + """Holds any model parameters valid for `create` call not explicitly specified.""" + openai_api_key: Optional[SecretStr] = Field( + alias="api_key", default_factory=secret_from_env("OPENAI_API_KEY", default=None) + ) + openai_api_base: Optional[str] = Field(default=None, alias="base_url") + """Base URL path for API requests, leave blank if not using a proxy or service + emulator.""" + openai_organization: Optional[str] = Field(default=None, alias="organization") + """Automatically inferred from env var `OPENAI_ORG_ID` if not provided.""" + # to support explicit proxy for OpenAI + openai_proxy: Optional[str] = Field( + default_factory=from_env("OPENAI_PROXY", default=None) + ) + request_timeout: Union[float, tuple[float, float], Any, None] = Field( + default=None, alias="timeout" + ) + """Timeout for requests to OpenAI completion API. Can be float, httpx.Timeout or + None.""" + stream_usage: bool = False + """Whether to include usage metadata in streaming output. If True, an additional + message chunk will be generated during the stream including usage metadata. + + .. versionadded:: 0.3.9 + """ + max_retries: Optional[int] = None + """Maximum number of retries to make when generating.""" + presence_penalty: Optional[float] = None + """Penalizes repeated tokens.""" + frequency_penalty: Optional[float] = None + """Penalizes repeated tokens according to frequency.""" + seed: Optional[int] = None + """Seed for generation""" + logprobs: Optional[bool] = None + """Whether to return logprobs.""" + top_logprobs: Optional[int] = None + """Number of most likely tokens to return at each token position, each with + an associated log probability. `logprobs` must be set to true + if this parameter is used.""" + logit_bias: Optional[dict[int, int]] = None + """Modify the likelihood of specified tokens appearing in the completion.""" + streaming: bool = False + """Whether to stream the results or not.""" + n: Optional[int] = None + """Number of chat completions to generate for each prompt.""" + top_p: Optional[float] = None + """Total probability mass of tokens to consider at each step.""" + max_tokens: Optional[int] = Field(default=None) + """Maximum number of tokens to generate.""" + reasoning_effort: Optional[str] = None + """Constrains effort on reasoning for reasoning models. For use with the Chat + Completions API. + + Reasoning models only, like OpenAI o1, o3, and o4-mini. + + Currently supported values are low, medium, and high. 
Reducing reasoning effort
+    can result in faster responses and fewer tokens used on reasoning in a response.
+
+    .. versionadded:: 0.2.14
+    """
+    reasoning: Optional[dict[str, Any]] = None
+    """Reasoning parameters for reasoning models, i.e., OpenAI o-series models (o1, o3,
+    o4-mini, etc.). For use with the Responses API.
+
+    Example:
+
+    .. code-block:: python
+
+        reasoning={
+            "effort": "medium",  # can be "low", "medium", or "high"
+            "summary": "auto",  # can be "auto", "concise", or "detailed"
+        }
+
+    .. versionadded:: 0.3.24
+    """
+    tiktoken_model_name: Optional[str] = None
+    """The model name to pass to tiktoken when using this class.
+    Tiktoken is used to count the number of tokens in documents to constrain
+    them to be under a certain limit. By default, when set to None, this will
+    be the same as the embedding model name. However, there are some cases
+    where you may want to use this Embedding class with a model name not
+    supported by tiktoken. This can include when using Azure embeddings or
+    when using one of the many model providers that expose an OpenAI-like
+    API but with different models. In those cases, in order to avoid erroring
+    when tiktoken is called, you can specify a model name to use here."""
+    default_headers: Union[Mapping[str, str], None] = None
+    default_query: Union[Mapping[str, object], None] = None
+    # Configure a custom httpx client. See the
+    # [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
+    http_client: Union[Any, None] = Field(default=None, exclude=True)
+    """Optional ``httpx.Client``. Only used for sync invocations. Must specify
+    ``http_async_client`` as well if you'd like a custom client for async
+    invocations.
+    """
+    http_async_client: Union[Any, None] = Field(default=None, exclude=True)
+    """Optional ``httpx.AsyncClient``. Only used for async invocations. Must specify
+    ``http_client`` as well if you'd like a custom client for sync invocations."""
+    stop: Optional[Union[list[str], str]] = Field(default=None, alias="stop_sequences")
+    """Default stop sequences."""
+    extra_body: Optional[Mapping[str, Any]] = None
+    """Optional additional JSON properties to include in the request parameters when
+    making requests to OpenAI compatible APIs, such as vLLM."""
+    include_response_headers: bool = False
+    """Whether to include response headers in the output message response_metadata."""
+    disabled_params: Optional[dict[str, Any]] = Field(default=None)
+    """Parameters of the OpenAI client or chat.completions endpoint that should be
+    disabled for the given model.
+
+    Should be specified as ``{"param": None | ['val1', 'val2']}`` where the key is the
+    parameter and the value is either None, meaning that parameter should never be
+    used, or it's a list of disabled values for the parameter.
+
+    For example, older models may not support the ``'parallel_tool_calls'`` parameter at
+    all, in which case ``disabled_params={"parallel_tool_calls": None}`` can be passed
+    in.
+
+    If a parameter is disabled then it will not be used by default in any methods, e.g.
+    in :meth:`~langchain_openai.chat_models.base.ChatOpenAI.with_structured_output`.
+    However this does not prevent a user from directly passing in the parameter during
+    invocation.
+    """
+
+    include: Optional[list[str]] = None
+    """Additional fields to include in generations from Responses API.
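+
+    These values are passed through as the ``include`` parameter when requests
+    are made against the Responses API.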
+ + Supported values: + + - ``"file_search_call.results"`` + - ``"message.input_image.image_url"`` + - ``"computer_call_output.output.image_url"`` + - ``"reasoning.encrypted_content"`` + - ``"code_interpreter_call.outputs"`` + + .. versionadded:: 0.3.24 + """ + + service_tier: Optional[str] = None + """Latency tier for request. Options are ``'auto'``, ``'default'``, or ``'flex'``. + Relevant for users of OpenAI's scale tier service. + """ + + store: Optional[bool] = None + """If True, OpenAI may store response data for future use. Defaults to True + for the Responses API and False for the Chat Completions API. + + .. versionadded:: 0.3.24 + """ + + truncation: Optional[str] = None + """Truncation strategy (Responses API). Can be ``'auto'`` or ``'disabled'`` + (default). If ``'auto'``, model may drop input items from the middle of the + message sequence to fit the context window. + + .. versionadded:: 0.3.24 + """ + + use_previous_response_id: bool = False + """If True, always pass ``previous_response_id`` using the ID of the most recent + response. Responses API only. + + Input messages up to the most recent response will be dropped from request + payloads. + + For example, the following two are equivalent: + + .. code-block:: python + + llm = ChatOpenAI( + model="o4-mini", + use_previous_response_id=True, + ) + llm.invoke( + [ + HumanMessage("Hello"), + AIMessage("Hi there!", response_metadata={"id": "resp_123"}), + HumanMessage("How are you?"), + ] + ) + + .. code-block:: python + + llm = ChatOpenAI( + model="o4-mini", + use_responses_api=True, + ) + llm.invoke([HumanMessage("How are you?")], previous_response_id="resp_123") + + .. versionadded:: 0.3.26 + """ + + use_responses_api: Optional[bool] = None + """Whether to use the Responses API instead of the Chat API. + + If not specified then will be inferred based on invocation params. + + .. versionadded:: 0.3.9 + """ + + model_config = ConfigDict(populate_by_name=True) + + @model_validator(mode="before") + @classmethod + def build_extra(cls, values: dict[str, Any]) -> Any: + """Build extra kwargs from additional params that were passed in.""" + all_required_field_names = get_pydantic_field_names(cls) + values = _build_model_kwargs(values, all_required_field_names) + return values + + @model_validator(mode="before") + @classmethod + def validate_temperature(cls, values: dict[str, Any]) -> Any: + """Currently o1 models only allow temperature=1.""" + model = values.get("model_name") or values.get("model") or "" + if model.startswith("o1") and "temperature" not in values: + values["temperature"] = 1 + return values + + @model_validator(mode="after") + def validate_environment(self) -> Self: + """Validate that api key and python package exists in environment.""" + if self.n is not None and self.n < 1: + raise ValueError("n must be at least 1.") + elif self.n is not None and self.n > 1 and self.streaming: + raise ValueError("n must be 1 when streaming.") + + # Check OPENAI_ORGANIZATION for backwards compatibility. 
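+        # OPENAI_ORG_ID is the current variable; OPENAI_ORGANIZATION is read as
+        # a legacy fallback.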
+ self.openai_organization = ( + self.openai_organization + or os.getenv("OPENAI_ORG_ID") + or os.getenv("OPENAI_ORGANIZATION") + ) + self.openai_api_base = self.openai_api_base or os.getenv("OPENAI_API_BASE") + client_params: dict = { + "api_key": ( + self.openai_api_key.get_secret_value() if self.openai_api_key else None + ), + "organization": self.openai_organization, + "base_url": self.openai_api_base, + "timeout": self.request_timeout, + "default_headers": self.default_headers, + "default_query": self.default_query, + } + if self.max_retries is not None: + client_params["max_retries"] = self.max_retries + + if self.openai_proxy and (self.http_client or self.http_async_client): + openai_proxy = self.openai_proxy + http_client = self.http_client + http_async_client = self.http_async_client + raise ValueError( + "Cannot specify 'openai_proxy' if one of " + "'http_client'/'http_async_client' is already specified. Received:\n" + f"{openai_proxy=}\n{http_client=}\n{http_async_client=}" + ) + if not self.client: + if self.openai_proxy and not self.http_client: + try: + import httpx + except ImportError as e: + raise ImportError( + "Could not import httpx python package. " + "Please install it with `pip install httpx`." + ) from e + self.http_client = httpx.Client( + proxy=self.openai_proxy, verify=global_ssl_context + ) + sync_specific = { + "http_client": self.http_client + or _get_default_httpx_client(self.openai_api_base, self.request_timeout) + } + self.root_client = openai.OpenAI(**client_params, **sync_specific) # type: ignore[arg-type] + self.client = self.root_client.chat.completions + if not self.async_client: + if self.openai_proxy and not self.http_async_client: + try: + import httpx + except ImportError as e: + raise ImportError( + "Could not import httpx python package. " + "Please install it with `pip install httpx`." 
+ ) from e + self.http_async_client = httpx.AsyncClient( + proxy=self.openai_proxy, verify=global_ssl_context + ) + async_specific = { + "http_client": self.http_async_client + or _get_default_async_httpx_client( + self.openai_api_base, self.request_timeout + ) + } + self.root_async_client = openai.AsyncOpenAI( + **client_params, + **async_specific, # type: ignore[arg-type] + ) + self.async_client = self.root_async_client.chat.completions + return self + + @property + def _default_params(self) -> dict[str, Any]: + """Get the default parameters for calling OpenAI API.""" + exclude_if_none = { + "presence_penalty": self.presence_penalty, + "frequency_penalty": self.frequency_penalty, + "seed": self.seed, + "top_p": self.top_p, + "logprobs": self.logprobs, + "top_logprobs": self.top_logprobs, + "logit_bias": self.logit_bias, + "stop": self.stop or None, # also exclude empty list for this + "max_tokens": self.max_tokens, + "extra_body": self.extra_body, + "n": self.n, + "temperature": self.temperature, + "reasoning_effort": self.reasoning_effort, + "reasoning": self.reasoning, + "include": self.include, + "service_tier": self.service_tier, + "truncation": self.truncation, + "store": self.store, + } + + params = { + "model": self.model_name, + "stream": self.streaming, + **{k: v for k, v in exclude_if_none.items() if v is not None}, + **self.model_kwargs, + } + + return params + + def _convert_chunk_to_message_chunk( + self, chunk: dict, base_generation_info: Optional[dict] + ) -> Optional[AIMessageChunkV1]: + if chunk.get("type") == "content.delta": # from beta.chat.completions.stream + return None + token_usage = chunk.get("usage") + choices = ( + chunk.get("choices", []) + # from beta.chat.completions.stream + or chunk.get("chunk", {}).get("choices", []) + ) + + usage_metadata: Optional[UsageMetadata] = ( + _create_usage_metadata(token_usage) if token_usage else None + ) + if len(choices) == 0: + # logprobs is implicitly None + return AIMessageChunkV1( + content=[], + usage_metadata=usage_metadata, + response_metadata=cast(ResponseMetadata, base_generation_info), + ) + + choice = choices[0] + if choice["delta"] is None: + return None + + message_chunk = _convert_delta_to_message_chunk(choice["delta"]) + generation_info = {**base_generation_info} if base_generation_info else {} + + if finish_reason := choice.get("finish_reason"): + generation_info["finish_reason"] = finish_reason + if model_name := chunk.get("model"): + generation_info["model_name"] = model_name + if system_fingerprint := chunk.get("system_fingerprint"): + generation_info["system_fingerprint"] = system_fingerprint + if service_tier := chunk.get("service_tier"): + generation_info["service_tier"] = service_tier + + logprobs = choice.get("logprobs") + if logprobs: + generation_info["logprobs"] = logprobs + + if usage_metadata: + message_chunk.usage_metadata = usage_metadata + + message_chunk.response_metadata = { + **message_chunk.response_metadata, + **generation_info, + } + return message_chunk + + def _stream_responses( + self, + messages: list[MessageV1], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> Iterator[AIMessageChunkV1]: + kwargs["stream"] = True + payload = self._get_request_payload(messages, stop=stop, **kwargs) + if self.include_response_headers: + raw_context_manager = self.root_client.with_raw_response.responses.create( + **payload + ) + context_manager = raw_context_manager.parse() + headers = {"headers": dict(raw_context_manager.headers)} + 
else: + context_manager = self.root_client.responses.create(**payload) + headers = {} + original_schema_obj = kwargs.get("response_format") + + with context_manager as response: + is_first_chunk = True + current_index = -1 + current_output_index = -1 + current_sub_index = -1 + for chunk in response: + metadata = headers if is_first_chunk else {} + ( + current_index, + current_output_index, + current_sub_index, + generation_chunk, + ) = _convert_responses_chunk_to_generation_chunk( + chunk, + current_index, + current_output_index, + current_sub_index, + schema=original_schema_obj, + metadata=metadata, + ) + if generation_chunk: + if run_manager: + run_manager.on_llm_new_token( + generation_chunk.text, chunk=generation_chunk + ) + is_first_chunk = False + yield generation_chunk + + async def _astream_responses( + self, + messages: list[MessageV1], + stop: Optional[list[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AsyncIterator[AIMessageChunkV1]: + kwargs["stream"] = True + payload = self._get_request_payload(messages, stop=stop, **kwargs) + if self.include_response_headers: + raw_context_manager = ( + await self.root_async_client.with_raw_response.responses.create( + **payload + ) + ) + context_manager = raw_context_manager.parse() + headers = {"headers": dict(raw_context_manager.headers)} + else: + context_manager = await self.root_async_client.responses.create(**payload) + headers = {} + original_schema_obj = kwargs.get("response_format") + + async with context_manager as response: + is_first_chunk = True + current_index = -1 + current_output_index = -1 + current_sub_index = -1 + async for chunk in response: + metadata = headers if is_first_chunk else {} + ( + current_index, + current_output_index, + current_sub_index, + generation_chunk, + ) = _convert_responses_chunk_to_generation_chunk( + chunk, + current_index, + current_output_index, + current_sub_index, + schema=original_schema_obj, + metadata=metadata, + ) + if generation_chunk: + if run_manager: + await run_manager.on_llm_new_token( + generation_chunk.text, chunk=generation_chunk + ) + is_first_chunk = False + yield generation_chunk + + def _should_stream_usage( + self, stream_usage: Optional[bool] = None, **kwargs: Any + ) -> bool: + """Determine whether to include usage metadata in streaming output. + + For backwards compatibility, we check for `stream_options` passed + explicitly to kwargs or in the model_kwargs and override self.stream_usage. + """ + stream_usage_sources = [ # order of precedence + stream_usage, + kwargs.get("stream_options", {}).get("include_usage"), + self.model_kwargs.get("stream_options", {}).get("include_usage"), + self.stream_usage, + ] + for source in stream_usage_sources: + if isinstance(source, bool): + return source + return self.stream_usage + + def _stream( + self, + messages: list[MessageV1], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + *, + stream_usage: Optional[bool] = None, + **kwargs: Any, + ) -> Iterator[AIMessageChunkV1]: + kwargs["stream"] = True + stream_usage = self._should_stream_usage(stream_usage, **kwargs) + if stream_usage: + kwargs["stream_options"] = {"include_usage": stream_usage} + payload = self._get_request_payload(messages, stop=stop, **kwargs) + base_generation_info = {} + + if "response_format" in payload: + if self.include_response_headers: + warnings.warn( + "Cannot currently include response headers when response_format is " + "specified." 
+ ) + payload.pop("stream") + response_stream = self.root_client.beta.chat.completions.stream(**payload) + context_manager = response_stream + else: + if self.include_response_headers: + raw_response = self.client.with_raw_response.create(**payload) + response = raw_response.parse() + base_generation_info = {"headers": dict(raw_response.headers)} + else: + response = self.client.create(**payload) + context_manager = response + try: + with context_manager as response: + is_first_chunk = True + for chunk in response: + if not isinstance(chunk, dict): + chunk = chunk.model_dump() + message_chunk = self._convert_chunk_to_message_chunk( + chunk, base_generation_info if is_first_chunk else {} + ) + if message_chunk is None: + continue + logprobs = message_chunk.response_metadata.get("logprobs") + if run_manager: + run_manager.on_llm_new_token( + message_chunk.text, chunk=message_chunk, logprobs=logprobs + ) + is_first_chunk = False + yield message_chunk + except openai.BadRequestError as e: + _handle_openai_bad_request(e) + if hasattr(response, "get_final_completion") and "response_format" in payload: + final_completion = response.get_final_completion() + message_chunk = self._get_message_chunk_from_completion(final_completion) + if run_manager: + run_manager.on_llm_new_token(message_chunk.text, chunk=message_chunk) + yield message_chunk + + def _invoke( + self, + messages: list[MessageV1], + stop: Optional[list[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AIMessageV1: + if self.streaming: + stream_iter = self._stream( + messages, stop=stop, run_manager=run_manager, **kwargs + ) + return generate_from_stream(stream_iter) + payload = self._get_request_payload(messages, stop=stop, **kwargs) + generation_info = None + if "response_format" in payload: + if self.include_response_headers: + warnings.warn( + "Cannot currently include response headers when response_format is " + "specified." 
+                )
+            payload.pop("stream")
+            try:
+                response = self.root_client.beta.chat.completions.parse(**payload)
+            except openai.BadRequestError as e:
+                _handle_openai_bad_request(e)
+        elif self._use_responses_api(payload):
+            original_schema_obj = kwargs.get("response_format")
+            if original_schema_obj and _is_pydantic_class(original_schema_obj):
+                response = self.root_client.responses.parse(**payload)
+            else:
+                if self.include_response_headers:
+                    raw_response = self.root_client.with_raw_response.responses.create(
+                        **payload
+                    )
+                    response = raw_response.parse()
+                    generation_info = {"headers": dict(raw_response.headers)}
+                else:
+                    response = self.root_client.responses.create(**payload)
+            return _construct_lc_result_from_responses_api(
+                response, schema=original_schema_obj, metadata=generation_info
+            )
+        elif self.include_response_headers:
+            raw_response = self.client.with_raw_response.create(**payload)
+            response = raw_response.parse()
+            generation_info = {"headers": dict(raw_response.headers)}
+        else:
+            response = self.client.create(**payload)
+        return self._create_ai_message(response, generation_info)
+
+    def _use_responses_api(self, payload: dict) -> bool:
+        if isinstance(self.use_responses_api, bool):
+            return self.use_responses_api
+        elif self.include is not None:
+            return True
+        elif self.reasoning is not None:
+            return True
+        elif self.truncation is not None:
+            return True
+        elif self.use_previous_response_id:
+            return True
+        else:
+            return _use_responses_api(payload)
+
+    def _get_request_payload(
+        self,
+        input_: LanguageModelInput,
+        *,
+        stop: Optional[list[str]] = None,
+        **kwargs: Any,
+    ) -> dict:
+        messages = self._convert_input(input_)
+        if stop is not None:
+            kwargs["stop"] = stop
+
+        payload = {**self._default_params, **kwargs}
+        if self._use_responses_api(payload):
+            if self.use_previous_response_id:
+                last_messages, previous_response_id = _get_last_messages(messages)
+                payload_to_use = last_messages if previous_response_id else messages
+                if previous_response_id:
+                    payload["previous_response_id"] = previous_response_id
+                payload = _construct_responses_api_payload(payload_to_use, payload)
+            else:
+                payload = _construct_responses_api_payload(messages, payload)
+        else:
+            payload["messages"] = [
+                _convert_message_to_dict(_convert_from_v1_to_chat_completions(m))
+                if isinstance(m, AIMessageV1)
+                else _convert_message_to_dict(m)
+                for m in messages
+            ]
+        return payload
+
+    def _create_ai_message(
+        self,
+        response: Union[dict, openai.BaseModel],
+        generation_info: Optional[dict] = None,
+    ) -> AIMessageV1:
+        response_dict = (
+            response if isinstance(response, dict) else response.model_dump()
+        )
+        # Sometimes the model call fails and the API returns an error payload;
+        # raise it here (such responses are typically accompanied by a null value
+        # for `choices`, which is handled separately below).
+        if response_dict.get("error"):
+            raise ValueError(response_dict.get("error"))
+
+        # Raise informative error messages for non-OpenAI chat completions APIs
+        # that return malformed responses.
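+        # (e.g. a response body missing the "choices" key entirely, or returning
+        # "choices": null)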
+ try: + choices = response_dict["choices"] + except KeyError as e: + raise KeyError( + f"Response missing `choices` key: {response_dict.keys()}" + ) from e + + if choices is None: + raise TypeError("Received response with null value for `choices`.") + + token_usage = response_dict.get("usage") + + for res in choices: + message = cast(AIMessageV1, _convert_dict_to_message(res["message"])) + if token_usage: + message.usage_metadata = _create_usage_metadata(token_usage) + generation_info = generation_info or {} + generation_info["finish_reason"] = ( + res.get("finish_reason") + if res.get("finish_reason") is not None + else generation_info.get("finish_reason") + ) + if "logprobs" in res: + generation_info["logprobs"] = res["logprobs"] + message.response_metadata = {**message.response_metadata, **generation_info} + llm_output = { + "model_name": response_dict.get("model", self.model_name), + "model_provider": "openai", + "system_fingerprint": response_dict.get("system_fingerprint", ""), + } + if "id" in response_dict: + llm_output["id"] = response_dict["id"] + if "service_tier" in response_dict: + llm_output["service_tier"] = response_dict["service_tier"] + + if isinstance(response, openai.BaseModel) and getattr( + response, "choices", None + ): + oai_message = response.choices[0].message # type: ignore[attr-defined] + if hasattr(oai_message, "parsed"): + message.parsed = oai_message.parsed + if refusal := getattr(oai_message, "refusal", None): + message.content.append( + {"type": "non_standard", "value": {"refusal": refusal}} + ) + + message.response_metadata = {**message.response_metadata, **llm_output} # type: ignore[typeddict-item] + return message + + async def _astream( + self, + messages: list[MessageV1], + stop: Optional[list[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + *, + stream_usage: Optional[bool] = None, + **kwargs: Any, + ) -> AsyncIterator[AIMessageChunkV1]: + kwargs["stream"] = True + stream_usage = self._should_stream_usage(stream_usage, **kwargs) + if stream_usage: + kwargs["stream_options"] = {"include_usage": stream_usage} + payload = self._get_request_payload(messages, stop=stop, **kwargs) + base_generation_info = {} + + if "response_format" in payload: + if self.include_response_headers: + warnings.warn( + "Cannot currently include response headers when response_format is " + "specified." 
+ ) + payload.pop("stream") + response_stream = self.root_async_client.beta.chat.completions.stream( + **payload + ) + context_manager = response_stream + else: + if self.include_response_headers: + raw_response = await self.async_client.with_raw_response.create( + **payload + ) + response = raw_response.parse() + base_generation_info = {"headers": dict(raw_response.headers)} + else: + response = await self.async_client.create(**payload) + context_manager = response + try: + async with context_manager as response: + is_first_chunk = True + async for chunk in response: + if not isinstance(chunk, dict): + chunk = chunk.model_dump() + message_chunk = self._convert_chunk_to_message_chunk( + chunk, base_generation_info if is_first_chunk else {} + ) + if message_chunk is None: + continue + logprobs = message_chunk.response_metadata.get("logprobs") + if run_manager: + await run_manager.on_llm_new_token( + message_chunk.text, chunk=message_chunk, logprobs=logprobs + ) + is_first_chunk = False + yield message_chunk + except openai.BadRequestError as e: + _handle_openai_bad_request(e) + if hasattr(response, "get_final_completion") and "response_format" in payload: + final_completion = await response.get_final_completion() + message_chunk = self._get_message_chunk_from_completion(final_completion) + if run_manager: + await run_manager.on_llm_new_token( + message_chunk.text, chunk=message_chunk + ) + yield message_chunk + + async def _ainvoke( + self, + messages: list[MessageV1], + stop: Optional[list[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> AIMessageV1: + if self.streaming: + stream_iter = self._astream( + messages, stop=stop, run_manager=run_manager, **kwargs + ) + return await agenerate_from_stream(stream_iter) + payload = self._get_request_payload(messages, stop=stop, **kwargs) + generation_info = None + if "response_format" in payload: + if self.include_response_headers: + warnings.warn( + "Cannot currently include response headers when response_format is " + "specified." 
+ ) + payload.pop("stream") + try: + response = await self.root_async_client.beta.chat.completions.parse( + **payload + ) + except openai.BadRequestError as e: + _handle_openai_bad_request(e) + elif self._use_responses_api(payload): + original_schema_obj = kwargs.get("response_format") + if original_schema_obj and _is_pydantic_class(original_schema_obj): + response = await self.root_async_client.responses.parse(**payload) + else: + if self.include_response_headers: + raw_response = ( + await self.root_async_client.with_raw_response.responses.create( + **payload + ) + ) + response = raw_response.parse() + generation_info = {"headers": dict(raw_response.headers)} + else: + response = await self.root_async_client.responses.create(**payload) + return _construct_lc_result_from_responses_api( + response, schema=original_schema_obj, metadata=generation_info + ) + elif self.include_response_headers: + raw_response = await self.async_client.with_raw_response.create(**payload) + response = raw_response.parse() + generation_info = {"headers": dict(raw_response.headers)} + else: + response = await self.async_client.create(**payload) + return await run_in_executor( + None, self._create_ai_message, response, generation_info + ) + + @property + def _identifying_params(self) -> dict[str, Any]: + """Get the identifying parameters.""" + return {"model_name": self.model_name, **self._default_params} + + def _get_invocation_params( + self, stop: Optional[list[str]] = None, **kwargs: Any + ) -> dict[str, Any]: + """Get the parameters used to invoke the model.""" + params = { + "model": self.model_name, + **super()._get_invocation_params(stop=stop), + **self._default_params, + **kwargs, + } + # Redact headers from built-in remote MCP tool invocations + if (tools := params.get("tools")) and isinstance(tools, list): + params["tools"] = [ + ({**tool, "headers": "**REDACTED**"} if "headers" in tool else tool) + if isinstance(tool, dict) and tool.get("type") == "mcp" + else tool + for tool in tools + ] + + return params + + def _get_ls_params( + self, stop: Optional[list[str]] = None, **kwargs: Any + ) -> LangSmithParams: + """Get standard params for tracing.""" + params = self._get_invocation_params(stop=stop, **kwargs) + ls_params = LangSmithParams( + ls_provider="openai", + ls_model_name=self.model_name, + ls_model_type="chat", + ls_temperature=params.get("temperature", self.temperature), + ) + if ls_max_tokens := params.get("max_tokens", self.max_tokens) or params.get( + "max_completion_tokens", self.max_tokens + ): + ls_params["ls_max_tokens"] = ls_max_tokens + if ls_stop := stop or params.get("stop", None): + ls_params["ls_stop"] = ls_stop + return ls_params + + @property + def _llm_type(self) -> str: + """Return type of chat model.""" + return "openai-chat" + + def _get_encoding_model(self) -> tuple[str, tiktoken.Encoding]: + if self.tiktoken_model_name is not None: + model = self.tiktoken_model_name + else: + model = self.model_name + try: + encoding = tiktoken.encoding_for_model(model) + except KeyError: + encoder = "cl100k_base" + if self.model_name.startswith("gpt-4o") or self.model_name.startswith( + "gpt-4.1" + ): + encoder = "o200k_base" + encoding = tiktoken.get_encoding(encoder) + return model, encoding + + def get_token_ids(self, text: str) -> list[int]: + """Get the tokens present in the text with tiktoken package.""" + if self.custom_get_token_ids is not None: + return self.custom_get_token_ids(text) + # tiktoken NOT supported for Python 3.7 or below + if sys.version_info[1] <= 7: + return 
super().get_token_ids(text)
+ _, encoding_model = self._get_encoding_model()
+ return encoding_model.encode(text)
+
+ def get_num_tokens_from_messages(
+ self,
+ messages: list[MessageV1],
+ tools: Optional[
+ Sequence[Union[dict[str, Any], type, Callable, BaseTool]]
+ ] = None,
+ ) -> int:
+ """Calculate num tokens for ``gpt-3.5-turbo`` and ``gpt-4`` with ``tiktoken`` package.
+
+ **Requirements**: You must have ``pillow`` installed if you want to count
+ image tokens when specifying the image as a base64 string, and you must
+ have both ``pillow`` and ``httpx`` installed when specifying the image
+ as a URL. If these aren't installed, image inputs will be ignored in token
+ counting.
+
+ `OpenAI reference <https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb>`__
+
+ Args:
+ messages: The message inputs to tokenize.
+ tools: If provided, sequence of dict, BaseModel, function, or BaseTools
+ to be converted to tool schemas.
+ """ # noqa: E501
+ # TODO: Count bound tools as part of input.
+ if tools is not None:
+ warnings.warn(
+ "Counting tokens in tool schemas is not yet supported. Ignoring tools."
+ )
+ model, encoding = self._get_encoding_model()
+ if model.startswith("gpt-3.5-turbo-0301"):
+ # every message follows {role/name}\n{content}\n
+ tokens_per_message = 4
+ # if there's a name, the role is omitted
+ tokens_per_name = -1
+ elif model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4"):
+ tokens_per_message = 3
+ tokens_per_name = 1
+ else:
+ raise NotImplementedError(
+ f"get_num_tokens_from_messages() is not presently implemented "
+ f"for model {model}. See "
+ "https://platform.openai.com/docs/guides/text-generation/managing-tokens" # noqa: E501
+ " for information on how messages are converted to tokens."
+ )
+ num_tokens = 0
+ messages_dict = [_convert_message_to_dict(m) for m in messages]
+ for message in messages_dict:
+ num_tokens += tokens_per_message
+ for key, value in message.items():
+ # This is an inferred approximation. OpenAI does not document how to
+ # count tool message tokens.
+ if key == "tool_call_id":
+ num_tokens += 3
+ continue
+ if isinstance(value, list):
+ # content or tool calls
+ for val in value:
+ if isinstance(val, str) or val["type"] == "text":
+ text = val["text"] if isinstance(val, dict) else val
+ num_tokens += len(encoding.encode(text))
+ elif val["type"] == "image_url":
+ if val["image_url"].get("detail") == "low":
+ num_tokens += 85
+ else:
+ image_size = _url_to_size(val["image_url"]["url"])
+ if not image_size:
+ continue
+ num_tokens += _count_image_tokens(*image_size)
+ # Tool/function call token counting is not documented by OpenAI.
+ # This is an approximation.
+ elif val["type"] == "function":
+ num_tokens += len(
+ encoding.encode(val["function"]["arguments"])
+ )
+ num_tokens += len(encoding.encode(val["function"]["name"]))
+ elif val["type"] == "file":
+ warnings.warn(
+ "Token counts for file inputs are not supported. "
+ "Ignoring file inputs."
+ )
+ pass
+ else:
+ raise ValueError(
+ f"Unrecognized content block type\n\n{val}"
+ )
+ elif not value:
+ continue
+ else:
+ # Cast str(value) in case the message value is not a string
+ # This occurs with function messages
+ num_tokens += len(encoding.encode(str(value)))
+ if key == "name":
+ num_tokens += tokens_per_name
+ # every reply is primed with assistant
+ num_tokens += 3
+ return num_tokens
+
+ def bind_tools(
+ self,
+ tools: Sequence[Union[dict[str, Any], type, Callable, BaseTool]],
+ *,
+ tool_choice: Optional[
+ Union[dict, str, Literal["auto", "none", "required", "any"], bool]
+ ] = None,
+ strict: Optional[bool] = None,
+ parallel_tool_calls: Optional[bool] = None,
+ **kwargs: Any,
+ ) -> Runnable[LanguageModelInput, AIMessageV1]:
+ """Bind tool-like objects to this chat model.
+
+ Assumes model is compatible with OpenAI tool-calling API.
+
+ Args:
+ tools: A list of tool definitions to bind to this chat model.
+ Supports any tool definition handled by
+ :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`.
+ tool_choice: Which tool to require the model to call. Options are:
+
+ - str of the form ``"<<tool_name>>"``: calls the ``<<tool_name>>`` tool.
+ - ``"auto"``: automatically selects a tool (including no tool).
+ - ``"none"``: does not call a tool.
+ - ``"any"`` or ``"required"`` or ``True``: force at least one tool to be called.
+ - dict of the form ``{"type": "function", "function": {"name": <<tool_name>>}}``: calls the ``<<tool_name>>`` tool.
+ - ``False`` or ``None``: no effect, default OpenAI behavior.
+ strict: If True, model output is guaranteed to exactly match the JSON Schema
+ provided in the tool definition, and the input schema will be
+ validated according to
+ https://platform.openai.com/docs/guides/structured-outputs/supported-schemas.
+ If False, input schema will not be validated and model output will not
+ be validated.
+ If None, ``strict`` argument will not be passed to the model.
+ parallel_tool_calls: Set to ``False`` to disable parallel tool use.
+ Defaults to ``None`` (no specification, which allows parallel tool use).
+ kwargs: Any additional parameters are passed directly to
+ :meth:`~langchain_openai.chat_models.base.ChatOpenAI.bind`.
+
+ .. versionchanged:: 0.1.21
+
+ Support for ``strict`` argument added.
+
+ """ # noqa: E501
+
+ if parallel_tool_calls is not None:
+ kwargs["parallel_tool_calls"] = parallel_tool_calls
+ formatted_tools = [
+ convert_to_openai_tool(tool, strict=strict) for tool in tools
+ ]
+ tool_names = []
+ for tool in formatted_tools:
+ if "function" in tool:
+ tool_names.append(tool["function"]["name"])
+ elif "name" in tool:
+ tool_names.append(tool["name"])
+ else:
+ pass
+ if tool_choice:
+ if isinstance(tool_choice, str):
+ # tool_choice is a tool/function name
+ if tool_choice in tool_names:
+ tool_choice = {
+ "type": "function",
+ "function": {"name": tool_choice},
+ }
+ elif tool_choice in WellKnownTools:
+ tool_choice = {"type": tool_choice}
+ # 'any' is not natively supported by OpenAI API.
+ # We support 'any' since other models use this instead of 'required'.
+ elif tool_choice == "any":
+ tool_choice = "required"
+ else:
+ pass
+ elif isinstance(tool_choice, bool):
+ tool_choice = "required"
+ elif isinstance(tool_choice, dict):
+ pass
+ else:
+ raise ValueError(
+ f"Unrecognized tool_choice type. Expected str, bool or dict. 
" + f"Received: {tool_choice}" + ) + kwargs["tool_choice"] = tool_choice + return super().bind(tools=formatted_tools, **kwargs) + + def with_structured_output( + self, + schema: Optional[_DictOrPydanticClass] = None, + *, + method: Literal[ + "function_calling", "json_mode", "json_schema" + ] = "function_calling", + include_raw: bool = False, + strict: Optional[bool] = None, + tools: Optional[list] = None, + **kwargs: Any, + ) -> Runnable[LanguageModelInput, _DictOrPydantic]: + """Model wrapper that returns outputs formatted to match the given schema. + + Args: + schema: + The output schema. Can be passed in as: + + - an OpenAI function/tool schema, + - a JSON Schema, + - a TypedDict class (support added in 0.1.20), + - or a Pydantic class. + + If ``schema`` is a Pydantic class then the model output will be a + Pydantic instance of that class, and the model-generated fields will be + validated by the Pydantic class. Otherwise the model output will be a + dict and will not be validated. See :meth:`langchain_core.utils.function_calling.convert_to_openai_tool` + for more on how to properly specify types and descriptions of + schema fields when specifying a Pydantic or TypedDict class. + + method: The method for steering model generation, one of: + + - "function_calling": + Uses OpenAI's tool-calling (formerly called function calling) + API: https://platform.openai.com/docs/guides/function-calling + - "json_schema": + Uses OpenAI's Structured Output API: https://platform.openai.com/docs/guides/structured-outputs + Supported for "gpt-4o-mini", "gpt-4o-2024-08-06", "o1", and later + models. + - "json_mode": + Uses OpenAI's JSON mode. Note that if using JSON mode then you + must include instructions for formatting the output into the + desired schema into the model call: + https://platform.openai.com/docs/guides/structured-outputs/json-mode + + Learn more about the differences between the methods and which models + support which methods here: + + - https://platform.openai.com/docs/guides/structured-outputs/structured-outputs-vs-json-mode + - https://platform.openai.com/docs/guides/structured-outputs/function-calling-vs-response-format + + include_raw: + If False then only the parsed structured output is returned. If + an error occurs during model output parsing it will be raised. If True + then both the raw model response (an AIMessage) and the parsed model + response will be returned. If an error occurs during output parsing it + will be caught and returned as well. The final output is always a dict + with keys "raw", "parsed", and "parsing_error". + strict: + + - True: + Model output is guaranteed to exactly match the schema. + The input schema will also be validated according to + https://platform.openai.com/docs/guides/structured-outputs/supported-schemas + - False: + Input schema will not be validated and model output will not be + validated. + - None: + ``strict`` argument will not be passed to the model. + + tools: + A list of tool-like objects to bind to the chat model. Requires that: + + - ``method`` is ``"json_schema"`` (default). + - ``strict=True`` + - ``include_raw=True`` + + If a model elects to call a + tool, the resulting ``AIMessage`` in ``"raw"`` will include tool calls. + + .. dropdown:: Example + + .. 
code-block:: python
+
+ from langchain.chat_models import init_chat_model
+ from pydantic import BaseModel
+
+
+ class ResponseSchema(BaseModel):
+ response: str
+
+
+ def get_weather(location: str) -> str:
+ \"\"\"Get weather at a location.\"\"\"
+ pass
+
+ llm = init_chat_model("openai:gpt-4o-mini")
+
+ structured_llm = llm.with_structured_output(
+ ResponseSchema,
+ tools=[get_weather],
+ strict=True,
+ include_raw=True,
+ )
+
+ structured_llm.invoke("What's the weather in Boston?")
+
+ .. code-block:: python
+
+ {
+ "raw": AIMessage(content="", tool_calls=[...], ...),
+ "parsing_error": None,
+ "parsed": None,
+ }
+
+ kwargs: Additional keyword args are passed through to the model.
+
+ Returns:
+ A Runnable that takes the same inputs as a :class:`~langchain_core.v1.chat_models.BaseChatModel`.
+
+ | If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). Otherwise, if ``include_raw`` is False then Runnable outputs a dict.
+
+ | If ``include_raw`` is True, then Runnable outputs a dict with keys:
+
+ - "raw": AIMessage
+ - "parsed": None if there was a parsing error, otherwise the type depends on the ``schema`` as described above.
+ - "parsing_error": Optional[BaseException]
+
+ .. versionchanged:: 0.1.20
+
+ Added support for TypedDict class ``schema``.
+
+ .. versionchanged:: 0.1.21
+
+ Support for ``strict`` argument added.
+ Support for ``method="json_schema"`` added.
+
+ .. versionchanged:: 0.3.12
+ Support for ``tools`` added.
+
+ .. versionchanged:: 0.3.21
+ Pass ``kwargs`` through to the model.
+ """ # noqa: E501
+ if strict is not None and method == "json_mode":
+ raise ValueError(
+ "Argument `strict` is not supported with `method`='json_mode'"
+ )
+ is_pydantic_schema = _is_pydantic_class(schema)
+
+ if method == "json_schema":
+ # Check for Pydantic BaseModel V1
+ if (
+ is_pydantic_schema and issubclass(schema, BaseModelV1) # type: ignore[arg-type]
+ ):
+ warnings.warn(
+ "Received a Pydantic BaseModel V1 schema. This is not supported by "
+ 'method="json_schema". Please use method="function_calling" '
+ "or specify schema via JSON Schema or Pydantic V2 BaseModel. "
+ 'Overriding to method="function_calling".'
+ )
+ method = "function_calling"
+ # Check for incompatible model
+ if self.model_name and (
+ self.model_name.startswith("gpt-3")
+ or self.model_name.startswith("gpt-4-")
+ or self.model_name == "gpt-4"
+ ):
+ warnings.warn(
+ f"Cannot use method='json_schema' with model {self.model_name} "
+ f"since it doesn't support OpenAI's Structured Output API. You can "
+ f"see supported models here: "
+ f"https://platform.openai.com/docs/guides/structured-outputs#supported-models. " # noqa: E501
+ "To fix this warning, set `method='function_calling'`. "
+ "Overriding to method='function_calling'."
+ )
+ method = "function_calling"
+
+ if method == "function_calling":
+ if schema is None:
+ raise ValueError(
+ "schema must be specified when method is not 'json_mode'. "
+ "Received None."
+ )
+ tool_name = convert_to_openai_tool(schema)["function"]["name"]
+ bind_kwargs = self._filter_disabled_params(
+ **{
+ **dict(
+ tool_choice=tool_name,
+ parallel_tool_calls=False,
+ strict=strict,
+ ls_structured_output_format={
+ "kwargs": {"method": method, "strict": strict},
+ "schema": schema,
+ },
+ ),
+ **kwargs,
+ }
+ )
+
+ llm = self.bind_tools([schema], **bind_kwargs)
+ if is_pydantic_schema:
+ output_parser: Runnable = PydanticToolsParser(
+ tools=[schema], # type: ignore[list-item]
+ first_tool_only=True, # type: ignore[list-item]
+ )
+ else:
+ output_parser = JsonOutputKeyToolsParser(
+ key_name=tool_name, first_tool_only=True
+ )
+ elif method == "json_mode":
+ llm = self.bind(
+ **{
+ **dict(
+ response_format={"type": "json_object"},
+ ls_structured_output_format={
+ "kwargs": {"method": method},
+ "schema": schema,
+ },
+ ),
+ **kwargs,
+ }
+ )
+ output_parser = (
+ PydanticOutputParser(pydantic_object=schema) # type: ignore[arg-type]
+ if is_pydantic_schema
+ else JsonOutputParser()
+ )
+ elif method == "json_schema":
+ if schema is None:
+ raise ValueError(
+ "schema must be specified when method is not 'json_mode'. "
+ "Received None."
+ )
+ response_format = _convert_to_openai_response_format(schema, strict=strict)
+ bind_kwargs = {
+ **dict(
+ response_format=response_format,
+ ls_structured_output_format={
+ "kwargs": {"method": method, "strict": strict},
+ "schema": convert_to_openai_tool(schema),
+ },
+ **kwargs,
+ )
+ }
+ if tools:
+ bind_kwargs["tools"] = [
+ convert_to_openai_tool(t, strict=strict) for t in tools
+ ]
+ llm = self.bind(**bind_kwargs)
+ if is_pydantic_schema:
+ output_parser = RunnableLambda(
+ partial(_oai_structured_outputs_parser, schema=cast(type, schema))
+ ).with_types(output_type=cast(type, schema))
+ else:
+ output_parser = JsonOutputParser()
+ else:
+ raise ValueError(
+ f"Unrecognized method argument. Expected one of 'function_calling', "
+ f"'json_schema', or 'json_mode'. Received: '{method}'"
+ )
+
+ if include_raw:
+ parser_assign = RunnablePassthrough.assign(
+ parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
+ )
+ parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
+ parser_with_fallback = parser_assign.with_fallbacks(
+ [parser_none], exception_key="parsing_error"
+ )
+ return RunnableMap(raw=llm) | parser_with_fallback
+ else:
+ return llm | output_parser
+
+ def _filter_disabled_params(self, **kwargs: Any) -> dict[str, Any]:
+ if not self.disabled_params:
+ return kwargs
+ filtered = {}
+ for k, v in kwargs.items():
+ # Skip param
+ if k in self.disabled_params and (
+ self.disabled_params[k] is None or v in self.disabled_params[k]
+ ):
+ continue
+ # Keep param
+ else:
+ filtered[k] = v
+ return filtered
+
+ def _get_message_chunk_from_completion(
+ self, completion: openai.BaseModel
+ ) -> AIMessageChunkV1:
+ """Get chunk from completion (e.g., from final completion of a stream)."""
+ ai_message = self._create_ai_message(completion)
+ return AIMessageChunkV1(
+ content="",
+ usage_metadata=ai_message.usage_metadata,
+ response_metadata=ai_message.response_metadata,
+ parsed=ai_message.parsed,
+ )
+
+
+class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
+ """OpenAI chat model integration.
+
+ .. dropdown:: Setup
+ :open:
+
+ Install ``langchain-openai`` and set environment variable ``OPENAI_API_KEY``.
+
+ .. code-block:: bash
+
+ pip install -U langchain-openai
+ export OPENAI_API_KEY="your-api-key"
+
+ .. dropdown:: Key init args — completion params
+
+ model: str
+ Name of OpenAI model to use.
+ temperature: float
+ Sampling temperature.
+ max_tokens: Optional[int]
+ Max number of tokens to generate.
+ logprobs: Optional[bool]
+ Whether to return logprobs.
+ stream_options: Dict
+ Configure streaming outputs, like whether to return token usage when
+ streaming (``{"include_usage": True}``).
+ use_responses_api: Optional[bool]
+ Whether to use the responses API.
+
+ See full list of supported init args and their descriptions in the params section.
+
+ .. dropdown:: Key init args — client params
+
+ timeout: Union[float, Tuple[float, float], Any, None]
+ Timeout for requests.
+ max_retries: Optional[int]
+ Max number of retries.
+ api_key: Optional[str]
+ OpenAI API key. If not passed in will be read from env var ``OPENAI_API_KEY``.
+ base_url: Optional[str]
+ Base URL for API requests. Only specify if using a proxy or service
+ emulator.
+ organization: Optional[str]
+ OpenAI organization ID. If not passed in will be read from env
+ var ``OPENAI_ORG_ID``.
+
+ See full list of supported init args and their descriptions in the params section.
+
+ .. dropdown:: Instantiate
+
+ .. code-block:: python
+
+ from langchain_openai.v1 import ChatOpenAI
+
+ llm = ChatOpenAI(
+ model="gpt-4o",
+ temperature=0,
+ max_tokens=None,
+ timeout=None,
+ max_retries=2,
+ # api_key="...",
+ # base_url="...",
+ # organization="...",
+ # other params...
+ )
+
+ **NOTE**: Any param which is not explicitly supported will be passed directly to the
+ ``openai.OpenAI.chat.completions.create(...)`` API every time the model is
+ invoked. For example:
+
+ .. code-block:: python
+
+ from langchain_openai.v1 import ChatOpenAI
+ import openai
+
+ ChatOpenAI(..., frequency_penalty=0.2).invoke(...)
+
+ # results in underlying API call of:
+
+ openai.OpenAI(..).chat.completions.create(..., frequency_penalty=0.2)
+
+ # which is also equivalent to:
+
+ ChatOpenAI(...).invoke(..., frequency_penalty=0.2)
+
+ .. dropdown:: Invoke
+
+ .. code-block:: python
+
+ messages = [
+ (
+ "system",
+ "You are a helpful translator. Translate the user sentence to French.",
+ ),
+ ("human", "I love programming."),
+ ]
+ llm.invoke(messages)
+
+ .. code-block:: pycon
+
+ AIMessage(
+ content="J'adore la programmation.",
+ response_metadata={
+ "token_usage": {
+ "completion_tokens": 5,
+ "prompt_tokens": 31,
+ "total_tokens": 36,
+ },
+ "model_name": "gpt-4o",
+ "system_fingerprint": "fp_43dfabdef1",
+ "finish_reason": "stop",
+ "logprobs": None,
+ },
+ id="run-012cffe2-5d3d-424d-83b5-51c6d4a593d1-0",
+ usage_metadata={"input_tokens": 31, "output_tokens": 5, "total_tokens": 36},
+ )
+
+ .. dropdown:: Stream
+
+ .. code-block:: python
+
+ for chunk in llm.stream(messages):
+ print(chunk.text, end="")
+
+ .. code-block:: python
+
+ AIMessageChunk(content="", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
+ AIMessageChunk(content="J", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
+ AIMessageChunk(
+ content="'adore", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0"
+ )
+ AIMessageChunk(content=" la", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
+ AIMessageChunk(
+ content=" programmation", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0"
+ )
+ AIMessageChunk(content=".", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
+ AIMessageChunk(
+ content="",
+ response_metadata={"finish_reason": "stop"},
+ id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0",
+ )
+
+ .. code-block:: python
+
+ stream = llm.stream(messages)
+ full = next(stream)
+ for chunk in stream:
+ full += chunk
+ full
+
+ .. 
code-block:: python + + AIMessageChunk( + content="J'adore la programmation.", + response_metadata={"finish_reason": "stop"}, + id="run-bf917526-7f58-4683-84f7-36a6b671d140", + ) + + .. dropdown:: Async + + .. code-block:: python + + await llm.ainvoke(messages) + + # stream: + # async for chunk in (await llm.astream(messages)) + + # batch: + # await llm.abatch([messages]) + + .. code-block:: python + + AIMessage( + content="J'adore la programmation.", + response_metadata={ + "token_usage": { + "completion_tokens": 5, + "prompt_tokens": 31, + "total_tokens": 36, + }, + "model_name": "gpt-4o", + "system_fingerprint": "fp_43dfabdef1", + "finish_reason": "stop", + "logprobs": None, + }, + id="run-012cffe2-5d3d-424d-83b5-51c6d4a593d1-0", + usage_metadata={ + "input_tokens": 31, + "output_tokens": 5, + "total_tokens": 36, + }, + ) + + .. dropdown:: Tool calling + + .. code-block:: python + + from pydantic import BaseModel, Field + + + class GetWeather(BaseModel): + '''Get the current weather in a given location''' + + location: str = Field( + ..., description="The city and state, e.g. San Francisco, CA" + ) + + + class GetPopulation(BaseModel): + '''Get the current population in a given location''' + + location: str = Field( + ..., description="The city and state, e.g. San Francisco, CA" + ) + + + llm_with_tools = llm.bind_tools( + [GetWeather, GetPopulation] + # strict = True # enforce tool args schema is respected + ) + ai_msg = llm_with_tools.invoke( + "Which city is hotter today and which is bigger: LA or NY?" + ) + ai_msg.tool_calls + + .. code-block:: python + + [ + { + "name": "GetWeather", + "args": {"location": "Los Angeles, CA"}, + "id": "call_6XswGD5Pqk8Tt5atYr7tfenU", + }, + { + "name": "GetWeather", + "args": {"location": "New York, NY"}, + "id": "call_ZVL15vA8Y7kXqOy3dtmQgeCi", + }, + { + "name": "GetPopulation", + "args": {"location": "Los Angeles, CA"}, + "id": "call_49CFW8zqC9W7mh7hbMLSIrXw", + }, + { + "name": "GetPopulation", + "args": {"location": "New York, NY"}, + "id": "call_6ghfKxV264jEfe1mRIkS3PE7", + }, + ] + + Note that ``openai >= 1.32`` supports a ``parallel_tool_calls`` parameter + that defaults to ``True``. This parameter can be set to ``False`` to + disable parallel tool calls: + + .. code-block:: python + + ai_msg = llm_with_tools.invoke( + "What is the weather in LA and NY?", parallel_tool_calls=False + ) + ai_msg.tool_calls + + .. code-block:: python + + [ + { + "name": "GetWeather", + "args": {"location": "Los Angeles, CA"}, + "id": "call_4OoY0ZR99iEvC7fevsH8Uhtz", + } + ] + + Like other runtime parameters, ``parallel_tool_calls`` can be bound to a model + using ``llm.bind(parallel_tool_calls=False)`` or during instantiation by + setting ``model_kwargs``. + + See ``ChatOpenAI.bind_tools()`` method for more. + + .. dropdown:: Built-in tools + + .. versionadded:: 0.3.9 + + You can access `built-in tools `_ + supported by the OpenAI Responses API. See LangChain + `docs `_ for more + detail. + + .. code-block:: python + + from langchain_openai.v1 import ChatOpenAI + + llm = ChatOpenAI(model="gpt-4.1-mini") + + tool = {"type": "web_search_preview"} + llm_with_tools = llm.bind_tools([tool]) + + response = llm_with_tools.invoke( + "What was a positive news story from today?" + ) + response.content + + .. code-block:: python + + [ + { + "type": "text", + "text": "Today, a heartwarming story emerged from ...", + "annotations": [ + { + "end_index": 778, + "start_index": 682, + "title": "Title of story", + "type": "url_citation", + "url": "", + } + ], + } + ] + + .. 
dropdown:: Managing conversation state
+
+ .. versionadded:: 0.3.9
+
+ OpenAI's Responses API supports management of
+ `conversation state <https://platform.openai.com/docs/guides/conversation-state>`_.
+ Passing in response IDs from previous messages will continue a conversational
+ thread. See LangChain
+ `docs <https://python.langchain.com/docs/integrations/chat/openai/>`_ for more
+ detail.
+
+ .. code-block:: python
+
+ from langchain_openai.v1 import ChatOpenAI
+
+ llm = ChatOpenAI(model="gpt-4.1-mini", use_responses_api=True)
+ response = llm.invoke("Hi, I'm Bob.")
+ response.text
+
+ .. code-block:: python
+
+ "Hi Bob! How can I assist you today?"
+
+ .. code-block:: python
+
+ second_response = llm.invoke(
+ "What is my name?",
+ previous_response_id=response.response_metadata["id"],
+ )
+ second_response.text
+
+ .. code-block:: python
+
+ "Your name is Bob. How can I help you today, Bob?"
+
+ .. versionadded:: 0.3.26
+
+ You can also initialize ChatOpenAI with :attr:`use_previous_response_id`.
+ Input messages up to the most recent response will then be dropped from request
+ payloads, and ``previous_response_id`` will be set using the ID of the most
+ recent response.
+
+ .. code-block:: python
+
+ llm = ChatOpenAI(model="gpt-4.1-mini", use_previous_response_id=True)
+
+ .. dropdown:: Reasoning output
+
+ OpenAI's Responses API supports `reasoning models <https://platform.openai.com/docs/guides/reasoning>`_
+ that expose a summary of internal reasoning processes.
+
+ .. code-block:: python
+
+ from langchain_openai.v1 import ChatOpenAI
+
+ reasoning = {
+ "effort": "medium", # 'low', 'medium', or 'high'
+ "summary": "auto", # 'detailed', 'auto', or None
+ }
+
+ llm = ChatOpenAI(model="o4-mini", reasoning=reasoning)
+ response = llm.invoke("What is 3^3?")
+
+ # Response text
+ print(f"Output: {response.text}")
+
+ # Reasoning summaries
+ for block in response.content:
+ if block["type"] == "reasoning":
+ for summary in block["summary"]:
+ print(summary["text"])
+
+ .. code-block:: none
+
+ Output: 3³ = 27
+ Reasoning: The user wants to know...
+
+ .. dropdown:: Structured output
+
+ .. code-block:: python
+
+ from typing import Optional
+
+ from pydantic import BaseModel, Field
+
+
+ class Joke(BaseModel):
+ '''Joke to tell user.'''
+
+ setup: str = Field(description="The setup of the joke")
+ punchline: str = Field(description="The punchline to the joke")
+ rating: Optional[int] = Field(
+ description="How funny the joke is, from 1 to 10"
+ )
+
+
+ structured_llm = llm.with_structured_output(Joke)
+ structured_llm.invoke("Tell me a joke about cats")
+
+ .. code-block:: python
+
+ Joke(
+ setup="Why was the cat sitting on the computer?",
+ punchline="To keep an eye on the mouse!",
+ rating=None,
+ )
+
+ See ``ChatOpenAI.with_structured_output()`` for more.
+
+ .. dropdown:: JSON mode
+
+ .. code-block:: python
+
+ json_llm = llm.bind(response_format={"type": "json_object"})
+ ai_msg = json_llm.invoke(
+ "Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]"
+ )
+ ai_msg.content
+
+ .. code-block:: python
+
+ '\\n{\\n "random_ints": [23, 87, 45, 12, 78, 34, 56, 90, 11, 67]\\n}'
+
+ .. dropdown:: Image input
+
+ .. 
code-block:: python + + import base64 + import httpx + from langchain_core.messages import HumanMessage + + image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8") + message = HumanMessage( + content=[ + {"type": "text", "text": "describe the weather in this image"}, + { + "type": "image_url", + "image_url": {"url": f"data:image/jpeg;base64,{image_data}"}, + }, + ] + ) + ai_msg = llm.invoke([message]) + ai_msg.content + + .. code-block:: python + + "The weather in the image appears to be clear and pleasant. The sky is mostly blue with scattered, light clouds, suggesting a sunny day with minimal cloud cover. There is no indication of rain or strong winds, and the overall scene looks bright and calm. The lush green grass and clear visibility further indicate good weather conditions." + + .. dropdown:: Token usage + + .. code-block:: python + + ai_msg = llm.invoke(messages) + ai_msg.usage_metadata + + .. code-block:: python + + {"input_tokens": 28, "output_tokens": 5, "total_tokens": 33} + + When streaming, set the ``stream_usage`` kwarg: + + .. code-block:: python + + stream = llm.stream(messages, stream_usage=True) + full = next(stream) + for chunk in stream: + full += chunk + full.usage_metadata + + .. code-block:: python + + {"input_tokens": 28, "output_tokens": 5, "total_tokens": 33} + + Alternatively, setting ``stream_usage`` when instantiating the model can be + useful when incorporating ``ChatOpenAI`` into LCEL chains-- or when using + methods like ``.with_structured_output``, which generate chains under the + hood. + + .. code-block:: python + + llm = ChatOpenAI(model="gpt-4o", stream_usage=True) + structured_llm = llm.with_structured_output(...) + + .. dropdown:: Logprobs + + .. code-block:: python + + logprobs_llm = llm.bind(logprobs=True) + ai_msg = logprobs_llm.invoke(messages) + ai_msg.response_metadata["logprobs"] + + .. code-block:: python + + { + "content": [ + { + "token": "J", + "bytes": [74], + "logprob": -4.9617593e-06, + "top_logprobs": [], + }, + { + "token": "'adore", + "bytes": [39, 97, 100, 111, 114, 101], + "logprob": -0.25202933, + "top_logprobs": [], + }, + { + "token": " la", + "bytes": [32, 108, 97], + "logprob": -0.20141791, + "top_logprobs": [], + }, + { + "token": " programmation", + "bytes": [ + 32, + 112, + 114, + 111, + 103, + 114, + 97, + 109, + 109, + 97, + 116, + 105, + 111, + 110, + ], + "logprob": -1.9361265e-07, + "top_logprobs": [], + }, + { + "token": ".", + "bytes": [46], + "logprob": -1.2233183e-05, + "top_logprobs": [], + }, + ] + } + + .. dropdown:: Response metadata + + .. code-block:: python + + ai_msg = llm.invoke(messages) + ai_msg.response_metadata + + .. code-block:: python + + { + "token_usage": { + "completion_tokens": 5, + "prompt_tokens": 28, + "total_tokens": 33, + }, + "model_name": "gpt-4o", + "system_fingerprint": "fp_319be4768e", + "finish_reason": "stop", + "logprobs": None, + } + + .. dropdown:: Flex processing + + OpenAI offers a variety of + `service tiers `_. + The "flex" tier offers cheaper pricing for requests, with the trade-off that + responses may take longer and resources might not always be available. + This approach is best suited for non-critical tasks, including model testing, + data enhancement, or jobs that can be run asynchronously. + + To use it, initialize the model with ``service_tier="flex"``: + + .. 
code-block:: python
+
+ from langchain_openai.v1 import ChatOpenAI
+
+ llm = ChatOpenAI(model="o4-mini", service_tier="flex")
+
+ Note that this is a beta feature that is only available for a subset of models.
+ See OpenAI `docs <https://platform.openai.com/docs/guides/flex-processing>`_
+ for more detail.
+
+ """ # noqa: E501
+
+ max_tokens: Optional[int] = Field(default=None, alias="max_completion_tokens")
+ """Maximum number of tokens to generate."""
+
+ @property
+ def lc_secrets(self) -> dict[str, str]:
+ return {"openai_api_key": "OPENAI_API_KEY"}
+
+ @classmethod
+ def get_lc_namespace(cls) -> list[str]:
+ """Get the namespace of the langchain object."""
+ return ["langchain", "chat_models", "openai"]
+
+ @property
+ def lc_attributes(self) -> dict[str, Any]:
+ attributes: dict[str, Any] = {}
+
+ if self.openai_organization:
+ attributes["openai_organization"] = self.openai_organization
+
+ if self.openai_api_base:
+ attributes["openai_api_base"] = self.openai_api_base
+
+ if self.openai_proxy:
+ attributes["openai_proxy"] = self.openai_proxy
+
+ return attributes
+
+ @classmethod
+ def is_lc_serializable(cls) -> bool:
+ """Return whether this model can be serialized by Langchain."""
+ return True
+
+ @property
+ def _default_params(self) -> dict[str, Any]:
+ """Get the default parameters for calling OpenAI API."""
+ params = super()._default_params
+ if "max_tokens" in params:
+ params["max_completion_tokens"] = params.pop("max_tokens")
+
+ return params
+
+ def _get_request_payload(
+ self,
+ input_: LanguageModelInput,
+ *,
+ stop: Optional[list[str]] = None,
+ **kwargs: Any,
+ ) -> dict:
+ payload = super()._get_request_payload(input_, stop=stop, **kwargs)
+ # max_tokens was deprecated in favor of max_completion_tokens
+ # in September 2024 release
+ if "max_tokens" in payload:
+ payload["max_completion_tokens"] = payload.pop("max_tokens")
+
+ # Mutate system message role to "developer" for o-series models
+ if self.model_name and re.match(r"^o\d", self.model_name):
+ for message in payload.get("messages", []):
+ if message["role"] == "system":
+ message["role"] = "developer"
+ return payload
+
+ def _stream(self, *args: Any, **kwargs: Any) -> Iterator[AIMessageChunkV1]:
+ """Route to Chat Completions or Responses API."""
+ if self._use_responses_api({**kwargs, **self.model_kwargs}):
+ return super()._stream_responses(*args, **kwargs)
+ else:
+ return super()._stream(*args, **kwargs)
+
+ async def _astream(
+ self, *args: Any, **kwargs: Any
+ ) -> AsyncIterator[AIMessageChunkV1]:
+ """Route to Chat Completions or Responses API."""
+ if self._use_responses_api({**kwargs, **self.model_kwargs}):
+ async for chunk in super()._astream_responses(*args, **kwargs):
+ yield chunk
+ else:
+ async for chunk in super()._astream(*args, **kwargs):
+ yield chunk
+
+ def with_structured_output(
+ self,
+ schema: Optional[_DictOrPydanticClass] = None,
+ *,
+ method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema",
+ include_raw: bool = False,
+ strict: Optional[bool] = None,
+ **kwargs: Any,
+ ) -> Runnable[LanguageModelInput, _DictOrPydantic]:
+ """Model wrapper that returns outputs formatted to match the given schema.
+
+ Args:
+ schema:
+ The output schema. Can be passed in as:
+
+ - a JSON Schema,
+ - a TypedDict class,
+ - a Pydantic class,
+ - or an OpenAI function/tool schema.
+
+ If ``schema`` is a Pydantic class then the model output will be a
+ Pydantic instance of that class, and the model-generated fields will be
+ validated by the Pydantic class. 
Otherwise the model output will be a
+ dict and will not be validated. See :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`
+ for more on how to properly specify types and descriptions of
+ schema fields when specifying a Pydantic or TypedDict class.
+
+ method: The method for steering model generation, one of:
+
+ - "json_schema":
+ Uses OpenAI's Structured Output API:
+ https://platform.openai.com/docs/guides/structured-outputs
+ Supported for "gpt-4o-mini", "gpt-4o-2024-08-06", "o1", and later
+ models.
+ - "function_calling":
+ Uses OpenAI's tool-calling (formerly called function calling)
+ API: https://platform.openai.com/docs/guides/function-calling
+ - "json_mode":
+ Uses OpenAI's JSON mode. Note that if using JSON mode then you
+ must include instructions for formatting the output into the
+ desired schema into the model call:
+ https://platform.openai.com/docs/guides/structured-outputs/json-mode
+
+ Learn more about the differences between the methods and which models
+ support which methods here:
+
+ - https://platform.openai.com/docs/guides/structured-outputs/structured-outputs-vs-json-mode
+ - https://platform.openai.com/docs/guides/structured-outputs/function-calling-vs-response-format
+
+ include_raw:
+ If False then only the parsed structured output is returned. If
+ an error occurs during model output parsing it will be raised. If True
+ then both the raw model response (an AIMessage) and the parsed model
+ response will be returned. If an error occurs during output parsing it
+ will be caught and returned as well. The final output is always a dict
+ with keys "raw", "parsed", and "parsing_error".
+ strict:
+
+ - True:
+ Model output is guaranteed to exactly match the schema.
+ The input schema will also be validated according to
+ https://platform.openai.com/docs/guides/structured-outputs/supported-schemas
+ - False:
+ Input schema will not be validated and model output will not be
+ validated.
+ - None:
+ ``strict`` argument will not be passed to the model.
+
+ If schema is specified via TypedDict or JSON schema, ``strict`` is not
+ enabled by default. Pass ``strict=True`` to enable it.
+
+ Note: ``strict`` can only be non-null if ``method`` is
+ ``"json_schema"`` or ``"function_calling"``.
+ tools:
+ A list of tool-like objects to bind to the chat model. Requires that:
+
+ - ``method`` is ``"json_schema"`` (default).
+ - ``strict=True``
+ - ``include_raw=True``
+
+ If a model elects to call a
+ tool, the resulting ``AIMessage`` in ``"raw"`` will include tool calls.
+
+ .. dropdown:: Example
+
+ .. code-block:: python
+
+ from langchain.chat_models import init_chat_model
+ from pydantic import BaseModel
+
+
+ class ResponseSchema(BaseModel):
+ response: str
+
+
+ def get_weather(location: str) -> str:
+ \"\"\"Get weather at a location.\"\"\"
+ pass
+
+ llm = init_chat_model("openai:gpt-4o-mini")
+
+ structured_llm = llm.with_structured_output(
+ ResponseSchema,
+ tools=[get_weather],
+ strict=True,
+ include_raw=True,
+ )
+
+ structured_llm.invoke("What's the weather in Boston?")
+
+ .. code-block:: python
+
+ {
+ "raw": AIMessage(content="", tool_calls=[...], ...),
+ "parsing_error": None,
+ "parsed": None,
+ }
+
+ kwargs: Additional keyword args are passed through to the model.
+
+ Returns:
+ A Runnable that takes the same inputs as a :class:`~langchain_core.v1.chat_models.BaseChatModel`.
+
+ | If ``include_raw`` is False and ``schema`` is a Pydantic class, Runnable outputs an instance of ``schema`` (i.e., a Pydantic object). 
Otherwise, if ``include_raw`` is False then Runnable outputs a dict. + + | If ``include_raw`` is True, then Runnable outputs a dict with keys: + + - "raw": AIMessage + - "parsed": None if there was a parsing error, otherwise the type depends on the ``schema`` as described above. + - "parsing_error": Optional[BaseException] + + .. versionchanged:: 0.1.20 + + Added support for TypedDict class ``schema``. + + .. versionchanged:: 0.1.21 + + Support for ``strict`` argument added. + Support for ``method="json_schema"`` added. + + .. versionchanged:: 0.3.0 + + ``method`` default changed from "function_calling" to "json_schema". + + .. versionchanged:: 0.3.12 + Support for ``tools`` added. + + .. versionchanged:: 0.3.21 + Pass ``kwargs`` through to the model. + + .. dropdown:: Example: schema=Pydantic class, method="json_schema", include_raw=False, strict=True + + Note, OpenAI has a number of restrictions on what types of schemas can be + provided if ``strict`` = True. When using Pydantic, our model cannot + specify any Field metadata (like min/max constraints) and fields cannot + have default values. + + See all constraints here: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas + + .. code-block:: python + + from typing import Optional + + from langchain_openai.v1 import ChatOpenAI + from pydantic import BaseModel, Field + + + class AnswerWithJustification(BaseModel): + '''An answer to the user question along with justification for the answer.''' + + answer: str + justification: Optional[str] = Field( + default=..., description="A justification for the answer." + ) + + + llm = ChatOpenAI(model="gpt-4o", temperature=0) + structured_llm = llm.with_structured_output(AnswerWithJustification) + + structured_llm.invoke( + "What weighs more a pound of bricks or a pound of feathers" + ) + + # -> AnswerWithJustification( + # answer='They weigh the same', + # justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.' + # ) + + .. dropdown:: Example: schema=Pydantic class, method="function_calling", include_raw=False, strict=False + + .. code-block:: python + + from typing import Optional + + from langchain_openai.v1 import ChatOpenAI + from pydantic import BaseModel, Field + + + class AnswerWithJustification(BaseModel): + '''An answer to the user question along with justification for the answer.''' + + answer: str + justification: Optional[str] = Field( + default=..., description="A justification for the answer." + ) + + + llm = ChatOpenAI(model="gpt-4o", temperature=0) + structured_llm = llm.with_structured_output( + AnswerWithJustification, method="function_calling" + ) + + structured_llm.invoke( + "What weighs more a pound of bricks or a pound of feathers" + ) + + # -> AnswerWithJustification( + # answer='They weigh the same', + # justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.' + # ) + + .. dropdown:: Example: schema=Pydantic class, method="json_schema", include_raw=True + + .. 
code-block:: python + + from langchain_openai.v1 import ChatOpenAI + from pydantic import BaseModel + + + class AnswerWithJustification(BaseModel): + '''An answer to the user question along with justification for the answer.''' + + answer: str + justification: str + + + llm = ChatOpenAI(model="gpt-4o", temperature=0) + structured_llm = llm.with_structured_output( + AnswerWithJustification, include_raw=True + ) + + structured_llm.invoke( + "What weighs more a pound of bricks or a pound of feathers" + ) + # -> { + # 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}), + # 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'), + # 'parsing_error': None + # } + + .. dropdown:: Example: schema=TypedDict class, method="json_schema", include_raw=False, strict=False + + .. code-block:: python + + # IMPORTANT: If you are using Python <=3.8, you need to import Annotated + # from typing_extensions, not from typing. + from typing_extensions import Annotated, TypedDict + + from langchain_openai.v1 import ChatOpenAI + + + class AnswerWithJustification(TypedDict): + '''An answer to the user question along with justification for the answer.''' + + answer: str + justification: Annotated[ + Optional[str], None, "A justification for the answer." + ] + + + llm = ChatOpenAI(model="gpt-4o", temperature=0) + structured_llm = llm.with_structured_output(AnswerWithJustification) + + structured_llm.invoke( + "What weighs more a pound of bricks or a pound of feathers" + ) + # -> { + # 'answer': 'They weigh the same', + # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.' + # } + + .. dropdown:: Example: schema=OpenAI function schema, method="json_schema", include_raw=False + + .. code-block:: python + + from langchain_openai.v1 import ChatOpenAI + + oai_schema = { + 'name': 'AnswerWithJustification', + 'description': 'An answer to the user question along with justification for the answer.', + 'parameters': { + 'type': 'object', + 'properties': { + 'answer': {'type': 'string'}, + 'justification': {'description': 'A justification for the answer.', 'type': 'string'} + }, + 'required': ['answer'] + } + } + + llm = ChatOpenAI(model="gpt-4o", temperature=0) + structured_llm = llm.with_structured_output(oai_schema) + + structured_llm.invoke( + "What weighs more a pound of bricks or a pound of feathers" + ) + # -> { + # 'answer': 'They weigh the same', + # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.' + # } + + .. dropdown:: Example: schema=Pydantic class, method="json_mode", include_raw=True + + .. 
code-block:: + + from langchain_openai.v1 import ChatOpenAI + from pydantic import BaseModel + + class AnswerWithJustification(BaseModel): + answer: str + justification: str + + llm = ChatOpenAI(model="gpt-4o", temperature=0) + structured_llm = llm.with_structured_output( + AnswerWithJustification, + method="json_mode", + include_raw=True + ) + + structured_llm.invoke( + "Answer the following question. " + "Make sure to return a JSON blob with keys 'answer' and 'justification'.\\n\\n" + "What's heavier a pound of bricks or a pound of feathers?" + ) + # -> { + # 'raw': AIMessage(content='{\\n "answer": "They are both the same weight.",\\n "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \\n}'), + # 'parsed': AnswerWithJustification(answer='They are both the same weight.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'), + # 'parsing_error': None + # } + + .. dropdown:: Example: schema=None, method="json_mode", include_raw=True + + .. code-block:: + + structured_llm = llm.with_structured_output(method="json_mode", include_raw=True) + + structured_llm.invoke( + "Answer the following question. " + "Make sure to return a JSON blob with keys 'answer' and 'justification'.\\n\\n" + "What's heavier a pound of bricks or a pound of feathers?" + ) + # -> { + # 'raw': AIMessage(content='{\\n "answer": "They are both the same weight.",\\n "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \\n}'), + # 'parsed': { + # 'answer': 'They are both the same weight.', + # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.' + # }, + # 'parsing_error': None + # } + """ # noqa: E501 + return super().with_structured_output( + schema, method=method, include_raw=include_raw, strict=strict, **kwargs + ) + + +def _is_pydantic_class(obj: Any) -> bool: + return isinstance(obj, type) and is_basemodel_subclass(obj) + + +def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict: + return { + "type": "function", + "id": tool_call["id"], + "function": { + "name": tool_call["name"], + "arguments": json.dumps(tool_call["args"], ensure_ascii=False), + }, + } + + +def _lc_invalid_tool_call_to_openai_tool_call( + invalid_tool_call: InvalidToolCall, +) -> dict: + return { + "type": "function", + "id": invalid_tool_call["id"], + "function": { + "name": invalid_tool_call["name"], + "arguments": invalid_tool_call["args"], + }, + } + + +def _url_to_size(image_source: str) -> Optional[tuple[int, int]]: + try: + from PIL import Image # type: ignore[import] + except ImportError: + logger.info( + "Unable to count image tokens. To count image tokens please install " + "`pip install -U pillow httpx`." + ) + return None + if _is_url(image_source): + try: + import httpx + except ImportError: + logger.info( + "Unable to count image tokens. To count image tokens please install " + "`pip install -U httpx`." 
+ )
+ return None
+ response = httpx.get(image_source)
+ response.raise_for_status()
+ width, height = Image.open(BytesIO(response.content)).size
+ return width, height
+ elif _is_b64(image_source):
+ _, encoded = image_source.split(",", 1)
+ data = base64.b64decode(encoded)
+ width, height = Image.open(BytesIO(data)).size
+ return width, height
+ else:
+ return None
+
+
+def _count_image_tokens(width: int, height: int) -> int:
+ # Reference: https://platform.openai.com/docs/guides/vision/calculating-costs
+ width, height = _resize(width, height)
+ h = ceil(height / 512)
+ w = ceil(width / 512)
+ return (170 * h * w) + 85
+
+
+def _is_url(s: str) -> bool:
+ try:
+ result = urlparse(s)
+ return all([result.scheme, result.netloc])
+ except Exception as e:
+ logger.debug(f"Unable to parse URL: {e}")
+ return False
+
+
+def _is_b64(s: str) -> bool:
+ return s.startswith("data:image")
+
+
+def _resize(width: int, height: int) -> tuple[int, int]:
+ # larger side must be <= 2048
+ if width > 2048 or height > 2048:
+ if width > height:
+ height = (height * 2048) // width
+ width = 2048
+ else:
+ width = (width * 2048) // height
+ height = 2048
+ # smaller side must be <= 768
+ if width > 768 and height > 768:
+ if width > height:
+ width = (width * 768) // height
+ height = 768
+ else:
+ height = (height * 768) // width
+ width = 768
+ return width, height
+
+
+def _convert_to_openai_response_format(
+ schema: Union[dict[str, Any], type], *, strict: Optional[bool] = None
+) -> Union[dict, TypeBaseModel]:
+ if isinstance(schema, type) and is_basemodel_subclass(schema):
+ return schema
+
+ if (
+ isinstance(schema, dict)
+ and "json_schema" in schema
+ and schema.get("type") == "json_schema"
+ ):
+ response_format = schema
+ elif isinstance(schema, dict) and "name" in schema and "schema" in schema:
+ response_format = {"type": "json_schema", "json_schema": schema}
+ else:
+ if strict is None:
+ if isinstance(schema, dict) and isinstance(schema.get("strict"), bool):
+ strict = schema["strict"]
+ else:
+ strict = False
+ function = convert_to_openai_function(schema, strict=strict)
+ function["schema"] = function.pop("parameters")
+ response_format = {"type": "json_schema", "json_schema": function}
+
+ if (
+ strict is not None
+ and strict is not response_format["json_schema"].get("strict")
+ and isinstance(schema, dict)
+ ):
+ msg = (
+ f"Output schema already has 'strict' value set to "
+ f"{response_format['json_schema'].get('strict')} but 'strict' also "
+ f"passed in to with_structured_output as {strict}. Please make sure "
+ f"that 'strict' is only specified in one place."
+ )
+ raise ValueError(msg)
+ return response_format
+
+
+def _oai_structured_outputs_parser(
+ ai_msg: AIMessageV1, schema: type[_BM]
+) -> Optional[PydanticBaseModel]:
+ if parsed := ai_msg.parsed:
+ if isinstance(parsed, dict):
+ return schema(**parsed)
+ else:
+ return parsed
+ elif any(
+ block["type"] == "non_standard" and block["value"].get("type") == "refusal"
+ for block in ai_msg.content
+ ):
+ refusal = next(
+ block["value"]["text"]
+ for block in ai_msg.content
+ if block["type"] == "non_standard"
+ and block["value"].get("type") == "refusal"
+ )
+ raise OpenAIRefusalError(refusal)
+ elif ai_msg.tool_calls:
+ return None
+ else:
+ raise ValueError(
+ "Structured Output response does not have a 'parsed' field nor a 'refusal' "
+ f"field. Received message:\n\n{ai_msg}"
+ )
+
+
+class OpenAIRefusalError(Exception):
+ """Error raised when OpenAI Structured Outputs API returns a refusal. 
+
+    When using OpenAI's Structured Outputs API with user-generated input, the model
+    may occasionally refuse to fulfill the request for safety reasons.
+
+    See here for more on refusals:
+    https://platform.openai.com/docs/guides/structured-outputs/refusals
+
+    .. versionadded:: 0.1.21
+    """
+
+
+def _create_usage_metadata(oai_token_usage: dict) -> UsageMetadata:
+    input_tokens = oai_token_usage.get("prompt_tokens") or 0
+    output_tokens = oai_token_usage.get("completion_tokens") or 0
+    total_tokens = oai_token_usage.get("total_tokens") or input_tokens + output_tokens
+    input_token_details: dict = {
+        "audio": (oai_token_usage.get("prompt_tokens_details") or {}).get(
+            "audio_tokens"
+        ),
+        "cache_read": (oai_token_usage.get("prompt_tokens_details") or {}).get(
+            "cached_tokens"
+        ),
+    }
+    output_token_details: dict = {
+        "audio": (oai_token_usage.get("completion_tokens_details") or {}).get(
+            "audio_tokens"
+        ),
+        "reasoning": (oai_token_usage.get("completion_tokens_details") or {}).get(
+            "reasoning_tokens"
+        ),
+    }
+    return UsageMetadata(
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
+        total_tokens=total_tokens,
+        input_token_details=InputTokenDetails(
+            **{k: v for k, v in input_token_details.items() if v is not None}
+        ),
+        output_token_details=OutputTokenDetails(
+            **{k: v for k, v in output_token_details.items() if v is not None}
+        ),
+    )
+
+
+def _create_usage_metadata_responses(oai_token_usage: dict) -> UsageMetadata:
+    input_tokens = oai_token_usage.get("input_tokens", 0)
+    output_tokens = oai_token_usage.get("output_tokens", 0)
+    total_tokens = oai_token_usage.get("total_tokens", input_tokens + output_tokens)
+    output_token_details: dict = {
+        "reasoning": (oai_token_usage.get("output_tokens_details") or {}).get(
+            "reasoning_tokens"
+        )
+    }
+    input_token_details: dict = {
+        "cache_read": (oai_token_usage.get("input_tokens_details") or {}).get(
+            "cached_tokens"
+        )
+    }
+    return UsageMetadata(
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
+        total_tokens=total_tokens,
+        input_token_details=InputTokenDetails(
+            **{k: v for k, v in input_token_details.items() if v is not None}
+        ),
+        output_token_details=OutputTokenDetails(
+            **{k: v for k, v in output_token_details.items() if v is not None}
+        ),
+    )
+
+
+def _is_builtin_tool(tool: dict) -> bool:
+    return "type" in tool and tool["type"] != "function"
+
+
+def _use_responses_api(payload: dict) -> bool:
+    uses_builtin_tools = "tools" in payload and any(
+        _is_builtin_tool(tool) for tool in payload["tools"]
+    )
+    responses_only_args = {
+        "include",
+        "previous_response_id",
+        "reasoning",
+        "text",
+        "truncation",
+    }
+    return bool(uses_builtin_tools or responses_only_args.intersection(payload))
+
+
+def _get_last_messages(
+    messages: Sequence[MessageV1],
+) -> tuple[Sequence[MessageV1], Optional[str]]:
+    """Split the conversation at the most recent Responses API turn.
+
+    Returns a two-tuple of
+
+    1. every message after the most recent AIMessage whose ``id`` starts with
+       ``"resp_"`` (this may be an empty list), and
+    2. that id.
+
+    If no such AIMessage exists (or there is no AIMessage at all), the entire
+    conversation is returned together with ``None``.
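+
+    Example (message objects elided; ids illustrative)::
+
+        msgs = [human_1, ai_msg, human_2]  # ai_msg.id == "resp_abc"
+        _get_last_messages(msgs)  # -> ([human_2], "resp_abc")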
+ """ + for i in range(len(messages) - 1, -1, -1): + msg = messages[i] + if isinstance(msg, AIMessageV1): + response_id = msg.id + if response_id and response_id.startswith("resp_"): + return messages[i + 1 :], response_id + else: + return messages, None + + return messages, None + + +def _construct_responses_api_payload( + messages: Sequence[MessageV1], payload: dict +) -> dict: + # Rename legacy parameters + for legacy_token_param in ["max_tokens", "max_completion_tokens"]: + if legacy_token_param in payload: + payload["max_output_tokens"] = payload.pop(legacy_token_param) + if "reasoning_effort" in payload and "reasoning" not in payload: + payload["reasoning"] = {"effort": payload.pop("reasoning_effort")} + + payload["input"] = _construct_responses_api_input(messages) + if tools := payload.pop("tools", None): + new_tools: list = [] + for tool in tools: + # chat api: {"type": "function", "function": {"name": "...", "description": "...", "parameters": {...}, "strict": ...}} # noqa: E501 + # responses api: {"type": "function", "name": "...", "description": "...", "parameters": {...}, "strict": ...} # noqa: E501 + if tool["type"] == "function" and "function" in tool: + new_tools.append({"type": "function", **tool["function"]}) + else: + if tool["type"] == "image_generation": + # Handle partial images (not yet supported) + if "partial_images" in tool: + raise NotImplementedError( + "Partial image generation is not yet supported " + "via the LangChain ChatOpenAI client. Please " + "drop the 'partial_images' key from the image_generation " + "tool." + ) + elif payload.get("stream") and "partial_images" not in tool: + # OpenAI requires this parameter be set; we ignore it during + # streaming. + tool["partial_images"] = 1 + else: + pass + + new_tools.append(tool) + + payload["tools"] = new_tools + if tool_choice := payload.pop("tool_choice", None): + # chat api: {"type": "function", "function": {"name": "..."}} + # responses api: {"type": "function", "name": "..."} + if ( + isinstance(tool_choice, dict) + and tool_choice["type"] == "function" + and "function" in tool_choice + ): + payload["tool_choice"] = {"type": "function", **tool_choice["function"]} + else: + payload["tool_choice"] = tool_choice + + # Structured output + if schema := payload.pop("response_format", None): + if payload.get("text"): + text = payload["text"] + raise ValueError( + "Can specify at most one of 'response_format' or 'text', received both:" + f"\n{schema=}\n{text=}" + ) + + # For pydantic + non-streaming case, we use responses.parse. + # Otherwise, we use responses.create. 
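+    #
+    # Sketch of the two branches below, using a hypothetical `Move` schema:
+    #
+    #     class Move(BaseModel):
+    #         reply: str
+    #
+    #     # pydantic + non-streaming -> handed to client.responses.parse:
+    #     payload["text_format"] = Move
+    #
+    #     # all other cases -> JSON schema embedded for client.responses.create:
+    #     payload["text"] = {
+    #         "format": {
+    #             "type": "json_schema", "name": "Move", "schema": {...}, "strict": True
+    #         }
+    #     }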
+    strict = payload.pop("strict", None)
+    if not payload.get("stream") and _is_pydantic_class(schema):
+        payload["text_format"] = schema
+    else:
+        if _is_pydantic_class(schema):
+            schema_dict = schema.model_json_schema()
+            strict = True
+        else:
+            schema_dict = schema
+        if schema_dict == {"type": "json_object"}:  # JSON mode
+            payload["text"] = {"format": {"type": "json_object"}}
+        elif (
+            (
+                response_format := _convert_to_openai_response_format(
+                    schema_dict, strict=strict
+                )
+            )
+            and (isinstance(response_format, dict))
+            and (response_format["type"] == "json_schema")
+        ):
+            payload["text"] = {
+                "format": {"type": "json_schema", **response_format["json_schema"]}
+            }
+        else:
+            pass
+    return payload
+
+
+def _make_computer_call_output_from_message(message: ToolMessageV1) -> Optional[dict]:
+    computer_call_output = None
+    for block in message.content:
+        if (
+            block["type"] == "non_standard"
+            and block["value"].get("type") == "computer_call_output"
+        ):
+            computer_call_output = block["value"]
+            break
+
+    return computer_call_output
+
+
+def _pop_index_and_sub_index(block: dict) -> dict:
+    """Remove ``index`` keys before sending a content block to OpenAI.
+
+    When streaming, langchain-core uses the ``index`` key to aggregate content
+    blocks. The OpenAI API does not accept this key, so it is stripped from the
+    block and from any ``summary`` sub-blocks.
+    """
+    new_block = {k: v for k, v in block.items() if k != "index"}
+    if "summary" in new_block and isinstance(new_block["summary"], list):
+        new_summary = []
+        for sub_block in new_block["summary"]:
+            new_sub_block = {k: v for k, v in sub_block.items() if k != "index"}
+            new_summary.append(new_sub_block)
+        new_block["summary"] = new_summary
+    return new_block
+
+
+def _construct_responses_api_input(messages: Sequence[MessageV1]) -> list:
+    """Construct the input for the OpenAI Responses API."""
+    input_ = []
+    for lc_msg in messages:
+        msg = _convert_message_to_dict(lc_msg, responses_api=True)
+        if isinstance(lc_msg, AIMessageV1):
+            msg["content"] = _convert_from_v1_to_responses(
+                msg["content"], lc_msg.tool_calls
+            )
+        else:
+            # Get content from non-standard content blocks
+            for i, block in enumerate(msg["content"]):
+                if block.get("type") == "non_standard":
+                    msg["content"][i] = block["value"]
+
+        # "name" parameter unsupported
+        if "name" in msg:
+            msg.pop("name")
+        if msg["role"] == "tool":
+            tool_output = msg["content"]
+            computer_call_output = _make_computer_call_output_from_message(
+                cast(ToolMessageV1, lc_msg)
+            )
+            if computer_call_output:
+                input_.append(computer_call_output)
+            else:
+                if not isinstance(tool_output, str):
+                    tool_output = _stringify(tool_output)
+                function_call_output = {
+                    "type": "function_call_output",
+                    "output": tool_output,
+                    "call_id": msg["tool_call_id"],
+                }
+                input_.append(function_call_output)
+        elif msg["role"] == "assistant":
+            if isinstance(msg.get("content"), list):
+                for block in msg["content"]:
+                    if isinstance(block, dict) and (block_type := block.get("type")):
+                        # Aggregate content blocks for a single message
+                        if block_type in ("text", "output_text", "refusal"):
+                            msg_id = block.get("id")
+                            if block_type in ("text", "output_text"):
+                                new_block = {
+                                    "type": "output_text",
+                                    "text": block["text"],
+                                    "annotations": block.get("annotations") or [],
+                                }
+                            elif block_type == "refusal":
+                                new_block = {
+                                    "type": "refusal",
+                                    "refusal": block["refusal"],
+                                }
+                            for item in input_:
+                                if (item_id := item.get("id")) and item_id == msg_id:
+                                    # If existing block with this ID, append to it
+                                    if "content" not in item:
+                                        item["content"] = []
+                                    item["content"].append(new_block)
+                                    break
+                            else:
+                                # If no block with this ID, create a new one
+                                input_.append(
+                                    {
+                                        "type": "message",
+                                        "content": [new_block],
+                                        "role": "assistant",
+                                        "id": msg_id,
+                                    }
+                                )
+                        elif block_type in (
+                            "reasoning",
+                            "web_search_call",
+                            "file_search_call",
+                            "function_call",
+                            "computer_call",
+                            "code_interpreter_call",
+                            "mcp_call",
+                            "mcp_list_tools",
+                            "mcp_approval_request",
+                        ):
+                            input_.append(_pop_index_and_sub_index(block))
+                        elif block_type == "image_generation_call":
+                            # A previous image generation call can be referenced by ID
+                            input_.append(
+                                {"type": "image_generation_call", "id": block["id"]}
+                            )
+                        else:
+                            pass
+            elif isinstance(msg.get("content"), str):
+                input_.append(
+                    {
+                        "type": "message",
+                        "role": "assistant",
+                        "content": [{"type": "output_text", "text": msg["content"]}],
+                    }
+                )
+
+            # Add function calls from tool calls if not already present
+            if tool_calls := msg.pop("tool_calls", None):
+                content_call_ids = {
+                    block["call_id"]
+                    for block in input_
+                    if block.get("type") == "function_call" and "call_id" in block
+                }
+                for tool_call in tool_calls:
+                    if tool_call["id"] not in content_call_ids:
+                        function_call = {
+                            "type": "function_call",
+                            "name": tool_call["function"]["name"],
+                            "arguments": tool_call["function"]["arguments"],
+                            "call_id": tool_call["id"],
+                        }
+                        input_.append(function_call)
+
+        elif msg["role"] in ("user", "system", "developer"):
+            if isinstance(msg["content"], list):
+                new_blocks = []
+                non_message_item_types = ("mcp_approval_response",)
+                for block in msg["content"]:
+                    # chat api: {"type": "text", "text": "..."}
+                    # responses api: {"type": "input_text", "text": "..."}
+                    if block["type"] == "text":
+                        new_blocks.append({"type": "input_text", "text": block["text"]})
+                    # chat api: {"type": "image_url", "image_url": {"url": "...", "detail": "..."}}  # noqa: E501
+                    # responses api: {"type": "image_url", "image_url": "...", "detail": "...", "file_id": "..."}  # noqa: E501
+                    elif block["type"] == "image_url":
+                        new_block = {
+                            "type": "input_image",
+                            "image_url": block["image_url"]["url"],
+                        }
+                        if block["image_url"].get("detail"):
+                            new_block["detail"] = block["image_url"]["detail"]
+                        new_blocks.append(new_block)
+                    elif block["type"] == "file":
+                        new_block = {"type": "input_file", **block["file"]}
+                        new_blocks.append(new_block)
+                    elif block["type"] in ("input_text", "input_image", "input_file"):
+                        new_blocks.append(block)
+                    elif block["type"] in non_message_item_types:
+                        input_.append(block)
+                    else:
+                        pass
+                if len(new_blocks) == 1 and new_blocks[0]["type"] == "input_text":
+                    msg["content"] = new_blocks[0]["text"]
+                else:
+                    msg["content"] = new_blocks
+                if msg["content"]:
+                    input_.append(msg)
+            else:
+                input_.append(msg)
+        else:
+            input_.append(msg)
+
+    return input_
+
+
+def _construct_lc_result_from_responses_api(
+    response: Response,
+    schema: Optional[type[_BM]] = None,
+    metadata: Optional[dict] = None,
+) -> AIMessageV1:
+    """Construct an AIMessageV1 from an OpenAI Responses API response."""
+    if response.error:
+        raise ValueError(response.error)
+
+    response_metadata = {
+        k: v
+        for k, v in response.model_dump(exclude_none=True, mode="json").items()
+        if k
+        in (
+            "created_at",
+            # backwards compatibility: keep the response ID in response_metadata
+            # as well as the top-level id
+            "id",
+            "incomplete_details",
+            "metadata",
+            "object",
+            "status",
+            "user",
+            "model",
+            "service_tier",
+        )
+    }
+    if metadata:
+        response_metadata.update(metadata)
+    # for compatibility with chat completion calls.
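+    # e.g. after the two assignments below (values illustrative):
+    #     {"id": "resp_123", "model": "gpt-4o-2024-08-06",
+    #      "model_provider": "openai", "model_name": "gpt-4o-2024-08-06", ...}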
+    response_metadata["model_provider"] = "openai"
+    response_metadata["model_name"] = response_metadata.get("model")
+    if response.usage:
+        usage_metadata = _create_usage_metadata_responses(response.usage.model_dump())
+    else:
+        usage_metadata = None
+
+    content_blocks: list = []
+    tool_calls: list[ToolCall] = []
+    invalid_tool_calls: list[InvalidToolCall] = []
+    parsed = None
+    for output in response.output:
+        if output.type == "message":
+            for content in output.content:
+                if content.type == "output_text":
+                    block = {
+                        "type": "text",
+                        "text": content.text,
+                        "annotations": [
+                            annotation.model_dump()
+                            for annotation in content.annotations
+                        ],
+                        "id": output.id,
+                    }
+                    content_blocks.append(block)
+                    if hasattr(content, "parsed"):
+                        parsed = content.parsed
+                if content.type == "refusal":
+                    content_blocks.append(
+                        {"type": "refusal", "refusal": content.refusal, "id": output.id}
+                    )
+        elif output.type == "function_call":
+            content_blocks.append(output.model_dump(exclude_none=True, mode="json"))
+            try:
+                args = json.loads(output.arguments, strict=False)
+                error = None
+            except JSONDecodeError as e:
+                args = output.arguments
+                error = str(e)
+            if error is None:
+                tool_call = {
+                    "type": "tool_call",
+                    "name": output.name,
+                    "args": args,
+                    "id": output.call_id,
+                }
+                tool_calls.append(cast(ToolCall, tool_call))
+            else:
+                tool_call = {
+                    "type": "invalid_tool_call",
+                    "name": output.name,
+                    "args": args,
+                    "id": output.call_id,
+                    "error": error,
+                }
+                invalid_tool_calls.append(cast(InvalidToolCall, tool_call))
+        elif output.type in (
+            "reasoning",
+            "web_search_call",
+            "file_search_call",
+            "computer_call",
+            "code_interpreter_call",
+            "mcp_call",
+            "mcp_list_tools",
+            "mcp_approval_request",
+            "image_generation_call",
+        ):
+            content_blocks.append(output.model_dump(exclude_none=True, mode="json"))
+
+    # Workaround for parsing structured output in the streaming case.
+    # from openai import OpenAI
+    # from pydantic import BaseModel
+
+    # class Foo(BaseModel):
+    #     response: str
+
+    # client = OpenAI()
+
+    # client.responses.parse(
+    #     model="gpt-4o-mini",
+    #     input=[{"content": "how are ya", "role": "user"}],
+    #     text_format=Foo,
+    #     stream=True,  # <-- errors
+    # )
+    if (
+        schema is not None
+        and not parsed
+        and response.output_text  # tool calls can generate empty output text
+        and response.text
+        and (text_config := response.text.model_dump())
+        and (format_ := text_config.get("format", {}))
+        and (format_.get("type") == "json_schema")
+    ):
+        try:
+            parsed_dict = json.loads(response.output_text)
+            if schema and _is_pydantic_class(schema):
+                parsed = schema(**parsed_dict)
+            else:
+                parsed = parsed_dict
+        except json.JSONDecodeError:
+            pass
+
+    content_v1 = _convert_to_v1_from_responses(content_blocks)
+    message = AIMessageV1(
+        content=content_v1,
+        id=response.id,
+        usage_metadata=usage_metadata,
+        response_metadata=cast(ResponseMetadata, response_metadata),
+        tool_calls=tool_calls,
+        invalid_tool_calls=invalid_tool_calls,
+        parsed=parsed,
+    )
+    if response.tools and any(
+        tool.type == "image_generation" for tool in response.tools
+    ):
+        # Get mime_type from tool definition and add to image generations
+        # if missing (primarily for tracing purposes).
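+        # e.g. output_format "png" -> mime_type "image/png" on any base64
+        # image block that is missing one.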
+        image_generation_call = next(
+            tool for tool in response.tools if tool.type == "image_generation"
+        )
+        if image_generation_call.output_format:
+            mime_type = f"image/{image_generation_call.output_format}"
+            for content_block in message.content:
+                # OK to mutate output message
+                if (
+                    isinstance(content_block, dict)
+                    and content_block.get("type") == "image"
+                    and "base64" in content_block
+                    and "mime_type" not in content_block
+                ):
+                    content_block["mime_type"] = mime_type
+
+    return message
+
+
+def _convert_responses_chunk_to_generation_chunk(
+    chunk: Any,
+    current_index: int,  # index in content
+    current_output_index: int,  # index in Response output
+    current_sub_index: int,  # index of content block in output item
+    schema: Optional[type[_BM]] = None,
+    metadata: Optional[dict] = None,
+) -> tuple[int, int, int, Optional[AIMessageChunkV1]]:
+    def _advance(output_idx: int, sub_idx: Optional[int] = None) -> None:
+        """Advance indexes tracked during streaming.
+
+        Example: we stream a response item of the form:
+
+        .. code-block:: python
+
+            {
+                "type": "message",  # output_index 0
+                "role": "assistant",
+                "id": "msg_123",
+                "content": [
+                    {"type": "output_text", "text": "foo"},  # sub_index 0
+                    {"type": "output_text", "text": "bar"},  # sub_index 1
+                ],
+            }
+
+        This is a single item with a shared ``output_index`` and two sub-indexes,
+        one for each content block.
+
+        This will be processed into an AIMessage with two text blocks:
+
+        .. code-block:: python
+
+            AIMessage(
+                [
+                    {"type": "text", "text": "foo", "id": "msg_123"},  # index 0
+                    {"type": "text", "text": "bar", "id": "msg_123"},  # index 1
+                ]
+            )
+
+        This function just identifies updates in output or sub-indexes and increments
+        the current index accordingly.
+        """
+        nonlocal current_index, current_output_index, current_sub_index
+        if sub_idx is None:
+            if current_output_index != output_idx:
+                current_index += 1
+        else:
+            if (current_output_index != output_idx) or (current_sub_index != sub_idx):
+                current_index += 1
+            current_sub_index = sub_idx
+        current_output_index = output_idx
+
+    content = []
+    tool_call_chunks: list = []
+    parsed = None
+    if metadata:
+        response_metadata = cast(ResponseMetadata, metadata)
+    else:
+        response_metadata = {}
+    usage_metadata = None
+    id = None
+    if chunk.type == "response.output_text.delta":
+        _advance(chunk.output_index, chunk.content_index)
+        content.append({"type": "text", "text": chunk.delta, "index": current_index})
+    elif chunk.type == "response.output_text.annotation.added":
+        _advance(chunk.output_index, chunk.content_index)
+        if isinstance(chunk.annotation, dict):
+            # Appears to be a breaking change in openai==1.82.0
+            annotation = chunk.annotation
+        else:
+            annotation = chunk.annotation.model_dump(exclude_none=True, mode="json")
+        content.append(
+            {
+                "type": "text",
+                "text": "",
+                "annotations": [annotation],
+                "index": current_index,
+            }
+        )
+    elif chunk.type == "response.output_text.done":
+        content.append(
+            {"type": "text", "text": "", "id": chunk.item_id, "index": current_index}
+        )
+    elif chunk.type == "response.created":
+        id = chunk.response.id
+        response_metadata["id"] = chunk.response.id  # Backwards compatibility
+    elif chunk.type == "response.completed":
+        msg = _construct_lc_result_from_responses_api(chunk.response, schema=schema)
+        if msg.parsed:
+            parsed = msg.parsed
+        usage_metadata = msg.usage_metadata
+        response_metadata = {
+            **response_metadata,
+            **{k: v for k, v in msg.response_metadata.items() if k != "id"},  # type: ignore[typeddict-item]
+        }
+    elif chunk.type == "response.output_item.added" and chunk.item.type == "message":
+        pass
+    elif (
+        chunk.type == "response.output_item.added"
+        and chunk.item.type == "function_call"
+    ):
+        _advance(chunk.output_index)
+        tool_call_chunks.append(
+            {
+                "type": "tool_call_chunk",
+                "name": chunk.item.name,
+                "args": chunk.item.arguments,
+                "id": chunk.item.call_id,
+                "index": current_index,
+            }
+        )
+        content.append(
+            {
+                "type": "function_call",
+                "name": chunk.item.name,
+                "arguments": chunk.item.arguments,
+                "call_id": chunk.item.call_id,
+                "id": chunk.item.id,
+                "index": current_index,
+            }
+        )
+    elif chunk.type == "response.output_item.done" and chunk.item.type in (
+        "web_search_call",
+        "file_search_call",
+        "computer_call",
+        "code_interpreter_call",
+        "mcp_call",
+        "mcp_list_tools",
+        "mcp_approval_request",
+        "image_generation_call",
+    ):
+        _advance(chunk.output_index)
+        tool_output = chunk.item.model_dump(exclude_none=True, mode="json")
+        tool_output["index"] = current_index
+        content.append(tool_output)
+    elif chunk.type == "response.function_call_arguments.delta":
+        _advance(chunk.output_index)
+        tool_call_chunks.append(
+            {"type": "tool_call_chunk", "args": chunk.delta, "index": current_index}
+        )
+        content.append(
+            {"type": "function_call", "arguments": chunk.delta, "index": current_index}
+        )
+    elif chunk.type == "response.refusal.done":
+        content.append({"type": "refusal", "refusal": chunk.refusal})
+    elif chunk.type == "response.output_item.added" and chunk.item.type == "reasoning":
+        _advance(chunk.output_index)
+        current_sub_index = 0
+        reasoning = chunk.item.model_dump(exclude_none=True, mode="json")
+        reasoning["index"] = current_index
+        content.append(reasoning)
+    elif chunk.type == "response.reasoning_summary_part.added":
+        block: dict = {"type": "reasoning", "reasoning": ""}
+        if chunk.summary_index > 0:
+            _advance(chunk.output_index, chunk.summary_index)
+        block["id"] = chunk.item_id
+        block["index"] = current_index
+        content.append(block)
+    elif chunk.type == "response.image_generation_call.partial_image":
+        # Partial images are not supported yet.
+        pass
+    elif chunk.type == "response.reasoning_summary_text.delta":
+        _advance(chunk.output_index)
+        content.append(
+            {
+                "summary": [
+                    {
+                        "index": chunk.summary_index,
+                        "type": "summary_text",
+                        "text": chunk.delta,
+                    }
+                ],
+                "index": current_index,
+                "type": "reasoning",
+            }
+        )
+    else:
+        return current_index, current_output_index, current_sub_index, None
+
+    content_v1 = _convert_to_v1_from_responses(content)
+    for content_block in content_v1:
+        if (
+            isinstance(content_block, dict)
+            and (content_block.get("index") or -1) > current_index  # type: ignore[operator]
+        ):
+            # blocks were added for v1
+            current_index = cast(int, content_block["index"])
+
+    message = AIMessageChunkV1(
+        content=content_v1,
+        tool_call_chunks=tool_call_chunks,
+        usage_metadata=usage_metadata,
+        response_metadata=response_metadata,
+        parsed=parsed,
+        id=id,
+    )
+
+    return (current_index, current_output_index, current_sub_index, message)
diff --git a/libs/partners/openai/pyproject.toml b/libs/partners/openai/pyproject.toml
index 5bffdabcf44..a54595796de 100644
--- a/libs/partners/openai/pyproject.toml
+++ b/libs/partners/openai/pyproject.toml
@@ -56,6 +56,8 @@ langchain-tests = { path = "../../standard-tests", editable = true }
 
 [tool.mypy]
 disallow_untyped_defs = "True"
+disable_error_code = ["typeddict-unknown-key"]
+
 [[tool.mypy.overrides]]
 module = "transformers"
 ignore_missing_imports = true
diff --git a/libs/partners/openai/tests/cassettes/test_function_calling.yaml.gz b/libs/partners/openai/tests/cassettes/test_function_calling.yaml.gz
new file mode 100644
index 0000000000000000000000000000000000000000..197a8402cf6ebaf57e31e7d5600363e77502e1c7
GIT binary patch
literal 7912
diff --git a/libs/partners/openai/tests/cassettes/test_parsed_pydantic_schema.yaml.gz b/libs/partners/openai/tests/cassettes/test_parsed_pydantic_schema.yaml.gz
new file mode 100644
index 0000000000000000000000000000000000000000..13c0b8896decc64a0df2d5181abd8873694b343d
GIT binary patch
literal 4616

diff --git a/libs/partners/openai/tests/cassettes/test_web_search.yaml.gz b/libs/partners/openai/tests/cassettes/test_web_search.yaml.gz
index e99f1c2e13a14928c0b2b025eab331ac4ec326f3..a202dfe9c614179cbb87d0bb5321229a94be9c85 100644
GIT binary patch
literal 27998
zqiEyi4i;+}adkB#JFX6HOYxO^&ITOgL@zOL?3F3K)1NSK{7~&{pb}I#&L9)JRg`zn z+(^QY6TF5Ri(ZvS!RGo^J>B??O|PNGstKqgRRLDb$zOmSK=NWL{t=g59!GI4U6sU9 z)LXswHw1o!X9KZ?RO8d z#|p5~FVcf-UX#*MnDRS{nL`eJ>UboeAQe4U`vp1%SV>@Q8ij>5lI%>18nxwbNZU+c z0jVxthYdWJ{o-{Pt&G`OXD|O1YR(ySK$>E9=8<~Ek1b}uKWON<*?3LYaugU34M1+* z^IgkPUvTt*bW*N-5y$B8!ucN!+JlW5$o$1wyw6!y@Y;e7#oFTI(Q+Ae- zzhi(HSC1+CZ>Ln<}rOct; zSLMHyIe=GeEg7@3?4##-eqeU?g3Fj+no$gXfsCT36p!jY$qCr-RhJ20y2Spv?6TLQ zI<_HSbZ{|U%)Uu>t=U+{j3O(4M&XY1KP2oBJsad??-1Skv}L+9PtE=UHHumxK8{dd z%gDwX6n+6VUZWn4LkSPuDPMIrmf>24ucMMn9`Xre$5s83-kEpbox(TdYbe|2ItHZl zt{)6@GCB$~l>aJ$fr4kGCQ{WXX1_p+ji5+Exvv|qaM=4*{hoLwMU>&2 z$4pviUvR7+F?eEgjH|M{{fq^#>AvPRP!yWbrSppHmSe+R+6{j(ZhqZ@{Aa*;PELLp z@7|8aKMKZk^76xYVP=6&M_y|i$(HBN?vdBpMsoUs*gbN-x+6@0v>hmN!o8P~4&Nf`Vf$o);mF^`@s33e)OZ_hIwHKn0hM`?A&Jy%LcAgka54V;W zu=E{r$0lBi=NvhZnxfAjFJE*a#5xdfsfXo=2iC=qAXpd_HWFRKTd2n)ZQ_Ew#$jox z&CD%?SKGMFF8tlZzpvCvX3{y|+*hp$uY-|cyI1^n$Q@xvX@9(j#V>jYJItNwVK!nT zeK(2${Fm&C5gF-*jx-bMUS_ZN;7`)a75?tw-~ap{ShCG1rzcQ-Yi9McI|l+k)0lhT zM-p`mm`L9-pyk;9ff=_qZz?OMju(ZPDAAi9Z{?@lL#F zH!o&v8QY;0bm`lnsC9n_ zE5E@}%6RkQT#^6g#g1WRaK+_2<=*Z)5}$H!vB27si9uFqOHVb1q&pgeqRDu8abA%9 z@Z$Ln+p^resO`0i^fxcoTfS`+kBBMK+&L3z)tc;g&Unke9?39RXz(IZ0B%|)OWeCr zDSQP4#fbs0Aq5CWe;E?h4T@{KQOtpUE+k~*MWyIhxbFCVV#?o142VjE@Ebd4 z@oPV2A_jo9`UQvqm)R&kvGHeAQsi&^Nv>s(u-i81o&47wKhfQo$?`?)F%%fb++M>T zLxHj7c@cX|>zhoLud4OjNLaa&)k0)I0j%JIclZ1~FkZw{d`)5)kJG)3!&<70 zq9%Pt(F&YE!mh^PFr@El9Jn%`Nr`HT{AW_EMQdmGTK1scc-wL?u!o89=o%9^=~aB^rRks0q6-g?2XyQ555sJQbYM~1hw%D&jbC){&qHA zq|G$h*?0>VCffDSk)wXznG_IQNB(|pxVz>Oc3+6pto+><+~zREGZZpj!>xt7_bXf_ zaE*fe)4DIf-gV^fzTjCfatZI8iPXIOch1;Ki<~sVb7V3)-MRb}_>eM^3m|GoMo2U& zNeqcHS=uu7#?vBA{@!@a?lRr#LxyhKAUjX~Zrkaf;GwAUqV9pz04h2cH~Zmam^%bnV`EY5KWEuA$E$j6GZXxg{;V zGcryG)>pGy?WTTYoMiDl^_@k=JW07Bt0m5(6~VKiHPU3v&Z3F@+1Xu=Ivjq^BD<8?UKBVuZ)B4HD>Ov=^}QI0EyU8l)IzQNqv0Y1C(KMWW8- zT1K!$5_c|_v3Ax8rLUb$T+A+%VI~c!$U#`qzvx9KMd$KV0~`qtdxIQ*fy3TYGe;T- zT_pgefQVPrBa^)6+Fs7y1CMfIz-#KcxQWm0@e_D7FJBjU&^=TBF7Rtx(fRQoIgS%# zc;75(j}xd3&Vhm8uYmPj`%3ch6Uaa8K6shQKY0mJ1N}-HDOpg1l7x_`erFr0p8<)| zo|gACvlNcGT&o|gACvpfk|&-b*vV#Twv7H2Lsv+R)H z@iOuCo|adeSpFwaCn*4nRlT9e$xwAp$7r5dAuUBatR+ci& z$atJ!kp11s;%+!^p!~k#;;Bv+*-NRcOr0z)U(Fx^jMtz10x;gSySaW|`zXCXm%e>e zQU|y0fKh&29a5i_#{rJc$pHt2!u$T%zo%@DI=S;!Muo9cT00LL(wb0z`ar%m}t%#wKixg)6w&- zQScqv+qq+bSA%Ht%glpA^{?q8Djp}ewI;3qd!DD^QSZMGfhpQ*-<%b;Zf|zX7in0@yca~RvbBzKUrCP-k z>!nbu;$g({5^SMQ81%hzWGh);ZI*YA;?r}_je_syw271J?wkPo2A2aTC~h#=hR~O* zdZi3H=|^qf4$9WTeUXz?MBvzrI^FtL6*aUWz;js!NfMYc*@Ww{p-tBbJUu{ z3|3}u&*IBr;Lj+=R{QIXytdG=bMdvZ4Qu5r^o&X%=fPxbsk6}5;3-2` zmX)d0*LHJ0Ntlgu(iMX0%}PT=|lg?Suk58Gxd=BajaWp<`sHA3<>oyNUd+2Z{#r z12YSw43Yhm-M|K1xuA=-V#4zWRC}Gr>iW_&+$@M4^iG?ALCcs%YmGm*jLrl`kF}nh z?H#`gChj|dTYwE4+RDl|bG+NOTBoHT+*B~3DJ7kj_po;r^w}^fP$JMt_!{^*Y}Db* zZQ_J6wj=-reikADeeX0|yN`yy zF)@$^S?V)&GFJRGn&#L#p+(p5ggXd^ktK0O*@(uCI<7W7NK0T5wBE#sEkOga7MO}7 z%>Ye-<@$urKxEuyC@a{?Ihs<-V0#;rH(Zk8+{D+xGGXb`ZXDulI5BXRVWD-(z_6xc zBaaHSbSWyk{9* z7y;xHhRrO%8&V4fr-9J9O`$ zn_$dZ#HE2*$Wlv_pY)U>MD0F!%he}*rX81+aiwYJp9V(;)f(*Ku-wN?6%=G-qH79? 
z?!hvY=df%u(ldC0mdFZ7!~vs)NEyE}j7a4&WeM!Qb#lslOpz zqDu$e+oLT-GXsLs@iI8E$tANy<^udp!ve?Yf^y)WnTcT(A-~CjT^vk=R$buhAubB{ zNjIQI0he+KvLtRT2@Oc!5zsGoE7-9qZWKLgna~uo*b*yIL`X!rK-GN=6j>BZRXRER z)x`Y@P5=}LL7P~Wg@Yv!;f;_d+#x~{xa$+)tkiMDR-X|a1jhmX1ROzehLeVUzjJav zPH=;}>#e>yxyO`8ZlWH#?ptG;fmAALdw=e}GRN(mu$)}|P9HbL7&Hw(HC-6OVeTf?QVSOUN`{-D z{JApZIvo;`SU1;n&wuI*OrufiW|CKig$3+m4?N=nzYPOtC&bmyT-+s~bZb$TNc|cq z`ie(xvAOW}9wyR4m8qcN&6bdlgYgopPkC~RTOhA><3ODIaSY)*Bw%sE(uVz;jy47w zK+Z5yj5~>aq`+eb7uBF0;nTaLiLDToOiN8 zwZ5s~H6b93j7^qN%|Up}VtV2&Id6kcctS(_G70U~$|TgiJj6@+I9`Ikd-(S`?(1sm zkkh1}<56(@x(oaEi(oL7eehuL_9W!1?1PQ6*DAg@pX0EewD5SI!^+63oRs!Fhm|c= zl0!ZnP_syyTcR5|ti#Mjz?LDk$;6)qY#CC^mgfb4EyFvxsy`3dGNhIVFwXacaEAcJ-3jh4#kFX0ibIj&!6kTcr%+sq)8bkjiqqom?p)|Q-#2&W&Sd5} zXP>pUBp54`7kBO~ie+d1VoFKi|U|AXc7kdXZOK$bFPB_9T8i zABJ>bUg1h7+pq?q)3o+&#sALL=c!k^*ZjIy$JMdKvt;F770hed%p)|I7hr8fJIo88 z1^Ti9_ZrTq68_glUG~epUU`zHdN$(A*#oyv@Gt*n(|`GYoW8_g@C7UX?olB-R?CK)9k5`tgWkKa9!&)6zsP3^Rj6O*dZkI3s z8%&?%6<;1lN9NT5YLi_82$w)+VLygqmBeH^?L_J>X?CF)lMy>b?Gm zK^fr+fICGh8kzH7vc;tS)REd!QPRzjRKZ=h)m{lh&dL8QPq?4VnXV%<%)fE7>1Vc8 z?H!%>VEPz1pfw)C{_7Iu=L8V6EraUvwb6Rzphs;@3Yf7&52TmG<>R$|OV(@QM1m7O zxfgE$-Xe9E0bcP@9pW;;!tYnmvlt!s#i#OzeP zoER%rb)LDr-;6uxSwiwd@;Oga4vWPO3x;~A8>b9`NhpdSj>tPU$H_CS&tjY6koue`0EP!M4x-*z2LP!247%SME4U<0Z>zWD@aely*C|NcUGk3msf}V5T6NoDSi}}1 zJEN4{nleeK>E)B1#nRY6Ft34H+>WceXfZ&B|_0WhnbcT~2@?wGum)Oj>Gin#1%-W*`RkiVo~ z!f>X&Vg1J>E)ZKwe#Ty^!1%PFi%E{7yL&T|T10U%$HMz+?X`V%LP7S^cw7zZ`y#&@ zSb*q->Yv~HjZT*q?W6<|dG>l{57t4~t&9CO+%L8gmd{~ho!zr%CUIpfRQNdZDO6N) zirLT^*&*qgaY3D%i&O`iXMQrJt1~@e<>Top;PST|0#7F~MkkFhzRdCzEtwyD6FF#u zQRac4#+-jowu;|suED$%*K7XExr+x>n=gP^Dw64vjQAs(%<+yfP&Z5(8}Xw?c4n;P zyvbEd_%0e9RtCydk~+@I+kx72qJw+0i$E@}+Cm^!``Ltd57>3M;?Dds+;vxU+t{d7 z7FQRYwS!uNd7-@DIRrJC$>w)=H*;rlo(-@y(2yT0ldYJ%T0`DL7J@VGDJwMf5L>~8 z!iD6P0EZWcxWl9_{N9b#BU>@R;Gs`=PHMRAr;sa@BN4tuVfgaRV-WR*0j;dg@A*Ar zTd}eIgxMJc_C`?@Wyu1Q%8B%JnXoLchEjd}HdAf1;o*9&YfG^b)NdMnfbTR`!65A0 zQMC7DaC1Ap0|*8B(|tw`I?l#~@wIwAZ8}wC0Sq>KB0*wpB7YVrapwCtf!U2bfO(9l zYOGd`@L}Ndaz0qRffJc#ojJ5z39gFe!(pFD9hRi<)%)5*1E{9gKvyL3+cAGV3MT!xln25plo zq-Kx5L7PNzU)dfa|4jX1LowRD`wH!?5&*DI9lwQ?81~J1-Kd(5weNOmiwMlARU3Rw zk$|dFPjAF>KPWgGc$&=;LypJ3r^vh4G-V8pKrJ&l2`GwOqKf0xqmN|2EfGwV-=8}n z-e$#b>J0qV(RA?B`fVe#9|r%xMVgAEnJL@P<3?9r?}eL13%K>jk)3XOHIMtrDiE z$JOV+D7EGgI}24bL#SHwJiYO|6|-cPuP^(TPBr5x+v}>hP3oFQkRG3|9lxZZED$^0 zzea^z1TEY|6*ixU7hea-I$|ieS7YR(E6?O?c^QaK_XOoN@F_tuoBcK5s^~WKlE613 zb4u9sm8jy-B>3Nj%4p_)5K+HZ$k0ggPSV69eX`)Locq6`BNkq_o*h4ik}`J${-e;J z{MW@pz}bIwp+Txd%TV=ZoQE^WKW>fGe~7cCbmy3%5r(Yde{Fy6{SOBo^8T-uj92I@ z-oQk-=wE{7A431;O5yw|l;=X%n}#EEU7+bej+6CouHv#AxPODnBL0`i>#NVdL|H&p zRl?T}5{J&eg#3P4)cI5U=!~r|@Yt^2>;Lk_6{`Q&5DHwPrCV#9tQabhm8 zwKw7Kp}6Gu>0R7AEBoq3>JZY=)!b40kR7g|IpD<`${l$BL>AgPuO-?9$^~6ZxH{$1 zceq2)^&7zSDm2KvLus{uz`rdmuR;)?@EpCpdqaK^%1X%N(A=Okkg^XUhBtdrN zQ!(yGi=Du{bMfpt=O2V9w^T-q!W;{J_EUd2yRWDX91 z+tt9aQIrzpLn$)~o=A!^6IEONt@qJmIw$!vuE+0B?B2OgPsBAIx+2l?1prX6?_Olp zynk_G7?iXpXS=>c>uoL(tgpR0$w@rz zis~aM!V+z~GZdiM=RtokaJmcu1zZ+=s^H7Xe$|rdmA`071OL;K)~Qnb^!0nFf7EsS z>2;KbwAMU!JU}O3kG` zvn2Tjq8Osh8sZ@ht~1UtYo)5I+N!~}LddFK&Y>v(na%~z1)t60sFgKvaBg4v(G4Sg zBHf&6Gg2qL!}64VmhBc*o3yB16}7MjViu13=E_@Z{ZoU?d6!Q_v?hIGa+{{~jdDfD zSkRq)d6P31!mB%i;_mhof<}0f+(cq*4?)zhtbxEIshOpNlI)5f-{V{n+@1?|h8m|? 
diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
index 48fa87bbb19..d10103e4ec8 100644
--- a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
+++ b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
@@ -474,6 +474,7 @@ def test_manual_tool_call_msg(use_responses_api: bool) -> None:
                     name="GenerateUsername",
                     args={"name": "Sally", "hair_color": "green"},
                     id="foo",
+                    type="tool_call",
                 )
             ],
         ),
@@ -495,6 +496,7 @@ def test_manual_tool_call_msg(use_responses_api: bool) -> None:
                     name="GenerateUsername",
                     args={"name": "Sally", "hair_color": "green"},
                     id="bar",
+                    type="tool_call",
                 )
             ],
         ),
diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py b/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py
index 0e23d0e3f06..fbe2a5e6aa0 100644
---
a/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py +++ b/libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py @@ -2,7 +2,7 @@ import json import os -from typing import Annotated, Any, Literal, Optional, cast +from typing import Annotated, Any, Literal, Optional, Union, cast import openai import pytest @@ -14,22 +14,33 @@ from langchain_core.messages import ( HumanMessage, MessageLikeRepresentation, ) +from langchain_core.v1.messages import AIMessage as AIMessageV1 +from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1 +from langchain_core.v1.messages import HumanMessage as HumanMessageV1 from pydantic import BaseModel from typing_extensions import TypedDict from langchain_openai import ChatOpenAI +from langchain_openai.v1 import ChatOpenAI as ChatOpenAIV1 MODEL_NAME = "gpt-4o-mini" -def _check_response(response: Optional[BaseMessage]) -> None: - assert isinstance(response, AIMessage) +def _check_response( + response: Optional[Union[BaseMessage, AIMessageV1]], output_version: str +) -> None: + if output_version == "v1": + assert isinstance(response, AIMessageV1) or isinstance( + response, AIMessageChunkV1 + ) + else: + assert isinstance(response, AIMessage) assert isinstance(response.content, list) for block in response.content: assert isinstance(block, dict) if block["type"] == "text": - assert isinstance(block["text"], str) - for annotation in block["annotations"]: + assert isinstance(block["text"], str) # type: ignore[typeddict-item] + for annotation in block["annotations"]: # type: ignore[typeddict-item] if annotation["type"] == "file_citation": assert all( key in annotation @@ -40,8 +51,16 @@ def _check_response(response: Optional[BaseMessage]) -> None: key in annotation for key in ["end_index", "start_index", "title", "type", "url"] ) + elif annotation["type"] == "citation": + assert all(key in annotation for key in ["title", "type"]) + if "url" in annotation: + assert "start_index" in annotation + assert "end_index" in annotation - text_content = response.text() + if output_version == "v1": + text_content = response.text + else: + text_content = response.text() # type: ignore[operator,misc] assert isinstance(text_content, str) assert text_content assert response.usage_metadata @@ -49,68 +68,74 @@ def _check_response(response: Optional[BaseMessage]) -> None: assert response.usage_metadata["output_tokens"] > 0 assert response.usage_metadata["total_tokens"] > 0 assert response.response_metadata["model_name"] - assert response.response_metadata["service_tier"] + assert response.response_metadata["service_tier"] # type: ignore[typeddict-item] +@pytest.mark.default_cassette("test_web_search.yaml.gz") @pytest.mark.vcr -def test_web_search() -> None: - llm = ChatOpenAI(model=MODEL_NAME, output_version="responses/v1") +@pytest.mark.parametrize("output_version", ["responses/v1", "v1"]) +def test_web_search(output_version: Literal["responses/v1", "v1"]) -> None: + if output_version == "v1": + llm = ChatOpenAIV1(model=MODEL_NAME) + else: + llm = ChatOpenAI(model=MODEL_NAME, output_version=output_version) # type: ignore[assignment] first_response = llm.invoke( "What was a positive news story from today?", tools=[{"type": "web_search_preview"}], ) - _check_response(first_response) + _check_response(first_response, output_version) # Test streaming - full: Optional[BaseMessageChunk] = None - for chunk in llm.stream( - "What was a positive news story from today?", - tools=[{"type": "web_search_preview"}], - ): - assert 
isinstance(chunk, AIMessageChunk) - full = chunk if full is None else full + chunk - _check_response(full) + if isinstance(llm, ChatOpenAIV1): + full: Optional[AIMessageChunkV1] = None + for chunk in llm.stream( + "What was a positive news story from today?", + tools=[{"type": "web_search_preview"}], + ): + assert isinstance(chunk, AIMessageChunkV1) + full = chunk if full is None else full + chunk + else: + full: Optional[BaseMessageChunk] = None # type: ignore[no-redef] + for chunk in llm.stream( + "What was a positive news story from today?", + tools=[{"type": "web_search_preview"}], + ): + assert isinstance(chunk, AIMessageChunk) + full = chunk if full is None else full + chunk + _check_response(full, output_version) # Use OpenAI's stateful API response = llm.invoke( "what about a negative one", tools=[{"type": "web_search_preview"}], - previous_response_id=first_response.response_metadata["id"], + previous_response_id=first_response.response_metadata["id"], # type: ignore[typeddict-item] ) - _check_response(response) + _check_response(response, output_version) # Manually pass in chat history response = llm.invoke( [ - { - "role": "user", - "content": [ - { - "type": "text", - "text": "What was a positive news story from today?", - } - ], - }, + {"role": "user", "content": "What was a positive news story from today?"}, first_response, - { - "role": "user", - "content": [{"type": "text", "text": "what about a negative one"}], - }, + {"role": "user", "content": "what about a negative one"}, ], tools=[{"type": "web_search_preview"}], ) - _check_response(response) + _check_response(response, output_version) # Bind tool response = llm.bind_tools([{"type": "web_search_preview"}]).invoke( "What was a positive news story from today?" ) - _check_response(response) + _check_response(response, output_version) for msg in [first_response, full, response]: - assert isinstance(msg, AIMessage) + assert msg is not None block_types = [block["type"] for block in msg.content] # type: ignore[index] - assert block_types == ["web_search_call", "text"] + if output_version == "responses/v1": + assert block_types == ["web_search_call", "text"] + else: + assert block_types == ["web_search_call", "web_search_result", "text"] @pytest.mark.flaky(retries=3, delay=1) @@ -120,7 +145,7 @@ async def test_web_search_async() -> None: "What was a positive news story from today?", tools=[{"type": "web_search_preview"}], ) - _check_response(response) + _check_response(response, "v0") assert response.response_metadata["status"] # Test streaming @@ -132,7 +157,7 @@ async def test_web_search_async() -> None: assert isinstance(chunk, AIMessageChunk) full = chunk if full is None else full + chunk assert isinstance(full, AIMessageChunk) - _check_response(full) + _check_response(full, "v0") for msg in [response, full]: assert msg.additional_kwargs["tool_outputs"] @@ -141,13 +166,15 @@ async def test_web_search_async() -> None: assert tool_output["type"] == "web_search_call" -@pytest.mark.flaky(retries=3, delay=1) -def test_function_calling() -> None: +@pytest.mark.default_cassette("test_function_calling.yaml.gz") +@pytest.mark.vcr +@pytest.mark.parametrize("output_version", ["v0", "responses/v1"]) +def test_function_calling(output_version: Literal["v0", "responses/v1"]) -> None: def multiply(x: int, y: int) -> int: """return x * y""" return x * y - llm = ChatOpenAI(model=MODEL_NAME) + llm = ChatOpenAI(model=MODEL_NAME, output_version=output_version) bound_llm = llm.bind_tools([multiply, {"type": "web_search_preview"}]) ai_msg = 
cast(AIMessage, bound_llm.invoke("whats 5 * 4")) assert len(ai_msg.tool_calls) == 1 @@ -163,7 +190,33 @@ def test_function_calling() -> None: assert set(full.tool_calls[0]["args"]) == {"x", "y"} response = bound_llm.invoke("What was a positive news story from today?") - _check_response(response) + _check_response(response, output_version) + + +@pytest.mark.default_cassette("test_function_calling.yaml.gz") +@pytest.mark.vcr +def test_function_calling_v1() -> None: + def multiply(x: int, y: int) -> int: + """return x * y""" + return x * y + + llm = ChatOpenAIV1(model=MODEL_NAME) + bound_llm = llm.bind_tools([multiply, {"type": "web_search_preview"}]) + ai_msg = bound_llm.invoke("whats 5 * 4") + assert len(ai_msg.tool_calls) == 1 + assert ai_msg.tool_calls[0]["name"] == "multiply" + assert set(ai_msg.tool_calls[0]["args"]) == {"x", "y"} + + full: Any = None + for chunk in bound_llm.stream("whats 5 * 4"): + assert isinstance(chunk, AIMessageChunkV1) + full = chunk if full is None else full + chunk + assert len(full.tool_calls) == 1 + assert full.tool_calls[0]["name"] == "multiply" + assert set(full.tool_calls[0]["args"]) == {"x", "y"} + + response = bound_llm.invoke("What was a positive news story from today?") + _check_response(response, "v1") class Foo(BaseModel): @@ -174,8 +227,13 @@ class FooDict(TypedDict): response: str -def test_parsed_pydantic_schema() -> None: - llm = ChatOpenAI(model=MODEL_NAME, use_responses_api=True) +@pytest.mark.default_cassette("test_parsed_pydantic_schema.yaml.gz") +@pytest.mark.vcr +@pytest.mark.parametrize("output_version", ["v0", "responses/v1"]) +def test_parsed_pydantic_schema(output_version: Literal["v0", "responses/v1"]) -> None: + llm = ChatOpenAI( + model=MODEL_NAME, use_responses_api=True, output_version=output_version + ) response = llm.invoke("how are ya", response_format=Foo) parsed = Foo(**json.loads(response.text())) assert parsed == response.additional_kwargs["parsed"] @@ -192,6 +250,30 @@ def test_parsed_pydantic_schema() -> None: assert parsed.response +@pytest.mark.default_cassette("test_parsed_pydantic_schema.yaml.gz") +@pytest.mark.vcr +def test_parsed_pydantic_schema_v1() -> None: + llm = ChatOpenAIV1(model=MODEL_NAME, use_responses_api=True) + response = llm.invoke("how are ya", response_format=Foo) + assert response.text + parsed = Foo(**json.loads(response.text)) + assert parsed == response.parsed + assert parsed.response + + # Test stream + full: Optional[AIMessageChunkV1] = None + chunks = [] + for chunk in llm.stream("how are ya", response_format=Foo): + assert isinstance(chunk, AIMessageChunkV1) + full = chunk if full is None else full + chunk + chunks.append(chunk) + assert isinstance(full, AIMessageChunkV1) + assert full.text + parsed = Foo(**json.loads(full.text)) + assert parsed == full.parsed + assert parsed.response + + async def test_parsed_pydantic_schema_async() -> None: llm = ChatOpenAI(model=MODEL_NAME, use_responses_api=True) response = await llm.ainvoke("how are ya", response_format=Foo) @@ -323,6 +405,26 @@ def test_reasoning(output_version: Literal["v0", "responses/v1"]) -> None: assert block_types == ["reasoning", "text"] +@pytest.mark.default_cassette("test_reasoning.yaml.gz") +@pytest.mark.vcr +def test_reasoning_v1() -> None: + llm = ChatOpenAIV1(model="o4-mini", use_responses_api=True) + response = llm.invoke("Hello", reasoning={"effort": "low"}) + assert isinstance(response, AIMessageV1) + + # Test init params + streaming + llm = ChatOpenAIV1(model="o4-mini", reasoning={"effort": "low"}) + full: 
Optional[AIMessageChunkV1] = None + for chunk in llm.stream("Hello"): + assert isinstance(chunk, AIMessageChunkV1) + full = chunk if full is None else full + chunk + assert isinstance(full, AIMessageChunkV1) + + for msg in [response, full]: + block_types = [block["type"] for block in msg.content] + assert block_types == ["reasoning", "text"] + + def test_stateful_api() -> None: llm = ChatOpenAI(model=MODEL_NAME, use_responses_api=True) response = llm.invoke("how are you, my name is Bobo") @@ -358,20 +460,25 @@ def test_computer_calls() -> None: def test_file_search() -> None: pytest.skip() # TODO: set up infra - llm = ChatOpenAI(model=MODEL_NAME) + llm = ChatOpenAI(model=MODEL_NAME, use_responses_api=True) tool = { "type": "file_search", "vector_store_ids": [os.environ["OPENAI_VECTOR_STORE_ID"]], } - response = llm.invoke("What is deep research by OpenAI?", tools=[tool]) - _check_response(response) + + input_message = {"role": "user", "content": "What is deep research by OpenAI?"} + response = llm.invoke([input_message], tools=[tool]) + _check_response(response, "v0") full: Optional[BaseMessageChunk] = None - for chunk in llm.stream("What is deep research by OpenAI?", tools=[tool]): + for chunk in llm.stream([input_message], tools=[tool]): assert isinstance(chunk, AIMessageChunk) full = chunk if full is None else full + chunk assert isinstance(full, AIMessageChunk) - _check_response(full) + _check_response(full, "v0") + + next_message = {"role": "user", "content": "Thank you."} + _ = llm.invoke([input_message, full, next_message]) @pytest.mark.default_cassette("test_stream_reasoning_summary.yaml.gz") @@ -398,20 +505,28 @@ def test_stream_reasoning_summary( if output_version == "v0": reasoning = response_1.additional_kwargs["reasoning"] assert set(reasoning.keys()) == {"id", "type", "summary"} + summary = reasoning["summary"] + assert isinstance(summary, list) + for block in summary: + assert isinstance(block, dict) + assert isinstance(block["type"], str) + assert isinstance(block["text"], str) + assert block["text"] else: + # output_version == "responses/v1" reasoning = next( block for block in response_1.content if block["type"] == "reasoning" # type: ignore[index] ) assert set(reasoning.keys()) == {"id", "type", "summary", "index"} - summary = reasoning["summary"] - assert isinstance(summary, list) - for block in summary: - assert isinstance(block, dict) - assert isinstance(block["type"], str) - assert isinstance(block["text"], str) - assert block["text"] + summary = reasoning["summary"] + assert isinstance(summary, list) + for block in summary: + assert isinstance(block, dict) + assert isinstance(block["type"], str) + assert isinstance(block["text"], str) + assert block["text"] # Check we can pass back summaries message_2 = {"role": "user", "content": "Thank you."} @@ -419,9 +534,48 @@ def test_stream_reasoning_summary( assert isinstance(response_2, AIMessage) +@pytest.mark.default_cassette("test_stream_reasoning_summary.yaml.gz") @pytest.mark.vcr -def test_code_interpreter() -> None: - llm = ChatOpenAI(model="o4-mini", use_responses_api=True) +def test_stream_reasoning_summary_v1() -> None: + llm = ChatOpenAIV1( + model="o4-mini", + # Routes to Responses API if `reasoning` is set. 
+ reasoning={"effort": "medium", "summary": "auto"}, + ) + message_1 = { + "role": "user", + "content": "What was the third tallest buliding in the year 2000?", + } + response_1: Optional[AIMessageChunkV1] = None + for chunk in llm.stream([message_1]): + assert isinstance(chunk, AIMessageChunkV1) + response_1 = chunk if response_1 is None else response_1 + chunk + assert isinstance(response_1, AIMessageChunkV1) + + total_reasoning_blocks = 0 + for block in response_1.content: + if block["type"] == "reasoning": + total_reasoning_blocks += 1 + assert isinstance(block["id"], str) and block["id"].startswith("rs_") + assert isinstance(block["reasoning"], str) + assert isinstance(block["index"], int) + assert ( + total_reasoning_blocks > 1 + ) # This query typically generates multiple reasoning blocks + + # Check we can pass back summaries + message_2 = {"role": "user", "content": "Thank you."} + response_2 = llm.invoke([message_1, response_1, message_2]) + assert isinstance(response_2, AIMessageV1) + + +@pytest.mark.default_cassette("test_code_interpreter.yaml.gz") +@pytest.mark.vcr +@pytest.mark.parametrize("output_version", ["v0", "responses/v1"]) +def test_code_interpreter(output_version: Literal["v0", "responses/v1"]) -> None: + llm = ChatOpenAI( + model="o4-mini", use_responses_api=True, output_version=output_version + ) llm_with_tools = llm.bind_tools( [{"type": "code_interpreter", "container": {"type": "auto"}}] ) @@ -430,15 +584,25 @@ def test_code_interpreter() -> None: "content": "Write and run code to answer the question: what is 3^3?", } response = llm_with_tools.invoke([input_message]) - _check_response(response) - tool_outputs = response.additional_kwargs["tool_outputs"] - assert tool_outputs - assert any(output["type"] == "code_interpreter_call" for output in tool_outputs) + assert isinstance(response, AIMessage) + _check_response(response, output_version) + if output_version == "v0": + tool_outputs = [ + item + for item in response.additional_kwargs["tool_outputs"] + if item["type"] == "code_interpreter_call" + ] + else: + # responses/v1 + tool_outputs = [ + item + for item in response.content + if isinstance(item, dict) and item["type"] == "code_interpreter_call" + ] + assert len(tool_outputs) == 1 # Test streaming # Use same container - tool_outputs = response.additional_kwargs["tool_outputs"] - assert len(tool_outputs) == 1 container_id = tool_outputs[0]["container_id"] llm_with_tools = llm.bind_tools( [{"type": "code_interpreter", "container": container_id}] @@ -449,9 +613,72 @@ def test_code_interpreter() -> None: assert isinstance(chunk, AIMessageChunk) full = chunk if full is None else full + chunk assert isinstance(full, AIMessageChunk) - tool_outputs = full.additional_kwargs["tool_outputs"] + if output_version == "v0": + tool_outputs = [ + item + for item in response.additional_kwargs["tool_outputs"] + if item["type"] == "code_interpreter_call" + ] + else: + # responses/v1 + tool_outputs = [ + item + for item in response.content + if isinstance(item, dict) and item["type"] == "code_interpreter_call" + ] + assert tool_outputs + + # Test we can pass back in + next_message = {"role": "user", "content": "Please add more comments to the code."} + _ = llm_with_tools.invoke([input_message, full, next_message]) + + +@pytest.mark.default_cassette("test_code_interpreter.yaml.gz") +@pytest.mark.vcr +def test_code_interpreter_v1() -> None: + llm = ChatOpenAIV1(model="o4-mini", use_responses_api=True) + llm_with_tools = llm.bind_tools( + [{"type": "code_interpreter", "container": 
{"type": "auto"}}] + ) + input_message = { + "role": "user", + "content": "Write and run code to answer the question: what is 3^3?", + } + response = llm_with_tools.invoke([input_message]) + assert isinstance(response, AIMessageV1) + _check_response(response, "v1") + + tool_outputs = [ + item for item in response.content if item["type"] == "code_interpreter_call" + ] + code_interpreter_result = next( + item for item in response.content if item["type"] == "code_interpreter_result" + ) + assert tool_outputs + assert code_interpreter_result + assert len(tool_outputs) == 1 + + # Test streaming + # Use same container + container_id = tool_outputs[0]["container_id"] # type: ignore[typeddict-item] + llm_with_tools = llm.bind_tools( + [{"type": "code_interpreter", "container": container_id}] + ) + + full: Optional[AIMessageChunkV1] = None + for chunk in llm_with_tools.stream([input_message]): + assert isinstance(chunk, AIMessageChunkV1) + full = chunk if full is None else full + chunk + assert isinstance(full, AIMessageChunkV1) + code_interpreter_call = next( + item for item in full.content if item["type"] == "code_interpreter_call" + ) + code_interpreter_result = next( + item for item in full.content if item["type"] == "code_interpreter_result" + ) + assert code_interpreter_call + assert code_interpreter_result assert tool_outputs - assert any(output["type"] == "code_interpreter_call" for output in tool_outputs) # Test we can pass back in next_message = {"role": "user", "content": "Please add more comments to the code."} @@ -546,10 +773,66 @@ def test_mcp_builtin_zdr() -> None: _ = llm_with_tools.invoke([input_message, full, approval_message]) -@pytest.mark.vcr() -def test_image_generation_streaming() -> None: +@pytest.mark.default_cassette("test_mcp_builtin_zdr.yaml.gz") +@pytest.mark.vcr +def test_mcp_builtin_zdr_v1() -> None: + llm = ChatOpenAIV1( + model="o4-mini", store=False, include=["reasoning.encrypted_content"] + ) + + llm_with_tools = llm.bind_tools( + [ + { + "type": "mcp", + "server_label": "deepwiki", + "server_url": "https://mcp.deepwiki.com/mcp", + "require_approval": {"always": {"tool_names": ["read_wiki_structure"]}}, + } + ] + ) + input_message = { + "role": "user", + "content": ( + "What transport protocols does the 2025-03-26 version of the MCP spec " + "support?" 
+ ), + } + full: Optional[AIMessageChunkV1] = None + for chunk in llm_with_tools.stream([input_message]): + assert isinstance(chunk, AIMessageChunkV1) + full = chunk if full is None else full + chunk + + assert isinstance(full, AIMessageChunkV1) + assert all(isinstance(block, dict) for block in full.content) + + approval_message = HumanMessageV1( + [ + { + "type": "non_standard", + "value": { + "type": "mcp_approval_response", + "approve": True, + "approval_request_id": block["value"]["id"], # type: ignore[index] + }, + } + for block in full.content + if block["type"] == "non_standard" + and block["value"]["type"] == "mcp_approval_request" # type: ignore[index] + ] + ) + _ = llm_with_tools.invoke([input_message, full, approval_message]) + + +@pytest.mark.default_cassette("test_image_generation_streaming.yaml.gz") +@pytest.mark.vcr +@pytest.mark.parametrize("output_version", ["v0", "responses/v1"]) +def test_image_generation_streaming( + output_version: Literal["v0", "responses/v1"], +) -> None: """Test image generation streaming.""" - llm = ChatOpenAI(model="gpt-4.1", use_responses_api=True) + llm = ChatOpenAI( + model="gpt-4.1", use_responses_api=True, output_version=output_version + ) tool = { "type": "image_generation", # For testing purposes let's keep the quality low, so the test runs faster. @@ -596,15 +879,82 @@ def test_image_generation_streaming() -> None: # At the moment, the streaming API does not pick up annotations fully. # So the following check is commented out. # _check_response(complete_ai_message) - tool_output = complete_ai_message.additional_kwargs["tool_outputs"][0] - assert set(tool_output.keys()).issubset(expected_keys) + if output_version == "v0": + assert complete_ai_message.additional_kwargs["tool_outputs"] + tool_output = complete_ai_message.additional_kwargs["tool_outputs"][0] + assert set(tool_output.keys()).issubset(expected_keys) + elif output_version == "responses/v1": + tool_output = next( + block + for block in complete_ai_message.content + if isinstance(block, dict) and block["type"] == "image_generation_call" + ) + assert set(tool_output.keys()).issubset(expected_keys) + else: + # v1 + standard_keys = {"type", "base64", "id", "status", "index"} + tool_output = next( + block + for block in complete_ai_message.content + if isinstance(block, dict) and block["type"] == "image" + ) + assert set(standard_keys).issubset(tool_output.keys()) -@pytest.mark.vcr() -def test_image_generation_multi_turn() -> None: +@pytest.mark.default_cassette("test_image_generation_streaming.yaml.gz") +@pytest.mark.vcr +def test_image_generation_streaming_v1() -> None: + """Test image generation streaming.""" + llm = ChatOpenAIV1(model="gpt-4.1", use_responses_api=True) + tool = { + "type": "image_generation", + "quality": "low", + "output_format": "jpeg", + "output_compression": 100, + "size": "1024x1024", + } + + expected_keys = { + # Standard + "type", + "base64", + "mime_type", + "id", + "index", + # OpenAI-specific + "background", + "output_format", + "quality", + "revised_prompt", + "size", + "status", + } + + full: Optional[AIMessageChunkV1] = None + for chunk in llm.stream("Draw a random short word in green font.", tools=[tool]): + assert isinstance(chunk, AIMessageChunkV1) + full = chunk if full is None else full + chunk + complete_ai_message = cast(AIMessageChunkV1, full) + + tool_output = next( + block + for block in complete_ai_message.content + if isinstance(block, dict) and block["type"] == "image" + ) + assert set(expected_keys).issubset(tool_output.keys()) + + 
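The v1 image-generation tests above and below share one access pattern: scan the message's `content` list for the standard "image" block rather than reading `additional_kwargs["tool_outputs"]`. A minimal sketch of that pattern (assumes the langchain_openai.v1 preview API and a live OPENAI_API_KEY; tool parameters mirror the tests):

    from langchain_openai.v1 import ChatOpenAI

    llm = ChatOpenAI(model="gpt-4.1", use_responses_api=True)
    llm_with_tools = llm.bind_tools(
        [{"type": "image_generation", "quality": "low", "output_format": "jpeg"}]
    )
    response = llm_with_tools.invoke("Draw a random short word in green font.")

    # v1 messages expose provider output as typed content blocks; generated
    # images arrive as standard "image" blocks carrying a base64 payload.
    image_block = next(
        block for block in response.content if block["type"] == "image"
    )
    print(image_block["id"], image_block["status"])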
+@pytest.mark.default_cassette("test_image_generation_multi_turn.yaml.gz") +@pytest.mark.vcr +@pytest.mark.parametrize("output_version", ["v0", "responses/v1"]) +def test_image_generation_multi_turn( + output_version: Literal["v0", "responses/v1"], +) -> None: """Test multi-turn editing of image generation by passing in history.""" # Test multi-turn - llm = ChatOpenAI(model="gpt-4.1", use_responses_api=True) + llm = ChatOpenAI( + model="gpt-4.1", use_responses_api=True, output_version=output_version + ) # Test invocation tool = { "type": "image_generation", @@ -620,10 +970,41 @@ def test_image_generation_multi_turn() -> None: {"role": "user", "content": "Draw a random short word in green font."} ] ai_message = llm_with_tools.invoke(chat_history) - _check_response(ai_message) - tool_output = ai_message.additional_kwargs["tool_outputs"][0] + assert isinstance(ai_message, AIMessage) + _check_response(ai_message, output_version) - # Example tool output for an image + expected_keys = { + "id", + "background", + "output_format", + "quality", + "result", + "revised_prompt", + "size", + "status", + "type", + } + + if output_version == "v0": + tool_output = ai_message.additional_kwargs["tool_outputs"][0] + assert set(tool_output.keys()).issubset(expected_keys) + elif output_version == "responses/v1": + tool_output = next( + block + for block in ai_message.content + if isinstance(block, dict) and block["type"] == "image_generation_call" + ) + assert set(tool_output.keys()).issubset(expected_keys) + else: + standard_keys = {"type", "base64", "id", "status"} + tool_output = next( + block + for block in ai_message.content + if isinstance(block, dict) and block["type"] == "image" + ) + assert set(standard_keys).issubset(tool_output.keys()) + + # Example tool output for an image (v0) # { # "background": "opaque", # "id": "ig_683716a8ddf0819888572b20621c7ae4029ec8c11f8dacf8", @@ -639,20 +1020,6 @@ def test_image_generation_multi_turn() -> None: # "result": # base64 encode image data # } - expected_keys = { - "id", - "background", - "output_format", - "quality", - "result", - "revised_prompt", - "size", - "status", - "type", - } - - assert set(tool_output.keys()).issubset(expected_keys) - chat_history.extend( [ # AI message with tool output @@ -669,6 +1036,97 @@ def test_image_generation_multi_turn() -> None: ) ai_message2 = llm_with_tools.invoke(chat_history) - _check_response(ai_message2) - tool_output2 = ai_message2.additional_kwargs["tool_outputs"][0] - assert set(tool_output2.keys()).issubset(expected_keys) + assert isinstance(ai_message2, AIMessage) + _check_response(ai_message2, output_version) + + if output_version == "v0": + tool_output = ai_message2.additional_kwargs["tool_outputs"][0] + assert set(tool_output.keys()).issubset(expected_keys) + elif output_version == "responses/v1": + tool_output = next( + block + for block in ai_message2.content + if isinstance(block, dict) and block["type"] == "image_generation_call" + ) + assert set(tool_output.keys()).issubset(expected_keys) + else: + standard_keys = {"type", "base64", "id", "status"} + tool_output = next( + block + for block in ai_message2.content + if isinstance(block, dict) and block["type"] == "image" + ) + assert set(standard_keys).issubset(tool_output.keys()) + + +@pytest.mark.default_cassette("test_image_generation_multi_turn.yaml.gz") +@pytest.mark.vcr +def test_image_generation_multi_turn_v1() -> None: + """Test multi-turn editing of image generation by passing in history.""" + # Test multi-turn + llm = ChatOpenAIV1(model="gpt-4.1", 
use_responses_api=True) + # Test invocation + tool = { + "type": "image_generation", + "quality": "low", + "output_format": "jpeg", + "output_compression": 100, + "size": "1024x1024", + } + llm_with_tools = llm.bind_tools([tool]) + + chat_history: list[MessageLikeRepresentation] = [ + {"role": "user", "content": "Draw a random short word in green font."} + ] + ai_message = llm_with_tools.invoke(chat_history) + assert isinstance(ai_message, AIMessageV1) + _check_response(ai_message, "v1") + + expected_keys = { + # Standard + "type", + "base64", + "mime_type", + "id", + # OpenAI-specific + "background", + "output_format", + "quality", + "revised_prompt", + "size", + "status", + } + + standard_keys = {"type", "base64", "id", "status"} + tool_output = next( + block + for block in ai_message.content + if isinstance(block, dict) and block["type"] == "image" + ) + assert set(standard_keys).issubset(tool_output.keys()) + + chat_history.extend( + [ + # AI message with tool output + ai_message, + # New request + { + "role": "user", + "content": ( + "Now, change the font to blue. Keep the word and everything else " + "the same." + ), + }, + ] + ) + + ai_message2 = llm_with_tools.invoke(chat_history) + assert isinstance(ai_message2, AIMessageV1) + _check_response(ai_message2, "v1") + + tool_output = next( + block + for block in ai_message2.content + if isinstance(block, dict) and block["type"] == "image" + ) + assert set(expected_keys).issubset(tool_output.keys()) diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py index c4176711482..119bd1282ab 100644 --- a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py +++ b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py @@ -20,11 +20,13 @@ from langchain_core.messages import ( ToolCall, ToolMessage, ) +from langchain_core.messages import content_blocks as types from langchain_core.messages.ai import UsageMetadata from langchain_core.outputs import ChatGeneration, ChatResult from langchain_core.runnables import RunnableLambda from langchain_core.tracers.base import BaseTracer from langchain_core.tracers.schemas import Run +from langchain_core.v1.messages import AIMessage as AIMessageV1 from openai.types.responses import ResponseOutputMessage, ResponseReasoningItem from openai.types.responses.response import IncompleteDetails, Response, ResponseUsage from openai.types.responses.response_error import ResponseError @@ -51,7 +53,10 @@ from langchain_openai import ChatOpenAI from langchain_openai.chat_models._compat import ( _FUNCTION_CALL_IDS_MAP_KEY, _convert_from_v03_ai_message, + _convert_from_v1_to_chat_completions, + _convert_from_v1_to_responses, _convert_to_v03_ai_message, + _convert_to_v1_from_responses, ) from langchain_openai.chat_models.base import ( _construct_lc_result_from_responses_api, @@ -2297,7 +2302,7 @@ def test_mcp_tracing() -> None: assert payload["tools"][0]["headers"]["Authorization"] == "Bearer PLACEHOLDER" -def test_compat() -> None: +def test_compat_responses_v03() -> None: # Check compatibility with v0.3 message format message_v03 = AIMessage( content=[ @@ -2358,6 +2363,258 @@ def test_compat() -> None: assert message_v03_output is not message_v03 +@pytest.mark.parametrize( + "message_v1, expected", + [ + ( + AIMessageV1( + [ + {"type": "reasoning", "reasoning": "Reasoning text"}, + { + "type": "tool_call", + "id": "call_123", + "name": "get_weather", + "args": {"location": "San Francisco"}, + }, + { + "type": "text", + 
"text": "Hello, world!", + "annotations": [ + {"type": "citation", "url": "https://example.com"} + ], + }, + ], + id="chatcmpl-123", + response_metadata={"model_provider": "openai", "model_name": "gpt-4.1"}, + ), + AIMessageV1( + [{"type": "text", "text": "Hello, world!"}], + id="chatcmpl-123", + response_metadata={"model_provider": "openai", "model_name": "gpt-4.1"}, + ), + ) + ], +) +def test_convert_from_v1_to_chat_completions( + message_v1: AIMessageV1, expected: AIMessageV1 +) -> None: + result = _convert_from_v1_to_chat_completions(message_v1) + assert result == expected + assert result.tool_calls == message_v1.tool_calls # tool calls remain cached + + # Check no mutation + assert message_v1 != result + + +@pytest.mark.parametrize( + "message_v1, expected", + [ + ( + AIMessageV1( + [ + {"type": "reasoning", "id": "abc123"}, + {"type": "reasoning", "id": "abc234", "reasoning": "foo "}, + {"type": "reasoning", "id": "abc234", "reasoning": "bar"}, + { + "type": "tool_call", + "id": "call_123", + "name": "get_weather", + "args": {"location": "San Francisco"}, + }, + { + "type": "tool_call", + "id": "call_234", + "name": "get_weather_2", + "args": {"location": "New York"}, + "extras": {"item_id": "fc_123"}, + }, + {"type": "text", "text": "Hello "}, + { + "type": "text", + "text": "world", + "annotations": [ + {"type": "citation", "url": "https://example.com"}, + { + "type": "citation", + "title": "my doc", + "extras": {"file_id": "file_123", "index": 1}, + }, + { + "type": "non_standard_annotation", + "value": {"bar": "baz"}, + }, + ], + }, + {"type": "image", "base64": "...", "id": "ig_123"}, + { + "type": "non_standard", + "value": {"type": "something_else", "foo": "bar"}, + }, + ], + id="resp123", + ), + [ + {"type": "reasoning", "id": "abc123", "summary": []}, + { + "type": "reasoning", + "id": "abc234", + "summary": [ + {"type": "summary_text", "text": "foo "}, + {"type": "summary_text", "text": "bar"}, + ], + }, + { + "type": "function_call", + "call_id": "call_123", + "name": "get_weather", + "arguments": '{"location": "San Francisco"}', + }, + { + "type": "function_call", + "call_id": "call_234", + "name": "get_weather_2", + "arguments": '{"location": "New York"}', + "id": "fc_123", + }, + {"type": "text", "text": "Hello "}, + { + "type": "text", + "text": "world", + "annotations": [ + {"type": "url_citation", "url": "https://example.com"}, + { + "type": "file_citation", + "filename": "my doc", + "index": 1, + "file_id": "file_123", + }, + {"bar": "baz"}, + ], + }, + {"type": "image_generation_call", "id": "ig_123", "result": "..."}, + {"type": "something_else", "foo": "bar"}, + ], + ) + ], +) +def test_convert_from_v1_to_responses( + message_v1: AIMessageV1, expected: AIMessageV1 +) -> None: + result = _convert_from_v1_to_responses(message_v1.content, message_v1.tool_calls) + assert result == expected + + # Check no mutation + assert message_v1 != result + + +@pytest.mark.parametrize( + "responses_content, tool_calls, expected_content", + [ + ( + [ + {"type": "reasoning", "id": "abc123", "summary": []}, + { + "type": "reasoning", + "id": "abc234", + "summary": [ + {"type": "summary_text", "text": "foo "}, + {"type": "summary_text", "text": "bar"}, + ], + }, + { + "type": "function_call", + "call_id": "call_123", + "name": "get_weather", + "arguments": '{"location": "San Francisco"}', + }, + { + "type": "function_call", + "call_id": "call_234", + "name": "get_weather_2", + "arguments": '{"location": "New York"}', + "id": "fc_123", + }, + {"type": "text", "text": "Hello "}, + { + 
"type": "text", + "text": "world", + "annotations": [ + {"type": "url_citation", "url": "https://example.com"}, + { + "type": "file_citation", + "filename": "my doc", + "index": 1, + "file_id": "file_123", + }, + {"bar": "baz"}, + ], + }, + {"type": "image_generation_call", "id": "ig_123", "result": "..."}, + {"type": "something_else", "foo": "bar"}, + ], + [ + { + "type": "tool_call", + "id": "call_123", + "name": "get_weather", + "args": {"location": "San Francisco"}, + }, + { + "type": "tool_call", + "id": "call_234", + "name": "get_weather_2", + "args": {"location": "New York"}, + }, + ], + [ + {"type": "reasoning", "id": "abc123"}, + {"type": "reasoning", "id": "abc234", "reasoning": "foo "}, + {"type": "reasoning", "id": "abc234", "reasoning": "bar"}, + { + "type": "tool_call", + "id": "call_123", + "name": "get_weather", + "args": {"location": "San Francisco"}, + }, + { + "type": "tool_call", + "id": "call_234", + "name": "get_weather_2", + "args": {"location": "New York"}, + "extras": {"item_id": "fc_123"}, + }, + {"type": "text", "text": "Hello "}, + { + "type": "text", + "text": "world", + "annotations": [ + {"type": "citation", "url": "https://example.com"}, + { + "type": "citation", + "title": "my doc", + "extras": {"file_id": "file_123", "index": 1}, + }, + {"type": "non_standard_annotation", "value": {"bar": "baz"}}, + ], + }, + {"type": "image", "base64": "...", "id": "ig_123"}, + { + "type": "non_standard", + "value": {"type": "something_else", "foo": "bar"}, + }, + ], + ) + ], +) +def test_convert_to_v1_from_responses( + responses_content: list[dict[str, Any]], + tool_calls: list[ToolCall], + expected_content: list[types.ContentBlock], +) -> None: + result = _convert_to_v1_from_responses(responses_content, tool_calls) + assert result == expected_content + + def test_get_last_messages() -> None: messages: list[BaseMessage] = [HumanMessage("Hello")] last_messages, previous_response_id = _get_last_messages(messages) diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_imports.py b/libs/partners/openai/tests/unit_tests/chat_models/test_imports.py index ef3ae2fb3e8..948e278b0fd 100644 --- a/libs/partners/openai/tests/unit_tests/chat_models/test_imports.py +++ b/libs/partners/openai/tests/unit_tests/chat_models/test_imports.py @@ -1,7 +1,10 @@ from langchain_openai.chat_models import __all__ +from langchain_openai.v1.chat_models import __all__ as v1_all EXPECTED_ALL = ["ChatOpenAI", "AzureChatOpenAI"] +EXPECTED_ALL_V1 = ["ChatOpenAI"] def test_all_imports() -> None: assert sorted(EXPECTED_ALL) == sorted(__all__) + assert sorted(EXPECTED_ALL_V1) == sorted(v1_all) diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_responses_stream.py b/libs/partners/openai/tests/unit_tests/chat_models/test_responses_stream.py index eca5ee1c255..6b5318d9b1c 100644 --- a/libs/partners/openai/tests/unit_tests/chat_models/test_responses_stream.py +++ b/libs/partners/openai/tests/unit_tests/chat_models/test_responses_stream.py @@ -2,6 +2,7 @@ from typing import Any, Optional from unittest.mock import MagicMock, patch from langchain_core.messages import AIMessageChunk, BaseMessageChunk +from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1 from openai.types.responses import ( ResponseCompletedEvent, ResponseContentPartAddedEvent, @@ -37,6 +38,7 @@ from openai.types.shared.reasoning import Reasoning from openai.types.shared.response_format_text import ResponseFormatText from langchain_openai import ChatOpenAI +from langchain_openai.v1 import ChatOpenAI 
as ChatOpenAIV1 from tests.unit_tests.chat_models.test_base import MockSyncContextManager responses_stream = [ @@ -337,7 +339,7 @@ responses_stream = [ id="rs_234", summary=[], type="reasoning", - encrypted_content=None, + encrypted_content="encrypted-content", status=None, ), output_index=2, @@ -416,7 +418,7 @@ responses_stream = [ Summary(text="still more reasoning", type="summary_text"), ], type="reasoning", - encrypted_content=None, + encrypted_content="encrypted-content", status=None, ), output_index=2, @@ -562,7 +564,7 @@ responses_stream = [ Summary(text="still more reasoning", type="summary_text"), ], type="reasoning", - encrypted_content=None, + encrypted_content="encrypted-content", status=None, ), ResponseOutputMessage( @@ -621,7 +623,9 @@ def _strip_none(obj: Any) -> Any: def test_responses_stream() -> None: - llm = ChatOpenAI(model="o4-mini", output_version="responses/v1") + llm = ChatOpenAI( + model="o4-mini", use_responses_api=True, output_version="responses/v1" + ) mock_client = MagicMock() def mock_create(*args: Any, **kwargs: Any) -> MockSyncContextManager: @@ -630,10 +634,12 @@ def test_responses_stream() -> None: mock_client.responses.create = mock_create full: Optional[BaseMessageChunk] = None + chunks = [] with patch.object(llm, "root_client", mock_client): for chunk in llm.stream("test"): assert isinstance(chunk, AIMessageChunk) full = chunk if full is None else full + chunk + chunks.append(chunk) assert isinstance(full, AIMessageChunk) expected_content = [ @@ -654,6 +660,7 @@ def test_responses_stream() -> None: {"index": 0, "type": "summary_text", "text": "more reasoning"}, {"index": 1, "type": "summary_text", "text": "still more reasoning"}, ], + "encrypted_content": "encrypted-content", "type": "reasoning", "index": 3, }, @@ -679,3 +686,72 @@ def test_responses_stream() -> None: dumped = _strip_none(item.model_dump()) _ = dumped.pop("status", None) assert dumped == payload["input"][idx] + + +def test_responses_stream_v1() -> None: + llm = ChatOpenAIV1(model="o4-mini", use_responses_api=True) + mock_client = MagicMock() + + def mock_create(*args: Any, **kwargs: Any) -> MockSyncContextManager: + return MockSyncContextManager(responses_stream) + + mock_client.responses.create = mock_create + + full: Optional[AIMessageChunkV1] = None + chunks = [] + with patch.object(llm, "root_client", mock_client): + for chunk in llm.stream("test"): + assert isinstance(chunk, AIMessageChunkV1) + full = chunk if full is None else full + chunk + chunks.append(chunk) + assert isinstance(full, AIMessageChunkV1) + + expected_content = [ + { + "type": "reasoning", + "reasoning": "reasoning block one", + "id": "rs_123", + "index": 0, + }, + { + "type": "reasoning", + "reasoning": "another reasoning block", + "id": "rs_123", + "index": 1, + }, + {"type": "text", "text": "text block one", "index": 2, "id": "msg_123"}, + {"type": "text", "text": "another text block", "index": 3, "id": "msg_123"}, + { + "type": "reasoning", + "reasoning": "more reasoning", + "id": "rs_234", + "extras": {"encrypted_content": "encrypted-content"}, + "index": 4, + }, + { + "type": "reasoning", + "reasoning": "still more reasoning", + "id": "rs_234", + "index": 5, + }, + {"type": "text", "text": "more", "index": 6, "id": "msg_234"}, + {"type": "text", "text": "text", "index": 7, "id": "msg_234"}, + ] + assert full.content == expected_content + assert full.id == "resp_123" + + # Test reconstruction + payload = llm._get_request_payload([full]) + completed = [ + item + for item in responses_stream + if item.type == 
"response.completed" # type: ignore[attr-defined] + ] + assert len(completed) == 1 + response = completed[0].response # type: ignore[attr-defined] + + assert len(response.output) == len(payload["input"]) + for idx, item in enumerate(response.output): + dumped = _strip_none(item.model_dump()) + _ = dumped.pop("status", None) + assert dumped == payload["input"][idx] diff --git a/libs/partners/openai/tests/unit_tests/fake/callbacks.py b/libs/partners/openai/tests/unit_tests/fake/callbacks.py index da3fa0664a0..e3e695fcc6b 100644 --- a/libs/partners/openai/tests/unit_tests/fake/callbacks.py +++ b/libs/partners/openai/tests/unit_tests/fake/callbacks.py @@ -6,6 +6,7 @@ from uuid import UUID from langchain_core.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler from langchain_core.messages import BaseMessage +from langchain_core.v1.messages import MessageV1 from pydantic import BaseModel @@ -196,7 +197,7 @@ class FakeCallbackHandlerWithChatStart(FakeCallbackHandler): def on_chat_model_start( self, serialized: dict[str, Any], - messages: list[list[BaseMessage]], + messages: Union[list[list[BaseMessage]], list[MessageV1]], *, run_id: UUID, parent_run_id: Optional[UUID] = None, diff --git a/libs/partners/openai/tests/unit_tests/test_imports.py b/libs/partners/openai/tests/unit_tests/test_imports.py index 324e71bda9b..d64cc6bf5f7 100644 --- a/libs/partners/openai/tests/unit_tests/test_imports.py +++ b/libs/partners/openai/tests/unit_tests/test_imports.py @@ -1,4 +1,5 @@ from langchain_openai import __all__ +from langchain_openai.v1 import __all__ as v1_all EXPECTED_ALL = [ "OpenAI", @@ -9,6 +10,9 @@ EXPECTED_ALL = [ "AzureOpenAIEmbeddings", ] +EXPECTED_ALL_V1 = ["ChatOpenAI"] + def test_all_imports() -> None: assert sorted(EXPECTED_ALL) == sorted(__all__) + assert sorted(EXPECTED_ALL_V1) == sorted(v1_all) diff --git a/uv.lock b/uv.lock index bb8f22f98e7..00e1cafc046 100644 --- a/uv.lock +++ b/uv.lock @@ -181,7 +181,7 @@ wheels = [ [[package]] name = "anthropic" -version = "0.57.1" +version = "0.60.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -192,9 +192,9 @@ dependencies = [ { name = "sniffio" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d7/75/6261a1a8d92aed47e27d2fcfb3a411af73b1435e6ae1186da02b760565d0/anthropic-0.57.1.tar.gz", hash = "sha256:7815dd92245a70d21f65f356f33fc80c5072eada87fb49437767ea2918b2c4b0", size = 423775, upload-time = "2025-07-03T16:57:35.932Z" } +sdist = { url = "https://files.pythonhosted.org/packages/4e/03/3334921dc54ed822b3dd993ae72d823a7402588521bbba3e024b3333a1fd/anthropic-0.60.0.tar.gz", hash = "sha256:a22ba187c6f4fd5afecb2fc913b960feccf72bc0d25c1b7ce0345e87caede577", size = 425983, upload-time = "2025-07-28T19:53:47.685Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e5/cf/ca0ba77805aec6171629a8b665c7dc224dab374539c3d27005b5d8c100a0/anthropic-0.57.1-py3-none-any.whl", hash = "sha256:33afc1f395af207d07ff1bffc0a3d1caac53c371793792569c5d2f09283ea306", size = 292779, upload-time = "2025-07-03T16:57:34.636Z" }, + { url = "https://files.pythonhosted.org/packages/da/bb/d84f287fb1c217b30c328af987cf8bbe3897edf0518dcc5fa39412f794ec/anthropic-0.60.0-py3-none-any.whl", hash = "sha256:65ad1f088a960217aaf82ba91ff743d6c89e9d811c6d64275b9a7c59ee9ac3c6", size = 293116, upload-time = "2025-07-28T19:53:45.944Z" }, ] [[package]] @@ -2354,7 +2354,7 @@ typing = [ [[package]] name = "langchain-anthropic" -version = "0.3.17" +version = "0.3.18" source = { editable = 
"libs/partners/anthropic" } dependencies = [ { name = "anthropic" }, @@ -2364,7 +2364,7 @@ dependencies = [ [package.metadata] requires-dist = [ - { name = "anthropic", specifier = ">=0.57.0,<1" }, + { name = "anthropic", specifier = ">=0.60.0,<1" }, { name = "langchain-core", editable = "libs/core" }, { name = "pydantic", specifier = ">=2.7.4,<3.0.0" }, ] From ac2de920b1811b024bae06fc03ac989d633624e2 Mon Sep 17 00:00:00 2001 From: ccurme Date: Tue, 5 Aug 2025 16:39:37 -0300 Subject: [PATCH 2/2] chore: increment versions for 0.4 branch (#32419) --- libs/core/langchain_core/version.py | 2 +- libs/core/pyproject.toml | 2 +- libs/core/uv.lock | 2 +- libs/langchain/pyproject.toml | 14 +- libs/langchain/uv.lock | 1466 ++------------------------- libs/partners/openai/pyproject.toml | 6 +- libs/partners/openai/uv.lock | 4 +- 7 files changed, 127 insertions(+), 1369 deletions(-) diff --git a/libs/core/langchain_core/version.py b/libs/core/langchain_core/version.py index 232073a8a84..85ab41d0cd6 100644 --- a/libs/core/langchain_core/version.py +++ b/libs/core/langchain_core/version.py @@ -1,3 +1,3 @@ """langchain-core version information and utilities.""" -VERSION = "0.3.72" +VERSION = "0.4.0.dev0" diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml index e54a785bf5d..44bbfbeeee6 100644 --- a/libs/core/pyproject.toml +++ b/libs/core/pyproject.toml @@ -16,7 +16,7 @@ dependencies = [ "pydantic>=2.7.4", ] name = "langchain-core" -version = "0.3.72" +version = "0.4.0.dev0" description = "Building applications with LLMs through composability" readme = "README.md" diff --git a/libs/core/uv.lock b/libs/core/uv.lock index 7d20a625e3d..4228e7dae97 100644 --- a/libs/core/uv.lock +++ b/libs/core/uv.lock @@ -987,7 +987,7 @@ wheels = [ [[package]] name = "langchain-core" -version = "0.3.72" +version = "0.4.0.dev0" source = { editable = "." 
} dependencies = [ { name = "jsonpatch" }, diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml index 1ec57de2480..e84e7ec58f3 100644 --- a/libs/langchain/pyproject.toml +++ b/libs/langchain/pyproject.toml @@ -7,7 +7,7 @@ authors = [] license = { text = "MIT" } requires-python = ">=3.9, <4.0" dependencies = [ - "langchain-core<1.0.0,>=0.3.72", + "langchain-core<1.0.0,>=0.4.0.dev0", "langchain-text-splitters<1.0.0,>=0.3.9", "langsmith>=0.1.17", "pydantic<3.0.0,>=2.7.4", @@ -17,7 +17,7 @@ dependencies = [ "async-timeout<5.0.0,>=4.0.0; python_version < \"3.11\"", ] name = "langchain" -version = "0.3.27" +version = "0.4.0.dev0" description = "Building applications with LLMs through composability" readme = "README.md" @@ -25,9 +25,9 @@ readme = "README.md" community = ["langchain-community"] anthropic = ["langchain-anthropic"] openai = ["langchain-openai"] -azure-ai = ["langchain-azure-ai"] -cohere = ["langchain-cohere"] -google-vertexai = ["langchain-google-vertexai"] +# azure-ai = ["langchain-azure-ai"] +# cohere = ["langchain-cohere"] +# google-vertexai = ["langchain-google-vertexai"] google-genai = ["langchain-google-genai"] fireworks = ["langchain-fireworks"] ollama = ["langchain-ollama"] @@ -35,9 +35,9 @@ together = ["langchain-together"] mistralai = ["langchain-mistralai"] huggingface = ["langchain-huggingface"] groq = ["langchain-groq"] -aws = ["langchain-aws"] +# aws = ["langchain-aws"] deepseek = ["langchain-deepseek"] -xai = ["langchain-xai"] +# xai = ["langchain-xai"] perplexity = ["langchain-perplexity"] [project.urls] diff --git a/libs/langchain/uv.lock b/libs/langchain/uv.lock index e7cf3e95dfe..47e886a827d 100644 --- a/libs/langchain/uv.lock +++ b/libs/langchain/uv.lock @@ -272,81 +272,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fc/30/d4986a882011f9df997a55e6becd864812ccfcd821d64aac8570ee39f719/attrs-25.1.0-py3-none-any.whl", hash = "sha256:c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a", size = 63152, upload-time = "2025-01-25T11:30:10.164Z" }, ] -[[package]] -name = "azure-ai-inference" -version = "1.0.0b9" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "azure-core" }, - { name = "isodate" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/4e/6a/ed85592e5c64e08c291992f58b1a94dab6869f28fb0f40fd753dced73ba6/azure_ai_inference-1.0.0b9.tar.gz", hash = "sha256:1feb496bd84b01ee2691befc04358fa25d7c344d8288e99364438859ad7cd5a4", size = 182408, upload-time = "2025-02-15T00:37:28.464Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/4f/0f/27520da74769db6e58327d96c98e7b9a07ce686dff582c9a5ec60b03f9dd/azure_ai_inference-1.0.0b9-py3-none-any.whl", hash = "sha256:49823732e674092dad83bb8b0d1b65aa73111fab924d61349eb2a8cdc0493990", size = 124885, upload-time = "2025-02-15T00:37:29.964Z" }, -] - -[package.optional-dependencies] -opentelemetry = [ - { name = "azure-core-tracing-opentelemetry" }, -] - -[[package]] -name = "azure-core" -version = "1.32.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "requests" }, - { name = "six" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/cc/ee/668328306a9e963a5ad9f152cd98c7adad86c822729fd1d2a01613ad1e67/azure_core-1.32.0.tar.gz", hash = "sha256:22b3c35d6b2dae14990f6c1be2912bf23ffe50b220e708a28ab1bb92b1c730e5", size = 279128, upload-time = "2024-10-31T17:45:17.528Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/39/83/325bf5e02504dbd8b4faa98197a44cdf8a325ef259b48326a2b6f17f8383/azure_core-1.32.0-py3-none-any.whl", hash = "sha256:eac191a0efb23bfa83fddf321b27b122b4ec847befa3091fa736a5c32c50d7b4", size = 198855, upload-time = "2024-10-31T17:45:19.415Z" }, -] - -[[package]] -name = "azure-core-tracing-opentelemetry" -version = "1.0.0b11" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "azure-core" }, - { name = "opentelemetry-api" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ab/f2/2ede1654987b82f45dffe0e82d0d77b13940bb88d044138090c8b7baa439/azure-core-tracing-opentelemetry-1.0.0b11.tar.gz", hash = "sha256:a230d1555838b5d07b7594221cd639ea7bc24e29c881e5675e311c6067bad4f5", size = 19097, upload-time = "2023-09-07T19:49:25.064Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e6/6e/3ef6dfba8e0faa4692caa6d103c721ccba6ac37a24744848a3a10bb3fe89/azure_core_tracing_opentelemetry-1.0.0b11-py3-none-any.whl", hash = "sha256:016cefcaff2900fb5cdb7a8a7abd03e9c266622c06e26b3fe6dafa54c4b48bf5", size = 10710, upload-time = "2023-09-07T19:49:26.492Z" }, -] - -[[package]] -name = "azure-cosmos" -version = "4.9.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "azure-core", marker = "python_full_version < '3.13'" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/be/7c/a4e7810f85e7f83d94265ef5ff0fb1efad55a768de737d940151ea2eec45/azure_cosmos-4.9.0.tar.gz", hash = "sha256:c70db4cbf55b0ff261ed7bb8aa325a5dfa565d3c6eaa43d75d26ae5e2ad6d74f", size = 1824155, upload-time = "2024-11-19T04:09:30.195Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/61/dc/380f843744535497acd0b85aacb59565c84fc28bf938c8d6e897a858cd95/azure_cosmos-4.9.0-py3-none-any.whl", hash = "sha256:3b60eaa01a16a857d0faf0cec304bac6fa8620a81bc268ce760339032ef617fe", size = 303157, upload-time = "2024-11-19T04:09:32.148Z" }, -] - -[[package]] -name = "azure-identity" -version = "1.21.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "azure-core" }, - { name = "cryptography" }, - { name = "msal" }, - { name = "msal-extensions" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/b5/a1/f1a683672e7a88ea0e3119f57b6c7843ed52650fdcac8bfa66ed84e86e40/azure_identity-1.21.0.tar.gz", hash = "sha256:ea22ce6e6b0f429bc1b8d9212d5b9f9877bd4c82f1724bfa910760612c07a9a6", size = 266445, upload-time = "2025-03-11T20:53:07.463Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/3d/9f/1f9f3ef4f49729ee207a712a5971a9ca747f2ca47d9cbf13cf6953e3478a/azure_identity-1.21.0-py3-none-any.whl", hash = "sha256:258ea6325537352440f71b35c3dffe9d240eae4a5126c1b7ce5efd5766bd9fd9", size = 189190, upload-time = "2025-03-11T20:53:09.197Z" }, -] - [[package]] name = "babel" version = "2.17.0" @@ -398,35 +323,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/93/cb/0885a99be92700adc3f2294179e2ca8700b3197c89d2c87b1ca20ab2537a/blockbuster-1.5.18-py3-none-any.whl", hash = "sha256:0062af118fbddb6e85b4fa5e41df5da95ce4ceba44ff1efcc26be1ade11cad53", size = 13043, upload-time = "2025-02-10T21:59:26.111Z" }, ] -[[package]] -name = "boto3" -version = "1.36.20" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "botocore" }, - { name = "jmespath" }, - { name = "s3transfer" }, -] -sdist = { url = 
"https://files.pythonhosted.org/packages/00/9a/f63b820ece3f0411645e686fe48a9ea9daf9ee07fa5dcb07dfb5df8267ec/boto3-1.36.20.tar.gz", hash = "sha256:4a27ffc0543c2a429600542047f00c6a1e95270139d36d8cc636e9cc9a78b835", size = 111015, upload-time = "2025-02-13T20:22:54.46Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/0f/44/882ab747cfe2164eecd6ab780c11e3d875cffb53b86118664a44b9fb71c1/boto3-1.36.20-py3-none-any.whl", hash = "sha256:e132e31232ee107f1c187f566d96863a907433e5bdd8d8928effddd30a96242f", size = 139179, upload-time = "2025-02-13T20:22:52.087Z" }, -] - -[[package]] -name = "botocore" -version = "1.36.20" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "jmespath" }, - { name = "python-dateutil" }, - { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' or platform_python_implementation == 'PyPy'" }, - { name = "urllib3", version = "2.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' and platform_python_implementation != 'PyPy'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/1d/d5/2d54195cefa89708186c036db360d9be23aabdb93d8982df9c08e6df1ef8/botocore-1.36.20.tar.gz", hash = "sha256:3815a05518ff03a8dbc8d5a3c29b95889409a25ac87a282067f6e26fefb7c40a", size = 13521540, upload-time = "2025-02-13T20:22:40.193Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/41/6c/0553a1641c749ec8838e153fb77ff17f4de56792d289adf039385294257f/botocore-1.36.20-py3-none-any.whl", hash = "sha256:0110bf2208e4569659d0ccfca94baa4999501334397987b02712a94493cbf48b", size = 13349288, upload-time = "2025-02-13T20:22:36.677Z" }, -] - [[package]] name = "cachetools" version = "5.5.1" @@ -744,27 +640,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/20/01/b394922252051e97aab231d416c86da3d8a6d781eeadcdca1082867de64e/codespell-2.4.1-py3-none-any.whl", hash = "sha256:3dadafa67df7e4a3dbf51e0d7315061b80d265f9552ebd699b3dd6834b47e425", size = 344501, upload-time = "2025-01-28T18:52:37.057Z" }, ] -[[package]] -name = "cohere" -version = "5.13.12" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "fastavro" }, - { name = "httpx" }, - { name = "httpx-sse" }, - { name = "pydantic" }, - { name = "pydantic-core" }, - { name = "requests" }, - { name = "tokenizers" }, - { name = "types-requests", version = "2.31.0.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10' or platform_python_implementation == 'PyPy'" }, - { name = "types-requests", version = "2.32.0.20241016", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' and platform_python_implementation != 'PyPy'" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/f4/48/795c53b25b08ec353cc4f48dc5c199ac4615b3c331e716ac50c7cb07034c/cohere-5.13.12.tar.gz", hash = "sha256:97bb9ac107e580780b941acbabd3aa5e71960e6835398292c46aaa8a0a4cab88", size = 132860, upload-time = "2025-02-06T14:00:21.691Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/50/c6/cffec9284d9713d28c6235a653a9a34c49b0f880f00cfa002252cdb8d033/cohere-5.13.12-py3-none-any.whl", hash = "sha256:2a043591a3e5280b47716a6b311e4c7f58e799364113a9cb81b50cd4f6c95f7e", size = 252856, upload-time = "2025-02-06T14:00:19.111Z" }, -] - [[package]] name = "colorama" version = "0.4.6" @@ -963,18 +838,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/07/6c/aa3f2f849e01cb6a001cd8554a88d4c77c5c1a31c95bdf1cf9301e6d9ef4/defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61", size = 25604, upload-time = "2021-03-08T10:59:24.45Z" }, ] -[[package]] -name = "deprecated" -version = "1.2.18" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "wrapt" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/98/97/06afe62762c9a8a86af0cfb7bfdab22a43ad17138b07af5b1a58442690a2/deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d", size = 2928744, upload-time = "2025-01-27T10:46:25.7Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/6e/c6/ac0b6c1e2d138f1002bcf799d330bd6d85084fece321e662a14223794041/Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec", size = 9998, upload-time = "2025-01-27T10:46:09.186Z" }, -] - [[package]] name = "distro" version = "1.9.0" @@ -984,24 +847,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" }, ] -[[package]] -name = "dnspython" -version = "2.7.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b5/4a/263763cb2ba3816dd94b08ad3a33d5fdae34ecb856678773cc40a3605829/dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1", size = 345197, upload-time = "2024-10-05T20:14:59.362Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/68/1b/e0a87d256e40e8c888847551b20a017a6b98139178505dc7ffb96f04e954/dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86", size = 313632, upload-time = "2024-10-05T20:14:57.687Z" }, -] - -[[package]] -name = "docstring-parser" -version = "0.16" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/08/12/9c22a58c0b1e29271051222d8906257616da84135af9ed167c9e28f85cb3/docstring_parser-0.16.tar.gz", hash = "sha256:538beabd0af1e2db0146b6bd3caa526c35a34d61af9fd2887f3a8a27a739aa6e", size = 26565, upload-time = "2024-03-15T10:39:44.419Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d5/7c/e9fcff7623954d86bdc17782036cbf715ecab1bec4847c008557affe1ca8/docstring_parser-0.16-py3-none-any.whl", hash = "sha256:bf0a1387354d3691d102edef7ec124f219ef639982d096e26e3b60aeffa90637", size = 36533, upload-time = "2024-03-15T10:39:41.527Z" }, -] - [[package]] name = "duckdb" version = "1.2.0" @@ -1091,44 +936,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7b/8f/c4d9bafc34ad7ad5d8dc16dd1347ee0e507a52c3adb6bfa8887e1c6a26ba/executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa", size = 26702, upload-time = "2025-01-22T15:41:25.929Z" }, ] -[[package]] -name = "fastavro" -version = "1.10.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f3/67/7121d2221e998706cac00fa779ec44c1c943cb65e8a7ed1bd57d78d93f2c/fastavro-1.10.0.tar.gz", hash = "sha256:47bf41ac6d52cdfe4a3da88c75a802321321b37b663a900d12765101a5d6886f", size = 
987970, upload-time = "2024-12-20T12:56:21.335Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/0c/e9/f5813450d672f500c4794a39a7cfea99316cb63d5ea11f215e320ea5243b/fastavro-1.10.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1a9fe0672d2caf0fe54e3be659b13de3cad25a267f2073d6f4b9f8862acc31eb", size = 1037355, upload-time = "2024-12-20T12:56:26.386Z" }, - { url = "https://files.pythonhosted.org/packages/6a/41/3f120f72e65f0c80e9bc4f855ac1c9578c8c0e2cdac4d4d4da1f91ca73b9/fastavro-1.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86dd0410770e0c99363788f0584523709d85e57bb457372ec5c285a482c17fe6", size = 3024739, upload-time = "2024-12-20T12:56:30.75Z" }, - { url = "https://files.pythonhosted.org/packages/e1/e3/7d9b019158498b45c383e696ba8733b01535337136e9402b0487afeb92b6/fastavro-1.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:190e80dc7d77d03a6a8597a026146b32a0bbe45e3487ab4904dc8c1bebecb26d", size = 3074020, upload-time = "2024-12-20T12:56:34.419Z" }, - { url = "https://files.pythonhosted.org/packages/36/31/7ede5629e66eeb71c234d17a799000e737fe0ffd71ef9e1d57a3510def46/fastavro-1.10.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bf570d63be9155c3fdc415f60a49c171548334b70fff0679a184b69c29b6bc61", size = 2968623, upload-time = "2024-12-20T12:56:37.911Z" }, - { url = "https://files.pythonhosted.org/packages/10/13/d215411ff5d5de23d6ed62a31eb7f7fa53941681d86bcd5c6388a0918fc3/fastavro-1.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e07abb6798e95dccecaec316265e35a018b523d1f3944ad396d0a93cb95e0a08", size = 3122217, upload-time = "2024-12-20T12:56:40.399Z" }, - { url = "https://files.pythonhosted.org/packages/6a/1d/7a54fac3f90f0dc120b92f244067976831e393789d3b78c08f2b035ccb19/fastavro-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:37203097ed11d0b8fd3c004904748777d730cafd26e278167ea602eebdef8eb2", size = 497256, upload-time = "2024-12-20T12:56:42.066Z" }, - { url = "https://files.pythonhosted.org/packages/ac/bf/e7e8e0f841e608dc6f78c746ef2d971fb1f6fe8a9a428d0731ef0abf8b59/fastavro-1.10.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d183c075f527ab695a27ae75f210d4a86bce660cda2f85ae84d5606efc15ef50", size = 1040292, upload-time = "2024-12-20T12:56:44.453Z" }, - { url = "https://files.pythonhosted.org/packages/3a/96/43a65881f061bc5ec6dcf39e59f639a7344e822d4caadae748d076aaf4d0/fastavro-1.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7a95a2c0639bffd7c079b59e9a796bfc3a9acd78acff7088f7c54ade24e4a77", size = 3312624, upload-time = "2024-12-20T12:56:47.479Z" }, - { url = "https://files.pythonhosted.org/packages/c8/45/dba0cc08cf42500dd0f1e552e0fefe1cd81c47099d99277828a1081cbd87/fastavro-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a678153b5da1b024a32ec3f611b2e7afd24deac588cb51dd1b0019935191a6d", size = 3334284, upload-time = "2024-12-20T12:56:51.332Z" }, - { url = "https://files.pythonhosted.org/packages/76/e3/3d9b0824e2e2da56e6a435a70a4db7ed801136daa451577a819bbedc6cf8/fastavro-1.10.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:67a597a5cfea4dddcf8b49eaf8c2b5ffee7fda15b578849185bc690ec0cd0d8f", size = 3283647, upload-time = "2024-12-20T12:56:53.982Z" }, - { url = "https://files.pythonhosted.org/packages/a1/dc/83d985f8212194e8283ebae86491fccde8710fd81d81ef8659e5373f4f1b/fastavro-1.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1fd689724760b17f69565d8a4e7785ed79becd451d1c99263c40cb2d6491f1d4", 
size = 3419520, upload-time = "2024-12-20T12:56:56.527Z" }, - { url = "https://files.pythonhosted.org/packages/fd/7f/21711a9ec9937c84406e0773ba3fc6f8d66389a364da46618706f9c37d30/fastavro-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:4f949d463f9ac4221128a51e4e34e2562f401e5925adcadfd28637a73df6c2d8", size = 499750, upload-time = "2024-12-20T12:56:58.034Z" }, - { url = "https://files.pythonhosted.org/packages/9c/a4/8e69c0a5cd121e5d476237de1bde5a7947f791ae45768ae52ed0d3ea8d18/fastavro-1.10.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:cfe57cb0d72f304bd0dcc5a3208ca6a7363a9ae76f3073307d095c9d053b29d4", size = 1036343, upload-time = "2024-12-20T12:56:59.557Z" }, - { url = "https://files.pythonhosted.org/packages/1e/01/aa219e2b33e5873d27b867ec0fad9f35f23d461114e1135a7e46c06786d2/fastavro-1.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74e517440c824cb65fb29d3e3903a9406f4d7c75490cef47e55c4c82cdc66270", size = 3263368, upload-time = "2024-12-20T12:57:01.97Z" }, - { url = "https://files.pythonhosted.org/packages/a7/ba/1766e2d7d95df2e95e9e9a089dc7a537c0616720b053a111a918fa7ee6b6/fastavro-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:203c17d44cadde76e8eecb30f2d1b4f33eb478877552d71f049265dc6f2ecd10", size = 3328933, upload-time = "2024-12-20T12:57:05.898Z" }, - { url = "https://files.pythonhosted.org/packages/2e/40/26e56696b9696ab4fbba25a96b8037ca3f9fd8a8cc55b4b36400ef023e49/fastavro-1.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6575be7f2b5f94023b5a4e766b0251924945ad55e9a96672dc523656d17fe251", size = 3258045, upload-time = "2024-12-20T12:57:12.789Z" }, - { url = "https://files.pythonhosted.org/packages/4e/bc/2f6c92c06c5363372abe828bccdd95762f2c1983b261509f94189c38c8a1/fastavro-1.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fe471deb675ed2f01ee2aac958fbf8ebb13ea00fa4ce7f87e57710a0bc592208", size = 3418001, upload-time = "2024-12-20T12:57:16.556Z" }, - { url = "https://files.pythonhosted.org/packages/0c/ce/cfd16546c04ebbca1be80873b533c788cec76f7bfac231bfac6786047572/fastavro-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:567ff515f2a5d26d9674b31c95477f3e6022ec206124c62169bc2ffaf0889089", size = 487855, upload-time = "2024-12-20T12:57:19.335Z" }, - { url = "https://files.pythonhosted.org/packages/c9/c4/163cf154cc694c2dccc70cd6796db6214ac668a1260bf0310401dad188dc/fastavro-1.10.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:82263af0adfddb39c85f9517d736e1e940fe506dfcc35bc9ab9f85e0fa9236d8", size = 1022741, upload-time = "2024-12-20T12:57:21.055Z" }, - { url = "https://files.pythonhosted.org/packages/38/01/a24598f5f31b8582a92fe9c41bf91caeed50d5b5eaa7576e6f8b23cb488d/fastavro-1.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:566c193109ff0ff84f1072a165b7106c4f96050078a4e6ac7391f81ca1ef3efa", size = 3237421, upload-time = "2024-12-20T12:57:24.525Z" }, - { url = "https://files.pythonhosted.org/packages/a7/bf/08bcf65cfb7feb0e5b1329fafeb4a9b95b7b5ec723ba58c7dbd0d04ded34/fastavro-1.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e400d2e55d068404d9fea7c5021f8b999c6f9d9afa1d1f3652ec92c105ffcbdd", size = 3300222, upload-time = "2024-12-20T12:57:28.342Z" }, - { url = "https://files.pythonhosted.org/packages/53/4d/a6c25f3166328f8306ec2e6be1123ed78a55b8ab774a43a661124508881f/fastavro-1.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9b8227497f71565270f9249fc9af32a93644ca683a0167cfe66d203845c3a038", size = 
3233276, upload-time = "2024-12-20T12:57:32.303Z" }, - { url = "https://files.pythonhosted.org/packages/47/1c/b2b2ce2bf866a248ae23e96a87b3b8369427ff79be9112073039bee1d245/fastavro-1.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8e62d04c65461b30ac6d314e4197ad666371e97ae8cb2c16f971d802f6c7f514", size = 3388936, upload-time = "2024-12-20T12:57:34.778Z" }, - { url = "https://files.pythonhosted.org/packages/1f/2c/43927e22a2d57587b3aa09765098a6d833246b672d34c10c5f135414745a/fastavro-1.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:86baf8c9740ab570d0d4d18517da71626fe9be4d1142bea684db52bd5adb078f", size = 483967, upload-time = "2024-12-20T12:57:37.618Z" }, - { url = "https://files.pythonhosted.org/packages/4b/43/4f294f748b252eeaf07d3540b5936e80622f92df649ea42022d404d6285c/fastavro-1.10.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5bccbb6f8e9e5b834cca964f0e6ebc27ebe65319d3940b0b397751a470f45612", size = 1037564, upload-time = "2024-12-20T12:57:39.353Z" }, - { url = "https://files.pythonhosted.org/packages/64/ce/03f0bfd21ff2ebfc1520eb14101a3ecd9eda3da032ce966e5be3d724809c/fastavro-1.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0132f6b0b53f61a0a508a577f64beb5de1a5e068a9b4c0e1df6e3b66568eec4", size = 3024068, upload-time = "2024-12-20T12:57:41.712Z" }, - { url = "https://files.pythonhosted.org/packages/f8/70/97cb9512be1179b77e1cf382ffbfb5f7fe601237024f8a69d8b44ba1b576/fastavro-1.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca37a363b711202c6071a6d4787e68e15fa3ab108261058c4aae853c582339af", size = 3069625, upload-time = "2024-12-20T12:57:44.608Z" }, - { url = "https://files.pythonhosted.org/packages/5c/cb/a1e043319fde2a8b87dff2e0d7751b9de55fca705e1dbb183c805f55fe73/fastavro-1.10.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:cf38cecdd67ca9bd92e6e9ba34a30db6343e7a3bedf171753ee78f8bd9f8a670", size = 2968653, upload-time = "2024-12-20T12:57:47.522Z" }, - { url = "https://files.pythonhosted.org/packages/07/98/1cabfe975493dbc829af7aa8739f86313a54577290b5ae4ea07501fa6a59/fastavro-1.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f4dd10e0ed42982122d20cdf1a88aa50ee09e5a9cd9b39abdffb1aa4f5b76435", size = 3115893, upload-time = "2024-12-20T12:57:50.19Z" }, - { url = "https://files.pythonhosted.org/packages/eb/c1/057b6ad6c3d0cb7ab5f23ac44a10cf6676c6c59155c40f40ac93f3c5960a/fastavro-1.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:aaef147dc14dd2d7823246178fd06fc5e477460e070dc6d9e07dd8193a6bc93c", size = 546089, upload-time = "2024-12-20T12:57:57.741Z" }, -] - [[package]] name = "fastjsonschema" version = "2.21.1" @@ -1147,15 +954,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/89/ec/00d68c4ddfedfe64159999e5f8a98fb8442729a63e2077eb9dcd89623d27/filelock-3.17.0-py3-none-any.whl", hash = "sha256:533dc2f7ba78dc2f0f531fc6c4940addf7b70a481e269a5a3b93be94ffbe8338", size = 16164, upload-time = "2025-01-21T20:04:47.734Z" }, ] -[[package]] -name = "filetype" -version = "1.2.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/bb/29/745f7d30d47fe0f251d3ad3dc2978a23141917661998763bebb6da007eb1/filetype-1.2.0.tar.gz", hash = "sha256:66b56cd6474bf41d8c54660347d37afcc3f7d1970648de365c102ef77548aadb", size = 998020, upload-time = "2022-11-02T17:34:04.141Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/18/79/1b8fa1bb3568781e84c9200f951c735f3f157429f44be0495da55894d620/filetype-1.2.0-py2.py3-none-any.whl", hash = 
"sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25", size = 19970, upload-time = "2022-11-02T17:34:01.425Z" }, -] - [[package]] name = "fireworks-ai" version = "0.15.12" @@ -1307,17 +1105,16 @@ wheels = [ [[package]] name = "google-ai-generativelanguage" -version = "0.6.15" +version = "0.4.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "google-api-core", extra = ["grpc"] }, - { name = "google-auth" }, { name = "proto-plus" }, { name = "protobuf" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/11/d1/48fe5d7a43d278e9f6b5ada810b0a3530bbeac7ed7fcbcd366f932f05316/google_ai_generativelanguage-0.6.15.tar.gz", hash = "sha256:8f6d9dc4c12b065fe2d0289026171acea5183ebf2d0b11cefe12f3821e159ec3", size = 1375443, upload-time = "2025-01-13T21:50:47.459Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5c/d2/c784e8a922ae1606daa7b92f5c7be89b5ef3c05fc99931678a4372ab7c0b/google-ai-generativelanguage-0.4.0.tar.gz", hash = "sha256:c8199066c08f74c4e91290778329bb9f357ba1ea5d6f82de2bc0d10552bf4f8c", size = 565744, upload-time = "2023-12-11T15:16:40.993Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7c/a3/67b8a6ff5001a1d8864922f2d6488dc2a14367ceb651bc3f09a947f2f306/google_ai_generativelanguage-0.6.15-py3-none-any.whl", hash = "sha256:5a03ef86377aa184ffef3662ca28f19eeee158733e45d7947982eb953c6ebb6c", size = 1327356, upload-time = "2025-01-13T21:50:44.174Z" }, + { url = "https://files.pythonhosted.org/packages/40/c2/d28988d3cba74e712f47a498e2b3e3b58ac215106019bf5d8c20f8ab9822/google_ai_generativelanguage-0.4.0-py3-none-any.whl", hash = "sha256:e4c425376c1ee26c78acbc49a24f735f90ebfa81bf1a06495fae509a2433232c", size = 598714, upload-time = "2023-12-11T15:16:38.583Z" }, ] [[package]] @@ -1342,22 +1139,6 @@ grpc = [ { name = "grpcio-status" }, ] -[[package]] -name = "google-api-python-client" -version = "2.161.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "google-api-core" }, - { name = "google-auth" }, - { name = "google-auth-httplib2" }, - { name = "httplib2" }, - { name = "uritemplate" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/0a/50/c8d2d3c4e65e081c4c07b15e4fe35671676c5ecdb3674a167229e83ce49a/google_api_python_client-2.161.0.tar.gz", hash = "sha256:324c0cce73e9ea0a0d2afd5937e01b7c2d6a4d7e2579cdb6c384f9699d6c9f37", size = 12358839, upload-time = "2025-02-13T16:40:56.751Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9c/e8/ca1efe224166a4c77ac92b4314b90f2fb70fdde1f763c1613ba3b9f50752/google_api_python_client-2.161.0-py2.py3-none-any.whl", hash = "sha256:9476a5a4f200bae368140453df40f9cda36be53fa7d0e9a9aac4cdb859a26448", size = 12869974, upload-time = "2025-02-13T16:40:52.452Z" }, -] - [[package]] name = "google-auth" version = "2.38.0" @@ -1372,168 +1153,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9d/47/603554949a37bca5b7f894d51896a9c534b9eab808e2520a748e081669d0/google_auth-2.38.0-py2.py3-none-any.whl", hash = "sha256:e7dae6694313f434a2727bf2906f27ad259bae090d7aa896590d86feec3d9d4a", size = 210770, upload-time = "2025-01-23T01:05:26.572Z" }, ] -[[package]] -name = "google-auth-httplib2" -version = "0.2.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "google-auth" }, - { name = "httplib2" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/56/be/217a598a818567b28e859ff087f347475c807a5649296fb5a817c58dacef/google-auth-httplib2-0.2.0.tar.gz", hash = 
"sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05", size = 10842, upload-time = "2023-12-12T17:40:30.722Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/be/8a/fe34d2f3f9470a27b01c9e76226965863f153d5fbe276f83608562e49c04/google_auth_httplib2-0.2.0-py2.py3-none-any.whl", hash = "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d", size = 9253, upload-time = "2023-12-12T17:40:13.055Z" }, -] - -[[package]] -name = "google-cloud-aiplatform" -version = "1.80.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "docstring-parser" }, - { name = "google-api-core", extra = ["grpc"] }, - { name = "google-auth" }, - { name = "google-cloud-bigquery" }, - { name = "google-cloud-resource-manager" }, - { name = "google-cloud-storage" }, - { name = "packaging" }, - { name = "proto-plus" }, - { name = "protobuf" }, - { name = "pydantic" }, - { name = "shapely" }, - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/f0/88/d36384280cc4653e190a4a30025e66b285fbaef06024f68a4264cc588a33/google_cloud_aiplatform-1.80.0.tar.gz", hash = "sha256:bcaa4570a6fb56d3d29cb6b8f92588d4d1a1931de5f90cf07761853dab4c76fd", size = 8459480, upload-time = "2025-02-11T22:00:20.35Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b5/57/5e761e7a8b03efc8e7faa4c0b2775991177bbd4dae7a6656a60dfd092ca8/google_cloud_aiplatform-1.80.0-py2.py3-none-any.whl", hash = "sha256:45d2a170f22431dae977551eccb740400bdb899807d0c8d4c16c53b2c1dbc6a5", size = 7089949, upload-time = "2025-02-11T22:00:16.38Z" }, -] - -[[package]] -name = "google-cloud-bigquery" -version = "3.29.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "google-api-core", extra = ["grpc"] }, - { name = "google-auth" }, - { name = "google-cloud-core" }, - { name = "google-resumable-media" }, - { name = "packaging" }, - { name = "python-dateutil" }, - { name = "requests" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/21/36/87875a9775985849f18d4b3e320e4acdeb5232db3d49cfa6269e7c7867b8/google_cloud_bigquery-3.29.0.tar.gz", hash = "sha256:fafc2b455ffce3bcc6ce0e884184ef50b6a11350a83b91e327fadda4d5566e72", size = 467180, upload-time = "2025-01-21T18:15:06.788Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/68/60/9e1430f0fe17f8e8e931eff468021516f74f2573f261221529767dd59591/google_cloud_bigquery-3.29.0-py2.py3-none-any.whl", hash = "sha256:5453a4eabe50118254eda9778f3d7dad413490de5f7046b5e66c98f5a1580308", size = 244605, upload-time = "2025-01-21T18:15:03.862Z" }, -] - -[[package]] -name = "google-cloud-core" -version = "2.4.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "google-api-core" }, - { name = "google-auth" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/b8/1f/9d1e0ba6919668608570418a9a51e47070ac15aeff64261fb092d8be94c0/google-cloud-core-2.4.1.tar.gz", hash = "sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073", size = 35587, upload-time = "2023-12-07T21:12:32.127Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5e/0f/2e2061e3fbcb9d535d5da3f58cc8de4947df1786fe6a1355960feb05a681/google_cloud_core-2.4.1-py2.py3-none-any.whl", hash = "sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61", size = 29233, upload-time = "2023-12-07T21:12:29.894Z" }, -] - -[[package]] -name = "google-cloud-resource-manager" -version = "1.14.0" -source = { registry = 
"https://pypi.org/simple" } -dependencies = [ - { name = "google-api-core", extra = ["grpc"] }, - { name = "google-auth" }, - { name = "grpc-google-iam-v1" }, - { name = "proto-plus" }, - { name = "protobuf" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/cd/74/db14f34283b325b775b3287cd72ce8c43688bdea26801d02017a2ccded08/google_cloud_resource_manager-1.14.0.tar.gz", hash = "sha256:daa70a3a4704759d31f812ed221e3b6f7b660af30c7862e4a0060ea91291db30", size = 430148, upload-time = "2024-12-13T01:11:31.139Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/64/c4/2275ca35419f9a2ae66846f389490b356856bf55a9ad9f95a88399a89294/google_cloud_resource_manager-1.14.0-py2.py3-none-any.whl", hash = "sha256:4860c3ea9ace760b317ea90d4e27f1b32e54ededdcc340a7cb70c8ef238d8f7c", size = 384138, upload-time = "2024-12-13T01:11:29.651Z" }, -] - -[[package]] -name = "google-cloud-storage" -version = "2.19.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "google-api-core" }, - { name = "google-auth" }, - { name = "google-cloud-core" }, - { name = "google-crc32c" }, - { name = "google-resumable-media" }, - { name = "requests" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/36/76/4d965702e96bb67976e755bed9828fa50306dca003dbee08b67f41dd265e/google_cloud_storage-2.19.0.tar.gz", hash = "sha256:cd05e9e7191ba6cb68934d8eb76054d9be4562aa89dbc4236feee4d7d51342b2", size = 5535488, upload-time = "2024-12-05T01:35:06.49Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d5/94/6db383d8ee1adf45dc6c73477152b82731fa4c4a46d9c1932cc8757e0fd4/google_cloud_storage-2.19.0-py2.py3-none-any.whl", hash = "sha256:aeb971b5c29cf8ab98445082cbfe7b161a1f48ed275822f59ed3f1524ea54fba", size = 131787, upload-time = "2024-12-05T01:35:04.736Z" }, -] - -[[package]] -name = "google-crc32c" -version = "1.6.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/67/72/c3298da1a3773102359c5a78f20dae8925f5ea876e37354415f68594a6fb/google_crc32c-1.6.0.tar.gz", hash = "sha256:6eceb6ad197656a1ff49ebfbbfa870678c75be4344feb35ac1edf694309413dc", size = 14472, upload-time = "2024-09-03T11:44:35.585Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1a/be/d7846cb50e17bf72a70ea2d8159478ac5de0f1170b10cac279f50079e78d/google_crc32c-1.6.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:5bcc90b34df28a4b38653c36bb5ada35671ad105c99cfe915fb5bed7ad6924aa", size = 30267, upload-time = "2024-09-03T11:37:50.402Z" }, - { url = "https://files.pythonhosted.org/packages/84/3b/29cadae166132e4991087a49dc88906a1d3d5ec22b80f63bc4bc7b6e0431/google_crc32c-1.6.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:d9e9913f7bd69e093b81da4535ce27af842e7bf371cde42d1ae9e9bd382dc0e9", size = 30113, upload-time = "2024-09-03T11:49:24.674Z" }, - { url = "https://files.pythonhosted.org/packages/18/a9/49a7b2c4b7cc69d15778a820734f9beb647b1b4cf1a629ca43e3d3a54c70/google_crc32c-1.6.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a184243544811e4a50d345838a883733461e67578959ac59964e43cca2c791e7", size = 37702, upload-time = "2024-09-03T11:53:43.454Z" }, - { url = "https://files.pythonhosted.org/packages/4b/aa/52538cceddefc7c2d66c6bd59dfe67a50f65a4952f441f91049e4188eb57/google_crc32c-1.6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:236c87a46cdf06384f614e9092b82c05f81bd34b80248021f729396a78e55d7e", size = 32847, upload-time = "2024-09-03T11:53:44.646Z" }, - { url = 
"https://files.pythonhosted.org/packages/b1/2c/1928413d3faae74ae0d7bdba648cf36ed6b03328c562b47046af016b7249/google_crc32c-1.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebab974b1687509e5c973b5c4b8b146683e101e102e17a86bd196ecaa4d099fc", size = 37844, upload-time = "2024-09-03T11:53:45.814Z" }, - { url = "https://files.pythonhosted.org/packages/d6/f4/f62fa405e442b37c5676973b759dd6e56cd8d58a5c78662912456526f716/google_crc32c-1.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:50cf2a96da226dcbff8671233ecf37bf6e95de98b2a2ebadbfdf455e6d05df42", size = 33444, upload-time = "2024-09-03T11:44:30.301Z" }, - { url = "https://files.pythonhosted.org/packages/7d/14/ab47972ac79b6e7b03c8be3a7ef44b530a60e69555668dbbf08fc5692a98/google_crc32c-1.6.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:f7a1fc29803712f80879b0806cb83ab24ce62fc8daf0569f2204a0cfd7f68ed4", size = 30267, upload-time = "2024-09-03T11:39:16.928Z" }, - { url = "https://files.pythonhosted.org/packages/54/7d/738cb0d25ee55629e7d07da686decf03864a366e5e863091a97b7bd2b8aa/google_crc32c-1.6.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:40b05ab32a5067525670880eb5d169529089a26fe35dce8891127aeddc1950e8", size = 30112, upload-time = "2024-09-03T11:54:27.648Z" }, - { url = "https://files.pythonhosted.org/packages/3e/6d/33ca50cbdeec09c31bb5dac277c90994edee975662a4c890bda7ffac90ef/google_crc32c-1.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9e4b426c3702f3cd23b933436487eb34e01e00327fac20c9aebb68ccf34117d", size = 32861, upload-time = "2024-09-03T11:53:47.007Z" }, - { url = "https://files.pythonhosted.org/packages/67/1e/4870896fc81ec77b1b5ebae7fdd680d5a4d40e19a4b6d724032f996ca77a/google_crc32c-1.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51c4f54dd8c6dfeb58d1df5e4f7f97df8abf17a36626a217f169893d1d7f3e9f", size = 32490, upload-time = "2024-09-03T11:53:47.95Z" }, - { url = "https://files.pythonhosted.org/packages/00/9c/f5f5af3ddaa7a639d915f8f58b09bbb8d1db90ecd0459b62cd430eb9a4b6/google_crc32c-1.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:bb8b3c75bd157010459b15222c3fd30577042a7060e29d42dabce449c087f2b3", size = 33446, upload-time = "2024-09-03T11:44:31.876Z" }, - { url = "https://files.pythonhosted.org/packages/cf/41/65a91657d6a8123c6c12f9aac72127b6ac76dda9e2ba1834026a842eb77c/google_crc32c-1.6.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ed767bf4ba90104c1216b68111613f0d5926fb3780660ea1198fc469af410e9d", size = 30268, upload-time = "2024-09-03T11:39:27.716Z" }, - { url = "https://files.pythonhosted.org/packages/59/d0/ee743a267c7d5c4bb8bd865f7d4c039505f1c8a4b439df047fdc17be9769/google_crc32c-1.6.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:62f6d4a29fea082ac4a3c9be5e415218255cf11684ac6ef5488eea0c9132689b", size = 30113, upload-time = "2024-09-03T11:55:07.637Z" }, - { url = "https://files.pythonhosted.org/packages/25/53/e5e449c368dd26ade5fb2bb209e046d4309ed0623be65b13f0ce026cb520/google_crc32c-1.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c87d98c7c4a69066fd31701c4e10d178a648c2cac3452e62c6b24dc51f9fcc00", size = 32995, upload-time = "2024-09-03T11:53:49.129Z" }, - { url = "https://files.pythonhosted.org/packages/52/12/9bf6042d5b0ac8c25afed562fb78e51b0641474097e4139e858b45de40a5/google_crc32c-1.6.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd5e7d2445d1a958c266bfa5d04c39932dc54093fa391736dbfdb0f1929c1fb3", size = 32614, upload-time = "2024-09-03T11:53:50.158Z" 
}, - { url = "https://files.pythonhosted.org/packages/76/29/fc20f5ec36eac1eea0d0b2de4118c774c5f59c513f2a8630d4db6991f3e0/google_crc32c-1.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:7aec8e88a3583515f9e0957fe4f5f6d8d4997e36d0f61624e70469771584c760", size = 33445, upload-time = "2024-09-03T11:44:33.317Z" }, - { url = "https://files.pythonhosted.org/packages/3d/72/e7ac76dfd77dac46b0de63f0f117522e309f1bf79b29fc024b3570aa6f70/google_crc32c-1.6.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e2806553238cd076f0a55bddab37a532b53580e699ed8e5606d0de1f856b5205", size = 30267, upload-time = "2024-09-03T11:36:29.514Z" }, - { url = "https://files.pythonhosted.org/packages/75/d0/8ca5b4b7982b6671cb5caccef230deb52c24f80e022f1d4b85b704d83a6e/google_crc32c-1.6.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:bb0966e1c50d0ef5bc743312cc730b533491d60585a9a08f897274e57c3f70e0", size = 30107, upload-time = "2024-09-03T11:44:43.226Z" }, - { url = "https://files.pythonhosted.org/packages/04/b2/42487d0bfc032f4b35f0675efa0a2cf89ae6a46a5ae5b01786d225c37211/google_crc32c-1.6.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:386122eeaaa76951a8196310432c5b0ef3b53590ef4c317ec7588ec554fec5d2", size = 37547, upload-time = "2024-09-03T11:53:51.436Z" }, - { url = "https://files.pythonhosted.org/packages/0f/fc/f8b5ae0273d0ecd8773944a5204e744adbb5ef2e471caaec6d220c95c478/google_crc32c-1.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2952396dc604544ea7476b33fe87faedc24d666fb0c2d5ac971a2b9576ab871", size = 32686, upload-time = "2024-09-03T11:53:52.61Z" }, - { url = "https://files.pythonhosted.org/packages/38/27/d9370090b5e399e04a92d6c45d1f66f35cf87c6799c7777a3c250a36a9f1/google_crc32c-1.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35834855408429cecf495cac67ccbab802de269e948e27478b1e47dfb6465e57", size = 37690, upload-time = "2024-09-03T11:53:54.065Z" }, - { url = "https://files.pythonhosted.org/packages/64/64/e83a0c71e380af513ea9b3a23ecd8c84b055fb806e2d8ecea8453eb72eda/google_crc32c-1.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:d8797406499f28b5ef791f339594b0b5fdedf54e203b5066675c406ba69d705c", size = 33442, upload-time = "2024-09-03T11:44:34.733Z" }, - { url = "https://files.pythonhosted.org/packages/e7/ff/ed48d136b65ddc61f5aef6261c58cd817c8cd60640b16680e5419fb17018/google_crc32c-1.6.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48abd62ca76a2cbe034542ed1b6aee851b6f28aaca4e6551b5599b6f3ef175cc", size = 28057, upload-time = "2024-09-03T11:53:55.267Z" }, - { url = "https://files.pythonhosted.org/packages/14/fb/54deefe679b7d1c1cc81d83396fcf28ad1a66d213bddeb275a8d28665918/google_crc32c-1.6.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18e311c64008f1f1379158158bb3f0c8d72635b9eb4f9545f8cf990c5668e59d", size = 27866, upload-time = "2024-09-03T11:53:56.114Z" }, - { url = "https://files.pythonhosted.org/packages/b0/9e/5c01e8032d359fc78db914f32b7609ef64e63b894669536cd8b0d20409e1/google_crc32c-1.6.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05e2d8c9a2f853ff116db9706b4a27350587f341eda835f46db3c0a8c8ce2f24", size = 28051, upload-time = "2024-09-03T11:53:56.895Z" }, - { url = "https://files.pythonhosted.org/packages/50/1f/3b6c645c2d1d35e577404d25551c889a34b70de9ffc4ebd97141b16cedec/google_crc32c-1.6.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:91ca8145b060679ec9176e6de4f89b07363d6805bd4760631ef254905503598d", size = 27860, upload-time = "2024-09-03T11:53:58.111Z" }, -] - [[package]] name = "google-generativeai" -version = "0.8.4" +version = "0.3.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "google-ai-generativelanguage" }, { name = "google-api-core" }, - { name = "google-api-python-client" }, { name = "google-auth" }, { name = "protobuf" }, - { name = "pydantic" }, { name = "tqdm" }, { name = "typing-extensions" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/9b/b0/6c6af327a8a6ef3be6fe79be1d6f1e2914d6c363aa6b081b93396f4460a7/google_generativeai-0.8.4-py3-none-any.whl", hash = "sha256:e987b33ea6decde1e69191ddcaec6ef974458864d243de7191db50c21a7c5b82", size = 175409, upload-time = "2025-01-21T00:51:50.361Z" }, -] - -[[package]] -name = "google-resumable-media" -version = "2.7.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "google-crc32c" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/58/5a/0efdc02665dca14e0837b62c8a1a93132c264bd02054a15abb2218afe0ae/google_resumable_media-2.7.2.tar.gz", hash = "sha256:5280aed4629f2b60b847b0d42f9857fd4935c11af266744df33d8074cae92fe0", size = 2163099, upload-time = "2024-08-07T22:20:38.555Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/82/35/b8d3baf8c46695858cb9d8835a53baa1eeb9906ddaf2f728a5f5b640fd1e/google_resumable_media-2.7.2-py2.py3-none-any.whl", hash = "sha256:3ce7551e9fe6d99e9a126101d2536612bb73486721951e9562fee0f90c6ababa", size = 81251, upload-time = "2024-08-07T22:20:36.409Z" }, + { url = "https://files.pythonhosted.org/packages/b5/7f/35f89209487f8473edc9d2cecef894a54680cf666e32893a767d12a8dba9/google_generativeai-0.3.2-py3-none-any.whl", hash = "sha256:8761147e6e167141932dc14a7b7af08f2310dd56668a78d206c19bb8bd85bcd7", size = 146909, upload-time = "2023-12-20T06:03:42.203Z" }, ] [[package]] @@ -1548,11 +1181,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/89/30/2bd0eb03a7dee7727cd2ec643d1e992979e62d5e7443507381cce0455132/googleapis_common_protos-1.67.0-py2.py3-none-any.whl", hash = "sha256:579de760800d13616f51cf8be00c876f00a9f146d3e6510e19d1f4111758b741", size = 164985, upload-time = "2025-02-12T20:29:50.702Z" }, ] -[package.optional-dependencies] -grpc = [ - { name = "grpcio" }, -] - [[package]] name = "greenlet" version = "3.1.1" @@ -1616,7 +1244,7 @@ wheels = [ [[package]] name = "groq" -version = "0.18.0" +version = "0.30.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1626,23 +1254,9 @@ dependencies = [ { name = "sniffio" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/40/8c/e72c164474a88dfed6c7327ad53cb87ff11566b74b3a76d41dc7b94fc51c/groq-0.18.0.tar.gz", hash = "sha256:8e2ccfea406d68b3525af4b7c0e321fcb3d2a73fc60bb70b4156e6cd88c72f03", size = 117322, upload-time = "2025-02-05T01:30:14.551Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a9/b1/72ca20dc9b977b7f604648e8944c77b267bddeb90d8e16bda0cf0e397844/groq-0.30.0.tar.gz", hash = "sha256:919466e48fcbebef08fed3f71debb0f96b0ea8d2ec77842c384aa843019f6e2c", size = 134928, upload-time = "2025-07-11T20:28:36.583Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b0/6c/5a53d632b44ef7655ac8d9b34432e13160917f9307c94b1467efd34e336e/groq-0.18.0-py3-none-any.whl", hash = "sha256:81d5ac00057a45d8ce559d23ab5d3b3893011d1f12c35187ab35a9182d826ea6", size = 121911, upload-time = 
"2025-02-05T01:30:12.504Z" }, -] - -[[package]] -name = "grpc-google-iam-v1" -version = "0.14.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "googleapis-common-protos", extra = ["grpc"] }, - { name = "grpcio" }, - { name = "protobuf" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/47/2f/68e43b0e551974fa7dd18798a5974710586a72dc484ecaa2fc023d961342/grpc_google_iam_v1-0.14.0.tar.gz", hash = "sha256:c66e07aa642e39bb37950f9e7f491f70dad150ac9801263b42b2814307c2df99", size = 18327, upload-time = "2025-01-02T14:39:37.057Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/66/b4/ab54f7fda4af43ca5c094bc1d6341780fd669c44ae18952b5337029b1d98/grpc_google_iam_v1-0.14.0-py2.py3-none-any.whl", hash = "sha256:fb4a084b30099ba3ab07d61d620a0d4429570b13ff53bd37bac75235f98b7da4", size = 27276, upload-time = "2025-01-02T14:39:34.76Z" }, + { url = "https://files.pythonhosted.org/packages/19/b8/5b90edf9fbd795597220e3d1b5534d845e69a73ffe1fdeb967443ed2a6cf/groq-0.30.0-py3-none-any.whl", hash = "sha256:6d9609a7778ba56432f45c1bac21b005f02c6c0aca9c1c094e65536f162c1e83", size = 131056, upload-time = "2025-07-11T20:28:35.591Z" }, ] [[package]] @@ -1700,16 +1314,16 @@ wheels = [ [[package]] name = "grpcio-status" -version = "1.70.0" +version = "1.62.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "googleapis-common-protos" }, { name = "grpcio" }, { name = "protobuf" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/4c/d1/2397797c810020eac424e1aac10fbdc5edb6b9b4ad6617e0ed53ca907653/grpcio_status-1.70.0.tar.gz", hash = "sha256:0e7b42816512433b18b9d764285ff029bde059e9d41f8fe10a60631bd8348101", size = 13681, upload-time = "2025-01-23T18:00:33.637Z" } +sdist = { url = "https://files.pythonhosted.org/packages/7c/d7/013ef01c5a1c2fd0932c27c904934162f69f41ca0f28396d3ffe4d386123/grpcio-status-1.62.3.tar.gz", hash = "sha256:289bdd7b2459794a12cf95dc0cb727bd4a1742c37bd823f760236c937e53a485", size = 13063, upload-time = "2024-08-06T00:37:08.003Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e6/34/49e558040e069feebac70cdd1b605f38738c0277ac5d38e2ce3d03e1b1ec/grpcio_status-1.70.0-py3-none-any.whl", hash = "sha256:fc5a2ae2b9b1c1969cc49f3262676e6854aa2398ec69cb5bd6c47cd501904a85", size = 14429, upload-time = "2025-01-23T17:57:35.392Z" }, + { url = "https://files.pythonhosted.org/packages/90/40/972271de05f9315c0d69f9f7ebbcadd83bc85322f538637d11bb8c67803d/grpcio_status-1.62.3-py3-none-any.whl", hash = "sha256:f9049b762ba8de6b1086789d8315846e094edac2c50beaf462338b301a8fd4b8", size = 14448, upload-time = "2024-08-06T00:30:15.702Z" }, ] [[package]] @@ -1721,6 +1335,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259, upload-time = "2022-09-25T15:39:59.68Z" }, ] +[[package]] +name = "hf-xet" +version = "1.1.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ed/d4/7685999e85945ed0d7f0762b686ae7015035390de1161dcea9d5276c134c/hf_xet-1.1.5.tar.gz", hash = "sha256:69ebbcfd9ec44fdc2af73441619eeb06b94ee34511bbcf57cd423820090f5694", size = 495969, upload-time = "2025-06-20T21:48:38.007Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/89/a1119eebe2836cb25758e7661d6410d3eae982e2b5e974bcc4d250be9012/hf_xet-1.1.5-cp37-abi3-macosx_10_12_x86_64.whl", 
hash = "sha256:f52c2fa3635b8c37c7764d8796dfa72706cc4eded19d638331161e82b0792e23", size = 2687929, upload-time = "2025-06-20T21:48:32.284Z" }, + { url = "https://files.pythonhosted.org/packages/de/5f/2c78e28f309396e71ec8e4e9304a6483dcbc36172b5cea8f291994163425/hf_xet-1.1.5-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:9fa6e3ee5d61912c4a113e0708eaaef987047616465ac7aa30f7121a48fc1af8", size = 2556338, upload-time = "2025-06-20T21:48:30.079Z" }, + { url = "https://files.pythonhosted.org/packages/6d/2f/6cad7b5fe86b7652579346cb7f85156c11761df26435651cbba89376cd2c/hf_xet-1.1.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc874b5c843e642f45fd85cda1ce599e123308ad2901ead23d3510a47ff506d1", size = 3102894, upload-time = "2025-06-20T21:48:28.114Z" }, + { url = "https://files.pythonhosted.org/packages/d0/54/0fcf2b619720a26fbb6cc941e89f2472a522cd963a776c089b189559447f/hf_xet-1.1.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dbba1660e5d810bd0ea77c511a99e9242d920790d0e63c0e4673ed36c4022d18", size = 3002134, upload-time = "2025-06-20T21:48:25.906Z" }, + { url = "https://files.pythonhosted.org/packages/f3/92/1d351ac6cef7c4ba8c85744d37ffbfac2d53d0a6c04d2cabeba614640a78/hf_xet-1.1.5-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ab34c4c3104133c495785d5d8bba3b1efc99de52c02e759cf711a91fd39d3a14", size = 3171009, upload-time = "2025-06-20T21:48:33.987Z" }, + { url = "https://files.pythonhosted.org/packages/c9/65/4b2ddb0e3e983f2508528eb4501288ae2f84963586fbdfae596836d5e57a/hf_xet-1.1.5-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:83088ecea236d5113de478acb2339f92c95b4fb0462acaa30621fac02f5a534a", size = 3279245, upload-time = "2025-06-20T21:48:36.051Z" }, + { url = "https://files.pythonhosted.org/packages/f0/55/ef77a85ee443ae05a9e9cba1c9f0dd9241eb42da2aeba1dc50f51154c81a/hf_xet-1.1.5-cp37-abi3-win_amd64.whl", hash = "sha256:73e167d9807d166596b4b2f0b585c6d5bd84a26dea32843665a8b58f6edba245", size = 2738931, upload-time = "2025-06-20T21:48:39.482Z" }, +] + [[package]] name = "httpcore" version = "1.0.7" @@ -1734,18 +1363,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd", size = 78551, upload-time = "2024-11-15T12:30:45.782Z" }, ] -[[package]] -name = "httplib2" -version = "0.22.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pyparsing" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/3d/ad/2371116b22d616c194aa25ec410c9c6c37f23599dcd590502b74db197584/httplib2-0.22.0.tar.gz", hash = "sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81", size = 351116, upload-time = "2023-03-21T22:29:37.214Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a8/6c/d2fbdaaa5959339d53ba38e94c123e4e84b8fbc4b84beb0e70d7c1608486/httplib2-0.22.0-py3-none-any.whl", hash = "sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc", size = 96854, upload-time = "2023-03-21T22:29:35.683Z" }, -] - [[package]] name = "httpx" version = "0.28.1" @@ -1787,20 +1404,21 @@ wheels = [ [[package]] name = "huggingface-hub" -version = "0.28.1" +version = "0.34.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock" }, { name = "fsspec" }, + { name = "hf-xet", marker = "platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine 
== 'x86_64'" }, { name = "packaging" }, { name = "pyyaml" }, { name = "requests" }, { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e7/ce/a734204aaae6c35a22f9956ebcd8d8708ae5b842e15d6f42bd6f49e634a4/huggingface_hub-0.28.1.tar.gz", hash = "sha256:893471090c98e3b6efbdfdacafe4052b20b84d59866fb6f54c33d9af18c303ae", size = 387074, upload-time = "2025-01-30T13:45:41.519Z" } +sdist = { url = "https://files.pythonhosted.org/packages/91/b4/e6b465eca5386b52cf23cb6df8644ad318a6b0e12b4b96a7e0be09cbfbcc/huggingface_hub-0.34.3.tar.gz", hash = "sha256:d58130fd5aa7408480681475491c0abd7e835442082fbc3ef4d45b6c39f83853", size = 456800, upload-time = "2025-07-29T08:38:53.885Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ea/da/6c2bea5327b640920267d3bf2c9fc114cfbd0a5de234d81cda80cc9e33c8/huggingface_hub-0.28.1-py3-none-any.whl", hash = "sha256:aa6b9a3ffdae939b72c464dbb0d7f99f56e649b55c3d52406f49e0a5a620c0a7", size = 464068, upload-time = "2025-01-30T13:45:39.514Z" }, + { url = "https://files.pythonhosted.org/packages/59/a8/4677014e771ed1591a87b63a2392ce6923baf807193deef302dcfde17542/huggingface_hub-0.34.3-py3-none-any.whl", hash = "sha256:5444550099e2d86e68b2898b09e85878fbd788fc2957b506c6a79ce060e39492", size = 558847, upload-time = "2025-07-29T08:38:51.904Z" }, ] [[package]] @@ -1817,7 +1435,7 @@ name = "importlib-metadata" version = "8.6.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "zipp" }, + { name = "zipp", marker = "python_full_version < '3.10'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/33/08/c1395a292bb23fd03bdf572a1357c5a733d3eecbab877641ceacab23db6e/importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580", size = 55767, upload-time = "2025-01-20T22:21:30.429Z" } wheels = [ @@ -1935,15 +1553,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/22/2d/9c0b76f2f9cc0ebede1b9371b6f317243028ed60b90705863d493bae622e/ipywidgets-8.1.5-py3-none-any.whl", hash = "sha256:3290f526f87ae6e77655555baba4f36681c555b8bdbbff430b70e52c34c86245", size = 139767, upload-time = "2024-08-22T12:19:49.494Z" }, ] -[[package]] -name = "isodate" -version = "0.7.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/54/4d/e940025e2ce31a8ce1202635910747e5a87cc3a6a6bb2d00973375014749/isodate-0.7.2.tar.gz", hash = "sha256:4cd1aa0f43ca76f4a6c6c0292a85f40b35ec2e43e315b59f06e6d32171a953e6", size = 29705, upload-time = "2024-10-08T23:04:11.5Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/15/aa/0aca39a37d3c7eb941ba736ede56d689e7be91cab5d9ca846bde3999eba6/isodate-0.7.2-py3-none-any.whl", hash = "sha256:28009937d8031054830160fce6d409ed342816b543597cece116d966c6d99e15", size = 22320, upload-time = "2024-10-08T23:04:09.501Z" }, -] - [[package]] name = "isoduration" version = "20.11.0" @@ -2051,24 +1660,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/32/b7/a3cde72c644fd1caf9da07fb38cf2c130f43484d8f91011940b7c4f42c8f/jiter-0.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:1c0dfbd1be3cbefc7510102370d86e35d1d53e5a93d48519688b1bf0f761160a", size = 207527, upload-time = "2024-12-09T18:11:06.549Z" }, ] -[[package]] -name = "jmespath" -version = "1.0.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/00/2a/e867e8531cf3e36b41201936b7fa7ba7b5702dbef42922193f05c8976cd6/jmespath-1.0.1.tar.gz", hash = 
"sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe", size = 25843, upload-time = "2022-06-17T18:00:12.224Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", size = 20256, upload-time = "2022-06-17T18:00:10.251Z" }, -] - -[[package]] -name = "joblib" -version = "1.4.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/64/33/60135848598c076ce4b231e1b1895170f45fbcaeaa2c9d5e38b04db70c35/joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e", size = 2116621, upload-time = "2024-05-02T12:15:05.765Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/91/29/df4b9b42f2be0b623cbd5e2140cafcaa2bef0759a00b7b70104dcfe2fb51/joblib-1.4.2-py3-none-any.whl", hash = "sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6", size = 301817, upload-time = "2024-05-02T12:15:00.765Z" }, -] - [[package]] name = "json5" version = "0.10.0" @@ -2346,7 +1937,7 @@ wheels = [ [[package]] name = "langchain" -version = "0.3.27" +version = "0.4.0.dev0" source = { editable = "." } dependencies = [ { name = "async-timeout", marker = "python_full_version < '3.11'" }, @@ -2363,16 +1954,6 @@ dependencies = [ anthropic = [ { name = "langchain-anthropic" }, ] -aws = [ - { name = "langchain-aws" }, -] -azure-ai = [ - { name = "langchain-azure-ai", version = "0.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.13'" }, - { name = "langchain-azure-ai", version = "0.1.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.13'" }, -] -cohere = [ - { name = "langchain-cohere" }, -] community = [ { name = "langchain-community" }, ] @@ -2385,9 +1966,6 @@ fireworks = [ google-genai = [ { name = "langchain-google-genai" }, ] -google-vertexai = [ - { name = "langchain-google-vertexai" }, -] groq = [ { name = "langchain-groq" }, ] @@ -2409,9 +1987,6 @@ perplexity = [ together = [ { name = "langchain-together" }, ] -xai = [ - { name = "langchain-xai" }, -] [package.dev-dependencies] codespell = [ @@ -2487,15 +2062,11 @@ typing = [ requires-dist = [ { name = "async-timeout", marker = "python_full_version < '3.11'", specifier = ">=4.0.0,<5.0.0" }, { name = "langchain-anthropic", marker = "extra == 'anthropic'" }, - { name = "langchain-aws", marker = "extra == 'aws'" }, - { name = "langchain-azure-ai", marker = "extra == 'azure-ai'" }, - { name = "langchain-cohere", marker = "extra == 'cohere'" }, { name = "langchain-community", marker = "extra == 'community'" }, { name = "langchain-core", editable = "../core" }, { name = "langchain-deepseek", marker = "extra == 'deepseek'" }, { name = "langchain-fireworks", marker = "extra == 'fireworks'" }, { name = "langchain-google-genai", marker = "extra == 'google-genai'" }, - { name = "langchain-google-vertexai", marker = "extra == 'google-vertexai'" }, { name = "langchain-groq", marker = "extra == 'groq'" }, { name = "langchain-huggingface", marker = "extra == 'huggingface'" }, { name = "langchain-mistralai", marker = "extra == 'mistralai'" }, @@ -2504,14 +2075,13 @@ requires-dist = [ { name = "langchain-perplexity", marker = "extra == 'perplexity'" }, { name = "langchain-text-splitters", editable = "../text-splitters" }, { name = "langchain-together", marker = "extra == 
'together'" }, - { name = "langchain-xai", marker = "extra == 'xai'" }, { name = "langsmith", specifier = ">=0.1.17" }, { name = "pydantic", specifier = ">=2.7.4,<3.0.0" }, { name = "pyyaml", specifier = ">=5.3" }, { name = "requests", specifier = ">=2,<3" }, { name = "sqlalchemy", specifier = ">=1.4,<3" }, ] -provides-extras = ["community", "anthropic", "openai", "azure-ai", "cohere", "google-vertexai", "google-genai", "fireworks", "ollama", "together", "mistralai", "huggingface", "groq", "aws", "deepseek", "xai", "perplexity"] +provides-extras = ["community", "anthropic", "openai", "google-genai", "fireworks", "ollama", "together", "mistralai", "huggingface", "groq", "deepseek", "perplexity"] [package.metadata.requires-dev] codespell = [{ name = "codespell", specifier = ">=2.2.0,<3.0.0" }] @@ -2594,91 +2164,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f3/b3/111e1f41b0044687ec0c34c921ad52d33d2802282b1bc45343d5dd923fb6/langchain_anthropic-0.3.7-py3-none-any.whl", hash = "sha256:adec0a1daabd3c25249753c6cd625654917fb9e3feee68e72c7dc3f4449c0f3c", size = 22998, upload-time = "2025-02-06T22:12:51.94Z" }, ] -[[package]] -name = "langchain-aws" -version = "0.2.12" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "boto3" }, - { name = "langchain-core" }, - { name = "numpy", version = "1.26.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.13'" }, - { name = "numpy", version = "2.2.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.13'" }, - { name = "pydantic" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/06/e8/7b6adbf0d922889da98905a6052819d17dd6501e370b6f115e0e4e074452/langchain_aws-0.2.12.tar.gz", hash = "sha256:98122987923b5f7933d62611efa632465de1ba335736e67ff09a6f9f18f285f8", size = 80069, upload-time = "2025-01-31T22:57:10.368Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9f/d9/34239d9d6b92dd87fb36d48435639a3862d3decc124fd6419d36144b8739/langchain_aws-0.2.12-py3-none-any.whl", hash = "sha256:1ed3713aa9cd68016b29af1f6eba74c705d2d3e4b6b1fd92cd359cb69eb06851", size = 96705, upload-time = "2025-01-31T22:57:09.219Z" }, -] - -[[package]] -name = "langchain-azure-ai" -version = "0.1.0" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.13' and platform_python_implementation == 'PyPy'", - "python_full_version >= '3.13' and platform_python_implementation != 'PyPy'", -] -dependencies = [ - { name = "aiohttp", marker = "python_full_version >= '3.13'" }, - { name = "azure-ai-inference", extra = ["opentelemetry"], marker = "python_full_version >= '3.13'" }, - { name = "azure-core", marker = "python_full_version >= '3.13'" }, - { name = "azure-identity", marker = "python_full_version >= '3.13'" }, - { name = "langchain-core", marker = "python_full_version >= '3.13'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/21/f5/daebf13516db60d6c215af51721af96b0c5294680a06b97e805ddde8ca13/langchain_azure_ai-0.1.0.tar.gz", hash = "sha256:34f7f7cbb1978e648028f33cdf9dd58147f97e0caa857bf6c3c8976807d05539", size = 13617, upload-time = "2024-12-16T20:40:45.684Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5e/75/49d2778a8be5deccfca16c6354e958300e02acbd233eaf5606033ab4d7af/langchain_azure_ai-0.1.0-py3-none-any.whl", hash = "sha256:2f1773e431dd230e53592e15807f3ff6a34f1f637f79f5126d91276ce10b0655", size = 17269, upload-time = "2024-12-16T20:40:44.415Z" }, -] - 
-[[package]] -name = "langchain-azure-ai" -version = "0.1.2" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation == 'PyPy'", - "python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation != 'PyPy'", - "python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation == 'PyPy'", - "python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation != 'PyPy'", - "python_full_version == '3.11.*' and platform_python_implementation == 'PyPy'", - "python_full_version == '3.11.*' and platform_python_implementation != 'PyPy'", - "python_full_version == '3.10.*' and platform_python_implementation == 'PyPy'", - "python_full_version == '3.10.*' and platform_python_implementation != 'PyPy'", - "python_full_version < '3.10' and platform_python_implementation == 'PyPy'", - "python_full_version < '3.10' and platform_python_implementation != 'PyPy'", -] -dependencies = [ - { name = "aiohttp", marker = "python_full_version < '3.13'" }, - { name = "azure-ai-inference", extra = ["opentelemetry"], marker = "python_full_version < '3.13'" }, - { name = "azure-core", marker = "python_full_version < '3.13'" }, - { name = "azure-cosmos", marker = "python_full_version < '3.13'" }, - { name = "azure-identity", marker = "python_full_version < '3.13'" }, - { name = "langchain-core", marker = "python_full_version < '3.13'" }, - { name = "langchain-openai", marker = "python_full_version < '3.13'" }, - { name = "numpy", version = "1.26.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.13'" }, - { name = "pymongo", marker = "python_full_version < '3.13'" }, - { name = "simsimd", marker = "python_full_version < '3.13'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/43/a8/e6e8b15b909c6941c5b946e4072cb2250d7ee529559ff3aba46c26dfe072/langchain_azure_ai-0.1.2.tar.gz", hash = "sha256:9be07aa32d2b7a22937f5dd9c9ea4fa022315f03a883bd3bd8d0b6d028700421", size = 36113, upload-time = "2025-02-25T17:32:17.846Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ce/ad/d09e25365255cf961cd33a14c7d53bfa2360c90194baab656f712a2bd0f5/langchain_azure_ai-0.1.2-py3-none-any.whl", hash = "sha256:45ca6d4897661d36a935bfb34cf3786477932a4f38e728f77507e52f223b1d0a", size = 44135, upload-time = "2025-02-25T17:32:15.003Z" }, -] - -[[package]] -name = "langchain-cohere" -version = "0.4.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "cohere" }, - { name = "langchain-community" }, - { name = "langchain-core" }, - { name = "pydantic" }, - { name = "types-pyyaml" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/24/c8/5bbc627c88231317f2c6f463a44b40fb1cb2cf0a0f61cdf4e2ee95f9eb28/langchain_cohere-0.4.2.tar.gz", hash = "sha256:96971055a806b63e388d08e4663fc16957cf07ad234a1524ef31f039790bb35b", size = 36391, upload-time = "2025-01-24T17:04:12.289Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f4/f0/0f2e4dc99c52d458cd23f7d8d18bb25280ff49529a4fd34e59a92faf6d46/langchain_cohere-0.4.2-py3-none-any.whl", hash = "sha256:1d4e5f9212daa64d997785de7a79e3e1c3971acba1d987f1a99a9a1be6acfb40", size = 42205, upload-time = "2025-01-24T17:04:10.322Z" }, -] - [[package]] name = "langchain-community" version = "0.3.17" @@ -2705,7 +2190,7 @@ wheels = [ [[package]] name = "langchain-core" -version = "0.3.72" 
+version = "0.4.0.dev0" source = { editable = "../core" } dependencies = [ { name = "jsonpatch" }, @@ -2776,7 +2261,7 @@ wheels = [ [[package]] name = "langchain-fireworks" -version = "0.2.7" +version = "0.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -2785,75 +2270,54 @@ dependencies = [ { name = "openai" }, { name = "requests" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/cd/3f/442676108dff48962c6747381a1e8c388f2599d37a3cbae72ecc939b2b34/langchain_fireworks-0.2.7.tar.gz", hash = "sha256:cc6a04d5d5735bdea642de524cb4bca544b8ed8ca867a1d64ec47ea14367210a", size = 16887, upload-time = "2025-01-30T00:24:06.969Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/80/78ea4a04b1170cfa7564557808fd80e4c6f812cb5655c95a0374ca79c7ac/langchain_fireworks-0.3.0.tar.gz", hash = "sha256:09db8a06cd50df07068c07c4862e87d70b0da0f7d4e1b06f062c292af61c1433", size = 20900, upload-time = "2025-04-23T14:14:32.438Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/98/cc/dbc2fe087a0c2b927cb213536a9c81c1bfbdf15b240f744abb8751dc1323/langchain_fireworks-0.2.7-py3-none-any.whl", hash = "sha256:15fd8b21f69ac45728efb058a08f58ab013f5212601d14d12fba793f6e7cbb8a", size = 17497, upload-time = "2025-01-30T00:24:04.963Z" }, + { url = "https://files.pythonhosted.org/packages/05/68/79696d5e1573a674141a44c9c59c04629e1ba25673d64a7b03f3843ae162/langchain_fireworks-0.3.0-py3-none-any.whl", hash = "sha256:ef2ea22f8cae3e654f0e1d3eb3a60c5fcd4a914643ab324507997f89f5831166", size = 17770, upload-time = "2025-04-23T14:14:31.373Z" }, ] [[package]] name = "langchain-google-genai" -version = "2.0.9" +version = "0.0.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "filetype" }, { name = "google-generativeai" }, { name = "langchain-core" }, - { name = "pydantic" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fb/1f/2a275165ba5a455147472682db71ca4cc45e414cfb37c1245efe283d4f43/langchain_google_genai-2.0.9.tar.gz", hash = "sha256:65205089da1f72688a0ed6e7c6914af308b6514ab8038fd8126ecb20f1df234c", size = 37437, upload-time = "2025-01-18T06:36:58.895Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/55/9c6ca444734cd684baee5205d1ed6b02103e24de34e31b7230fbc1524f9f/langchain_google_genai-0.0.1.tar.gz", hash = "sha256:97e1d31c4eac0049e9c7f5cd82f5b96289708c249bc6f4a85e43b1f47c22d54b", size = 8299, upload-time = "2023-12-13T16:22:36.14Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c2/6a/b6cbd72b820d31cf35fe677cc844220aa82a09b92167d43ade815acdec4d/langchain_google_genai-2.0.9-py3-none-any.whl", hash = "sha256:48d8c78c42048d54f40dff333db9d359746644e0feb0e08b5eabdf34ad7149ca", size = 41698, upload-time = "2025-01-18T06:36:56.597Z" }, -] - -[[package]] -name = "langchain-google-vertexai" -version = "2.0.13" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "google-cloud-aiplatform" }, - { name = "google-cloud-storage" }, - { name = "httpx" }, - { name = "httpx-sse" }, - { name = "langchain-core" }, - { name = "pydantic" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/24/e5/6e7d9d257863e9d7293f69d3f58f056736833e6a70ed6e88db121125c1df/langchain_google_vertexai-2.0.13.tar.gz", hash = "sha256:7edc4de0ef7e8caf6cf075f31ebe29f17d99c6c739ac43ed5b9df4f8c1c4c1fb", size = 78204, upload-time = "2025-02-03T21:14:05.919Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/e7/8b/9729e14d03c1e01779187f488d2950cc9a8ce29f0366d106b8d444f0b46b/langchain_google_vertexai-2.0.13-py3-none-any.whl", hash = "sha256:eba80d6a2e2e1cbc1973843822df584e5d39a007851d5d5aebd28093a4ea7bdc", size = 93318, upload-time = "2025-02-03T21:14:04.154Z" }, + { url = "https://files.pythonhosted.org/packages/4f/0f/56fb6c499484e4a60616a5f590470f2c43eae15e1e55b82b4bc8baa265d4/langchain_google_genai-0.0.1-py3-none-any.whl", hash = "sha256:5eee26d10cb1cdeddaac1180c984ed3bcf73e72d11a458c3eba1de457cf11298", size = 8507, upload-time = "2023-12-13T16:22:35.077Z" }, ] [[package]] name = "langchain-groq" -version = "0.2.4" +version = "0.3.7" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "groq" }, { name = "langchain-core" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/2f/38/6073ce095072830284a7155d64a685c45138f427282a4d0721d86e7a09eb/langchain_groq-0.2.4.tar.gz", hash = "sha256:90d76ca59679c021858112440de732de3d780675cc2dbd9572cbf011c0e2afe8", size = 15180, upload-time = "2025-01-29T22:29:39.149Z" } +sdist = { url = "https://files.pythonhosted.org/packages/54/9d/1bbe0b8d8368ac1df2eabd14be93e052cddac969573b910db4457237bce2/langchain_groq-0.3.7.tar.gz", hash = "sha256:4d799aa565f31e51c632f0f20c588f21c5a0e6c6dd2303efaef43351f9b41bd4", size = 25004, upload-time = "2025-08-05T19:20:45.436Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bb/e7/aec777a50942059be78b0677135f866a2ead2447e308e3f075f5ff9a93a6/langchain_groq-0.2.4-py3-none-any.whl", hash = "sha256:ef02923b780486e1f9799ad156a2a6078581e43c4b80a334a88589f99f977eaf", size = 14828, upload-time = "2025-01-29T22:29:37.237Z" }, + { url = "https://files.pythonhosted.org/packages/9d/31/5f32d15105d0160c3753113d5cca5787236c6d2717d25126597d8adc39dd/langchain_groq-0.3.7-py3-none-any.whl", hash = "sha256:2e13870534c032fe1facde3a6ab1fb59af6f1e7a2763125ad549d8e403fc9d00", size = 16418, upload-time = "2025-08-05T19:20:44.4Z" }, ] [[package]] name = "langchain-huggingface" -version = "0.1.2" +version = "0.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "huggingface-hub" }, { name = "langchain-core" }, - { name = "sentence-transformers" }, { name = "tokenizers" }, - { name = "transformers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a1/0f/8277d993d5307f06523e72c9bc8a505ed028f7b1c1e5276d8e89044b6036/langchain_huggingface-0.1.2.tar.gz", hash = "sha256:4a66d5c449298fd353bd84c9ed01f9bf4303bf2e4ffce14aab8c55c584eee57c", size = 16129, upload-time = "2024-10-31T18:56:53.894Z" } +sdist = { url = "https://files.pythonhosted.org/packages/3f/15/f832ae485707bf52f9a8f055db389850de06c46bc6e3e4420a0ef105fbbf/langchain_huggingface-0.3.1.tar.gz", hash = "sha256:0a145534ce65b5a723c8562c456100a92513bbbf212e6d8c93fdbae174b41341", size = 25154, upload-time = "2025-07-22T17:22:26.77Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9d/f8/77a303ddc492f6eed8bf0979f2bc6db4fa6eb1089c5e9f0f977dd87bc9c2/langchain_huggingface-0.1.2-py3-none-any.whl", hash = "sha256:7de5cfcae32bfb6a99c084fc16176f02583a4f8d94febb6bb45bed5b34699174", size = 21251, upload-time = "2024-10-31T18:56:52.21Z" }, + { url = "https://files.pythonhosted.org/packages/bf/26/7c5d4b4d3e1a7385863acc49fb6f96c55ccf941a750991d18e3f6a69a14a/langchain_huggingface-0.3.1-py3-none-any.whl", hash = "sha256:de10a692dc812885696fbaab607d28ac86b833b0f305bccd5d82d60336b07b7d", size = 27609, upload-time = "2025-07-22T17:22:25.282Z" }, ] [[package]] name = 
"langchain-mistralai" -version = "0.2.6" +version = "0.2.11" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx" }, @@ -2862,27 +2326,27 @@ dependencies = [ { name = "pydantic" }, { name = "tokenizers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d6/16/b21233e816a3f7eddbca3cdbec673b47361af2d75db0dd511e34ce690ec4/langchain_mistralai-0.2.6.tar.gz", hash = "sha256:a861c6a5858a933ce63abb2cf9ea59e91d87103da2ddf08694a69a26be339d35", size = 15022, upload-time = "2025-01-29T22:27:52.89Z" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/f5/7afd7dcd252e5abaaf3a13849733a32aaf0b5f2290dc62bd2c72afdc33e4/langchain_mistralai-0.2.11.tar.gz", hash = "sha256:0816bb9972c9e407d9eca567ad16095ec4f0f5bb9094890692ceb149aa72c71e", size = 21718, upload-time = "2025-07-07T19:38:04.35Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/00/4f/bc04e96e95fb39bcb7afdad95a66ccb2037ca3d5a6e8c76cfd8f46e87f0f/langchain_mistralai-0.2.6-py3-none-any.whl", hash = "sha256:a7fde8094641e89309e2ee0db57fa06a1270b461c19d44e5d93e518270bbafee", size = 15768, upload-time = "2025-01-29T22:27:51.945Z" }, + { url = "https://files.pythonhosted.org/packages/48/35/cf2e31b5af5b6798437bb7c92b13d0ed7c4bdde87034227b49be907fe272/langchain_mistralai-0.2.11-py3-none-any.whl", hash = "sha256:6940b551f8e63ca9163e8f5a156aab6814238f9b19302405b6af9d8703e7f762", size = 16560, upload-time = "2025-07-07T19:38:03.272Z" }, ] [[package]] name = "langchain-ollama" -version = "0.2.3" +version = "0.3.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "langchain-core" }, { name = "ollama" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/47/76/dfcf5f09ba2f7651161110ba5ddb621e92634f9fb34e4ad919ae0428205a/langchain_ollama-0.2.3.tar.gz", hash = "sha256:d13fe8735176b652ca6e6656d7902c1265e8c0601097569f7c95433f3d034b38", size = 17231, upload-time = "2025-01-29T22:22:40.062Z" } +sdist = { url = "https://files.pythonhosted.org/packages/82/67/93429a78d6fd40e2addf27e881db37e7f0076d712ffe9759ca0d5e10910e/langchain_ollama-0.3.6.tar.gz", hash = "sha256:4270c4b30b3f3d10850cb9a1183b8c77d616195e0d9717ac745ef7f7f6cc2b6e", size = 30479, upload-time = "2025-07-22T17:26:59.605Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/85/72/f7301340a544fea1c139c64590044f0beb5bfba889b8f6e50766b933e660/langchain_ollama-0.2.3-py3-none-any.whl", hash = "sha256:c47700ca68b013358b1e954493ecafb3bd10fa2cda71a9f15ba7897587a9aab2", size = 19543, upload-time = "2025-01-29T22:22:38.96Z" }, + { url = "https://files.pythonhosted.org/packages/f3/c5/1e559f5b43d62850ea2b44097afc944f38894eac00e7feef3b42f0428916/langchain_ollama-0.3.6-py3-none-any.whl", hash = "sha256:b339bd3fcf913b8d606ad426ef39e7122695532507fcd85aa96271b3f33dc3df", size = 24535, upload-time = "2025-07-22T17:26:58.556Z" }, ] [[package]] name = "langchain-openai" -version = "0.3.28" +version = "0.4.0.dev0" source = { editable = "../partners/openai" } dependencies = [ { name = "langchain-core" }, @@ -3035,32 +2499,17 @@ typing = [ [[package]] name = "langchain-together" -version = "0.3.0" +version = "0.0.2.post1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, { name = "langchain-core" }, - { name = "langchain-openai" }, { name = "requests" }, + { name = "together" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/24/c4/64b92524121eaf4e805ade7ea68e71d52eab5e8fdad9b6b8e62f5a99eff4/langchain_together-0.3.0.tar.gz", hash = 
"sha256:c8a96377e49c065526435f766c6e1c7da3f7d054361326f079de8bd368ea76f2", size = 10247, upload-time = "2025-01-10T17:06:08.729Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f7/5c/d52e38ca0d7d1fa3d96058825e66e70685d69dbdddbc2bddb74491961e4f/langchain_together-0.0.2.post1.tar.gz", hash = "sha256:60eaed0267d86edad2dde1f1dd28dea3aeed126c8f322eea986d46cd2c252394", size = 5124, upload-time = "2024-01-12T18:14:26.115Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a8/69/b3cbcf5b43acbc098c012ef75035fb0dc1e0f227f5161329ef9884a25ba4/langchain_together-0.3.0-py3-none-any.whl", hash = "sha256:4dcb4f6858c910c23d2268da1ed5f54e8cd01224ecf086dc7a8adbacdc6cb686", size = 12338, upload-time = "2025-01-10T17:06:06.678Z" }, -] - -[[package]] -name = "langchain-xai" -version = "0.2.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "aiohttp" }, - { name = "langchain-core" }, - { name = "langchain-openai" }, - { name = "requests" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/71/44/873149f06d04156bd56d8ce24b0c6c0f6f4b145ff4e70801c6b89d580492/langchain_xai-0.2.0.tar.gz", hash = "sha256:aaf25f7e587bf6a043303e88ab4e9a6dd9319cf2a4a8c4d06c4546f0210063f8", size = 5645, upload-time = "2025-01-10T16:52:57.596Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/4f/7b/c0e2025d7ed7c6c558020b942f5785db76fa66ade9cf77bbc18e63de3011/langchain_xai-0.2.0-py3-none-any.whl", hash = "sha256:541c2fe7f83da82ccd86930fd27c0afc56517243996cbe9fa81ea96321b8c0c5", size = 6037, upload-time = "2025-01-10T16:52:54.009Z" }, + { url = "https://files.pythonhosted.org/packages/f4/24/4de8d0d0cd71c88a0deb6dc6ff0c46e545246af280647093dc11c95d93e2/langchain_together-0.0.2.post1-py3-none-any.whl", hash = "sha256:6fb618939e2e200daed027ae611cdb7783e5587457fca465c2afe5fa38984680", size = 6076, upload-time = "2024-01-12T18:14:25.216Z" }, ] [[package]] @@ -3230,41 +2679,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c6/02/c66bdfdadbb021adb642ca4e8a5ed32ada0b4a3e4b39c5d076d19543452f/mistune-3.1.1-py3-none-any.whl", hash = "sha256:02106ac2aa4f66e769debbfa028509a275069dcffce0dfa578edd7b991ee700a", size = 53696, upload-time = "2025-01-28T13:33:04.099Z" }, ] -[[package]] -name = "mpmath" -version = "1.3.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106, upload-time = "2023-03-07T16:47:11.061Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198, upload-time = "2023-03-07T16:47:09.197Z" }, -] - -[[package]] -name = "msal" -version = "1.32.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "cryptography" }, - { name = "pyjwt", extra = ["crypto"] }, - { name = "requests" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/aa/5f/ef42ef25fba682e83a8ee326a1a788e60c25affb58d014495349e37bce50/msal-1.32.0.tar.gz", hash = "sha256:5445fe3af1da6be484991a7ab32eaa82461dc2347de105b76af92c610c3335c2", size = 149817, upload-time = "2025-03-12T21:23:51.844Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/93/5a/2e663ef56a5d89eba962941b267ebe5be8c5ea340a9929d286e2f5fac505/msal-1.32.0-py3-none-any.whl", hash = "sha256:9dbac5384a10bbbf4dae5c7ea0d707d14e087b92c5aa4954b3feaa2d1aa0bcb7", size = 114655, upload-time = "2025-03-12T21:23:50.268Z" }, -] - -[[package]] -name = "msal-extensions" -version = "1.3.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "msal" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/01/99/5d239b6156eddf761a636bded1118414d161bd6b7b37a9335549ed159396/msal_extensions-1.3.1.tar.gz", hash = "sha256:c5b0fd10f65ef62b5f1d62f4251d51cbcaf003fcedae8c91b040a488614be1a4", size = 23315, upload-time = "2025-03-14T23:51:03.902Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5e/75/bd9b7bb966668920f06b200e84454c8f3566b102183bc55c5473d96cb2b9/msal_extensions-1.3.1-py3-none-any.whl", hash = "sha256:96d3de4d034504e969ac5e85bae8106c8373b5c6568e4c8fa7af2eca9dbe6bca", size = 20583, upload-time = "2025-03-14T23:51:03.016Z" }, -] - [[package]] name = "multidict" version = "6.1.0" @@ -3483,40 +2897,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195, upload-time = "2024-01-21T14:25:17.223Z" }, ] -[[package]] -name = "networkx" -version = "3.2.1" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version < '3.10' and platform_python_implementation == 'PyPy'", - "python_full_version < '3.10' and platform_python_implementation != 'PyPy'", -] -sdist = { url = "https://files.pythonhosted.org/packages/c4/80/a84676339aaae2f1cfdf9f418701dd634aef9cc76f708ef55c36ff39c3ca/networkx-3.2.1.tar.gz", hash = "sha256:9f1bb5cf3409bf324e0a722c20bdb4c20ee39bf1c30ce8ae499c8502b0b5e0c6", size = 2073928, upload-time = "2023-10-28T08:41:39.364Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d5/f0/8fbc882ca80cf077f1b246c0e3c3465f7f415439bdea6b899f6b19f61f70/networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2", size = 1647772, upload-time = "2023-10-28T08:41:36.945Z" }, -] - -[[package]] -name = "networkx" -version = "3.4.2" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.13' and platform_python_implementation == 'PyPy'", - "python_full_version >= '3.13' and platform_python_implementation != 'PyPy'", - "python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation == 'PyPy'", - "python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation != 'PyPy'", - "python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation == 'PyPy'", - "python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation != 'PyPy'", - "python_full_version == '3.11.*' and platform_python_implementation == 'PyPy'", - "python_full_version == '3.11.*' and platform_python_implementation != 'PyPy'", - "python_full_version == '3.10.*' and platform_python_implementation == 'PyPy'", - "python_full_version == '3.10.*' and platform_python_implementation != 'PyPy'", -] -sdist = { url = "https://files.pythonhosted.org/packages/fd/1d/06475e1cd5264c0b870ea2cc6fdb3e37177c1e565c43f56ff17a10e3937f/networkx-3.4.2.tar.gz", hash = 
"sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1", size = 2151368, upload-time = "2024-10-21T12:39:38.695Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b9/54/dd730b32ea14ea797530a4479b2ed46a6fb250f682a9cfb997e968bf0261/networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f", size = 1723263, upload-time = "2024-10-21T12:39:36.247Z" }, -] - [[package]] name = "notebook" version = "7.3.2" @@ -3666,135 +3046,17 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3b/3a/2f6d8c1f8e45d496bca6baaec93208035faeb40d5735c25afac092ec9a12/numpy-2.2.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b4adfbbc64014976d2f91084915ca4e626fbf2057fb81af209c1a6d776d23e3d", size = 12857565, upload-time = "2025-03-16T18:22:17.631Z" }, ] -[[package]] -name = "nvidia-cublas-cu12" -version = "12.4.5.8" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ae/71/1c91302526c45ab494c23f61c7a84aa568b8c1f9d196efa5993957faf906/nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_x86_64.whl", hash = "sha256:2fc8da60df463fdefa81e323eef2e36489e1c94335b5358bcb38360adf75ac9b", size = 363438805, upload-time = "2024-04-03T20:57:06.025Z" }, -] - -[[package]] -name = "nvidia-cuda-cupti-cu12" -version = "12.4.127" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/67/42/f4f60238e8194a3106d06a058d494b18e006c10bb2b915655bd9f6ea4cb1/nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9dec60f5ac126f7bb551c055072b69d85392b13311fcc1bcda2202d172df30fb", size = 13813957, upload-time = "2024-04-03T20:55:01.564Z" }, -] - -[[package]] -name = "nvidia-cuda-nvrtc-cu12" -version = "12.4.127" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2c/14/91ae57cd4db3f9ef7aa99f4019cfa8d54cb4caa7e00975df6467e9725a9f/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a178759ebb095827bd30ef56598ec182b85547f1508941a3d560eb7ea1fbf338", size = 24640306, upload-time = "2024-04-03T20:56:01.463Z" }, -] - -[[package]] -name = "nvidia-cuda-runtime-cu12" -version = "12.4.127" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ea/27/1795d86fe88ef397885f2e580ac37628ed058a92ed2c39dc8eac3adf0619/nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:64403288fa2136ee8e467cdc9c9427e0434110899d07c779f25b5c068934faa5", size = 883737, upload-time = "2024-04-03T20:54:51.355Z" }, -] - -[[package]] -name = "nvidia-cudnn-cu12" -version = "9.1.0.70" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "nvidia-cublas-cu12" }, -] -wheels = [ - { url = "https://files.pythonhosted.org/packages/9f/fd/713452cd72343f682b1c7b9321e23829f00b842ceaedcda96e742ea0b0b3/nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f", size = 664752741, upload-time = "2024-04-22T15:24:15.253Z" }, -] - -[[package]] -name = "nvidia-cufft-cu12" -version = "11.2.1.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "nvidia-nvjitlink-cu12" }, -] -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/27/94/3266821f65b92b3138631e9c8e7fe1fb513804ac934485a8d05776e1dd43/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f083fc24912aa410be21fa16d157fed2055dab1cc4b6934a0e03cba69eb242b9", size = 211459117, upload-time = "2024-04-03T20:57:40.402Z" }, -] - -[[package]] -name = "nvidia-curand-cu12" -version = "10.3.5.147" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8a/6d/44ad094874c6f1b9c654f8ed939590bdc408349f137f9b98a3a23ccec411/nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a88f583d4e0bb643c49743469964103aa59f7f708d862c3ddb0fc07f851e3b8b", size = 56305206, upload-time = "2024-04-03T20:58:08.722Z" }, -] - -[[package]] -name = "nvidia-cusolver-cu12" -version = "11.6.1.9" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "nvidia-cublas-cu12" }, - { name = "nvidia-cusparse-cu12" }, - { name = "nvidia-nvjitlink-cu12" }, -] -wheels = [ - { url = "https://files.pythonhosted.org/packages/3a/e1/5b9089a4b2a4790dfdea8b3a006052cfecff58139d5a4e34cb1a51df8d6f/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:19e33fa442bcfd085b3086c4ebf7e8debc07cfe01e11513cc6d332fd918ac260", size = 127936057, upload-time = "2024-04-03T20:58:28.735Z" }, -] - -[[package]] -name = "nvidia-cusparse-cu12" -version = "12.3.1.170" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "nvidia-nvjitlink-cu12" }, -] -wheels = [ - { url = "https://files.pythonhosted.org/packages/db/f7/97a9ea26ed4bbbfc2d470994b8b4f338ef663be97b8f677519ac195e113d/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ea4f11a2904e2a8dc4b1833cc1b5181cde564edd0d5cd33e3c168eff2d1863f1", size = 207454763, upload-time = "2024-04-03T20:58:59.995Z" }, -] - -[[package]] -name = "nvidia-cusparselt-cu12" -version = "0.6.2" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/78/a8/bcbb63b53a4b1234feeafb65544ee55495e1bb37ec31b999b963cbccfd1d/nvidia_cusparselt_cu12-0.6.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:df2c24502fd76ebafe7457dbc4716b2fec071aabaed4fb7691a201cde03704d9", size = 150057751, upload-time = "2024-07-23T02:35:53.074Z" }, -] - -[[package]] -name = "nvidia-nccl-cu12" -version = "2.21.5" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/df/99/12cd266d6233f47d00daf3a72739872bdc10267d0383508b0b9c84a18bb6/nvidia_nccl_cu12-2.21.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:8579076d30a8c24988834445f8d633c697d42397e92ffc3f63fa26766d25e0a0", size = 188654414, upload-time = "2024-04-03T15:32:57.427Z" }, -] - -[[package]] -name = "nvidia-nvjitlink-cu12" -version = "12.4.127" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ff/ff/847841bacfbefc97a00036e0fce5a0f086b640756dc38caea5e1bb002655/nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:06b3b9b25bf3f8af351d664978ca26a16d2c5127dbd53c0497e28d1fb9611d57", size = 21066810, upload-time = "2024-04-03T20:59:46.957Z" }, -] - -[[package]] -name = "nvidia-nvtx-cu12" -version = "12.4.127" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/87/20/199b8713428322a2f22b722c62b8cc278cc53dffa9705d744484b5035ee9/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:781e950d9b9f60d8241ccea575b32f5105a5baf4c2351cab5256a24869f12a1a", size = 99144, upload-time = "2024-04-03T20:56:12.406Z" }, -] - [[package]] name = "ollama" -version = "0.4.7" +version = "0.5.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx" }, { name = "pydantic" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b0/6d/dc77539c735bbed5d0c873fb029fb86aa9f0163df169b34152914331c369/ollama-0.4.7.tar.gz", hash = "sha256:891dcbe54f55397d82d289c459de0ea897e103b86a3f1fad0fdb1895922a75ff", size = 12843, upload-time = "2025-01-21T18:51:48.288Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8d/96/c7fe0d2d1b3053be614822a7b722c7465161b3672ce90df71515137580a0/ollama-0.5.1.tar.gz", hash = "sha256:5a799e4dc4e7af638b11e3ae588ab17623ee019e496caaf4323efbaa8feeff93", size = 41112, upload-time = "2025-05-30T21:32:48.679Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/31/83/c3ffac86906c10184c88c2e916460806b072a2cfe34cdcaf3a0c0e836d39/ollama-0.4.7-py3-none-any.whl", hash = "sha256:85505663cca67a83707be5fb3aeff0ea72e67846cea5985529d8eca4366564a1", size = 13210, upload-time = "2025-01-21T18:51:46.199Z" }, + { url = "https://files.pythonhosted.org/packages/d6/76/3f96c8cdbf3955d7a73ee94ce3e0db0755d6de1e0098a70275940d1aff2f/ollama-0.5.1-py3-none-any.whl", hash = "sha256:4c8839f35bc173c7057b1eb2cbe7f498c1a7e134eafc9192824c8aecb3617506", size = 13369, upload-time = "2025-05-30T21:32:47.429Z" }, ] [[package]] @@ -3816,19 +3078,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/36/ac/313ded47ce1d5bc2ec02ed5dd5506bf5718678a4655ac20f337231d9aae3/openai-1.87.0-py3-none-any.whl", hash = "sha256:f9bcae02ac4fff6522276eee85d33047335cfb692b863bd8261353ce4ada5692", size = 734368, upload-time = "2025-06-16T19:04:23.181Z" }, ] -[[package]] -name = "opentelemetry-api" -version = "1.31.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "deprecated" }, - { name = "importlib-metadata" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/58/89/9d80fa1265a25306b5d9b2707ef09094a6dda9feeac2ee159d5a214f989c/opentelemetry_api-1.31.0.tar.gz", hash = "sha256:d8da59e83e8e3993b4726e4c1023cd46f57c4d5a73142e239247e7d814309de1", size = 63853, upload-time = "2025-03-12T17:18:22.316Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a3/87/5413da9dd80d66ff86205bbd08a9cf69165642565c00cfce6590e0e82980/opentelemetry_api-1.31.0-py3-none-any.whl", hash = "sha256:145b72c6c16977c005c568ec32f4946054ab793d8474a17fd884b0397582c5f2", size = 65099, upload-time = "2025-03-12T17:17:58.344Z" }, -] - [[package]] name = "orjson" version = "3.10.15" @@ -4244,18 +3493,18 @@ wheels = [ [[package]] name = "protobuf" -version = "5.29.3" +version = "4.25.8" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f7/d1/e0a911544ca9993e0f17ce6d3cc0932752356c1b0a834397f28e63479344/protobuf-5.29.3.tar.gz", hash = "sha256:5da0f41edaf117bde316404bad1a486cb4ededf8e4a54891296f648e8e076620", size = 424945, upload-time = "2025-01-08T21:38:51.572Z" } +sdist = { url = "https://files.pythonhosted.org/packages/df/01/34c8d2b6354906d728703cb9d546a0e534de479e25f1b581e4094c4a85cc/protobuf-4.25.8.tar.gz", hash = "sha256:6135cf8affe1fc6f76cced2641e4ea8d3e59518d1f24ae41ba97bcad82d397cd", size = 380920, 
upload-time = "2025-05-28T14:22:25.153Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/dc/7a/1e38f3cafa022f477ca0f57a1f49962f21ad25850c3ca0acd3b9d0091518/protobuf-5.29.3-cp310-abi3-win32.whl", hash = "sha256:3ea51771449e1035f26069c4c7fd51fba990d07bc55ba80701c78f886bf9c888", size = 422708, upload-time = "2025-01-08T21:38:31.799Z" }, - { url = "https://files.pythonhosted.org/packages/61/fa/aae8e10512b83de633f2646506a6d835b151edf4b30d18d73afd01447253/protobuf-5.29.3-cp310-abi3-win_amd64.whl", hash = "sha256:a4fa6f80816a9a0678429e84973f2f98cbc218cca434abe8db2ad0bffc98503a", size = 434508, upload-time = "2025-01-08T21:38:35.489Z" }, - { url = "https://files.pythonhosted.org/packages/dd/04/3eaedc2ba17a088961d0e3bd396eac764450f431621b58a04ce898acd126/protobuf-5.29.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a8434404bbf139aa9e1300dbf989667a83d42ddda9153d8ab76e0d5dcaca484e", size = 417825, upload-time = "2025-01-08T21:38:36.642Z" }, - { url = "https://files.pythonhosted.org/packages/4f/06/7c467744d23c3979ce250397e26d8ad8eeb2bea7b18ca12ad58313c1b8d5/protobuf-5.29.3-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:daaf63f70f25e8689c072cfad4334ca0ac1d1e05a92fc15c54eb9cf23c3efd84", size = 319573, upload-time = "2025-01-08T21:38:37.896Z" }, - { url = "https://files.pythonhosted.org/packages/a8/45/2ebbde52ad2be18d3675b6bee50e68cd73c9e0654de77d595540b5129df8/protobuf-5.29.3-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:c027e08a08be10b67c06bf2370b99c811c466398c357e615ca88c91c07f0910f", size = 319672, upload-time = "2025-01-08T21:38:40.204Z" }, - { url = "https://files.pythonhosted.org/packages/85/a6/bf65a38f8be5ab8c3b575822acfd338702fdf7ac9abd8c81630cc7c9f4bd/protobuf-5.29.3-cp39-cp39-win32.whl", hash = "sha256:0eb32bfa5219fc8d4111803e9a690658aa2e6366384fd0851064b963b6d1f2a7", size = 422676, upload-time = "2025-01-08T21:38:46.611Z" }, - { url = "https://files.pythonhosted.org/packages/ac/e2/48d46adc86369ff092eaece3e537f76b3baaab45ca3dde257838cde831d2/protobuf-5.29.3-cp39-cp39-win_amd64.whl", hash = "sha256:6ce8cc3389a20693bfde6c6562e03474c40851b44975c9b2bf6df7d8c4f864da", size = 434593, upload-time = "2025-01-08T21:38:49.108Z" }, - { url = "https://files.pythonhosted.org/packages/fd/b2/ab07b09e0f6d143dfb839693aa05765257bceaa13d03bf1a696b78323e7a/protobuf-5.29.3-py3-none-any.whl", hash = "sha256:0a18ed4a24198528f2333802eb075e59dea9d679ab7a6c5efb017a59004d849f", size = 172550, upload-time = "2025-01-08T21:38:50.439Z" }, + { url = "https://files.pythonhosted.org/packages/45/ff/05f34305fe6b85bbfbecbc559d423a5985605cad5eda4f47eae9e9c9c5c5/protobuf-4.25.8-cp310-abi3-win32.whl", hash = "sha256:504435d831565f7cfac9f0714440028907f1975e4bed228e58e72ecfff58a1e0", size = 392745, upload-time = "2025-05-28T14:22:10.524Z" }, + { url = "https://files.pythonhosted.org/packages/08/35/8b8a8405c564caf4ba835b1fdf554da869954712b26d8f2a98c0e434469b/protobuf-4.25.8-cp310-abi3-win_amd64.whl", hash = "sha256:bd551eb1fe1d7e92c1af1d75bdfa572eff1ab0e5bf1736716814cdccdb2360f9", size = 413736, upload-time = "2025-05-28T14:22:13.156Z" }, + { url = "https://files.pythonhosted.org/packages/28/d7/ab27049a035b258dab43445eb6ec84a26277b16105b277cbe0a7698bdc6c/protobuf-4.25.8-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:ca809b42f4444f144f2115c4c1a747b9a404d590f18f37e9402422033e464e0f", size = 394537, upload-time = "2025-05-28T14:22:14.768Z" }, + { url = 
"https://files.pythonhosted.org/packages/bd/6d/a4a198b61808dd3d1ee187082ccc21499bc949d639feb948961b48be9a7e/protobuf-4.25.8-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:9ad7ef62d92baf5a8654fbb88dac7fa5594cfa70fd3440488a5ca3bfc6d795a7", size = 294005, upload-time = "2025-05-28T14:22:16.052Z" }, + { url = "https://files.pythonhosted.org/packages/d6/c6/c9deaa6e789b6fc41b88ccbdfe7a42d2b82663248b715f55aa77fbc00724/protobuf-4.25.8-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:83e6e54e93d2b696a92cad6e6efc924f3850f82b52e1563778dfab8b355101b0", size = 294924, upload-time = "2025-05-28T14:22:17.105Z" }, + { url = "https://files.pythonhosted.org/packages/f3/d5/31cc45286413746927cf46251f87b0120e304e6f233f5e89019b1bc00de8/protobuf-4.25.8-cp39-cp39-win32.whl", hash = "sha256:077ff8badf2acf8bc474406706ad890466274191a48d0abd3bd6987107c9cde5", size = 392789, upload-time = "2025-05-28T14:22:21.249Z" }, + { url = "https://files.pythonhosted.org/packages/de/3f/2e1812771b4e28b2a70b566527963e40670d1ec90d3639b6b5f7206ac287/protobuf-4.25.8-cp39-cp39-win_amd64.whl", hash = "sha256:f4510b93a3bec6eba8fd8f1093e9d7fb0d4a24d1a81377c10c0e5bbfe9e4ed24", size = 413684, upload-time = "2025-05-28T14:22:22.72Z" }, + { url = "https://files.pythonhosted.org/packages/0c/c1/6aece0ab5209981a70cd186f164c133fdba2f51e124ff92b73de7fd24d78/protobuf-4.25.8-py3-none-any.whl", hash = "sha256:15a0af558aa3b13efef102ae6e4f3efac06f1eea11afb3a57db2901447d9fb59", size = 156757, upload-time = "2025-05-28T14:22:24.135Z" }, ] [[package]] @@ -4475,96 +3724,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293, upload-time = "2025-01-06T17:26:25.553Z" }, ] -[[package]] -name = "pyjwt" -version = "2.10.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, -] - -[package.optional-dependencies] -crypto = [ - { name = "cryptography" }, -] - -[[package]] -name = "pymongo" -version = "4.11.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "dnspython", marker = "python_full_version < '3.13'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/db/e6/cdb1105c14a86aa2b1663a6cccc6bf54722bb12fb5d479979628142dde42/pymongo-4.11.3.tar.gz", hash = "sha256:b6f24aec7c0cfcf0ea9f89e92b7d40ba18a1e18c134815758f111ecb0122e61c", size = 2054848, upload-time = "2025-03-18T12:44:44.078Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/21/dd/61e6a43442b13533ddf0e798e05206a7ebc4ebcb03a3e6c1aace73a94d19/pymongo-4.11.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:78f19598246dd61ba2a4fc4dddfa6a4f9af704fff7d81cb4fe0d02c7b17b1f68", size = 786122, upload-time = "2025-03-18T12:43:04.985Z" }, - { url = 
"https://files.pythonhosted.org/packages/ef/0c/e810c2a98a6a4dd3374400fce1744e4594075091b3067fb440f855f3eac9/pymongo-4.11.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1c9cbe81184ec81ad8c76ccedbf5b743639448008d68f51f9a3c8a9abe6d9a46", size = 786419, upload-time = "2025-03-18T12:43:06.651Z" }, - { url = "https://files.pythonhosted.org/packages/b0/91/f48cbcc9cff5196a82a9ca88d7a8f721bae2a3f9b8afddfe346f8659fff7/pymongo-4.11.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9047ecb3bc47c43ada7d6f98baf8060c637b1e880c803a2bbd1dc63b49d2f92", size = 1163792, upload-time = "2025-03-18T12:43:08.444Z" }, - { url = "https://files.pythonhosted.org/packages/ad/77/81fe752967fa1ed7adc5b75d7bdf7c15546f0734c7c21d1924b564ff421d/pymongo-4.11.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1a16ec731b42f6b2b4f1aa3a94e74ff2722aacf691922a2e8e607b7f6b8d9f1", size = 1198006, upload-time = "2025-03-18T12:43:10.325Z" }, - { url = "https://files.pythonhosted.org/packages/dc/6e/440d56354e95352ac1dc5f1ab27d5e45d4d1c6e1d2cf174727061ddddb85/pymongo-4.11.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9120e25ac468fda3e3a1749695e0c5e52ff2294334fcc81e70ccb65c897bb58", size = 1180927, upload-time = "2025-03-18T12:43:11.917Z" }, - { url = "https://files.pythonhosted.org/packages/68/57/e3d5508fa8ff8a536f1dfbcefe4ac18d954c0b8d67eb05b8aadddb0b51b5/pymongo-4.11.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f618bd6ed5c3c08b350b157b1d9066d3d389785b7359d2b7b7d82ca4083595d3", size = 1166941, upload-time = "2025-03-18T12:43:13.547Z" }, - { url = "https://files.pythonhosted.org/packages/11/9e/60f40c5b6dd1f710208dc9eb72755698df607eb20429eec3e65009e73df2/pymongo-4.11.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:98017f006e047f5ed6c99c2cb1cac71534f0e11862beeff4d0bc9227189bedcd", size = 1146097, upload-time = "2025-03-18T12:43:15.233Z" }, - { url = "https://files.pythonhosted.org/packages/96/15/ad8464d6084a8c06fc9937277b527c6f6782877864b5a994cd86e3a85ed9/pymongo-4.11.3-cp310-cp310-win32.whl", hash = "sha256:84b9300ed411fef776c60feab40f3ee03db5d0ac8921285c6e03a3e27efa2c20", size = 772068, upload-time = "2025-03-18T12:43:16.613Z" }, - { url = "https://files.pythonhosted.org/packages/92/55/fd9fa9d0f296793944c615f2bb0a292168050d374e7f37685f57ac79c9c7/pymongo-4.11.3-cp310-cp310-win_amd64.whl", hash = "sha256:07231d0bac54e32503507777719dd05ca63bc68896e64ea852edde2f1986b868", size = 781410, upload-time = "2025-03-18T12:43:17.961Z" }, - { url = "https://files.pythonhosted.org/packages/7b/9a/11d68ecb0260454e46404302c5a1cb16d93c0d9ad0c8a7bc4df1859f95a7/pymongo-4.11.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:31b5ad4ce148b201fa8426d0767517dc68424c3380ef4a981038d4d4350f10ee", size = 840506, upload-time = "2025-03-18T12:43:19.955Z" }, - { url = "https://files.pythonhosted.org/packages/46/db/bfe487b1b1b6c3e86b8152845550d7db15476c12516f5093ec122d840602/pymongo-4.11.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:505fb3facf54623b45c96e8e6ad6516f58bb8069f9456e1d7c0abdfdb6929c21", size = 840798, upload-time = "2025-03-18T12:43:21.561Z" }, - { url = "https://files.pythonhosted.org/packages/d4/4b/d1378adbac16829745e57781b140ab7cdbd1046a18cdb796e3adf280c963/pymongo-4.11.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3f20467d695f49ce4c2d6cb87de458ebb3d098cbc951834a74f36a2e992a6bb", size = 1409884, upload-time = 
"2025-03-18T12:43:23.1Z" }, - { url = "https://files.pythonhosted.org/packages/33/97/4882a0b6be225d0358b431e6d0fe70fba368b2cedabf38c005f2a73917c9/pymongo-4.11.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65e8a397b03156880a099d55067daa1580a5333aaf4da3b0313bd7e1731e408f", size = 1460828, upload-time = "2025-03-18T12:43:24.504Z" }, - { url = "https://files.pythonhosted.org/packages/4b/a8/fde60995524f5b2794bdf07cad98f5b369a3cfa7e90b6ec081fc57d3b5ea/pymongo-4.11.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0992917ed259f5ca3506ec8009e7c82d398737a4230a607bf44d102cae31e1d6", size = 1435261, upload-time = "2025-03-18T12:43:26.034Z" }, - { url = "https://files.pythonhosted.org/packages/ce/42/d0ac7f445edd6abf5c7197ad83d9902ad1e8f4be767af257bd892684560a/pymongo-4.11.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f2f0c3ab8284e0e2674367fa47774411212c86482bbbe78e8ae9fb223b8f6ee", size = 1414380, upload-time = "2025-03-18T12:43:27.458Z" }, - { url = "https://files.pythonhosted.org/packages/e7/02/dd67685b67f7408ed72d801b268988986343208f712b0e90c639358b2d19/pymongo-4.11.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c2240126683f55160f83f587d76955ad1e419a72d5c09539a509bd9d1e20bd53", size = 1383026, upload-time = "2025-03-18T12:43:29.328Z" }, - { url = "https://files.pythonhosted.org/packages/2b/60/07f61ad5ddd39c4d52466ac1ce089c0c8c3d337145efcadbfa61072b1913/pymongo-4.11.3-cp311-cp311-win32.whl", hash = "sha256:be89776c5b8272437a85c904d45e0f1bbc0f21bf11688341938380843dd7fe5f", size = 817664, upload-time = "2025-03-18T12:43:31.551Z" }, - { url = "https://files.pythonhosted.org/packages/e1/f3/073f763f6673ecfb33c13568037cdba499284758cfa54c556cac8a406cb7/pymongo-4.11.3-cp311-cp311-win_amd64.whl", hash = "sha256:c237780760f891cae79abbfc52fda55b584492d5d9452762040aadb2c64ac691", size = 831617, upload-time = "2025-03-18T12:43:33.327Z" }, - { url = "https://files.pythonhosted.org/packages/6d/cf/c606c9d889d8f34dcf80455e045854ef2fa187c439b22a6d30357790c12a/pymongo-4.11.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5f48b7faf4064e5f484989608a59503b11b7f134ca344635e416b1b12e7dc255", size = 895374, upload-time = "2025-03-18T12:43:34.734Z" }, - { url = "https://files.pythonhosted.org/packages/c6/f5/287e84ba6c8e34cb13f798e7e859b4dcbc5fab99261f91202a8027f62ba6/pymongo-4.11.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:722f22bf18d208aa752591bde93e018065641711594e7a2fef0432da429264e8", size = 895063, upload-time = "2025-03-18T12:43:36.152Z" }, - { url = "https://files.pythonhosted.org/packages/0e/ba/fe8964ec3f8d7348e9cd6a11864e1e84b2be62ea98ca0ba01a4f5b4d417d/pymongo-4.11.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5be1b35c4897626327c4e8bae14655807c2bc710504fa790bc19a72403142264", size = 1673722, upload-time = "2025-03-18T12:43:37.667Z" }, - { url = "https://files.pythonhosted.org/packages/92/89/925b7160c517b66c80d05b36f63d4cc0d0ff23f01b5150b55936b5fab097/pymongo-4.11.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:14f9e4d2172545798738d27bc6293b972c4f1f98cce248aa56e1e62c4c258ca7", size = 1737946, upload-time = "2025-03-18T12:43:39.194Z" }, - { url = "https://files.pythonhosted.org/packages/f8/97/bcedba78ddbc1b8837bf556da55eb08a055e93b331722ecd1dad602a3427/pymongo-4.11.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:cd3f7bafe441135f58d2b91a312714f423e15fed5afe3854880c8c61ad78d3ce", size = 1706981, upload-time = "2025-03-18T12:43:41.019Z" }, - { url = "https://files.pythonhosted.org/packages/d7/ce/63719be395ec29b8f71fd267014af4957736b5297a1f51f76ef32d05a0cf/pymongo-4.11.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73de1b9f416a2662ba95b4b49edc963d47b93760a7e2b561b932c8099d160151", size = 1676948, upload-time = "2025-03-18T12:43:42.502Z" }, - { url = "https://files.pythonhosted.org/packages/c1/36/de366cee39e6c2e64d824d1f2e5672381ec766c51224304d1aebf7db3507/pymongo-4.11.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e24268e2d7ae96eab12161985b39e75a75185393134fc671f4bb1a16f50bf6f4", size = 1636072, upload-time = "2025-03-18T12:43:44.171Z" }, - { url = "https://files.pythonhosted.org/packages/07/48/34751291a152e8098b4cf6f467046f00edd71b695d5cf6be1b15778cda63/pymongo-4.11.3-cp312-cp312-win32.whl", hash = "sha256:33a936d3c1828e4f52bed3dad6191a3618cc28ab056e2770390aec88d9e9f9ea", size = 864025, upload-time = "2025-03-18T12:43:45.663Z" }, - { url = "https://files.pythonhosted.org/packages/96/8a/604fab1e1f45deb0dc19e06053369e7db44e3d1359a39e0fe376bdb95b41/pymongo-4.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:c4673d8ef0c8ef712491a750adf64f7998202a82abd72be5be749749275b3edb", size = 882290, upload-time = "2025-03-18T12:43:47.136Z" }, - { url = "https://files.pythonhosted.org/packages/01/f1/19f8a81ca1ef180983b89e24f8003863612aea358a06d7685566ccc18a87/pymongo-4.11.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5e53b98c9700bb69f33a322b648d028bfe223ad135fb04ec48c0226998b80d0e", size = 949622, upload-time = "2025-03-18T12:43:48.671Z" }, - { url = "https://files.pythonhosted.org/packages/67/9a/ae232aa9379a9e6cf325facf0f65176d70520d6a16807f4de2e1ccfb76ec/pymongo-4.11.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8464aff011208cf86eae28f4a3624ebc4a40783634e119b2b35852252b901ef3", size = 949299, upload-time = "2025-03-18T12:43:50.202Z" }, - { url = "https://files.pythonhosted.org/packages/70/6d/1ddef8b6c6d598fe21c917d93c49a6304611a252a07e98a9b7e70e1b995b/pymongo-4.11.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3742ffc1951bec1450a5a6a02cfd40ddd4b1c9416b36c70ae439a532e8be0e05", size = 1937616, upload-time = "2025-03-18T12:43:52.093Z" }, - { url = "https://files.pythonhosted.org/packages/13/9c/e735715789a876140f453def1b2015948708d224f1728f9b8412b6e495d2/pymongo-4.11.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a29294b508975a5dfd384f4b902cd121dc2b6e5d55ea2be2debffd2a63461cd9", size = 2015041, upload-time = "2025-03-18T12:43:53.681Z" }, - { url = "https://files.pythonhosted.org/packages/fc/d3/cf41e9ce81644de9d8db54cc039823863e7240e021466ae093edc061683a/pymongo-4.11.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:051c741586ab6efafe72e027504ac4e5f01c88eceec579e4e1a438a369a61b0c", size = 1978716, upload-time = "2025-03-18T12:43:55.426Z" }, - { url = "https://files.pythonhosted.org/packages/be/c8/c3f15c6cc5a9e0a75d18ae86209584cb14fdca017197def9741bff19c151/pymongo-4.11.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b05e03a327cdef28ec2bb72c974d412d308f5cf867a472ef17f9ac95d18ec05", size = 1939524, upload-time = "2025-03-18T12:43:57.37Z" }, - { url = 
"https://files.pythonhosted.org/packages/1b/0d/613cd91c736325d05d2d5d389d06ed899bcdce5a265cb486b948729bf1eb/pymongo-4.11.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dafeddf1db51df19effd0828ae75492b15d60c7faec388da08f1fe9593c88e7a", size = 1888960, upload-time = "2025-03-18T12:43:59.281Z" }, - { url = "https://files.pythonhosted.org/packages/e7/eb/b1e9cf2e03a47c4f35ffc5db1cb0ed0f92c5fe58c6f5f04d5a2da9d6bb77/pymongo-4.11.3-cp313-cp313-win32.whl", hash = "sha256:40c55afb34788ae6a6b8c175421fa46a37cfc45de41fe4669d762c3b1bbda48e", size = 910370, upload-time = "2025-03-18T12:44:00.967Z" }, - { url = "https://files.pythonhosted.org/packages/77/f3/023f12ee9028f341880016fd6251255bf755f70730440ad11bf745f5f9e4/pymongo-4.11.3-cp313-cp313-win_amd64.whl", hash = "sha256:a5b8b7ba9614a081d1f932724b7a6a20847f6c9629420ae81ce827db3b599af2", size = 932930, upload-time = "2025-03-18T12:44:02.571Z" }, - { url = "https://files.pythonhosted.org/packages/d3/c7/0a145cc66fc756cea547b948150583357e5518cfa60b3ad0d3266d3ee168/pymongo-4.11.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0f23f849693e829655f667ea18b87bf34e1395237eb45084f3495317d455beb2", size = 1006138, upload-time = "2025-03-18T12:44:04.122Z" }, - { url = "https://files.pythonhosted.org/packages/81/88/4ed3cd03d2f7835393a72ed87f5e9186f6fc54bcb0e9b7f718424c0b5db8/pymongo-4.11.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:62bcfa88deb4a6152a7c93bedd1a808497f6c2881424ca54c3c81964a51c5040", size = 1006125, upload-time = "2025-03-18T12:44:05.673Z" }, - { url = "https://files.pythonhosted.org/packages/91/a9/d86844a9aff958c959e84b8223b9d226c3b39a71f2f2fbf2aa3a4a748212/pymongo-4.11.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2eaa0233858f72074bf0319f5034018092b43f19202bd7ecb822980c35bfd623", size = 2266315, upload-time = "2025-03-18T12:44:07.274Z" }, - { url = "https://files.pythonhosted.org/packages/1d/06/fff82b09382a887dab6207bb23778395c5986a5ddab6f55905ebdd82e10c/pymongo-4.11.3-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0a434e081017be360595237cd1aeac3d047dd38e8785c549be80748608c1d4ca", size = 2353538, upload-time = "2025-03-18T12:44:09.396Z" }, - { url = "https://files.pythonhosted.org/packages/5d/f7/ff5399baee5888eb686c1508d28b4e9d82b9da5ca63215f958356dee4016/pymongo-4.11.3-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e8aa65a9e4a989245198c249816d86cb240221861b748db92b8b3a5356bd6f1", size = 2312410, upload-time = "2025-03-18T12:44:10.959Z" }, - { url = "https://files.pythonhosted.org/packages/b0/4d/1746ee984b229eddf5f768265b553a90b31b2395fb5ae1d30d28e430a862/pymongo-4.11.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0a91004029d1fc9e66a800e6da4170afaa9b93bcf41299e4b5951b837b3467a", size = 2263706, upload-time = "2025-03-18T12:44:12.532Z" }, - { url = "https://files.pythonhosted.org/packages/1c/dc/5d4154c5baf62af9ffb9391cf41848a87cda97798f92e4336730690be7d5/pymongo-4.11.3-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b992904ac78cb712b42c4b7348974ba1739137c1692cdf8bf75c3eeb22881a4", size = 2202724, upload-time = "2025-03-18T12:44:14.25Z" }, - { url = "https://files.pythonhosted.org/packages/72/15/c18fcc456fdcb793714776da273fc4cba4579f21818f2219e23ff9512314/pymongo-4.11.3-cp313-cp313t-win32.whl", hash = "sha256:45e18bda802d95a2aed88e487f06becc3bd0b22286a25aeca8c46b8c64980dbb", size = 959256, 
upload-time = "2025-03-18T12:44:15.842Z" }, - { url = "https://files.pythonhosted.org/packages/7d/64/11d87df61cdca4fef90388af592247e17f3d31b15a909780f186d2739592/pymongo-4.11.3-cp313-cp313t-win_amd64.whl", hash = "sha256:07d40b831590bc458b624f421849c2b09ad2b9110b956f658b583fe01fe01c01", size = 987855, upload-time = "2025-03-18T12:44:17.63Z" }, - { url = "https://files.pythonhosted.org/packages/7d/28/343647ad019a041f1a9a74972b612974d860add385b2059c661e04f43b51/pymongo-4.11.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4a1c241d8424c0e5d66a1710ff2b691f361b5fd354754a086ddea99ee19cc2d3", size = 731742, upload-time = "2025-03-18T12:44:23.642Z" }, - { url = "https://files.pythonhosted.org/packages/8a/74/6a1c51f851b8f7e621f6c42798e8af3e0c5708eba424e9f18b60085c0a4c/pymongo-4.11.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1b1aaccbcb4a5aaaa3acaabc59b30edd047c38c6cdfc97eb64e0611b6882a6d6", size = 732036, upload-time = "2025-03-18T12:44:25.258Z" }, - { url = "https://files.pythonhosted.org/packages/f4/c1/6942dbf031856fade676a7fe10e1c31f3a29ef99f12842fb4ef10a40b229/pymongo-4.11.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be60f63a310d0d2824e9fb2ef0f821bb45d23e73446af6d50bddda32564f285d", size = 919700, upload-time = "2025-03-18T12:44:27.493Z" }, - { url = "https://files.pythonhosted.org/packages/4d/4b/77c58d370adbb446964a4d0a29faa04802dd25982d2389c92a63498236c3/pymongo-4.11.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1b943d1b13f1232cb92762c82a5154f02b01234db8d632ea9525ab042bd7619", size = 937034, upload-time = "2025-03-18T12:44:29.103Z" }, - { url = "https://files.pythonhosted.org/packages/56/30/a49337dd636153c09df42fc8cdf4562c1e425d34dbf91bdd655fe9cf44c3/pymongo-4.11.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afc7d1d2bd1997bb42fdba8a5a104198e4ff7990f096ac90353dcb87c69bb57f", size = 928701, upload-time = "2025-03-18T12:44:31.178Z" }, - { url = "https://files.pythonhosted.org/packages/e2/79/7867221913a4a0e9ec67b3d57869c3daaf4985ccb6595b55917000a59eb4/pymongo-4.11.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:730fe9a6c432669fa69af0905a7a4835e5a3752363b2ae3b34007919003394cd", size = 921590, upload-time = "2025-03-18T12:44:33.375Z" }, - { url = "https://files.pythonhosted.org/packages/c3/37/75bf93e3bac8e2d8206bbb7e4ea3d81ebb8064c045b5b130b955cd634c1e/pymongo-4.11.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0633536b31980a8af7262edb03a20df88d8aa0ad803e48c49609b6408a33486d", size = 911246, upload-time = "2025-03-18T12:44:35.047Z" }, - { url = "https://files.pythonhosted.org/packages/12/9d/7e2c8b6447deced99af767877c244264f51952905c4b9dcc7c2c8ef3afd5/pymongo-4.11.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e88e99f33a89e8f58f7401201e79e29f98b2da21d4082ba50eeae0828bb35451", size = 894526, upload-time = "2025-03-18T12:44:36.647Z" }, - { url = "https://files.pythonhosted.org/packages/db/93/ef216513ad0ed8d48f15eba73ce1b43dd0153b14922c85da50389d9cd6e4/pymongo-4.11.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a30f1b9bf79f53f995198ed42bc9b675fc38e6ec30d8f6f7e53094085b5eb803", size = 920931, upload-time = "2025-03-18T12:44:38.413Z" }, - { url = "https://files.pythonhosted.org/packages/2f/77/06d611413ab855630c98a0a7e661fb13afaaae009ceaa7bb1637708c61c6/pymongo-4.11.3-cp39-cp39-win32.whl", hash = "sha256:e1872a33f1d4266c14fae1dc4744b955d0ef5d6fad87cc72141d04d8c97245dc", 
size = 726470, upload-time = "2025-03-18T12:44:40.304Z" }, - { url = "https://files.pythonhosted.org/packages/eb/a6/2711b906fdb42ea0f74568a8d74cc8e79dc4103f72bb29aecd970e65e9ad/pymongo-4.11.3-cp39-cp39-win_amd64.whl", hash = "sha256:a19f186455e4b3af1e11ee877346418d18303800ecc688ef732b5725c2795f13", size = 731209, upload-time = "2025-03-18T12:44:42.421Z" }, -] - -[[package]] -name = "pyparsing" -version = "3.2.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/8b/1a/3544f4f299a47911c2ab3710f534e52fea62a633c96806995da5d25be4b2/pyparsing-3.2.1.tar.gz", hash = "sha256:61980854fd66de3a90028d679a954d5f2623e83144b5afe5ee86f43d762e5f0a", size = 1067694, upload-time = "2024-12-31T20:59:46.157Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1c/a7/c8a2d361bf89c0d9577c934ebb7421b25dc84bf3a8e3ac0a40aed9acc547/pyparsing-3.2.1-py3-none-any.whl", hash = "sha256:506ff4f4386c4cec0590ec19e6302d3aedb992fdc02c761e90416f158dacf8e1", size = 107716, upload-time = "2024-12-31T20:59:42.738Z" }, -] - [[package]] name = "pytest" version = "8.3.4" @@ -5307,187 +4466,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e2/1f/72d2946e3cc7456bb837e88000eb3437e55f80db339c840c04015a11115d/ruff-0.12.2-py3-none-win_arm64.whl", hash = "sha256:48d6c6bfb4761df68bc05ae630e24f506755e702d4fb08f08460be778c7ccb12", size = 10735334, upload-time = "2025-07-03T16:40:17.677Z" }, ] -[[package]] -name = "s3transfer" -version = "0.11.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "botocore" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/62/45/2323b5928f86fd29f9afdcef4659f68fa73eaa5356912b774227f5cf46b5/s3transfer-0.11.2.tar.gz", hash = "sha256:3b39185cb72f5acc77db1a58b6e25b977f28d20496b6e58d6813d75f464d632f", size = 147885, upload-time = "2025-01-23T20:20:52.9Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1b/ac/e7dc469e49048dc57f62e0c555d2ee3117fa30813d2a1a2962cce3a2a82a/s3transfer-0.11.2-py3-none-any.whl", hash = "sha256:be6ecb39fadd986ef1701097771f87e4d2f821f27f6071c872143884d2950fbc", size = 84151, upload-time = "2025-01-23T20:20:50.982Z" }, -] - -[[package]] -name = "safetensors" -version = "0.5.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f4/4f/2ef9ef1766f8c194b01b67a63a444d2e557c8fe1d82faf3ebd85f370a917/safetensors-0.5.2.tar.gz", hash = "sha256:cb4a8d98ba12fa016f4241932b1fc5e702e5143f5374bba0bbcf7ddc1c4cf2b8", size = 66957, upload-time = "2025-01-08T17:44:20.307Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/96/d1/017e31e75e274492a11a456a9e7c171f8f7911fe50735b4ec6ff37221220/safetensors-0.5.2-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:45b6092997ceb8aa3801693781a71a99909ab9cc776fbc3fa9322d29b1d3bef2", size = 427067, upload-time = "2025-01-08T17:44:09.598Z" }, - { url = "https://files.pythonhosted.org/packages/24/84/e9d3ff57ae50dd0028f301c9ee064e5087fe8b00e55696677a0413c377a7/safetensors-0.5.2-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:6d0d6a8ee2215a440e1296b843edf44fd377b055ba350eaba74655a2fe2c4bae", size = 408856, upload-time = "2025-01-08T17:44:06.398Z" }, - { url = "https://files.pythonhosted.org/packages/f1/1d/fe95f5dd73db16757b11915e8a5106337663182d0381811c81993e0014a9/safetensors-0.5.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86016d40bcaa3bcc9a56cd74d97e654b5f4f4abe42b038c71e4f00a089c4526c", size = 450088, upload-time = 
"2025-01-08T17:43:51.548Z" }, - { url = "https://files.pythonhosted.org/packages/cf/21/e527961b12d5ab528c6e47b92d5f57f33563c28a972750b238b871924e49/safetensors-0.5.2-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:990833f70a5f9c7d3fc82c94507f03179930ff7d00941c287f73b6fcbf67f19e", size = 458966, upload-time = "2025-01-08T17:43:53.553Z" }, - { url = "https://files.pythonhosted.org/packages/a5/8b/1a037d7a57f86837c0b41905040369aea7d8ca1ec4b2a77592372b2ec380/safetensors-0.5.2-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dfa7c2f3fe55db34eba90c29df94bcdac4821043fc391cb5d082d9922013869", size = 509915, upload-time = "2025-01-08T17:43:57.463Z" }, - { url = "https://files.pythonhosted.org/packages/61/3d/03dd5cfd33839df0ee3f4581a20bd09c40246d169c0e4518f20b21d5f077/safetensors-0.5.2-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:46ff2116150ae70a4e9c490d2ab6b6e1b1b93f25e520e540abe1b81b48560c3a", size = 527664, upload-time = "2025-01-08T17:43:59.428Z" }, - { url = "https://files.pythonhosted.org/packages/c5/dc/8952caafa9a10a3c0f40fa86bacf3190ae7f55fa5eef87415b97b29cb97f/safetensors-0.5.2-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ab696dfdc060caffb61dbe4066b86419107a24c804a4e373ba59be699ebd8d5", size = 461978, upload-time = "2025-01-08T17:44:03.156Z" }, - { url = "https://files.pythonhosted.org/packages/60/da/82de1fcf1194e3dbefd4faa92dc98b33c06bed5d67890e0962dd98e18287/safetensors-0.5.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03c937100f38c9ff4c1507abea9928a6a9b02c9c1c9c3609ed4fb2bf413d4975", size = 491253, upload-time = "2025-01-08T17:44:01.385Z" }, - { url = "https://files.pythonhosted.org/packages/5a/9a/d90e273c25f90c3ba1b0196a972003786f04c39e302fbd6649325b1272bb/safetensors-0.5.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:a00e737948791b94dad83cf0eafc09a02c4d8c2171a239e8c8572fe04e25960e", size = 628644, upload-time = "2025-01-08T17:44:11.304Z" }, - { url = "https://files.pythonhosted.org/packages/70/3c/acb23e05aa34b4f5edd2e7f393f8e6480fbccd10601ab42cd03a57d4ab5f/safetensors-0.5.2-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:d3a06fae62418ec8e5c635b61a8086032c9e281f16c63c3af46a6efbab33156f", size = 721648, upload-time = "2025-01-08T17:44:12.853Z" }, - { url = "https://files.pythonhosted.org/packages/71/45/eaa3dba5253a7c6931230dc961641455710ab231f8a89cb3c4c2af70f8c8/safetensors-0.5.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:1506e4c2eda1431099cebe9abf6c76853e95d0b7a95addceaa74c6019c65d8cf", size = 659588, upload-time = "2025-01-08T17:44:16.391Z" }, - { url = "https://files.pythonhosted.org/packages/b0/71/2f9851164f821064d43b481ddbea0149c2d676c4f4e077b178e7eeaa6660/safetensors-0.5.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5c5b5d9da594f638a259fca766046f44c97244cc7ab8bef161b3e80d04becc76", size = 632533, upload-time = "2025-01-08T17:44:17.946Z" }, - { url = "https://files.pythonhosted.org/packages/00/f1/5680e2ef61d9c61454fad82c344f0e40b8741a9dbd1e31484f0d31a9b1c3/safetensors-0.5.2-cp38-abi3-win32.whl", hash = "sha256:fe55c039d97090d1f85277d402954dd6ad27f63034fa81985a9cc59655ac3ee2", size = 291167, upload-time = "2025-01-08T17:44:27.123Z" }, - { url = "https://files.pythonhosted.org/packages/86/ca/aa489392ec6fb59223ffce825461e1f811a3affd417121a2088be7a5758b/safetensors-0.5.2-cp38-abi3-win_amd64.whl", hash = "sha256:78abdddd03a406646107f973c7843276e7b64e5e32623529dc17f3d94a20f589", size = 303756, upload-time = 
"2025-01-08T17:44:24.513Z" }, -] - -[[package]] -name = "scikit-learn" -version = "1.6.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "joblib" }, - { name = "numpy", version = "1.26.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.13'" }, - { name = "numpy", version = "2.2.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.13'" }, - { name = "scipy", version = "1.13.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, - { name = "scipy", version = "1.15.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, - { name = "threadpoolctl" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/9e/a5/4ae3b3a0755f7b35a280ac90b28817d1f380318973cff14075ab41ef50d9/scikit_learn-1.6.1.tar.gz", hash = "sha256:b4fc2525eca2c69a59260f583c56a7557c6ccdf8deafdba6e060f94c1c59738e", size = 7068312, upload-time = "2025-01-10T08:07:55.348Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2e/3a/f4597eb41049110b21ebcbb0bcb43e4035017545daa5eedcfeb45c08b9c5/scikit_learn-1.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d056391530ccd1e501056160e3c9673b4da4805eb67eb2bdf4e983e1f9c9204e", size = 12067702, upload-time = "2025-01-10T08:05:56.515Z" }, - { url = "https://files.pythonhosted.org/packages/37/19/0423e5e1fd1c6ec5be2352ba05a537a473c1677f8188b9306097d684b327/scikit_learn-1.6.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:0c8d036eb937dbb568c6242fa598d551d88fb4399c0344d95c001980ec1c7d36", size = 11112765, upload-time = "2025-01-10T08:06:00.272Z" }, - { url = "https://files.pythonhosted.org/packages/70/95/d5cb2297a835b0f5fc9a77042b0a2d029866379091ab8b3f52cc62277808/scikit_learn-1.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8634c4bd21a2a813e0a7e3900464e6d593162a29dd35d25bdf0103b3fce60ed5", size = 12643991, upload-time = "2025-01-10T08:06:04.813Z" }, - { url = "https://files.pythonhosted.org/packages/b7/91/ab3c697188f224d658969f678be86b0968ccc52774c8ab4a86a07be13c25/scikit_learn-1.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:775da975a471c4f6f467725dff0ced5c7ac7bda5e9316b260225b48475279a1b", size = 13497182, upload-time = "2025-01-10T08:06:08.42Z" }, - { url = "https://files.pythonhosted.org/packages/17/04/d5d556b6c88886c092cc989433b2bab62488e0f0dafe616a1d5c9cb0efb1/scikit_learn-1.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:8a600c31592bd7dab31e1c61b9bbd6dea1b3433e67d264d17ce1017dbdce8002", size = 11125517, upload-time = "2025-01-10T08:06:12.783Z" }, - { url = "https://files.pythonhosted.org/packages/6c/2a/e291c29670795406a824567d1dfc91db7b699799a002fdaa452bceea8f6e/scikit_learn-1.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:72abc587c75234935e97d09aa4913a82f7b03ee0b74111dcc2881cba3c5a7b33", size = 12102620, upload-time = "2025-01-10T08:06:16.675Z" }, - { url = "https://files.pythonhosted.org/packages/25/92/ee1d7a00bb6b8c55755d4984fd82608603a3cc59959245068ce32e7fb808/scikit_learn-1.6.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:b3b00cdc8f1317b5f33191df1386c0befd16625f49d979fe77a8d44cae82410d", size = 11116234, upload-time = "2025-01-10T08:06:21.83Z" }, - { url = "https://files.pythonhosted.org/packages/30/cd/ed4399485ef364bb25f388ab438e3724e60dc218c547a407b6e90ccccaef/scikit_learn-1.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:dc4765af3386811c3ca21638f63b9cf5ecf66261cc4815c1db3f1e7dc7b79db2", size = 12592155, upload-time = "2025-01-10T08:06:27.309Z" }, - { url = "https://files.pythonhosted.org/packages/a8/f3/62fc9a5a659bb58a03cdd7e258956a5824bdc9b4bb3c5d932f55880be569/scikit_learn-1.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25fc636bdaf1cc2f4a124a116312d837148b5e10872147bdaf4887926b8c03d8", size = 13497069, upload-time = "2025-01-10T08:06:32.515Z" }, - { url = "https://files.pythonhosted.org/packages/a1/a6/c5b78606743a1f28eae8f11973de6613a5ee87366796583fb74c67d54939/scikit_learn-1.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:fa909b1a36e000a03c382aade0bd2063fd5680ff8b8e501660c0f59f021a6415", size = 11139809, upload-time = "2025-01-10T08:06:35.514Z" }, - { url = "https://files.pythonhosted.org/packages/0a/18/c797c9b8c10380d05616db3bfb48e2a3358c767affd0857d56c2eb501caa/scikit_learn-1.6.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:926f207c804104677af4857b2c609940b743d04c4c35ce0ddc8ff4f053cddc1b", size = 12104516, upload-time = "2025-01-10T08:06:40.009Z" }, - { url = "https://files.pythonhosted.org/packages/c4/b7/2e35f8e289ab70108f8cbb2e7a2208f0575dc704749721286519dcf35f6f/scikit_learn-1.6.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:2c2cae262064e6a9b77eee1c8e768fc46aa0b8338c6a8297b9b6759720ec0ff2", size = 11167837, upload-time = "2025-01-10T08:06:43.305Z" }, - { url = "https://files.pythonhosted.org/packages/a4/f6/ff7beaeb644bcad72bcfd5a03ff36d32ee4e53a8b29a639f11bcb65d06cd/scikit_learn-1.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1061b7c028a8663fb9a1a1baf9317b64a257fcb036dae5c8752b2abef31d136f", size = 12253728, upload-time = "2025-01-10T08:06:47.618Z" }, - { url = "https://files.pythonhosted.org/packages/29/7a/8bce8968883e9465de20be15542f4c7e221952441727c4dad24d534c6d99/scikit_learn-1.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e69fab4ebfc9c9b580a7a80111b43d214ab06250f8a7ef590a4edf72464dd86", size = 13147700, upload-time = "2025-01-10T08:06:50.888Z" }, - { url = "https://files.pythonhosted.org/packages/62/27/585859e72e117fe861c2079bcba35591a84f801e21bc1ab85bce6ce60305/scikit_learn-1.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:70b1d7e85b1c96383f872a519b3375f92f14731e279a7b4c6cfd650cf5dffc52", size = 11110613, upload-time = "2025-01-10T08:06:54.115Z" }, - { url = "https://files.pythonhosted.org/packages/2e/59/8eb1872ca87009bdcdb7f3cdc679ad557b992c12f4b61f9250659e592c63/scikit_learn-1.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2ffa1e9e25b3d93990e74a4be2c2fc61ee5af85811562f1288d5d055880c4322", size = 12010001, upload-time = "2025-01-10T08:06:58.613Z" }, - { url = "https://files.pythonhosted.org/packages/9d/05/f2fc4effc5b32e525408524c982c468c29d22f828834f0625c5ef3d601be/scikit_learn-1.6.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:dc5cf3d68c5a20ad6d571584c0750ec641cc46aeef1c1507be51300e6003a7e1", size = 11096360, upload-time = "2025-01-10T08:07:01.556Z" }, - { url = "https://files.pythonhosted.org/packages/c8/e4/4195d52cf4f113573fb8ebc44ed5a81bd511a92c0228889125fac2f4c3d1/scikit_learn-1.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c06beb2e839ecc641366000ca84f3cf6fa9faa1777e29cf0c04be6e4d096a348", size = 12209004, upload-time = "2025-01-10T08:07:06.931Z" }, - { url = 
"https://files.pythonhosted.org/packages/94/be/47e16cdd1e7fcf97d95b3cb08bde1abb13e627861af427a3651fcb80b517/scikit_learn-1.6.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8ca8cb270fee8f1f76fa9bfd5c3507d60c6438bbee5687f81042e2bb98e5a97", size = 13171776, upload-time = "2025-01-10T08:07:11.715Z" }, - { url = "https://files.pythonhosted.org/packages/34/b0/ca92b90859070a1487827dbc672f998da95ce83edce1270fc23f96f1f61a/scikit_learn-1.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:7a1c43c8ec9fde528d664d947dc4c0789be4077a3647f232869f41d9bf50e0fb", size = 11071865, upload-time = "2025-01-10T08:07:16.088Z" }, - { url = "https://files.pythonhosted.org/packages/12/ae/993b0fb24a356e71e9a894e42b8a9eec528d4c70217353a1cd7a48bc25d4/scikit_learn-1.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a17c1dea1d56dcda2fac315712f3651a1fea86565b64b48fa1bc090249cbf236", size = 11955804, upload-time = "2025-01-10T08:07:20.385Z" }, - { url = "https://files.pythonhosted.org/packages/d6/54/32fa2ee591af44507eac86406fa6bba968d1eb22831494470d0a2e4a1eb1/scikit_learn-1.6.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:6a7aa5f9908f0f28f4edaa6963c0a6183f1911e63a69aa03782f0d924c830a35", size = 11100530, upload-time = "2025-01-10T08:07:23.675Z" }, - { url = "https://files.pythonhosted.org/packages/3f/58/55856da1adec655bdce77b502e94a267bf40a8c0b89f8622837f89503b5a/scikit_learn-1.6.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0650e730afb87402baa88afbf31c07b84c98272622aaba002559b614600ca691", size = 12433852, upload-time = "2025-01-10T08:07:26.817Z" }, - { url = "https://files.pythonhosted.org/packages/ff/4f/c83853af13901a574f8f13b645467285a48940f185b690936bb700a50863/scikit_learn-1.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:3f59fe08dc03ea158605170eb52b22a105f238a5d512c4470ddeca71feae8e5f", size = 11337256, upload-time = "2025-01-10T08:07:31.084Z" }, - { url = "https://files.pythonhosted.org/packages/d2/37/b305b759cc65829fe1b8853ff3e308b12cdd9d8884aa27840835560f2b42/scikit_learn-1.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6849dd3234e87f55dce1db34c89a810b489ead832aaf4d4550b7ea85628be6c1", size = 12101868, upload-time = "2025-01-10T08:07:34.189Z" }, - { url = "https://files.pythonhosted.org/packages/83/74/f64379a4ed5879d9db744fe37cfe1978c07c66684d2439c3060d19a536d8/scikit_learn-1.6.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e7be3fa5d2eb9be7d77c3734ff1d599151bb523674be9b834e8da6abe132f44e", size = 11144062, upload-time = "2025-01-10T08:07:37.67Z" }, - { url = "https://files.pythonhosted.org/packages/fd/dc/d5457e03dc9c971ce2b0d750e33148dd060fefb8b7dc71acd6054e4bb51b/scikit_learn-1.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44a17798172df1d3c1065e8fcf9019183f06c87609b49a124ebdf57ae6cb0107", size = 12693173, upload-time = "2025-01-10T08:07:42.713Z" }, - { url = "https://files.pythonhosted.org/packages/79/35/b1d2188967c3204c78fa79c9263668cf1b98060e8e58d1a730fe5b2317bb/scikit_learn-1.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8b7a3b86e411e4bce21186e1c180d792f3d99223dcfa3b4f597ecc92fa1a422", size = 13518605, upload-time = "2025-01-10T08:07:46.551Z" }, - { url = "https://files.pythonhosted.org/packages/fb/d8/8d603bdd26601f4b07e2363032b8565ab82eb857f93d86d0f7956fcf4523/scikit_learn-1.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:7a73d457070e3318e32bdb3aa79a8d990474f19035464dfd8bede2883ab5dc3b", size = 11155078, upload-time = "2025-01-10T08:07:51.376Z" }, -] - 
-[[package]] -name = "scipy" -version = "1.13.1" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version < '3.10' and platform_python_implementation == 'PyPy'", - "python_full_version < '3.10' and platform_python_implementation != 'PyPy'", -] -dependencies = [ - { name = "numpy", version = "1.26.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ae/00/48c2f661e2816ccf2ecd77982f6605b2950afe60f60a52b4cbbc2504aa8f/scipy-1.13.1.tar.gz", hash = "sha256:095a87a0312b08dfd6a6155cbbd310a8c51800fc931b8c0b84003014b874ed3c", size = 57210720, upload-time = "2024-05-23T03:29:26.079Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/33/59/41b2529908c002ade869623b87eecff3e11e3ce62e996d0bdcb536984187/scipy-1.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:20335853b85e9a49ff7572ab453794298bcf0354d8068c5f6775a0eabf350aca", size = 39328076, upload-time = "2024-05-23T03:19:01.687Z" }, - { url = "https://files.pythonhosted.org/packages/d5/33/f1307601f492f764062ce7dd471a14750f3360e33cd0f8c614dae208492c/scipy-1.13.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d605e9c23906d1994f55ace80e0125c587f96c020037ea6aa98d01b4bd2e222f", size = 30306232, upload-time = "2024-05-23T03:19:09.089Z" }, - { url = "https://files.pythonhosted.org/packages/c0/66/9cd4f501dd5ea03e4a4572ecd874936d0da296bd04d1c45ae1a4a75d9c3a/scipy-1.13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cfa31f1def5c819b19ecc3a8b52d28ffdcc7ed52bb20c9a7589669dd3c250989", size = 33743202, upload-time = "2024-05-23T03:19:15.138Z" }, - { url = "https://files.pythonhosted.org/packages/a3/ba/7255e5dc82a65adbe83771c72f384d99c43063648456796436c9a5585ec3/scipy-1.13.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26264b282b9da0952a024ae34710c2aff7d27480ee91a2e82b7b7073c24722f", size = 38577335, upload-time = "2024-05-23T03:19:21.984Z" }, - { url = "https://files.pythonhosted.org/packages/49/a5/bb9ded8326e9f0cdfdc412eeda1054b914dfea952bda2097d174f8832cc0/scipy-1.13.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:eccfa1906eacc02de42d70ef4aecea45415f5be17e72b61bafcfd329bdc52e94", size = 38820728, upload-time = "2024-05-23T03:19:28.225Z" }, - { url = "https://files.pythonhosted.org/packages/12/30/df7a8fcc08f9b4a83f5f27cfaaa7d43f9a2d2ad0b6562cced433e5b04e31/scipy-1.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:2831f0dc9c5ea9edd6e51e6e769b655f08ec6db6e2e10f86ef39bd32eb11da54", size = 46210588, upload-time = "2024-05-23T03:19:35.661Z" }, - { url = "https://files.pythonhosted.org/packages/b4/15/4a4bb1b15bbd2cd2786c4f46e76b871b28799b67891f23f455323a0cdcfb/scipy-1.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:27e52b09c0d3a1d5b63e1105f24177e544a222b43611aaf5bc44d4a0979e32f9", size = 39333805, upload-time = "2024-05-23T03:19:43.081Z" }, - { url = "https://files.pythonhosted.org/packages/ba/92/42476de1af309c27710004f5cdebc27bec62c204db42e05b23a302cb0c9a/scipy-1.13.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:54f430b00f0133e2224c3ba42b805bfd0086fe488835effa33fa291561932326", size = 30317687, upload-time = "2024-05-23T03:19:48.799Z" }, - { url = "https://files.pythonhosted.org/packages/80/ba/8be64fe225360a4beb6840f3cbee494c107c0887f33350d0a47d55400b01/scipy-1.13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e89369d27f9e7b0884ae559a3a956e77c02114cc60a6058b4e5011572eea9299", size = 
33694638, upload-time = "2024-05-23T03:19:55.104Z" }, - { url = "https://files.pythonhosted.org/packages/36/07/035d22ff9795129c5a847c64cb43c1fa9188826b59344fee28a3ab02e283/scipy-1.13.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a78b4b3345f1b6f68a763c6e25c0c9a23a9fd0f39f5f3d200efe8feda560a5fa", size = 38569931, upload-time = "2024-05-23T03:20:01.82Z" }, - { url = "https://files.pythonhosted.org/packages/d9/10/f9b43de37e5ed91facc0cfff31d45ed0104f359e4f9a68416cbf4e790241/scipy-1.13.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:45484bee6d65633752c490404513b9ef02475b4284c4cfab0ef946def50b3f59", size = 38838145, upload-time = "2024-05-23T03:20:09.173Z" }, - { url = "https://files.pythonhosted.org/packages/4a/48/4513a1a5623a23e95f94abd675ed91cfb19989c58e9f6f7d03990f6caf3d/scipy-1.13.1-cp311-cp311-win_amd64.whl", hash = "sha256:5713f62f781eebd8d597eb3f88b8bf9274e79eeabf63afb4a737abc6c84ad37b", size = 46196227, upload-time = "2024-05-23T03:20:16.433Z" }, - { url = "https://files.pythonhosted.org/packages/f2/7b/fb6b46fbee30fc7051913068758414f2721003a89dd9a707ad49174e3843/scipy-1.13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5d72782f39716b2b3509cd7c33cdc08c96f2f4d2b06d51e52fb45a19ca0c86a1", size = 39357301, upload-time = "2024-05-23T03:20:23.538Z" }, - { url = "https://files.pythonhosted.org/packages/dc/5a/2043a3bde1443d94014aaa41e0b50c39d046dda8360abd3b2a1d3f79907d/scipy-1.13.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:017367484ce5498445aade74b1d5ab377acdc65e27095155e448c88497755a5d", size = 30363348, upload-time = "2024-05-23T03:20:29.885Z" }, - { url = "https://files.pythonhosted.org/packages/e7/cb/26e4a47364bbfdb3b7fb3363be6d8a1c543bcd70a7753ab397350f5f189a/scipy-1.13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:949ae67db5fa78a86e8fa644b9a6b07252f449dcf74247108c50e1d20d2b4627", size = 33406062, upload-time = "2024-05-23T03:20:36.012Z" }, - { url = "https://files.pythonhosted.org/packages/88/ab/6ecdc526d509d33814835447bbbeedbebdec7cca46ef495a61b00a35b4bf/scipy-1.13.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de3ade0e53bc1f21358aa74ff4830235d716211d7d077e340c7349bc3542e884", size = 38218311, upload-time = "2024-05-23T03:20:42.086Z" }, - { url = "https://files.pythonhosted.org/packages/0b/00/9f54554f0f8318100a71515122d8f4f503b1a2c4b4cfab3b4b68c0eb08fa/scipy-1.13.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2ac65fb503dad64218c228e2dc2d0a0193f7904747db43014645ae139c8fad16", size = 38442493, upload-time = "2024-05-23T03:20:48.292Z" }, - { url = "https://files.pythonhosted.org/packages/3e/df/963384e90733e08eac978cd103c34df181d1fec424de383cdc443f418dd4/scipy-1.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:cdd7dacfb95fea358916410ec61bbc20440f7860333aee6d882bb8046264e949", size = 45910955, upload-time = "2024-05-23T03:20:55.091Z" }, - { url = "https://files.pythonhosted.org/packages/7f/29/c2ea58c9731b9ecb30b6738113a95d147e83922986b34c685b8f6eefde21/scipy-1.13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:436bbb42a94a8aeef855d755ce5a465479c721e9d684de76bf61a62e7c2b81d5", size = 39352927, upload-time = "2024-05-23T03:21:01.95Z" }, - { url = "https://files.pythonhosted.org/packages/5c/c0/e71b94b20ccf9effb38d7147c0064c08c622309fd487b1b677771a97d18c/scipy-1.13.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:8335549ebbca860c52bf3d02f80784e91a004b71b059e3eea9678ba994796a24", size = 30324538, upload-time = "2024-05-23T03:21:07.634Z" }, - { url = 
"https://files.pythonhosted.org/packages/6d/0f/aaa55b06d474817cea311e7b10aab2ea1fd5d43bc6a2861ccc9caec9f418/scipy-1.13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d533654b7d221a6a97304ab63c41c96473ff04459e404b83275b60aa8f4b7004", size = 33732190, upload-time = "2024-05-23T03:21:14.41Z" }, - { url = "https://files.pythonhosted.org/packages/35/f5/d0ad1a96f80962ba65e2ce1de6a1e59edecd1f0a7b55990ed208848012e0/scipy-1.13.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:637e98dcf185ba7f8e663e122ebf908c4702420477ae52a04f9908707456ba4d", size = 38612244, upload-time = "2024-05-23T03:21:21.827Z" }, - { url = "https://files.pythonhosted.org/packages/8d/02/1165905f14962174e6569076bcc3315809ae1291ed14de6448cc151eedfd/scipy-1.13.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a014c2b3697bde71724244f63de2476925596c24285c7a637364761f8710891c", size = 38845637, upload-time = "2024-05-23T03:21:28.729Z" }, - { url = "https://files.pythonhosted.org/packages/3e/77/dab54fe647a08ee4253963bcd8f9cf17509c8ca64d6335141422fe2e2114/scipy-1.13.1-cp39-cp39-win_amd64.whl", hash = "sha256:392e4ec766654852c25ebad4f64e4e584cf19820b980bc04960bca0b0cd6eaa2", size = 46227440, upload-time = "2024-05-23T03:21:35.888Z" }, -] - -[[package]] -name = "scipy" -version = "1.15.1" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.13' and platform_python_implementation == 'PyPy'", - "python_full_version >= '3.13' and platform_python_implementation != 'PyPy'", - "python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation == 'PyPy'", - "python_full_version >= '3.12.4' and python_full_version < '3.13' and platform_python_implementation != 'PyPy'", - "python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation == 'PyPy'", - "python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_python_implementation != 'PyPy'", - "python_full_version == '3.11.*' and platform_python_implementation == 'PyPy'", - "python_full_version == '3.11.*' and platform_python_implementation != 'PyPy'", - "python_full_version == '3.10.*' and platform_python_implementation == 'PyPy'", - "python_full_version == '3.10.*' and platform_python_implementation != 'PyPy'", -] -dependencies = [ - { name = "numpy", version = "1.26.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10' and python_full_version < '3.13'" }, - { name = "numpy", version = "2.2.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.13'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/76/c6/8eb0654ba0c7d0bb1bf67bf8fbace101a8e4f250f7722371105e8b6f68fc/scipy-1.15.1.tar.gz", hash = "sha256:033a75ddad1463970c96a88063a1df87ccfddd526437136b6ee81ff0312ebdf6", size = 59407493, upload-time = "2025-01-11T00:06:16.883Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/86/53/b204ce5a4433f1864001b9d16f103b9c25f5002a602ae83585d0ea5f9c4a/scipy-1.15.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:c64ded12dcab08afff9e805a67ff4480f5e69993310e093434b10e85dc9d43e1", size = 41414518, upload-time = "2025-01-10T23:59:19.173Z" }, - { url = "https://files.pythonhosted.org/packages/c7/fc/54ffa7a8847f7f303197a6ba65a66104724beba2e38f328135a78f0dc480/scipy-1.15.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:5b190b935e7db569960b48840e5bef71dc513314cc4e79a1b7d14664f57fd4ff", size = 32519265, 
upload-time = "2025-01-10T23:59:27.6Z" }, - { url = "https://files.pythonhosted.org/packages/f1/77/a98b8ba03d6f371dc31a38719affd53426d4665729dcffbed4afe296784a/scipy-1.15.1-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:4b17d4220df99bacb63065c76b0d1126d82bbf00167d1730019d2a30d6ae01ea", size = 24792859, upload-time = "2025-01-10T23:59:33.906Z" }, - { url = "https://files.pythonhosted.org/packages/a7/78/70bb9f0df7444b18b108580934bfef774822e28fd34a68e5c263c7d2828a/scipy-1.15.1-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:63b9b6cd0333d0eb1a49de6f834e8aeaefe438df8f6372352084535ad095219e", size = 27886506, upload-time = "2025-01-10T23:59:39.288Z" }, - { url = "https://files.pythonhosted.org/packages/14/a7/f40f6033e06de4176ddd6cc8c3ae9f10a226c3bca5d6b4ab883bc9914a14/scipy-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f151e9fb60fbf8e52426132f473221a49362091ce7a5e72f8aa41f8e0da4f25", size = 38375041, upload-time = "2025-01-10T23:59:47.066Z" }, - { url = "https://files.pythonhosted.org/packages/17/03/390a1c5c61fd76b0fa4b3c5aa3bdd7e60f6c46f712924f1a9df5705ec046/scipy-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21e10b1dd56ce92fba3e786007322542361984f8463c6d37f6f25935a5a6ef52", size = 40597556, upload-time = "2025-01-10T23:59:55.199Z" }, - { url = "https://files.pythonhosted.org/packages/4e/70/fa95b3ae026b97eeca58204a90868802e5155ac71b9d7bdee92b68115dd3/scipy-1.15.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5dff14e75cdbcf07cdaa1c7707db6017d130f0af9ac41f6ce443a93318d6c6e0", size = 42938505, upload-time = "2025-01-11T00:00:04.734Z" }, - { url = "https://files.pythonhosted.org/packages/d6/07/427859116bdd71847c898180f01802691f203c3e2455a1eb496130ff07c5/scipy-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:f82fcf4e5b377f819542fbc8541f7b5fbcf1c0017d0df0bc22c781bf60abc4d8", size = 43909663, upload-time = "2025-01-11T00:00:15.339Z" }, - { url = "https://files.pythonhosted.org/packages/8e/2e/7b71312da9c2dabff53e7c9a9d08231bc34d9d8fdabe88a6f1155b44591c/scipy-1.15.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:5bd8d27d44e2c13d0c1124e6a556454f52cd3f704742985f6b09e75e163d20d2", size = 41424362, upload-time = "2025-01-11T00:00:22.985Z" }, - { url = "https://files.pythonhosted.org/packages/81/8c/ab85f1aa1cc200c796532a385b6ebf6a81089747adc1da7482a062acc46c/scipy-1.15.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:be3deeb32844c27599347faa077b359584ba96664c5c79d71a354b80a0ad0ce0", size = 32535910, upload-time = "2025-01-11T00:00:29.569Z" }, - { url = "https://files.pythonhosted.org/packages/3b/9c/6f4b787058daa8d8da21ddff881b4320e28de4704a65ec147adb50cb2230/scipy-1.15.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:5eb0ca35d4b08e95da99a9f9c400dc9f6c21c424298a0ba876fdc69c7afacedf", size = 24809398, upload-time = "2025-01-11T00:00:36.218Z" }, - { url = "https://files.pythonhosted.org/packages/16/2b/949460a796df75fc7a1ee1becea202cf072edbe325ebe29f6d2029947aa7/scipy-1.15.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:74bb864ff7640dea310a1377d8567dc2cb7599c26a79ca852fc184cc851954ac", size = 27918045, upload-time = "2025-01-11T00:00:42.627Z" }, - { url = "https://files.pythonhosted.org/packages/5f/36/67fe249dd7ccfcd2a38b25a640e3af7e59d9169c802478b6035ba91dfd6d/scipy-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:667f950bf8b7c3a23b4199db24cb9bf7512e27e86d0e3813f015b74ec2c6e3df", size = 38332074, upload-time = "2025-01-11T00:00:52.633Z" }, - { url = 
"https://files.pythonhosted.org/packages/fc/da/452e1119e6f720df3feb588cce3c42c5e3d628d4bfd4aec097bd30b7de0c/scipy-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:395be70220d1189756068b3173853029a013d8c8dd5fd3d1361d505b2aa58fa7", size = 40588469, upload-time = "2025-01-11T00:01:00.149Z" }, - { url = "https://files.pythonhosted.org/packages/7f/71/5f94aceeac99a4941478af94fe9f459c6752d497035b6b0761a700f5f9ff/scipy-1.15.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ce3a000cd28b4430426db2ca44d96636f701ed12e2b3ca1f2b1dd7abdd84b39a", size = 42965214, upload-time = "2025-01-11T00:01:10.131Z" }, - { url = "https://files.pythonhosted.org/packages/af/25/caa430865749d504271757cafd24066d596217e83326155993980bc22f97/scipy-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:3fe1d95944f9cf6ba77aa28b82dd6bb2a5b52f2026beb39ecf05304b8392864b", size = 43896034, upload-time = "2025-01-11T00:01:40.933Z" }, - { url = "https://files.pythonhosted.org/packages/d8/6e/a9c42d0d39e09ed7fd203d0ac17adfea759cba61ab457671fe66e523dbec/scipy-1.15.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c09aa9d90f3500ea4c9b393ee96f96b0ccb27f2f350d09a47f533293c78ea776", size = 41478318, upload-time = "2025-01-11T00:01:53.571Z" }, - { url = "https://files.pythonhosted.org/packages/04/ee/e3e535c81828618878a7433992fecc92fa4df79393f31a8fea1d05615091/scipy-1.15.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:0ac102ce99934b162914b1e4a6b94ca7da0f4058b6d6fd65b0cef330c0f3346f", size = 32596696, upload-time = "2025-01-11T00:02:03.859Z" }, - { url = "https://files.pythonhosted.org/packages/c4/5e/b1b0124be8e76f87115f16b8915003eec4b7060298117715baf13f51942c/scipy-1.15.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:09c52320c42d7f5c7748b69e9f0389266fd4f82cf34c38485c14ee976cb8cb04", size = 24870366, upload-time = "2025-01-11T00:02:12.434Z" }, - { url = "https://files.pythonhosted.org/packages/14/36/c00cb73eefda85946172c27913ab995c6ad4eee00fa4f007572e8c50cd51/scipy-1.15.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:cdde8414154054763b42b74fe8ce89d7f3d17a7ac5dd77204f0e142cdc9239e9", size = 28007461, upload-time = "2025-01-11T00:02:20.237Z" }, - { url = "https://files.pythonhosted.org/packages/68/94/aff5c51b3799349a9d1e67a056772a0f8a47db371e83b498d43467806557/scipy-1.15.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c9d8fc81d6a3b6844235e6fd175ee1d4c060163905a2becce8e74cb0d7554ce", size = 38068174, upload-time = "2025-01-11T00:02:30.21Z" }, - { url = "https://files.pythonhosted.org/packages/b0/3c/0de11ca154e24a57b579fb648151d901326d3102115bc4f9a7a86526ce54/scipy-1.15.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fb57b30f0017d4afa5fe5f5b150b8f807618819287c21cbe51130de7ccdaed2", size = 40249869, upload-time = "2025-01-11T00:02:41.811Z" }, - { url = "https://files.pythonhosted.org/packages/15/09/472e8d0a6b33199d1bb95e49bedcabc0976c3724edd9b0ef7602ccacf41e/scipy-1.15.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:491d57fe89927fa1aafbe260f4cfa5ffa20ab9f1435025045a5315006a91b8f5", size = 42629068, upload-time = "2025-01-11T00:02:53.118Z" }, - { url = "https://files.pythonhosted.org/packages/ff/ba/31c7a8131152822b3a2cdeba76398ffb404d81d640de98287d236da90c49/scipy-1.15.1-cp312-cp312-win_amd64.whl", hash = "sha256:900f3fa3db87257510f011c292a5779eb627043dd89731b9c461cd16ef76ab3d", size = 43621992, upload-time = "2025-01-11T00:03:04.53Z" }, - { url = 
"https://files.pythonhosted.org/packages/2b/bf/dd68965a4c5138a630eeed0baec9ae96e5d598887835bdde96cdd2fe4780/scipy-1.15.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:100193bb72fbff37dbd0bf14322314fc7cbe08b7ff3137f11a34d06dc0ee6b85", size = 41441136, upload-time = "2025-01-11T00:03:17.245Z" }, - { url = "https://files.pythonhosted.org/packages/ef/5e/4928581312922d7e4d416d74c416a660addec4dd5ea185401df2269ba5a0/scipy-1.15.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:2114a08daec64980e4b4cbdf5bee90935af66d750146b1d2feb0d3ac30613692", size = 32533699, upload-time = "2025-01-11T00:03:26.894Z" }, - { url = "https://files.pythonhosted.org/packages/32/90/03f99c43041852837686898c66767787cd41c5843d7a1509c39ffef683e9/scipy-1.15.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:6b3e71893c6687fc5e29208d518900c24ea372a862854c9888368c0b267387ab", size = 24807289, upload-time = "2025-01-11T00:03:34.263Z" }, - { url = "https://files.pythonhosted.org/packages/9d/52/bfe82b42ae112eaba1af2f3e556275b8727d55ac6e4932e7aef337a9d9d4/scipy-1.15.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:837299eec3d19b7e042923448d17d95a86e43941104d33f00da7e31a0f715d3c", size = 27929844, upload-time = "2025-01-11T00:03:42.934Z" }, - { url = "https://files.pythonhosted.org/packages/f6/77/54ff610bad600462c313326acdb035783accc6a3d5f566d22757ad297564/scipy-1.15.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82add84e8a9fb12af5c2c1a3a3f1cb51849d27a580cb9e6bd66226195142be6e", size = 38031272, upload-time = "2025-01-11T00:03:52.509Z" }, - { url = "https://files.pythonhosted.org/packages/f1/26/98585cbf04c7cf503d7eb0a1966df8a268154b5d923c5fe0c1ed13154c49/scipy-1.15.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:070d10654f0cb6abd295bc96c12656f948e623ec5f9a4eab0ddb1466c000716e", size = 40210217, upload-time = "2025-01-11T00:04:05.615Z" }, - { url = "https://files.pythonhosted.org/packages/fd/3f/3d2285eb6fece8bc5dbb2f9f94d61157d61d155e854fd5fea825b8218f12/scipy-1.15.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:55cc79ce4085c702ac31e49b1e69b27ef41111f22beafb9b49fea67142b696c4", size = 42587785, upload-time = "2025-01-11T00:04:16.702Z" }, - { url = "https://files.pythonhosted.org/packages/48/7d/5b5251984bf0160d6533695a74a5fddb1fa36edd6f26ffa8c871fbd4782a/scipy-1.15.1-cp313-cp313-win_amd64.whl", hash = "sha256:c352c1b6d7cac452534517e022f8f7b8d139cd9f27e6fbd9f3cbd0bfd39f5bef", size = 43640439, upload-time = "2025-01-11T00:05:28.233Z" }, - { url = "https://files.pythonhosted.org/packages/e7/b8/0e092f592d280496de52e152582030f8a270b194f87f890e1a97c5599b81/scipy-1.15.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0458839c9f873062db69a03de9a9765ae2e694352c76a16be44f93ea45c28d2b", size = 41619862, upload-time = "2025-01-11T00:04:26.419Z" }, - { url = "https://files.pythonhosted.org/packages/f6/19/0b6e1173aba4db9e0b7aa27fe45019857fb90d6904038b83927cbe0a6c1d/scipy-1.15.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:af0b61c1de46d0565b4b39c6417373304c1d4f5220004058bdad3061c9fa8a95", size = 32610387, upload-time = "2025-01-11T00:04:35.474Z" }, - { url = "https://files.pythonhosted.org/packages/e7/02/754aae3bd1fa0f2479ade3cfdf1732ecd6b05853f63eee6066a32684563a/scipy-1.15.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:71ba9a76c2390eca6e359be81a3e879614af3a71dfdabb96d1d7ab33da6f2364", size = 24883814, upload-time = "2025-01-11T00:04:46.708Z" }, - { url = 
"https://files.pythonhosted.org/packages/1f/ac/d7906201604a2ea3b143bb0de51b3966f66441ba50b7dc182c4505b3edf9/scipy-1.15.1-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:14eaa373c89eaf553be73c3affb11ec6c37493b7eaaf31cf9ac5dffae700c2e0", size = 27944865, upload-time = "2025-01-11T00:04:54.43Z" }, - { url = "https://files.pythonhosted.org/packages/84/9d/8f539002b5e203723af6a6f513a45e0a7671e9dabeedb08f417ac17e4edc/scipy-1.15.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f735bc41bd1c792c96bc426dece66c8723283695f02df61dcc4d0a707a42fc54", size = 39883261, upload-time = "2025-01-11T00:05:01.015Z" }, - { url = "https://files.pythonhosted.org/packages/97/c0/62fd3bab828bcccc9b864c5997645a3b86372a35941cdaf677565c25c98d/scipy-1.15.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2722a021a7929d21168830790202a75dbb20b468a8133c74a2c0230c72626b6c", size = 42093299, upload-time = "2025-01-11T00:05:10.873Z" }, - { url = "https://files.pythonhosted.org/packages/e4/1f/5d46a8d94e9f6d2c913cbb109e57e7eed914de38ea99e2c4d69a9fc93140/scipy-1.15.1-cp313-cp313t-win_amd64.whl", hash = "sha256:bc7136626261ac1ed988dca56cfc4ab5180f75e0ee52e58f1e6aa74b5f3eacd5", size = 43181730, upload-time = "2025-01-11T00:05:20.145Z" }, -] - [[package]] name = "send2trash" version = "1.8.3" @@ -5497,25 +4475,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/40/b0/4562db6223154aa4e22f939003cb92514c79f3d4dccca3444253fd17f902/Send2Trash-1.8.3-py3-none-any.whl", hash = "sha256:0c31227e0bd08961c7665474a3d1ef7193929fedda4233843689baa056be46c9", size = 18072, upload-time = "2024-04-07T00:01:07.438Z" }, ] -[[package]] -name = "sentence-transformers" -version = "3.4.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "huggingface-hub" }, - { name = "pillow" }, - { name = "scikit-learn" }, - { name = "scipy", version = "1.13.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, - { name = "scipy", version = "1.15.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, - { name = "torch" }, - { name = "tqdm" }, - { name = "transformers" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/16/74/aca6f8a2b8d62b4daf8c9a0c49d2aa573381caf47dc35cbb343389229376/sentence_transformers-3.4.1.tar.gz", hash = "sha256:68daa57504ff548340e54ff117bd86c1d2f784b21e0fb2689cf3272b8937b24b", size = 223898, upload-time = "2025-01-29T14:25:55.982Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/05/89/7eb147a37b7f31d3c815543df539d8b8d0425e93296c875cc87719d65232/sentence_transformers-3.4.1-py3-none-any.whl", hash = "sha256:e026dc6d56801fd83f74ad29a30263f401b4b522165c19386d8bc10dcca805da", size = 275896, upload-time = "2025-01-29T14:25:53.614Z" }, -] - [[package]] name = "setuptools" version = "67.8.0" @@ -5525,141 +4484,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f5/2c/074ab1c5be9c7d523d8d6d69d1f46f450fe7f11713147dc9e779aa4ca4ea/setuptools-67.8.0-py3-none-any.whl", hash = "sha256:5df61bf30bb10c6f756eb19e7c9f3b473051f48db77fddbe06ff2ca307df9a6f", size = 1093916, upload-time = "2023-05-19T19:38:35.61Z" }, ] -[[package]] -name = "shapely" -version = "2.0.7" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "numpy", version = "1.26.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.13'" }, - { name = "numpy", version = "2.2.4", source = { registry = "https://pypi.org/simple" }, marker 
= "python_full_version >= '3.13'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/21/c0/a911d1fd765d07a2b6769ce155219a281bfbe311584ebe97340d75c5bdb1/shapely-2.0.7.tar.gz", hash = "sha256:28fe2997aab9a9dc026dc6a355d04e85841546b2a5d232ed953e3321ab958ee5", size = 283413, upload-time = "2025-01-31T01:10:20.787Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/15/2e/02c694d6ddacd4f13b625722d313d2838f23c5b988cbc680132983f73ce3/shapely-2.0.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:33fb10e50b16113714ae40adccf7670379e9ccf5b7a41d0002046ba2b8f0f691", size = 1478310, upload-time = "2025-01-31T02:42:18.134Z" }, - { url = "https://files.pythonhosted.org/packages/87/69/b54a08bcd25e561bdd5183c008ace4424c25e80506e80674032504800efd/shapely-2.0.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f44eda8bd7a4bccb0f281264b34bf3518d8c4c9a8ffe69a1a05dabf6e8461147", size = 1336082, upload-time = "2025-01-31T02:42:19.986Z" }, - { url = "https://files.pythonhosted.org/packages/b3/f9/40473fcb5b66ff849e563ca523d2a26dafd6957d52dd876ffd0eded39f1c/shapely-2.0.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf6c50cd879831955ac47af9c907ce0310245f9d162e298703f82e1785e38c98", size = 2371047, upload-time = "2025-01-31T02:42:22.724Z" }, - { url = "https://files.pythonhosted.org/packages/d6/f3/c9cc07a7a03b5f5e83bd059f9adf3e21cf086b0e41d7f95e6464b151e798/shapely-2.0.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04a65d882456e13c8b417562c36324c0cd1e5915f3c18ad516bb32ee3f5fc895", size = 2469112, upload-time = "2025-01-31T02:42:26.739Z" }, - { url = "https://files.pythonhosted.org/packages/5d/b9/fc63d6b0b25063a3ff806857a5dc88851d54d1c278288f18cef1b322b449/shapely-2.0.7-cp310-cp310-win32.whl", hash = "sha256:7e97104d28e60b69f9b6a957c4d3a2a893b27525bc1fc96b47b3ccef46726bf2", size = 1296057, upload-time = "2025-01-31T02:42:29.156Z" }, - { url = "https://files.pythonhosted.org/packages/fe/d1/8df43f94cf4cda0edbab4545f7cdd67d3f1d02910eaff152f9f45c6d00d8/shapely-2.0.7-cp310-cp310-win_amd64.whl", hash = "sha256:35524cc8d40ee4752520819f9894b9f28ba339a42d4922e92c99b148bed3be39", size = 1441787, upload-time = "2025-01-31T02:42:31.412Z" }, - { url = "https://files.pythonhosted.org/packages/1d/ad/21798c2fec013e289f8ab91d42d4d3299c315b8c4460c08c75fef0901713/shapely-2.0.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5cf23400cb25deccf48c56a7cdda8197ae66c0e9097fcdd122ac2007e320bc34", size = 1473091, upload-time = "2025-01-31T02:42:33.595Z" }, - { url = "https://files.pythonhosted.org/packages/15/63/eef4f180f1b5859c70e7f91d2f2570643e5c61e7d7c40743d15f8c6cbc42/shapely-2.0.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d8f1da01c04527f7da59ee3755d8ee112cd8967c15fab9e43bba936b81e2a013", size = 1332921, upload-time = "2025-01-31T02:42:34.993Z" }, - { url = "https://files.pythonhosted.org/packages/fe/67/77851dd17738bbe7762a0ef1acf7bc499d756f68600dd68a987d78229412/shapely-2.0.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f623b64bb219d62014781120f47499a7adc30cf7787e24b659e56651ceebcb0", size = 2427949, upload-time = "2025-01-31T02:42:37.578Z" }, - { url = "https://files.pythonhosted.org/packages/0b/a5/2c8dbb0f383519771df19164e3bf3a8895d195d2edeab4b6040f176ee28e/shapely-2.0.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6d95703efaa64aaabf278ced641b888fc23d9c6dd71f8215091afd8a26a66e3", size = 2529282, upload-time = "2025-01-31T02:42:39.504Z" }, - { url = 
"https://files.pythonhosted.org/packages/dc/4e/e1d608773c7fe4cde36d48903c0d6298e3233dc69412403783ac03fa5205/shapely-2.0.7-cp311-cp311-win32.whl", hash = "sha256:2f6e4759cf680a0f00a54234902415f2fa5fe02f6b05546c662654001f0793a2", size = 1295751, upload-time = "2025-01-31T02:42:41.107Z" }, - { url = "https://files.pythonhosted.org/packages/27/57/8ec7c62012bed06731f7ee979da7f207bbc4b27feed5f36680b6a70df54f/shapely-2.0.7-cp311-cp311-win_amd64.whl", hash = "sha256:b52f3ab845d32dfd20afba86675c91919a622f4627182daec64974db9b0b4608", size = 1442684, upload-time = "2025-01-31T02:42:43.181Z" }, - { url = "https://files.pythonhosted.org/packages/4f/3e/ea100eec5811bafd0175eb21828a3be5b0960f65250f4474391868be7c0f/shapely-2.0.7-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4c2b9859424facbafa54f4a19b625a752ff958ab49e01bc695f254f7db1835fa", size = 1482451, upload-time = "2025-01-31T02:42:44.902Z" }, - { url = "https://files.pythonhosted.org/packages/ce/53/c6a3487716fd32e1f813d2a9608ba7b72a8a52a6966e31c6443480a1d016/shapely-2.0.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5aed1c6764f51011d69a679fdf6b57e691371ae49ebe28c3edb5486537ffbd51", size = 1345765, upload-time = "2025-01-31T02:42:46.625Z" }, - { url = "https://files.pythonhosted.org/packages/fd/dd/b35d7891d25cc11066a70fb8d8169a6a7fca0735dd9b4d563a84684969a3/shapely-2.0.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73c9ae8cf443187d784d57202199bf9fd2d4bb7d5521fe8926ba40db1bc33e8e", size = 2421540, upload-time = "2025-01-31T02:42:49.971Z" }, - { url = "https://files.pythonhosted.org/packages/62/de/8dbd7df60eb23cb983bb698aac982944b3d602ef0ce877a940c269eae34e/shapely-2.0.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9469f49ff873ef566864cb3516091881f217b5d231c8164f7883990eec88b73", size = 2525741, upload-time = "2025-01-31T02:42:53.882Z" }, - { url = "https://files.pythonhosted.org/packages/96/64/faf0413ebc7a84fe7a0790bf39ec0b02b40132b68e57aba985c0b6e4e7b6/shapely-2.0.7-cp312-cp312-win32.whl", hash = "sha256:6bca5095e86be9d4ef3cb52d56bdd66df63ff111d580855cb8546f06c3c907cd", size = 1296552, upload-time = "2025-01-31T02:42:55.714Z" }, - { url = "https://files.pythonhosted.org/packages/63/05/8a1c279c226d6ad7604d9e237713dd21788eab96db97bf4ce0ea565e5596/shapely-2.0.7-cp312-cp312-win_amd64.whl", hash = "sha256:f86e2c0259fe598c4532acfcf638c1f520fa77c1275912bbc958faecbf00b108", size = 1443464, upload-time = "2025-01-31T02:42:57.696Z" }, - { url = "https://files.pythonhosted.org/packages/c6/21/abea43effbfe11f792e44409ee9ad7635aa93ef1c8ada0ef59b3c1c3abad/shapely-2.0.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a0c09e3e02f948631c7763b4fd3dd175bc45303a0ae04b000856dedebefe13cb", size = 1481618, upload-time = "2025-01-31T02:42:59.915Z" }, - { url = "https://files.pythonhosted.org/packages/d9/71/af688798da36fe355a6e6ffe1d4628449cb5fa131d57fc169bcb614aeee7/shapely-2.0.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:06ff6020949b44baa8fc2e5e57e0f3d09486cd5c33b47d669f847c54136e7027", size = 1345159, upload-time = "2025-01-31T02:43:01.611Z" }, - { url = "https://files.pythonhosted.org/packages/67/47/f934fe2b70d31bb9774ad4376e34f81666deed6b811306ff574faa3d115e/shapely-2.0.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d6dbf096f961ca6bec5640e22e65ccdec11e676344e8157fe7d636e7904fd36", size = 2410267, upload-time = "2025-01-31T02:43:05.83Z" }, - { url = 
"https://files.pythonhosted.org/packages/f5/8a/2545cc2a30afc63fc6176c1da3b76af28ef9c7358ed4f68f7c6a9d86cf5b/shapely-2.0.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:adeddfb1e22c20548e840403e5e0b3d9dc3daf66f05fa59f1fcf5b5f664f0e98", size = 2514128, upload-time = "2025-01-31T02:43:08.427Z" }, - { url = "https://files.pythonhosted.org/packages/87/54/2344ce7da39676adec94e84fbaba92a8f1664e4ae2d33bd404dafcbe607f/shapely-2.0.7-cp313-cp313-win32.whl", hash = "sha256:a7f04691ce1c7ed974c2f8b34a1fe4c3c5dfe33128eae886aa32d730f1ec1913", size = 1295783, upload-time = "2025-01-31T02:43:10.608Z" }, - { url = "https://files.pythonhosted.org/packages/d7/1e/6461e5cfc8e73ae165b8cff6eb26a4d65274fad0e1435137c5ba34fe4e88/shapely-2.0.7-cp313-cp313-win_amd64.whl", hash = "sha256:aaaf5f7e6cc234c1793f2a2760da464b604584fb58c6b6d7d94144fd2692d67e", size = 1442300, upload-time = "2025-01-31T02:43:12.299Z" }, - { url = "https://files.pythonhosted.org/packages/ad/de/dc856cf99a981b83aa041d1a240a65b36618657d5145d1c0c7ffb4263d5b/shapely-2.0.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4abeb44b3b946236e4e1a1b3d2a0987fb4d8a63bfb3fdefb8a19d142b72001e5", size = 1478794, upload-time = "2025-01-31T02:43:38.532Z" }, - { url = "https://files.pythonhosted.org/packages/53/ea/70fec89a9f6fa84a8bf6bd2807111a9175cee22a3df24470965acdd5fb74/shapely-2.0.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cd0e75d9124b73e06a42bf1615ad3d7d805f66871aa94538c3a9b7871d620013", size = 1336402, upload-time = "2025-01-31T02:43:40.134Z" }, - { url = "https://files.pythonhosted.org/packages/e5/22/f6b074b08748d6f6afedd79f707d7eb88b79fa0121369246c25bbc721776/shapely-2.0.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7977d8a39c4cf0e06247cd2dca695ad4e020b81981d4c82152c996346cf1094b", size = 2376673, upload-time = "2025-01-31T02:43:41.922Z" }, - { url = "https://files.pythonhosted.org/packages/ab/f0/befc440a6c90c577300f5f84361bad80919e7c7ac381ae4960ce3195cedc/shapely-2.0.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0145387565fcf8f7c028b073c802956431308da933ef41d08b1693de49990d27", size = 2474380, upload-time = "2025-01-31T02:43:43.671Z" }, - { url = "https://files.pythonhosted.org/packages/13/b8/edaf33dfb97e281d9de3871810de131b01e4f33d38d8f613515abc89d91e/shapely-2.0.7-cp39-cp39-win32.whl", hash = "sha256:98697c842d5c221408ba8aa573d4f49caef4831e9bc6b6e785ce38aca42d1999", size = 1297939, upload-time = "2025-01-31T02:43:46.287Z" }, - { url = "https://files.pythonhosted.org/packages/7b/95/4d164c2fcb19c51e50537aafb99ecfda82f62356bfdb6f4ca620a3932bad/shapely-2.0.7-cp39-cp39-win_amd64.whl", hash = "sha256:a3fb7fbae257e1b042f440289ee7235d03f433ea880e73e687f108d044b24db5", size = 1443665, upload-time = "2025-01-31T02:43:47.889Z" }, -] - -[[package]] -name = "simsimd" -version = "6.2.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/da/1c/90e6ec0f0de20108fdd7d5665ac2916b1e8c893ce2f8d7481fd37eabbb97/simsimd-6.2.1.tar.gz", hash = "sha256:5e202c5386a4141946b7aee05faac8ebc2e36bca0a360b24080e57b59bc4ef6a", size = 165828, upload-time = "2024-11-27T13:18:21.016Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/36/95/66c0485fd0734c6d77a96a11b7ec52a21c8a368b48f8400dcc8b5593685e/simsimd-6.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:9c79486cf75eb06c5e1f623e8315f9fb73620ac63b846d5a6c843f14905de43f", size = 170242, upload-time = "2024-11-27T13:14:02.151Z" }, - { url = 
"https://files.pythonhosted.org/packages/fb/c1/7c535b65aa1bcb0aef18407859f188ec5afc9404f6ad57e79e6ce74321a4/simsimd-6.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:104d53f2489dcbf569b8260d678e2183af605510115dc2b22ed0340aa47fe892", size = 102331, upload-time = "2024-11-27T13:14:05.09Z" }, - { url = "https://files.pythonhosted.org/packages/44/c5/fe1915c70f82733782f57e9410bd92936a51ba6f5d2408aa98204a16885c/simsimd-6.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fef886c8220d3566b9f43d441226ca267a11682dea5496bb6e007f655eee1fd1", size = 93455, upload-time = "2024-11-27T13:14:09.355Z" }, - { url = "https://files.pythonhosted.org/packages/a7/b0/9a7df126e36bf1397c31f1e2482857183b5eac61141cf72041d730fd5b4d/simsimd-6.2.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:522e56451481bff3468653c2818ad1240b4cb13cff0ec76bc88d8860bfc775c9", size = 251045, upload-time = "2024-11-27T13:14:10.786Z" }, - { url = "https://files.pythonhosted.org/packages/16/6a/15578d772bb4b5506b5617d078557296fce74b7206bb1c9d3fe6db0e47c8/simsimd-6.2.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a5dfb02fa141a6e039803044930753aef1df5ed05cae8b14fe348cdc160cef1e", size = 302448, upload-time = "2024-11-27T13:14:12.991Z" }, - { url = "https://files.pythonhosted.org/packages/49/51/cbf5f43c8cb1c9e173a040004ebb7726b87936e5110b15916510c1b7fa32/simsimd-6.2.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39eb6abdd44adfddec181a713e9cfad8742d03abbc6247c4e5ca2caee38e4775", size = 227246, upload-time = "2024-11-27T13:14:14.951Z" }, - { url = "https://files.pythonhosted.org/packages/9e/56/3f3609cbeaf9393158ef5ee5cf60b8e2190bb87925e21a43dd321c52a05f/simsimd-6.2.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:9ca68b9d2cc1c19af6afe6f01a764861fc8bb919d688a64cf0b0ac0abae7e0fa", size = 432346, upload-time = "2024-11-27T13:14:17.634Z" }, - { url = "https://files.pythonhosted.org/packages/56/53/13629d84b95b9373b7ce1447c43fc09da448d521bfa93eb02a8806ec0a50/simsimd-6.2.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:2b56b1ca7b76c0d4515938a036e688b73a866b19e6f6eb743596144fdf498a0c", size = 632661, upload-time = "2024-11-27T13:14:19.467Z" }, - { url = "https://files.pythonhosted.org/packages/d7/52/6361628a462b6e753f1ed9d5de9c4e1f3d35ced2922c7e196ce4e45d81fa/simsimd-6.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:02d7b7c7afecc63ddf501460f09c1da90625bfd59b4da5fda126c1aa5c54bb95", size = 468411, upload-time = "2024-11-27T13:14:21.249Z" }, - { url = "https://files.pythonhosted.org/packages/ef/f1/f56395d5885a3a19268d8f62589e3cc5b37b7c0f407fcf89bacf1d57397c/simsimd-6.2.1-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:8abc529daf0a61649ca4a237cd9e63723f3355394686898654c643bd63846cf5", size = 268931, upload-time = "2024-11-27T13:14:23.53Z" }, - { url = "https://files.pythonhosted.org/packages/b1/90/597c8756697b7fdb7f4b6e7d7e4c85207b449c286b6bf8a6c3815798bc33/simsimd-6.2.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9ea60422d0f45d3a1899984c3fc3a14dbd248cfca8f67c24751029441464a806", size = 344281, upload-time = "2024-11-27T13:14:25.122Z" }, - { url = "https://files.pythonhosted.org/packages/16/fb/9b976f87db319ad95b541f94232a1cc6d0d3c16b01f910e1f8b967b241d5/simsimd-6.2.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:98e38a0ca4805c1de2882d0641b54e249eabca4ed2980c82465822130d7f8c98", size = 389374, upload-time = "2024-11-27T13:14:27.652Z" }, - { url = 
"https://files.pythonhosted.org/packages/da/e1/d3e41accb2a4a3b6fd46c7900c49e36b7d426e20e49e06b3418316eba2b9/simsimd-6.2.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:cbbc2434286493b88f3b8211e922d37b46588b34d4cc28f3262f154c8ca1141c", size = 316688, upload-time = "2024-11-27T13:14:29.485Z" }, - { url = "https://files.pythonhosted.org/packages/28/1f/c8cc75df5d386071e067ca22d54b6629eb6d600879e223bba3ddf96849d7/simsimd-6.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4f2ecd459f4917facdb287c42c5e68030b21cb98edac0fec9919a7215968e38a", size = 669697, upload-time = "2024-11-27T13:14:31.548Z" }, - { url = "https://files.pythonhosted.org/packages/ab/cc/d4a0f90706432fa3b5cbde390ec7f213e7639ce6cf87be0f9f19ff8a23d9/simsimd-6.2.1-cp310-cp310-win32.whl", hash = "sha256:4ec31c076dc839114bff5d83526ddf46551d4720cc8cd0f16516896809a4fca6", size = 55008, upload-time = "2024-11-27T13:14:33.376Z" }, - { url = "https://files.pythonhosted.org/packages/9b/e6/33ea89f17e83a8743f9461c85f926203ef5a82782c4a72263571b7186427/simsimd-6.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:94282e040be985c993d415290371f6b22bec3eeadafe747a6d8dfbd2c317f35e", size = 86852, upload-time = "2024-11-27T13:14:36.235Z" }, - { url = "https://files.pythonhosted.org/packages/ad/30/65252e79ef62807c33e22f1df04b3dbd16ceda5ecc88bf46de239a4516c3/simsimd-6.2.1-cp310-cp310-win_arm64.whl", hash = "sha256:0784e98ca48a0075fb0cbd7782df11eaa17ce15c60f09a65e8477864208afb8a", size = 60194, upload-time = "2024-11-27T13:14:38.342Z" }, - { url = "https://files.pythonhosted.org/packages/a7/5f/361cee272fd6c88f33e14e233792f59dd58836ea8c776344f7445a829ca2/simsimd-6.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e9614309af75be4d08a051dc61ed5cf41b5239b8303b37dc2f9c8a7223534392", size = 170254, upload-time = "2024-11-27T13:14:39.932Z" }, - { url = "https://files.pythonhosted.org/packages/b8/88/edf4442ec655765d570bfb6cef81dfb12c8829c28e580459bac8a4847fb5/simsimd-6.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ea4f0f68be5f85bbcf4322bfdd1b449176cf5fdd99960c546514457635632443", size = 102331, upload-time = "2024-11-27T13:14:42.27Z" }, - { url = "https://files.pythonhosted.org/packages/5d/2b/9e7d42ac54bdb32d76953db3bc83eec29bd5d5c9a4069d380b18e200d6bd/simsimd-6.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:12a8d60ccc8991dfbbf056c221ce4f02135f5892492894972f421a6f155015d9", size = 93455, upload-time = "2024-11-27T13:14:44.5Z" }, - { url = "https://files.pythonhosted.org/packages/13/9c/fac1167e80328d1e332f515c9cd62da4a0e12b9aa8ee90d448eb4ad5a47f/simsimd-6.2.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a74142ea21a6fd3ec5c64e4d4acf1ec6f4d80c0bb1a5989d68af6e84f7ac612e", size = 251040, upload-time = "2024-11-27T13:14:46.073Z" }, - { url = "https://files.pythonhosted.org/packages/31/93/b374e5538fc65cf381920bdba7603769b1b71e42afe2bb4939e9c338c423/simsimd-6.2.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:298f7c793fc2a1eeedcefa1278eb2ef6f52ce0b36aaa8780885f96a39ce1a4e8", size = 302428, upload-time = "2024-11-27T13:14:47.635Z" }, - { url = "https://files.pythonhosted.org/packages/e6/42/2733a0e11b660c6b10f3ec90d7fac6f96267368b961b1a43dda0456fa9f2/simsimd-6.2.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4025ebad36fb3fa5cffcd48d33375d5e5decc59c1129a259b74fed097eab1ab5", size = 227200, upload-time = "2024-11-27T13:14:50.058Z" }, - { url = 
"https://files.pythonhosted.org/packages/eb/ae/40e0804d06a351efe27bb6f8e4d332daeb1681d3f398ca10d8a2b087ab78/simsimd-6.2.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:f486682aa7a8918d86df411d3c11c635db4b67d514cb6bb499c0edab7fb8ec58", size = 432333, upload-time = "2024-11-27T13:14:51.692Z" }, - { url = "https://files.pythonhosted.org/packages/a7/eb/a823b0227b5dc43de8125f502237dd8e844b1e803a74e46aa7c3d0f24f83/simsimd-6.2.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:173e66699597a4fcf6fa50b52cced40216fdcfba15f60b761a2bd9cb1d98a444", size = 632659, upload-time = "2024-11-27T13:14:53.58Z" }, - { url = "https://files.pythonhosted.org/packages/0a/aa/aee48063c4a98aaea062316dedf598d0d9e09fa9edc28baab6886ae0afa8/simsimd-6.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b5c6f79f797cc020a2ff64950162dfb6d130c51a07cdac5ad97ec836e85ce50", size = 468407, upload-time = "2024-11-27T13:14:55.374Z" }, - { url = "https://files.pythonhosted.org/packages/d4/84/e89bc71456aa2d48e5acf3795b2384f597de643f17d00d752aa8217af233/simsimd-6.2.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:25812637f43feaef1a33ae00b81a4d2b0116aadae3a08267486c1e57236fc368", size = 268908, upload-time = "2024-11-27T13:14:57.232Z" }, - { url = "https://files.pythonhosted.org/packages/94/eb/774debec7ee727f436f15e5b5416b781c78564fff97c81a5fb3b636b4298/simsimd-6.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:592a578c788a9cb7877eff41487cc7f50474e00f774de74bea8590fa95c804ae", size = 344256, upload-time = "2024-11-27T13:14:58.982Z" }, - { url = "https://files.pythonhosted.org/packages/62/03/fec040e7fbb66fa4766ca959cfd766a22d7a00a4e9371f046d8fcc62d846/simsimd-6.2.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:191c020f312350ac06eee829376b11d8c1282da8fefb4381fe0625edfb678d8d", size = 389403, upload-time = "2024-11-27T13:15:01.049Z" }, - { url = "https://files.pythonhosted.org/packages/55/f0/ad441d90a4dde6e100155931fa4468e33cc23276c3caef6330d2a34b866c/simsimd-6.2.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e9ad2c247ed58ba9bb170a01295cb315a45c817775cc7e51ad342f70978a1057", size = 316665, upload-time = "2024-11-27T13:15:02.647Z" }, - { url = "https://files.pythonhosted.org/packages/05/27/843adbc6a468a58178dcb7907e72c670c8a7c36a06d8a4c5eac9573f5d2d/simsimd-6.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0ff603134600da12175e66b842b7a7331c827fa070d1d8b63386a40bc8d09fcd", size = 669697, upload-time = "2024-11-27T13:15:05.288Z" }, - { url = "https://files.pythonhosted.org/packages/6d/db/d2369e0d3b9ca469b923bc81d57dcfed922193e4e4d7cf5f7637df14dd51/simsimd-6.2.1-cp311-cp311-win32.whl", hash = "sha256:99dff4e04663c82284152ecc2e8bf76b2825f3f17e179abf7892e06196061056", size = 55007, upload-time = "2024-11-27T13:15:08.021Z" }, - { url = "https://files.pythonhosted.org/packages/73/9f/13d6fca5a32a062e84db0a68433ae416073986c8e1d20b5b936cad18bece/simsimd-6.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:0efc6343c440a26cf16463c4c667655af9597bcbd55ad66f33a80b2b84de7412", size = 86855, upload-time = "2024-11-27T13:15:09.834Z" }, - { url = "https://files.pythonhosted.org/packages/64/e9/7e0514f32c9a0e42261f598775b34a858477e0fcffccf32cc11f94e78ee2/simsimd-6.2.1-cp311-cp311-win_arm64.whl", hash = "sha256:2d364f2c24dd38578bf0eec436c4b901c900ae1893680f46eb5632e01330d814", size = 60195, upload-time = "2024-11-27T13:15:12.075Z" }, - { url = 
"https://files.pythonhosted.org/packages/81/87/1f521d471d9079d89dd6860b9dd5d0f39c1633675a30b71acd0bd37cbba5/simsimd-6.2.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9b3315e41bb759dc038ecd6f4fa7bcf278bf72ee7d982f752482cdc732aea271", size = 169397, upload-time = "2024-11-27T13:15:13.807Z" }, - { url = "https://files.pythonhosted.org/packages/4b/1a/b0627589737dc75ccd2ed58893e9e7f8b8e082531bd34d319481d88018d5/simsimd-6.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8d476c874bafa0d12d4c8c5c47faf17407f3c96140616384421c2aa980342b6f", size = 101478, upload-time = "2024-11-27T13:15:15.698Z" }, - { url = "https://files.pythonhosted.org/packages/e0/b7/e766f0ce9b595927ae1c534f1409b768187e8af567f4412ca220b67c1155/simsimd-6.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e9d4f15c06cc221d29e181197c7bbf92c5e829220cbeb3cd1cf080de78b04f2a", size = 93439, upload-time = "2024-11-27T13:15:17.299Z" }, - { url = "https://files.pythonhosted.org/packages/ae/48/3b5ec9b3a6063bae2f280f5168aca7099a44fa7ec8b42875b98c79c1d49b/simsimd-6.2.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d286fd4538cb1a1c70e69da00a3acee301519d578931b41161f4f1379d1195c6", size = 251469, upload-time = "2024-11-27T13:15:18.943Z" }, - { url = "https://files.pythonhosted.org/packages/70/86/16e8d5b9bdd34f75c7515adfad249f394653131bd1a1366076cf6113e84b/simsimd-6.2.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:050f68cfa85f1fb2cfa156280928e42926e3977034b755023ce1315bf59e87ff", size = 302974, upload-time = "2024-11-27T13:15:20.757Z" }, - { url = "https://files.pythonhosted.org/packages/02/09/3f4240f2b43957aa0d72a2203b2549c0326c7baf97b7f78c72d48d4cd3d2/simsimd-6.2.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:67bb4b17e04919545f29c7b708faaccbe027f164f8b5c9f4328604fa8f5560ea", size = 227864, upload-time = "2024-11-27T13:15:22.468Z" }, - { url = "https://files.pythonhosted.org/packages/07/4a/8c46806493c3a98025f01d81d9f55e0e574f11279c2ad77be919262ea9eb/simsimd-6.2.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3d6bffd999dbb36e606b065e0180365efac2606049c4f7818e4cba2d34c3678f", size = 432491, upload-time = "2024-11-27T13:15:24.201Z" }, - { url = "https://files.pythonhosted.org/packages/13/44/b56f207031405af52c6158c40e9f1121fe3a716d98946d9fa5919cf00266/simsimd-6.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:25adb244fb75dbf49af0d1bcac4ed4a3fef8e847d78449faa5595af0a3e20d61", size = 633061, upload-time = "2024-11-27T13:15:26.002Z" }, - { url = "https://files.pythonhosted.org/packages/4c/ad/241f87641af09a1789af8df559aa86b45218d087e09c37c2dd8c013819d6/simsimd-6.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b4542cee77e801a9c27370fc36ae271514fc0fb2ce14a35f8b25f47989e3d267", size = 468544, upload-time = "2024-11-27T13:15:27.84Z" }, - { url = "https://files.pythonhosted.org/packages/e2/3e/357aca7df85ed1092dfa50b91cf1b7c0df6f70b384a0e3798132dd824b5c/simsimd-6.2.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:4f665228f8ff4911790b485e74b00fa9586a141dde6011970be71bb303b5a22f", size = 269133, upload-time = "2024-11-27T13:15:29.63Z" }, - { url = "https://files.pythonhosted.org/packages/f0/67/079ca2c58bbc5812802c6ac1b332a6ef889d73cf1188726f36edc27898f6/simsimd-6.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:783b4308f80ae00763b0eaa0dac26196958f9c2df60d35a0347ebd2f82ece46d", size = 344412, upload-time = "2024-11-27T13:15:31.378Z" }, - { url = 
"https://files.pythonhosted.org/packages/3c/f0/500c9002276259c17e3a6a13a7c7f84e5119602decadbf40429c978655b0/simsimd-6.2.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:95055e72cfe313c1c8694783bf8a631cc15673b3b775abef367e396d931db0b8", size = 389546, upload-time = "2024-11-27T13:15:33.927Z" }, - { url = "https://files.pythonhosted.org/packages/55/a2/d3f4c6aabba0430758367b3de5bbab59b979bf3525c039b882001f1d2ade/simsimd-6.2.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a98f2b383f51b4f4ee568a637fc7958a347fdae0bd184cff8faa8030b6454a39", size = 316912, upload-time = "2024-11-27T13:15:35.991Z" }, - { url = "https://files.pythonhosted.org/packages/f8/a3/2514189c3aaa1beb1714b36be86e2d3af7067c3c95152d78cc4cffff6d87/simsimd-6.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2e474fd10ceb38e2c9f826108a7762f8ff7912974846d86f08c4e7b19cd35ed4", size = 670006, upload-time = "2024-11-27T13:15:38.037Z" }, - { url = "https://files.pythonhosted.org/packages/ef/23/dbf7c4aed7542260784dc7bc2056a4e5b6d716a14a9b40989d5c3096990a/simsimd-6.2.1-cp312-cp312-win32.whl", hash = "sha256:b2530ea44fffeab25e5752bec6a5991f30fbc430b04647980db5b195c0971d48", size = 55019, upload-time = "2024-11-27T13:15:39.999Z" }, - { url = "https://files.pythonhosted.org/packages/a0/d8/57304c2317822634abd475f5912584a3cfa13363740e9ec72c0622c894f1/simsimd-6.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:dc23283235d5b8f0373b95a547e26da2d7785647a5d0fa15c282fc8c49c0dcb0", size = 87133, upload-time = "2024-11-27T13:15:42.494Z" }, - { url = "https://files.pythonhosted.org/packages/3f/7b/ca333232a8bc87d1e846fa2feb9f0d4778500c30493726cb48f04551dfab/simsimd-6.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:5692ce7e56253178eea9dbd58191734918409b83d54b07cfdcecf868d0150a73", size = 60401, upload-time = "2024-11-27T13:15:44.367Z" }, - { url = "https://files.pythonhosted.org/packages/9b/f2/4ec7ed52c910a58a07043c5f3355adf4055246dafb79be57d0726e1a4aa0/simsimd-6.2.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:76b32fdc7142c9714e94651ece8bc00dd5139c554813211552aa358e44af0e07", size = 169399, upload-time = "2024-11-27T13:15:46.866Z" }, - { url = "https://files.pythonhosted.org/packages/61/d3/5af24e4f42e2b5bc3a06456ea9068d0fbcd23d8ceeb0e09fe54ed72cfdba/simsimd-6.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f44e5e2319427f94db658c6f75caae78850da505902874a1664a83ef5713f333", size = 101484, upload-time = "2024-11-27T13:15:48.64Z" }, - { url = "https://files.pythonhosted.org/packages/cf/86/816050f0fd0767e960c6b900e3c97fd6a4ae54a6aa5b8ef24846757a3f7d/simsimd-6.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:05323cbad7200592c2e53fbcc759e615594e8ca444ef5eddf9f3fb196ad4de9c", size = 93447, upload-time = "2024-11-27T13:15:50.37Z" }, - { url = "https://files.pythonhosted.org/packages/e9/7e/61dc3392eafd9fc20357b448aac5f84c84ad61289ab0ab3e5a4aaa1ca3ef/simsimd-6.2.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b1f3cbe5c39db2bb64f30999104de1215ba3805d6059af7bc5a9d662d50f4707", size = 251501, upload-time = "2024-11-27T13:15:53.208Z" }, - { url = "https://files.pythonhosted.org/packages/06/55/99d3cf2c2d844c1a57d81379acaebac2e0a0efdf1e73a53990cd84c1d719/simsimd-6.2.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaa94e0932ae2a48b7e4df8c29204dc9fe59f72b1faeb08e9d5015bf51fb9f21", size = 302991, upload-time = "2024-11-27T13:15:55.081Z" }, - { url = 
"https://files.pythonhosted.org/packages/6f/99/597b322835147f407e6f611810cb8232055711398fbbd47e6a14bfc0995f/simsimd-6.2.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:508465f8d4e3e0fff07c939921aeedf55b0ade9f56f64e938c350c283dea42fb", size = 227917, upload-time = "2024-11-27T13:15:58.301Z" }, - { url = "https://files.pythonhosted.org/packages/ba/8a/6a6596a97d1cc7068a26935bbdd7f170a889240b8081e000aef09b6d0549/simsimd-6.2.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:ca67f6273ef544c74c48b134af756de7c98a711ccf69cd0791225f26dd449281", size = 432527, upload-time = "2024-11-27T13:16:00.248Z" }, - { url = "https://files.pythonhosted.org/packages/46/0e/5c6e82fa9fe9a21481fe0f6546b4986e07e42bd4d8b6f04f4475b8d7564e/simsimd-6.2.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:d470b43ce606f21f54a23fc19ad6928333e17d0956b02eb27b7b112edc156a10", size = 633095, upload-time = "2024-11-27T13:16:02.247Z" }, - { url = "https://files.pythonhosted.org/packages/ae/53/2e17bd16e2ca2a73cd447b89fa7059ae7275c82840f229bf917936ee800a/simsimd-6.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59518b9834c167a1dd8900600718e95cdadc9d74525452f426aa8455a38c55ef", size = 468561, upload-time = "2024-11-27T13:16:04.241Z" }, - { url = "https://files.pythonhosted.org/packages/86/8b/1319605c630973741bc749b6e432e56dded2b6a7db0744b659c0de613ab3/simsimd-6.2.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:59c2978c4e402097d8a4b38f076ff98cc43e6b059d53f89736404f26e9a9bd5a", size = 269157, upload-time = "2024-11-27T13:16:06.201Z" }, - { url = "https://files.pythonhosted.org/packages/53/50/1cac5113a542c82d5b5399d454c578a65ba14951bfff38aef297104f72fe/simsimd-6.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:edc68e727d53ed2866dcfb625f15e52be8f1e6809f4be2147bf8d2115a2542b7", size = 344437, upload-time = "2024-11-27T13:16:08.13Z" }, - { url = "https://files.pythonhosted.org/packages/9a/72/44905ee0e2ed999c52ad1eebf2c8705ce2776212a6387d77355df2c76704/simsimd-6.2.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:9e5e82551d75c0e2cd0d4b8af8db1cae7b5ac6dcc076c0c760870ff81f78135b", size = 389569, upload-time = "2024-11-27T13:16:10.196Z" }, - { url = "https://files.pythonhosted.org/packages/ee/d6/9b4a9141ceb29150d86698553c8e0193256b069bc755e875836c14a6f12e/simsimd-6.2.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:2fa19f8c9786757d19afcbda9f8fb68de55e4f5562725ae8727f887d01bf0e4d", size = 316923, upload-time = "2024-11-27T13:16:12.13Z" }, - { url = "https://files.pythonhosted.org/packages/ce/c0/de6aebd58b8de8f0177395b8fd68afb9a27ec010427c4ccd6104b94b6569/simsimd-6.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5b0748aa6bd4df4c5a3f5e979aec14b26588f1b2e0d44075dcc9eaf4d555e15b", size = 670038, upload-time = "2024-11-27T13:16:14.104Z" }, - { url = "https://files.pythonhosted.org/packages/77/32/4c74664656231ccb43be4328dba40e9ada63d3cc1e557b1785ae0b9560b5/simsimd-6.2.1-cp313-cp313-win32.whl", hash = "sha256:7f43721e1a4ebe8d2245b0e85dd7de7153d1bf22839579d5f69a345909c68d9e", size = 55017, upload-time = "2024-11-27T13:16:16.163Z" }, - { url = "https://files.pythonhosted.org/packages/76/7f/57e02f6b2d09a1d42697e739b002bbe2112f8b8384d15d166154ec4cec44/simsimd-6.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:6af1565e0ef7060bc52a38e3273a8e6e92aff47835965dc5311298563475935e", size = 87138, upload-time = "2024-11-27T13:16:17.973Z" }, - { url = 
"https://files.pythonhosted.org/packages/38/b9/941876e98dd1f98c158cd5e6633dc1573d1be6daf8f2e3ad5d15e6a8024d/simsimd-6.2.1-cp313-cp313-win_arm64.whl", hash = "sha256:e690b41377c8dd157d585713b0bc35c845aee7742334bf12d1f087fc8a65b6c3", size = 60408, upload-time = "2024-11-27T13:16:20.052Z" }, - { url = "https://files.pythonhosted.org/packages/6d/0c/34607a15edde12916f53ed90ab4f53fd200b3ccb9d5e50a820ff5247b9c8/simsimd-6.2.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:45010111c39117af851a323e78bd43e6a344349b4ed7b1f5ca4c4ebb2284c7e5", size = 170235, upload-time = "2024-11-27T13:17:36.159Z" }, - { url = "https://files.pythonhosted.org/packages/a4/38/eaf7e8be501dc4cd09f8ccf2961f41a32cb9f285b214561e7f84168f2222/simsimd-6.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dd6ecae57a481f9fc0bceb331cba7b18a0b23a71f15af7d06cdf8aa8aac38645", size = 102326, upload-time = "2024-11-27T13:17:39.345Z" }, - { url = "https://files.pythonhosted.org/packages/70/83/c9b5e20adf7c91c2fe0597ba9128819a5353ad38f107c53442e0934122f5/simsimd-6.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ffbb874d4c3ed53443468f9c20704845cc8736d5717817c35d5cb12ad5548c7a", size = 93445, upload-time = "2024-11-27T13:17:41.539Z" }, - { url = "https://files.pythonhosted.org/packages/53/26/bf18b09ff20db78d559c95a202a7e1e619a9a188cd34657aa55a24895d72/simsimd-6.2.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b6147ddc390c08a802af258ad204b1d775bb3d180ec6f6fcea82f4fd71fb447", size = 250808, upload-time = "2024-11-27T13:17:44.052Z" }, - { url = "https://files.pythonhosted.org/packages/61/46/ae7ef2c945d6be2f3d1eb025fcc14e72280ea827b1bef3b9a81d86102fe8/simsimd-6.2.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0048df2245d239ed016e5f4b5d75e96987149bf7245e90713e1fe3b53e321a74", size = 302187, upload-time = "2024-11-27T13:17:47.336Z" }, - { url = "https://files.pythonhosted.org/packages/bf/79/dad6ff124a41db6c92d6afe0919c832778617830ba41fcf588add7cfcdc0/simsimd-6.2.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fc087d9dacab1eb4abc2f3d9f33047fc601db501cb43165e658973fe5fd50c9b", size = 227003, upload-time = "2024-11-27T13:17:49.637Z" }, - { url = "https://files.pythonhosted.org/packages/9a/d8/b71698d2414eb41d8b3ecd34ec49a8e92c6a1b4ae01d3e6337df026a83a5/simsimd-6.2.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:d1d2e6c3d655a34b42c6e0d0c28ac7b86498858ffb68c58733893fc538bd26a9", size = 432085, upload-time = "2024-11-27T13:17:52.035Z" }, - { url = "https://files.pythonhosted.org/packages/3c/b0/6594bf7f87608c7f531eb4fc7e368dd6c22fa56c53221def30732f76dc81/simsimd-6.2.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:d063beb7a53d8525af56c4247e1e43a7fa161b70bcbacf30daab639b32ad4a10", size = 632449, upload-time = "2024-11-27T13:17:54.469Z" }, - { url = "https://files.pythonhosted.org/packages/ca/10/db0662c635f1cef946be3c67d420711ffdacdcbd0a4a4e1a3d856c7790f5/simsimd-6.2.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4a517ae74d18a8b7d4d349cf4afed45f33cd660cb44d0ae34c95d00c1f7fa760", size = 468247, upload-time = "2024-11-27T13:17:56.976Z" }, - { url = "https://files.pythonhosted.org/packages/cd/d5/f2c4a107018daf58e1702116fce8096653ed70e230b521b69d6cc15b8004/simsimd-6.2.1-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:a79a2bd32ba0f90f70c22accf4b441846049b55aeae73556f4b5c6e9fe6e024f", size = 268712, upload-time = "2024-11-27T13:17:59.497Z" }, - { url = 
"https://files.pythonhosted.org/packages/48/73/7b5f0cf2e59438699cba4d9f6437ac44c2c04583a8bfb1e713a6f91c329f/simsimd-6.2.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4c9487acdae92b4089a0142cd3691328bfdcaaebf2587a0c11df4039ff7005e8", size = 344070, upload-time = "2024-11-27T13:18:01.937Z" }, - { url = "https://files.pythonhosted.org/packages/23/9d/d5696507a1a486d2ca42f0cea8ae6495b53cb8893a97be4fefdfbeba42d0/simsimd-6.2.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:1c4760dee8f65a890b82a6175d5294d30271637495a9e4195969fc1ad38ec056", size = 389209, upload-time = "2024-11-27T13:18:04.776Z" }, - { url = "https://files.pythonhosted.org/packages/79/29/999a125a3e725172e8d52605bdaa3326f885ca6f460841adafe063b8955f/simsimd-6.2.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:abee753fbb8584373218bf78396ae3d2b2a1202c7284cd9c70695535c62cdc31", size = 316445, upload-time = "2024-11-27T13:18:07.316Z" }, - { url = "https://files.pythonhosted.org/packages/e6/a8/cff109cb6060e9e773bbc9bfcf253ea8911d94ef146136ff7cbbbe80e0ae/simsimd-6.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:300042eeab379923d77bca328fdc2ac1df8adfdffa9a6939f28ba6b196f02002", size = 669464, upload-time = "2024-11-27T13:18:10.45Z" }, - { url = "https://files.pythonhosted.org/packages/86/ec/2ec8bd871fd09699a81912b0d2a9aa53119b8790c13c81e738cf95ceea4a/simsimd-6.2.1-cp39-cp39-win32.whl", hash = "sha256:2eed0ad770b18a3b74b19ad744ee3224dae9bf1a86bd9126eae0636ada53eebd", size = 54914, upload-time = "2024-11-27T13:18:12.986Z" }, - { url = "https://files.pythonhosted.org/packages/81/1c/ea93dc5ee1effea28299026d173b9acda20e7d61ca96122b8bb3ad08af15/simsimd-6.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:e99cc8aa19af5ca3574aa72e1d0e959c4859345fdf553a887ce22e469c1145a8", size = 86867, upload-time = "2024-11-27T13:18:15.478Z" }, - { url = "https://files.pythonhosted.org/packages/de/b3/6f54c339999f36c2391bf4880391fa59e9b496665162a3fa6d77887e302e/simsimd-6.2.1-cp39-cp39-win_arm64.whl", hash = "sha256:37b0db92ca0fec835ec1256d6dd167d7c9f727d3372b98bf27b1fd59ad299768", size = 60198, upload-time = "2024-11-27T13:18:17.834Z" }, -] - [[package]] name = "six" version = "1.17.0" @@ -5740,6 +4564,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/aa/e4/592120713a314621c692211eba034d09becaf6bc8848fabc1dc2a54d8c16/SQLAlchemy-2.0.38-py3-none-any.whl", hash = "sha256:63178c675d4c80def39f1febd625a6333f44c0ba269edd8a468b156394b27753", size = 1896347, upload-time = "2025-02-06T22:08:29.784Z" }, ] +[[package]] +name = "sseclient-py" +version = "1.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e8/ed/3df5ab8bb0c12f86c28d0cadb11ed1de44a92ed35ce7ff4fd5518a809325/sseclient-py-1.8.0.tar.gz", hash = "sha256:c547c5c1a7633230a38dc599a21a2dc638f9b5c297286b48b46b935c71fac3e8", size = 7791, upload-time = "2023-09-01T19:39:20.45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/49/58/97655efdfeb5b4eeab85b1fc5d3fa1023661246c2ab2a26ea8e47402d4f2/sseclient_py-1.8.0-py2.py3-none-any.whl", hash = "sha256:4ecca6dc0b9f963f8384e9d7fd529bf93dd7d708144c4fb5da0e0a1a926fee83", size = 8828, upload-time = "2023-09-01T19:39:17.627Z" }, +] + [[package]] name = "stack-data" version = "0.6.3" @@ -5754,18 +4587,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521, upload-time = 
"2023-09-30T13:58:03.53Z" }, ] -[[package]] -name = "sympy" -version = "1.13.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "mpmath" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ca/99/5a5b6f19ff9f083671ddf7b9632028436167cd3d33e11015754e41b249a4/sympy-1.13.1.tar.gz", hash = "sha256:9cebf7e04ff162015ce31c9c6c9144daa34a93bd082f54fd8f12deca4f47515f", size = 7533040, upload-time = "2024-07-19T09:26:51.238Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b2/fe/81695a1aa331a842b582453b605175f419fe8540355886031328089d840a/sympy-1.13.1-py3-none-any.whl", hash = "sha256:db36cdc64bf61b9b24578b6f7bab1ecdd2452cf008f34faa33776680c26d66f8", size = 6189177, upload-time = "2024-07-19T09:26:48.863Z" }, -] - [[package]] name = "syrupy" version = "4.8.1" @@ -5778,6 +4599,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/80/47/5e8f44ec0f287b08e8c1f3fc63fe1fbe182f07bf606eec903d7827b95e51/syrupy-4.8.1-py3-none-any.whl", hash = "sha256:274f97cbaf44175f5e478a2f3a53559d31f41c66c6bf28131695f94ac893ea00", size = 50326, upload-time = "2025-01-13T12:09:29.96Z" }, ] +[[package]] +name = "tabulate" +version = "0.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ec/fe/802052aecb21e3797b8f7902564ab6ea0d60ff8ca23952079064155d1ae1/tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c", size = 81090, upload-time = "2022-10-06T17:21:48.54Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/44/4a5f08c96eb108af5cb50b41f76142f0afa346dfa99d5296fe7202a11854/tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f", size = 35252, upload-time = "2022-10-06T17:21:44.262Z" }, +] + [[package]] name = "tenacity" version = "9.0.0" @@ -5801,15 +4631,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6a/9e/2064975477fdc887e47ad42157e214526dcad8f317a948dee17e1659a62f/terminado-0.18.1-py3-none-any.whl", hash = "sha256:a4468e1b37bb318f8a86514f65814e1afc977cf29b3992a4500d9dd305dcceb0", size = 14154, upload-time = "2024-03-12T14:34:36.569Z" }, ] -[[package]] -name = "threadpoolctl" -version = "3.5.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/bd/55/b5148dcbf72f5cde221f8bfe3b6a540da7aa1842f6b491ad979a6c8b84af/threadpoolctl-3.5.0.tar.gz", hash = "sha256:082433502dd922bf738de0d8bcc4fdcbf0979ff44c42bd40f5af8a282f6fa107", size = 41936, upload-time = "2024-04-29T13:50:16.544Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/4b/2c/ffbf7a134b9ab11a67b0cf0726453cedd9c5043a4fe7a35d1cefa9a1bcfb/threadpoolctl-3.5.0-py3-none-any.whl", hash = "sha256:56c1e26c150397e58c4926da8eeee87533b1e32bef131bd4bf6a2f45f3185467", size = 18414, upload-time = "2024-04-29T13:50:14.014Z" }, -] - [[package]] name = "tiktoken" version = "0.8.0" @@ -5864,6 +4685,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e6/34/ebdc18bae6aa14fbee1a08b63c015c72b64868ff7dae68808ab500c492e2/tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289", size = 26610, upload-time = "2024-10-24T14:58:28.029Z" }, ] +[[package]] +name = "together" +version = "0.2.11" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "pydantic" }, + { name = "requests" }, + { name = "sseclient-py" }, + { name = "tabulate" }, + 
{ name = "tqdm" }, + { name = "typer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/36/8f/ca74863a4591b359100470974b2f4f2c2221ce764bae0fe9b36f80dc38fe/together-0.2.11.tar.gz", hash = "sha256:906ae626b0ec49f7ea90d5660fb4c28664a1f55719f998e868d035f417316e24", size = 41518, upload-time = "2024-01-20T01:13:10.575Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/9c/ff58694ce629171db4563ec0a8c274a1c6b2804526c8ed204d3e61912498/together-0.2.11-py3-none-any.whl", hash = "sha256:0760aafe1b53cf41cb58592086a2b5edef956a2536b52daecfc2f9ef0fc736d5", size = 43800, upload-time = "2024-01-20T01:13:08.505Z" }, +] + [[package]] name = "tokenizers" version = "0.21.0" @@ -5937,57 +4776,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257, upload-time = "2024-11-27T22:38:35.385Z" }, ] -[[package]] -name = "torch" -version = "2.6.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "filelock" }, - { name = "fsspec" }, - { name = "jinja2" }, - { name = "networkx", version = "3.2.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, - { name = "networkx", version = "3.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, - { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-cusparselt-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "setuptools", marker = "python_full_version >= '3.12'" }, - { name = "sympy" }, - { name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, - { name = "typing-extensions" }, -] -wheels = [ - { url = "https://files.pythonhosted.org/packages/37/81/aa9ab58ec10264c1abe62c8b73f5086c3c558885d6beecebf699f0dbeaeb/torch-2.6.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:6860df13d9911ac158f4c44031609700e1eba07916fff62e21e6ffa0a9e01961", size = 766685561, upload-time = "2025-01-29T16:19:12.12Z" }, - { url = 
"https://files.pythonhosted.org/packages/86/86/e661e229df2f5bfc6eab4c97deb1286d598bbeff31ab0cdb99b3c0d53c6f/torch-2.6.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:c4f103a49830ce4c7561ef4434cc7926e5a5fe4e5eb100c19ab36ea1e2b634ab", size = 95751887, upload-time = "2025-01-29T16:27:50.77Z" }, - { url = "https://files.pythonhosted.org/packages/20/e0/5cb2f8493571f0a5a7273cd7078f191ac252a402b5fb9cb6091f14879109/torch-2.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:56eeaf2ecac90da5d9e35f7f35eb286da82673ec3c582e310a8d1631a1c02341", size = 204165139, upload-time = "2025-01-29T16:27:11.63Z" }, - { url = "https://files.pythonhosted.org/packages/e5/16/ea1b7842413a7b8a5aaa5e99e8eaf3da3183cc3ab345ad025a07ff636301/torch-2.6.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:09e06f9949e1a0518c5b09fe95295bc9661f219d9ecb6f9893e5123e10696628", size = 66520221, upload-time = "2025-01-29T16:22:18.862Z" }, - { url = "https://files.pythonhosted.org/packages/78/a9/97cbbc97002fff0de394a2da2cdfa859481fdca36996d7bd845d50aa9d8d/torch-2.6.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:7979834102cd5b7a43cc64e87f2f3b14bd0e1458f06e9f88ffa386d07c7446e1", size = 766715424, upload-time = "2025-01-29T16:25:15.874Z" }, - { url = "https://files.pythonhosted.org/packages/6d/fa/134ce8f8a7ea07f09588c9cc2cea0d69249efab977707cf67669431dcf5c/torch-2.6.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:ccbd0320411fe1a3b3fec7b4d3185aa7d0c52adac94480ab024b5c8f74a0bf1d", size = 95759416, upload-time = "2025-01-29T16:27:38.429Z" }, - { url = "https://files.pythonhosted.org/packages/11/c5/2370d96b31eb1841c3a0883a492c15278a6718ccad61bb6a649c80d1d9eb/torch-2.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:46763dcb051180ce1ed23d1891d9b1598e07d051ce4c9d14307029809c4d64f7", size = 204164970, upload-time = "2025-01-29T16:26:16.182Z" }, - { url = "https://files.pythonhosted.org/packages/0b/fa/f33a4148c6fb46ca2a3f8de39c24d473822d5774d652b66ed9b1214da5f7/torch-2.6.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:94fc63b3b4bedd327af588696559f68c264440e2503cc9e6954019473d74ae21", size = 66530713, upload-time = "2025-01-29T16:26:38.881Z" }, - { url = "https://files.pythonhosted.org/packages/e5/35/0c52d708144c2deb595cd22819a609f78fdd699b95ff6f0ebcd456e3c7c1/torch-2.6.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:2bb8987f3bb1ef2675897034402373ddfc8f5ef0e156e2d8cfc47cacafdda4a9", size = 766624563, upload-time = "2025-01-29T16:23:19.084Z" }, - { url = "https://files.pythonhosted.org/packages/01/d6/455ab3fbb2c61c71c8842753b566012e1ed111e7a4c82e0e1c20d0c76b62/torch-2.6.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:b789069020c5588c70d5c2158ac0aa23fd24a028f34a8b4fcb8fcb4d7efcf5fb", size = 95607867, upload-time = "2025-01-29T16:25:55.649Z" }, - { url = "https://files.pythonhosted.org/packages/18/cf/ae99bd066571656185be0d88ee70abc58467b76f2f7c8bfeb48735a71fe6/torch-2.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:7e1448426d0ba3620408218b50aa6ada88aeae34f7a239ba5431f6c8774b1239", size = 204120469, upload-time = "2025-01-29T16:24:01.821Z" }, - { url = "https://files.pythonhosted.org/packages/81/b4/605ae4173aa37fb5aa14605d100ff31f4f5d49f617928c9f486bb3aaec08/torch-2.6.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:9a610afe216a85a8b9bc9f8365ed561535c93e804c2a317ef7fabcc5deda0989", size = 66532538, upload-time = "2025-01-29T16:24:18.976Z" }, - { url = "https://files.pythonhosted.org/packages/24/85/ead1349fc30fe5a32cadd947c91bda4a62fbfd7f8c34ee61f6398d38fb48/torch-2.6.0-cp313-cp313-manylinux1_x86_64.whl", hash = 
"sha256:4874a73507a300a5d089ceaff616a569e7bb7c613c56f37f63ec3ffac65259cf", size = 766626191, upload-time = "2025-01-29T16:17:26.26Z" }, - { url = "https://files.pythonhosted.org/packages/dd/b0/26f06f9428b250d856f6d512413e9e800b78625f63801cbba13957432036/torch-2.6.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:a0d5e1b9874c1a6c25556840ab8920569a7a4137afa8a63a32cee0bc7d89bd4b", size = 95611439, upload-time = "2025-01-29T16:21:21.061Z" }, - { url = "https://files.pythonhosted.org/packages/c2/9c/fc5224e9770c83faed3a087112d73147cd7c7bfb7557dcf9ad87e1dda163/torch-2.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:510c73251bee9ba02ae1cb6c9d4ee0907b3ce6020e62784e2d7598e0cfa4d6cc", size = 204126475, upload-time = "2025-01-29T16:21:55.394Z" }, - { url = "https://files.pythonhosted.org/packages/88/8b/d60c0491ab63634763be1537ad488694d316ddc4a20eaadd639cedc53971/torch-2.6.0-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:ff96f4038f8af9f7ec4231710ed4549da1bdebad95923953a25045dcf6fd87e2", size = 66536783, upload-time = "2025-01-29T16:22:08.559Z" }, - { url = "https://files.pythonhosted.org/packages/40/bb/feb5644baa621fd8e1e88bf51f6fa38ab3f985d472a764144ff4867ac1d6/torch-2.6.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:9ea955317cfcd3852b1402b62af258ce735c2edeee42ca9419b6bc889e5ae053", size = 766680961, upload-time = "2025-01-29T16:20:39.827Z" }, - { url = "https://files.pythonhosted.org/packages/ee/11/08774a8198a33263947c59e04b8a0bf85a61a44e82100c46cf833bbce35e/torch-2.6.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:bb2c6c3e65049f081940f5ab15c9136c7de40d3f01192541c920a07c7c585b7e", size = 95782656, upload-time = "2025-01-29T16:21:33.978Z" }, - { url = "https://files.pythonhosted.org/packages/c1/0d/56fb07032accbfebb4555638b6002ec5678d0942da85497e40f9405ab756/torch-2.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:683410f97984103148e31b38a8631acf31c3034c020c0f4d26171e7626d8317a", size = 204061417, upload-time = "2025-01-29T16:18:07.965Z" }, - { url = "https://files.pythonhosted.org/packages/b3/17/41f681b87290a1d2f1394f943e470f8b0b3c2987b7df8dc078d8831fce5b/torch-2.6.0-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:265f70de5fd45b864d924b64be1797f86e76c8e48a02c2a3a6fc7ec247d2226c", size = 66520446, upload-time = "2025-01-29T16:19:42.165Z" }, -] - [[package]] name = "tornado" version = "6.4.2" @@ -6028,37 +4816,16 @@ wheels = [ ] [[package]] -name = "transformers" -version = "4.48.3" +name = "typer" +version = "0.9.4" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "filelock" }, - { name = "huggingface-hub" }, - { name = "numpy", version = "1.26.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.13'" }, - { name = "numpy", version = "2.2.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.13'" }, - { name = "packaging" }, - { name = "pyyaml" }, - { name = "regex" }, - { name = "requests" }, - { name = "safetensors" }, - { name = "tokenizers" }, - { name = "tqdm" }, + { name = "click" }, + { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e3/82/cebeb7af5e64440f1638f18c4ed0f89156d0eeaa6290d98da8ca93ac3872/transformers-4.48.3.tar.gz", hash = "sha256:a5e8f1e9a6430aa78215836be70cecd3f872d99eeda300f41ad6cc841724afdb", size = 8373458, upload-time = "2025-02-07T10:10:47.402Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e9/7d/b1e0399aa5e27071f0042784681d28417f3e526c61f62c8e3635ee5ad334/typer-0.9.4.tar.gz", hash = 
"sha256:f714c2d90afae3a7929fcd72a3abb08df305e1ff61719381384211c4070af57f", size = 276061, upload-time = "2024-03-23T17:07:55.568Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b6/1a/efeecb8d83705f2f4beac98d46f2148c95ecd7babfb31b5c0f1e7017e83d/transformers-4.48.3-py3-none-any.whl", hash = "sha256:78697f990f5ef350c23b46bf86d5081ce96b49479ab180b2de7687267de8fd36", size = 9669412, upload-time = "2025-02-07T10:10:43.395Z" }, -] - -[[package]] -name = "triton" -version = "3.2.0" -source = { registry = "https://pypi.org/simple" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/01/65/3ffa90e158a2c82f0716eee8d26a725d241549b7d7aaf7e4f44ac03ebd89/triton-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3e54983cd51875855da7c68ec05c05cf8bb08df361b1d5b69e05e40b0c9bd62", size = 253090354, upload-time = "2025-01-22T19:12:21.872Z" }, - { url = "https://files.pythonhosted.org/packages/a7/2e/757d2280d4fefe7d33af7615124e7e298ae7b8e3bc4446cdb8e88b0f9bab/triton-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8009a1fb093ee8546495e96731336a33fb8856a38e45bb4ab6affd6dbc3ba220", size = 253157636, upload-time = "2025-01-22T19:12:51.322Z" }, - { url = "https://files.pythonhosted.org/packages/06/00/59500052cb1cf8cf5316be93598946bc451f14072c6ff256904428eaf03c/triton-3.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d9b215efc1c26fa7eefb9a157915c92d52e000d2bf83e5f69704047e63f125c", size = 253159365, upload-time = "2025-01-22T19:13:24.648Z" }, - { url = "https://files.pythonhosted.org/packages/c7/30/37a3384d1e2e9320331baca41e835e90a3767303642c7a80d4510152cbcf/triton-3.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5dfa23ba84541d7c0a531dfce76d8bcd19159d50a4a8b14ad01e91734a5c1b0", size = 253154278, upload-time = "2025-01-22T19:13:54.221Z" }, - { url = "https://files.pythonhosted.org/packages/bc/74/9f12bdedeb110242d8bb1bd621f6605e753ee0cbf73cf7f3a62b8173f190/triton-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30ceed0eff2c4a73b14eb63e052992f44bbdf175f3fad21e1ac8097a772de7ee", size = 253057866, upload-time = "2025-01-22T19:14:23.943Z" }, + { url = "https://files.pythonhosted.org/packages/62/39/82c9d3e10979851847361d922a373bdfef4091020da7f893acfaf07c0225/typer-0.9.4-py3-none-any.whl", hash = "sha256:aa6c4a4e2329d868b80ecbaf16f807f2b54e192209d7ac9dd42691d63f7a54eb", size = 45973, upload-time = "2024-03-23T17:07:53.985Z" }, ] [[package]] @@ -6251,15 +5018,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e7/00/3fca040d7cf8a32776d3d81a00c8ee7457e00f80c649f1e4a863c8321ae9/uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363", size = 11140, upload-time = "2023-06-21T01:49:03.467Z" }, ] -[[package]] -name = "uritemplate" -version = "4.1.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d2/5a/4742fdba39cd02a56226815abfa72fe0aa81c33bed16ed045647d6000eba/uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0", size = 273898, upload-time = "2021-10-13T11:15:14.84Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/81/c0/7461b49cd25aeece13766f02ee576d1db528f1c37ce69aee300e075b485b/uritemplate-4.1.1-py2.py3-none-any.whl", hash = "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e", size = 10356, upload-time = 
"2021-10-13T11:15:12.316Z" }, -] - [[package]] name = "urllib3" version = "1.26.20" diff --git a/libs/partners/openai/pyproject.toml b/libs/partners/openai/pyproject.toml index a54595796de..799c1fa8da9 100644 --- a/libs/partners/openai/pyproject.toml +++ b/libs/partners/openai/pyproject.toml @@ -7,12 +7,12 @@ authors = [] license = { text = "MIT" } requires-python = ">=3.9" dependencies = [ - "langchain-core<1.0.0,>=0.3.68", + "langchain-core<1.0.0,>=0.4.0.dev0", "openai<2.0.0,>=1.86.0", "tiktoken<1,>=0.7", ] name = "langchain-openai" -version = "0.3.28" +version = "0.4.0.dev0" description = "An integration package connecting OpenAI and LangChain" readme = "README.md" @@ -92,4 +92,4 @@ filterwarnings = [ "tests/**/*.py" = [ "S101", # Tests need assertions "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -] \ No newline at end of file +] diff --git a/libs/partners/openai/uv.lock b/libs/partners/openai/uv.lock index 0934e7aa241..8c4731d08a1 100644 --- a/libs/partners/openai/uv.lock +++ b/libs/partners/openai/uv.lock @@ -480,7 +480,7 @@ wheels = [ [[package]] name = "langchain-core" -version = "0.3.72" +version = "0.4.0.dev0" source = { editable = "../../core" } dependencies = [ { name = "jsonpatch" }, @@ -538,7 +538,7 @@ typing = [ [[package]] name = "langchain-openai" -version = "0.3.28" +version = "0.4.0.dev0" source = { editable = "." } dependencies = [ { name = "langchain-core" },