diff --git a/.github/workflows/_release.yml b/.github/workflows/_release.yml
index 03c943efb53..22b2d9e4e62 100644
--- a/.github/workflows/_release.yml
+++ b/.github/workflows/_release.yml
@@ -289,7 +289,8 @@ jobs:
         env:
           MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
         run: |
-          VIRTUAL_ENV=.venv uv pip install --force-reinstall $MIN_VERSIONS --editable .
+          VIRTUAL_ENV=.venv uv pip install --force-reinstall --editable .
+          VIRTUAL_ENV=.venv uv pip install --force-reinstall $MIN_VERSIONS
           make tests
         working-directory: ${{ inputs.working-directory }}
diff --git a/.github/workflows/scheduled_test.yml b/.github/workflows/scheduled_test.yml
index 939b2a87a7a..2fd8bddea23 100644
--- a/.github/workflows/scheduled_test.yml
+++ b/.github/workflows/scheduled_test.yml
@@ -20,7 +20,7 @@ env:
   POETRY_VERSION: "1.8.4"
   UV_FROZEN: "true"
   DEFAULT_LIBS: '["libs/partners/openai", "libs/partners/anthropic", "libs/partners/fireworks", "libs/partners/groq", "libs/partners/mistralai", "libs/partners/xai", "libs/partners/google-vertexai", "libs/partners/google-genai", "libs/partners/aws"]'
-  POETRY_LIBS: ("libs/partners/google-vertexai" "libs/partners/google-genai" "libs/partners/aws")
+  POETRY_LIBS: ("libs/partners/aws")

 jobs:
   # Generate dynamic test matrix based on input parameters or defaults
diff --git a/docs/docs/integrations/chat/anthropic.ipynb b/docs/docs/integrations/chat/anthropic.ipynb
index 12d0710c679..0f5fcaa9324 100644
--- a/docs/docs/integrations/chat/anthropic.ipynb
+++ b/docs/docs/integrations/chat/anthropic.ipynb
@@ -998,8 +998,6 @@
     " ]\n",
     "```\n",
     "\n",
-    "We also need to specify the `search-results-2025-06-09` beta when instantiating ChatAnthropic. You can see an end-to-end example below.\n",
-    "\n",
     "<details>\n",
     "<summary>End to end example with LangGraph</summary>\n",
     "\n",
diff --git a/libs/core/langchain_core/messages/utils.py b/libs/core/langchain_core/messages/utils.py
index 771f0231e4e..e9376b13728 100644
--- a/libs/core/langchain_core/messages/utils.py
+++ b/libs/core/langchain_core/messages/utils.py
@@ -291,6 +291,9 @@ def _create_message_from_message_type(
         message = FunctionMessage(content=content, **kwargs)
     elif message_type == "tool":
         artifact = kwargs.get("additional_kwargs", {}).pop("artifact", None)
+        status = kwargs.get("additional_kwargs", {}).pop("status", None)
+        if status is not None:
+            kwargs["status"] = status
         message = ToolMessage(content=content, artifact=artifact, **kwargs)
     elif message_type == "remove":
         message = RemoveMessage(**kwargs)
diff --git a/libs/core/langchain_core/runnables/graph.py b/libs/core/langchain_core/runnables/graph.py
index cebf2a667c1..22f0b8ba35d 100644
--- a/libs/core/langchain_core/runnables/graph.py
+++ b/libs/core/langchain_core/runnables/graph.py
@@ -614,7 +614,6 @@ class Graph:

         Returns:
             The Mermaid syntax string.
-
         """
         # Import locally to prevent circular import
         from langchain_core.runnables.graph_mermaid import draw_mermaid  # noqa: PLC0415
@@ -648,6 +647,7 @@ class Graph:
         max_retries: int = 1,
         retry_delay: float = 1.0,
         frontmatter_config: Optional[dict[str, Any]] = None,
+        base_url: Optional[str] = None,
     ) -> bytes:
         """Draw the graph as a PNG image using Mermaid.
@@ -683,6 +683,8 @@ class Graph:
                         "themeVariables": { "primaryColor": "#e2e2e2"},
                     }
                 }
+            base_url: The base URL of the Mermaid server for rendering via API.
+                Defaults to None.

         Returns:
             The PNG image as bytes.
@@ -707,6 +709,7 @@ class Graph:
             padding=padding,
             max_retries=max_retries,
             retry_delay=retry_delay,
+            base_url=base_url,
         )
diff --git a/libs/core/langchain_core/runnables/graph_mermaid.py b/libs/core/langchain_core/runnables/graph_mermaid.py
index df1468fc437..fe945dace4d 100644
--- a/libs/core/langchain_core/runnables/graph_mermaid.py
+++ b/libs/core/langchain_core/runnables/graph_mermaid.py
@@ -277,6 +277,7 @@ def draw_mermaid_png(
     padding: int = 10,
     max_retries: int = 1,
     retry_delay: float = 1.0,
+    base_url: Optional[str] = None,
 ) -> bytes:
     """Draws a Mermaid graph as PNG using provided syntax.
@@ -293,6 +294,8 @@ def draw_mermaid_png(
             Defaults to 1.
         retry_delay (float, optional): Delay between retries (MermaidDrawMethod.API).
             Defaults to 1.0.
+        base_url (str, optional): Base URL for the Mermaid.ink API.
+            Defaults to None.

     Returns:
         bytes: PNG image bytes.
@@ -313,6 +316,7 @@ def draw_mermaid_png(
             background_color=background_color,
             max_retries=max_retries,
             retry_delay=retry_delay,
+            base_url=base_url,
         )
     else:
         supported_methods = ", ".join([m.value for m in MermaidDrawMethod])
@@ -404,8 +408,12 @@ def _render_mermaid_using_api(
     file_type: Optional[Literal["jpeg", "png", "webp"]] = "png",
     max_retries: int = 1,
     retry_delay: float = 1.0,
+    base_url: Optional[str] = None,
 ) -> bytes:
     """Renders Mermaid graph using the Mermaid.INK API."""
+    # Defaults to using the public mermaid.ink server.
+    base_url = base_url if base_url is not None else "https://mermaid.ink"
+
     if not _HAS_REQUESTS:
         msg = (
             "Install the `requests` module to use the Mermaid.INK API: "
@@ -425,7 +433,7 @@ def _render_mermaid_using_api(
         background_color = f"!{background_color}"

     image_url = (
-        f"https://mermaid.ink/img/{mermaid_syntax_encoded}"
+        f"{base_url}/img/{mermaid_syntax_encoded}"
         f"?type={file_type}&bgColor={background_color}"
     )
@@ -457,7 +465,7 @@ def _render_mermaid_using_api(
             # For other status codes, fail immediately
             msg = (
-                "Failed to reach https://mermaid.ink/ API while trying to render "
+                f"Failed to reach {base_url} API while trying to render "
                 f"your graph. Status code: {response.status_code}.\n\n"
             ) + error_msg_suffix
             raise ValueError(msg)
@@ -469,14 +477,14 @@ def _render_mermaid_using_api(
                 time.sleep(sleep_time)
             else:
                 msg = (
-                    "Failed to reach https://mermaid.ink/ API while trying to render "
+                    f"Failed to reach {base_url} API while trying to render "
                     f"your graph after {max_retries} retries. "
                 ) + error_msg_suffix
                 raise ValueError(msg) from e

     # This should not be reached, but just in case
     msg = (
-        "Failed to reach https://mermaid.ink/ API while trying to render "
+        f"Failed to reach {base_url} API while trying to render "
         f"your graph after {max_retries} retries. "
     ) + error_msg_suffix
     raise ValueError(msg)
diff --git a/libs/core/langchain_core/tools/base.py b/libs/core/langchain_core/tools/base.py
index fd9880e3335..b9949559961 100644
--- a/libs/core/langchain_core/tools/base.py
+++ b/libs/core/langchain_core/tools/base.py
@@ -1362,8 +1362,8 @@ def get_all_basemodel_annotations(
             continue

         # if class = FooBar inherits from Baz[str]:
-        #     parent = Baz[str],
-        #     parent_origin = Baz,
+        #     parent = class Baz[str],
+        #     parent_origin = class Baz,
         #     generic_type_vars = (type vars in Baz)
         #     generic_map = {type var in Baz: str}
         generic_type_vars: tuple = getattr(parent_origin, "__parameters__", ())
diff --git a/libs/core/langchain_core/tools/convert.py b/libs/core/langchain_core/tools/convert.py
index 691550439c1..0c48b3c7e4d 100644
--- a/libs/core/langchain_core/tools/convert.py
+++ b/libs/core/langchain_core/tools/convert.py
@@ -315,7 +315,7 @@ def tool(
     if runnable is not None:
         # tool is used as a function
-        # tool_from_runnable = tool("name", runnable)
+        # for instance tool_from_runnable = tool("name", runnable)
         if not name_or_callable:
             msg = "Runnable without name for tool constructor"
             raise ValueError(msg)
diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml
index 3024ed2610c..d2de169f549 100644
--- a/libs/core/pyproject.toml
+++ b/libs/core/pyproject.toml
@@ -73,9 +73,6 @@ disallow_any_generics = false
 warn_return_any = false

-[tool.ruff]
-target-version = "py39"
-
 [tool.ruff.format]
 docstring-code-format = true
diff --git a/libs/core/tests/unit_tests/callbacks/test_sync_callback_manager.py b/libs/core/tests/unit_tests/callbacks/test_sync_callback_manager.py
index d8b3aab6001..0cdabea9cc4 100644
--- a/libs/core/tests/unit_tests/callbacks/test_sync_callback_manager.py
+++ b/libs/core/tests/unit_tests/callbacks/test_sync_callback_manager.py
@@ -1,3 +1,5 @@
+import pytest
+
 from langchain_core.callbacks.base import BaseCallbackHandler, BaseCallbackManager
@@ -13,3 +15,29 @@ def test_remove_handler() -> None:
     manager = BaseCallbackManager([handler1], inheritable_handlers=[handler2])
     manager.remove_handler(handler1)
     manager.remove_handler(handler2)
+
+
+@pytest.mark.xfail(
+    reason="TODO: #32028 merge() incorrectly mixes handlers and inheritable_handlers"
+)
+def test_merge_preserves_handler_distinction() -> None:
+    """Test that merging managers preserves the distinction between handlers.
+
+    This test verifies the correct behavior of the BaseCallbackManager.merge()
+    method. When two managers are merged, their handlers and
+    inheritable_handlers should be combined independently.
+
+    Currently, it is expected to xfail until the issue is resolved.
+    """
+    h1 = BaseCallbackHandler()
+    h2 = BaseCallbackHandler()
+    ih1 = BaseCallbackHandler()
+    ih2 = BaseCallbackHandler()
+
+    m1 = BaseCallbackManager(handlers=[h1], inheritable_handlers=[ih1])
+    m2 = BaseCallbackManager(handlers=[h2], inheritable_handlers=[ih2])
+
+    merged = m1.merge(m2)
+
+    assert set(merged.handlers) == {h1, h2}
+    assert set(merged.inheritable_handlers) == {ih1, ih2}
diff --git a/libs/core/tests/unit_tests/indexing/test_indexing.py b/libs/core/tests/unit_tests/indexing/test_indexing.py
index a3e88e7535b..0f4850fafe0 100644
--- a/libs/core/tests/unit_tests/indexing/test_indexing.py
+++ b/libs/core/tests/unit_tests/indexing/test_indexing.py
@@ -2421,14 +2421,12 @@ def test_index_into_document_index(record_manager: InMemoryRecordManager) -> Non
         "num_updated": 2,
     }

-    # TODO: This test is failing due to an existing bug with DocumentIndex deletion
-    # when indexing an empty list. Skipping this assertion for now.
-    # assert index([], record_manager, document_index, cleanup="full") == {
-    #     "num_added": 0,
-    #     "num_deleted": 2,
-    #     "num_skipped": 0,
-    #     "num_updated": 0,
-    # }
+    assert index([], record_manager, document_index, cleanup="full") == {
+        "num_added": 0,
+        "num_deleted": 2,
+        "num_skipped": 0,
+        "num_updated": 0,
+    }


 async def test_aindex_into_document_index(
@@ -2460,6 +2458,7 @@
         "num_skipped": 2,
         "num_updated": 0,
     }
+
     assert await aindex(
         docs, arecord_manager, document_index, cleanup="full", force_update=True
     ) == {
@@ -2469,14 +2468,12 @@
         "num_updated": 2,
     }

-    # TODO: This test is failing due to an existing bug with DocumentIndex deletion
-    # when indexing an empty list. Skipping this assertion for now.
-    # assert await aindex([], arecord_manager, document_index, cleanup="full") == {
-    #     "num_added": 0,
-    #     "num_deleted": 2,
-    #     "num_skipped": 0,
-    #     "num_updated": 0,
-    # }
+    assert await aindex([], arecord_manager, document_index, cleanup="full") == {
+        "num_added": 0,
+        "num_deleted": 2,
+        "num_skipped": 0,
+        "num_updated": 0,
+    }


 def test_index_with_upsert_kwargs(
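The `base_url` parameter added above threads from `Graph.draw_mermaid_png` through `draw_mermaid_png` into `_render_mermaid_using_api`, so a self-hosted Mermaid.INK-compatible server can stand in for the public one. A minimal sketch using names from this patch (the server URL is a placeholder; omitting `base_url` falls back to https://mermaid.ink):

    from langchain_core.runnables.graph import MermaidDrawMethod
    from langchain_core.runnables.graph_mermaid import draw_mermaid_png

    png_bytes = draw_mermaid_png(
        "graph TD;\n A --> B;",
        draw_method=MermaidDrawMethod.API,
        base_url="https://mermaid.internal.example",  # placeholder for a self-hosted server
    )

The unit tests in the next file pin down both the default and the custom-URL paths.
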
diff --git a/libs/core/tests/unit_tests/runnables/test_graph.py b/libs/core/tests/unit_tests/runnables/test_graph.py
index fd9ff2f813e..398ae45e66e 100644
--- a/libs/core/tests/unit_tests/runnables/test_graph.py
+++ b/libs/core/tests/unit_tests/runnables/test_graph.py
@@ -1,4 +1,5 @@
 from typing import Any, Optional
+from unittest.mock import MagicMock, patch

 from packaging import version
 from pydantic import BaseModel
@@ -12,8 +13,12 @@
 from langchain_core.output_parsers.xml import XMLOutputParser
 from langchain_core.prompts.prompt import PromptTemplate
 from langchain_core.runnables import RunnableConfig
 from langchain_core.runnables.base import Runnable
-from langchain_core.runnables.graph import Edge, Graph, Node
-from langchain_core.runnables.graph_mermaid import _escape_node_label
+from langchain_core.runnables.graph import Edge, Graph, MermaidDrawMethod, Node
+from langchain_core.runnables.graph_mermaid import (
+    _escape_node_label,
+    _render_mermaid_using_api,
+    draw_mermaid_png,
+)
 from langchain_core.utils.pydantic import PYDANTIC_VERSION
 from tests.unit_tests.pydantic_utils import _normalize_schema
@@ -561,3 +566,90 @@ def test_graph_mermaid_frontmatter_config(snapshot: SnapshotAssertion) -> None:
             }
         }
     ) == snapshot(name="mermaid")
+
+
+def test_mermaid_base_url_default() -> None:
+    """Test that _render_mermaid_using_api defaults to mermaid.ink when None."""
+    mock_response = MagicMock()
+    mock_response.status_code = 200
+    mock_response.content = b"fake image data"
+
+    with patch("requests.get", return_value=mock_response) as mock_get:
+        # Call the function with base_url=None (default)
+        _render_mermaid_using_api(
+            "graph TD;\n A --> B;",
+            base_url=None,
+        )
+
+        # Verify that the URL was constructed with the default base URL
+        assert mock_get.called
+        args, kwargs = mock_get.call_args
+        url = args[0]  # First argument to requests.get is the URL
+        assert url.startswith("https://mermaid.ink")
+
+
+def test_mermaid_base_url_custom() -> None:
+    """Test that _render_mermaid_using_api uses custom base_url when provided."""
+    custom_url = "https://custom.mermaid.com"
+    mock_response = MagicMock()
+    mock_response.status_code = 200
+    mock_response.content = b"fake image data"
+
+    with patch("requests.get", return_value=mock_response) as mock_get:
+        # Call the function with custom base_url.
+        _render_mermaid_using_api(
+            "graph TD;\n A --> B;",
+            base_url=custom_url,
+        )
+
+        # Verify that the URL was constructed with our custom base URL
+        assert mock_get.called
+        args, kwargs = mock_get.call_args
+        url = args[0]  # First argument to requests.get is the URL
+        assert url.startswith(custom_url)
+
+
+def test_draw_mermaid_png_function_base_url() -> None:
+    """Test that draw_mermaid_png function passes base_url to API renderer."""
+    custom_url = "https://custom.mermaid.com"
+    mock_response = MagicMock()
+    mock_response.status_code = 200
+    mock_response.content = b"fake image data"
+
+    with patch("requests.get", return_value=mock_response) as mock_get:
+        # Call draw_mermaid_png with custom base_url
+        draw_mermaid_png(
+            "graph TD;\n A --> B;",
+            draw_method=MermaidDrawMethod.API,
+            base_url=custom_url,
+        )
+
+        # Verify that the URL was constructed with our custom base URL
+        assert mock_get.called
+        args, kwargs = mock_get.call_args
+        url = args[0]  # First argument to requests.get is the URL
+        assert url.startswith(custom_url)
+
+
+def test_graph_draw_mermaid_png_base_url() -> None:
+    """Test that Graph.draw_mermaid_png method passes base_url to renderer."""
+    custom_url = "https://custom.mermaid.com"
+    mock_response = MagicMock()
+    mock_response.status_code = 200
+    mock_response.content = b"fake image data"
+
+    with patch("requests.get", return_value=mock_response) as mock_get:
+        # Create a simple graph
+        graph = Graph()
+        start_node = graph.add_node(BaseModel, id="start")
+        end_node = graph.add_node(BaseModel, id="end")
+        graph.add_edge(start_node, end_node)
+
+        # Call draw_mermaid_png with custom base_url
+        graph.draw_mermaid_png(draw_method=MermaidDrawMethod.API, base_url=custom_url)
+
+        # Verify that the URL was constructed with our custom base URL
+        assert mock_get.called
+        args, kwargs = mock_get.call_args
+        url = args[0]  # First argument to requests.get is the URL
+        assert url.startswith(custom_url)
diff --git a/libs/core/tests/unit_tests/test_messages.py b/libs/core/tests/unit_tests/test_messages.py
index ceed3eb4662..d94b0278421 100644
--- a/libs/core/tests/unit_tests/test_messages.py
+++ b/libs/core/tests/unit_tests/test_messages.py
@@ -743,6 +743,7 @@ def test_convert_to_messages() -> None:
             "tool_call_id": "tool_id2",
             "content": "Bye!",
             "artifact": {"foo": 123},
+            "status": "success",
         },
         {"role": "remove", "id": "message_to_remove", "content": ""},
         {
@@ -776,7 +777,12 @@
             ],
         ),
         ToolMessage(tool_call_id="tool_id", content="Hi!"),
-        ToolMessage(tool_call_id="tool_id2", content="Bye!", artifact={"foo": 123}),
+        ToolMessage(
+            tool_call_id="tool_id2",
+            content="Bye!",
+            artifact={"foo": 123},
+            status="success",
+        ),
         RemoveMessage(id="message_to_remove"),
         HumanMessage(
             content="Now the turn for Larry to ask a question about the book!",
diff --git a/libs/langchain/langchain/agents/openai_assistant/base.py b/libs/langchain/langchain/agents/openai_assistant/base.py
index 37ef8c554d5..c0c7138466d 100644
--- a/libs/langchain/langchain/agents/openai_assistant/base.py
+++ b/libs/langchain/langchain/agents/openai_assistant/base.py
@@ -375,6 +375,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
             run_manager.on_chain_error(e)
             raise
         try:
+            # Use sync response handler in sync invoke
             response = self._get_response(run)
         except BaseException as e:
             run_manager.on_chain_error(e, metadata=run.dict())
             raise
@@ -511,7 +512,8 @@ class OpenAIAssistantRunnable(RunnableSerializable[dict, OutputType]):
             run_manager.on_chain_error(e)
             raise
         try:
-            response = self._get_response(run)
+            # Use async response handler in async ainvoke
+            response = await self._aget_response(run)
         except BaseException as e:
             run_manager.on_chain_error(e, metadata=run.dict())
             raise
diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml
index 07d17ea3318..19e55f30996 100644
--- a/libs/langchain/pyproject.toml
+++ b/libs/langchain/pyproject.toml
@@ -120,7 +120,6 @@ langchain-text-splitters = { path = "../text-splitters", editable = true }
 langchain-openai = { path = "../partners/openai", editable = true }

 [tool.ruff]
-target-version = "py39"
 exclude = ["tests/integration_tests/examples/non-utf8-encoding.py"]

 [tool.mypy]
diff --git a/libs/langchain/tests/unit_tests/agents/test_openai_assistant.py b/libs/langchain/tests/unit_tests/agents/test_openai_assistant.py
index 8304295ac91..61a79e8a02d 100644
--- a/libs/langchain/tests/unit_tests/agents/test_openai_assistant.py
+++ b/libs/langchain/tests/unit_tests/agents/test_openai_assistant.py
@@ -17,7 +17,7 @@ def _create_mock_client(*_: Any, use_async: bool = False, **__: Any) -> Any:

 @pytest.mark.requires("openai")
 def test_user_supplied_client() -> None:
-    import openai
+    openai = pytest.importorskip("openai")

     client = openai.AzureOpenAI(
         azure_endpoint="azure_endpoint",
@@ -48,6 +48,85 @@ def test_create_assistant() -> None:
     assert isinstance(assistant, OpenAIAssistantRunnable)


+@pytest.mark.requires("openai")
+@patch(
+    "langchain.agents.openai_assistant.base._get_openai_async_client",
+    new=partial(_create_mock_client, use_async=True),
+)
+async def test_ainvoke_uses_async_response_completed() -> None:
+    # Arrange a runner with mocked async client and a completed run
+    assistant = OpenAIAssistantRunnable(
+        assistant_id="assistant_id",
+        client=_create_mock_client(),
+        async_client=_create_mock_client(use_async=True),
+        as_agent=False,
+    )
+    mock_run = MagicMock()
+    mock_run.id = "run-id"
+    mock_run.thread_id = "thread-id"
+    mock_run.status = "completed"
+
+    # _await_for_run returns a completed run
+    await_for_run_mock = AsyncMock(return_value=mock_run)
+    # async messages list returns messages belonging to run
+    msg = MagicMock()
+    msg.run_id = "run-id"
+    msg.content = []
+    list_mock = AsyncMock(return_value=[msg])
+
+    with (
+        patch.object(assistant, "_await_for_run", await_for_run_mock),
+        patch.object(
+            assistant.async_client.beta.threads.messages,
+            "list",
+            list_mock,
+        ),
+    ):
+        # Act
+        result = await assistant.ainvoke({"content": "hi"})
+
+    # Assert: returns messages list (non-agent path) and did not block
+    assert isinstance(result, list)
+    list_mock.assert_awaited()
+
+
+@pytest.mark.requires("openai")
+@patch(
+    "langchain.agents.openai_assistant.base._get_openai_async_client",
+    new=partial(_create_mock_client, use_async=True),
+)
+async def test_ainvoke_uses_async_response_requires_action_agent() -> None:
+    # Arrange a runner with mocked async client and requires_action run
+    assistant = OpenAIAssistantRunnable(
+        assistant_id="assistant_id",
+        client=_create_mock_client(),
+        async_client=_create_mock_client(use_async=True),
+        as_agent=True,
+    )
+    mock_run = MagicMock()
+    mock_run.id = "run-id"
+    mock_run.thread_id = "thread-id"
+    mock_run.status = "requires_action"
+
+    # Fake tool call structure
+    tool_call = MagicMock()
+    tool_call.id = "tool-id"
+    tool_call.function.name = "foo"
+    tool_call.function.arguments = '{\n "x": 1\n}'
+    mock_run.required_action.submit_tool_outputs.tool_calls = [tool_call]
+
+    await_for_run_mock = AsyncMock(return_value=mock_run)
+
+    # Act
+    with patch.object(assistant, "_await_for_run", await_for_run_mock):
+        result = await assistant.ainvoke({"content": "hi"})
+
+    # Assert: returns list of OpenAIAssistantAction
+    assert isinstance(result, list)
+    assert result
+    assert getattr(result[0], "tool", None) == "foo"
+
+
 @pytest.mark.requires("openai")
 @patch(
     "langchain.agents.openai_assistant.base._get_openai_async_client",
diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py
index 65f19765862..236a8fa6f40 100644
--- a/libs/partners/anthropic/langchain_anthropic/chat_models.py
+++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py
@@ -1186,7 +1186,7 @@ class ChatAnthropic(BaseChatModel):

             from langchain_anthropic import ChatAnthropic

-            llm = ChatAnthropic(model="claude-3-5-sonnet-latest")
+            llm = ChatAnthropic(model="claude-3-5-haiku-latest")

             tool = {"type": "web_search_20250305", "name": "web_search", "max_uses": 3}
             llm_with_tools = llm.bind_tools([tool])
diff --git a/libs/partners/anthropic/pyproject.toml b/libs/partners/anthropic/pyproject.toml
index ec7c9f1f4ac..db404835bac 100644
--- a/libs/partners/anthropic/pyproject.toml
+++ b/libs/partners/anthropic/pyproject.toml
@@ -56,9 +56,6 @@ langchain-tests = { path = "../../standard-tests", editable = true }
 disallow_untyped_defs = "True"
 plugins = ['pydantic.mypy']

-[tool.ruff]
-target-version = "py39"
-
 [tool.ruff.lint]
 select = [
     "A",  # flake8-builtins
diff --git a/libs/partners/anthropic/tests/cassettes/test_remote_mcp.yaml.gz b/libs/partners/anthropic/tests/cassettes/test_remote_mcp.yaml.gz
index e29dfe5a082..c082b738dc8 100644
Binary files a/libs/partners/anthropic/tests/cassettes/test_remote_mcp.yaml.gz and b/libs/partners/anthropic/tests/cassettes/test_remote_mcp.yaml.gz differ
diff --git a/libs/partners/anthropic/tests/cassettes/test_web_search.yaml.gz b/libs/partners/anthropic/tests/cassettes/test_web_search.yaml.gz
index 546cdf8c505..6802f280403 100644
Binary files a/libs/partners/anthropic/tests/cassettes/test_web_search.yaml.gz and b/libs/partners/anthropic/tests/cassettes/test_web_search.yaml.gz differ
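The docs and integration tests below swap the web search examples from Sonnet to Haiku; the server-tool binding itself is unchanged. A sketch following the updated docstring (the final prompt string is illustrative):

    from langchain_anthropic import ChatAnthropic

    llm = ChatAnthropic(model="claude-3-5-haiku-latest")
    tool = {"type": "web_search_20250305", "name": "web_search", "max_uses": 3}
    llm_with_tools = llm.bind_tools([tool])
    response = llm_with_tools.invoke("What major LangChain release shipped this week?")
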
+ """ llm = ChatAnthropic( model="claude-sonnet-4-20250514", # type: ignore[call-arg] betas=["code-execution-2025-05-22"], @@ -1189,6 +1193,10 @@ def test_code_execution(output_version: Literal["v0", "v1"]) -> None: @pytest.mark.vcr @pytest.mark.parametrize("output_version", ["v0", "v1"]) def test_remote_mcp(output_version: Literal["v0", "v1"]) -> None: + """Note: this is a beta feature. + + TODO: Update to remove beta once generally available. + """ mcp_servers = [ { "type": "url", @@ -1253,6 +1261,10 @@ def test_remote_mcp(output_version: Literal["v0", "v1"]) -> None: @pytest.mark.parametrize("block_format", ["anthropic", "standard"]) def test_files_api_image(block_format: str) -> None: + """Note: this is a beta feature. + + TODO: Update to remove beta once generally available. + """ image_file_id = os.getenv("ANTHROPIC_FILES_API_IMAGE_ID") if not image_file_id: pytest.skip() @@ -1286,6 +1298,10 @@ def test_files_api_image(block_format: str) -> None: @pytest.mark.parametrize("block_format", ["anthropic", "standard"]) def test_files_api_pdf(block_format: str) -> None: + """Note: this is a beta feature. + + TODO: Update to remove beta once generally available. + """ pdf_file_id = os.getenv("ANTHROPIC_FILES_API_PDF_ID") if not pdf_file_id: pytest.skip() @@ -1315,7 +1331,6 @@ def test_search_result_tool_message() -> None: """Test that we can pass a search result tool message to the model.""" llm = ChatAnthropic( model="claude-3-5-haiku-latest", # type: ignore[call-arg] - betas=["search-results-2025-06-09"], ) @tool @@ -1373,7 +1388,6 @@ def test_search_result_tool_message() -> None: def test_search_result_top_level() -> None: llm = ChatAnthropic( model="claude-3-5-haiku-latest", # type: ignore[call-arg] - betas=["search-results-2025-06-09"], ) input_message = HumanMessage( [ diff --git a/libs/partners/anthropic/uv.lock b/libs/partners/anthropic/uv.lock index 2a718f91801..842dc8b227a 100644 --- a/libs/partners/anthropic/uv.lock +++ b/libs/partners/anthropic/uv.lock @@ -512,7 +512,7 @@ typing = [ [[package]] name = "langchain-tests" -version = "0.3.20" +version = "0.3.21" source = { editable = "../../standard-tests" } dependencies = [ { name = "httpx" }, @@ -550,7 +550,8 @@ test = [{ name = "langchain-core", editable = "../../core" }] test-integration = [] typing = [ { name = "langchain-core", editable = "../../core" }, - { name = "mypy", specifier = ">=1.17.1,<2" }, + { name = "mypy", specifier = ">=1.17.1,<1.18" }, + { name = "types-pyyaml", specifier = ">=6.0.12.2,<7.0.0.0" }, ] [[package]] diff --git a/libs/partners/chroma/pyproject.toml b/libs/partners/chroma/pyproject.toml index c6b1a1883dc..cd6dc551424 100644 --- a/libs/partners/chroma/pyproject.toml +++ b/libs/partners/chroma/pyproject.toml @@ -58,9 +58,6 @@ langchain-tests = { path = "../../standard-tests", editable = true } [tool.mypy] disallow_untyped_defs = true -[tool.ruff] -target-version = "py39" - [tool.ruff.lint] select = [ "A", # flake8-builtins diff --git a/libs/partners/deepseek/pyproject.toml b/libs/partners/deepseek/pyproject.toml index 49fe2636cd2..bfd1449f911 100644 --- a/libs/partners/deepseek/pyproject.toml +++ b/libs/partners/deepseek/pyproject.toml @@ -45,9 +45,6 @@ langchain-tests = { path = "../../standard-tests", editable = true } [tool.mypy] disallow_untyped_defs = "True" -[tool.ruff] -target-version = "py39" - [tool.ruff.lint] select = [ "A", # flake8-builtins diff --git a/libs/partners/exa/pyproject.toml b/libs/partners/exa/pyproject.toml index 5c748c3b37f..5873fa966ae 100644 --- 
a/libs/partners/exa/pyproject.toml +++ b/libs/partners/exa/pyproject.toml @@ -44,9 +44,6 @@ langchain-core = { path = "../../core", editable = true } [tool.mypy] disallow_untyped_defs = "True" -[tool.ruff] -target-version = "py39" - [tool.ruff.lint] select = [ "A", # flake8-builtins diff --git a/libs/partners/fireworks/pyproject.toml b/libs/partners/fireworks/pyproject.toml index 5924eef72e8..59389af1145 100644 --- a/libs/partners/fireworks/pyproject.toml +++ b/libs/partners/fireworks/pyproject.toml @@ -48,9 +48,6 @@ langchain-tests = { path = "../../standard-tests", editable = true } [tool.mypy] disallow_untyped_defs = "True" -[tool.ruff] -target-version = "py39" - [tool.ruff.lint] select = [ "A", # flake8-builtins diff --git a/libs/partners/groq/pyproject.toml b/libs/partners/groq/pyproject.toml index 349f61e266d..1e4a3758e60 100644 --- a/libs/partners/groq/pyproject.toml +++ b/libs/partners/groq/pyproject.toml @@ -40,9 +40,6 @@ langchain-tests = { path = "../../standard-tests", editable = true } [tool.mypy] disallow_untyped_defs = "True" -[tool.ruff] -target-version = "py39" - [tool.ruff.lint] select = [ "A", # flake8-builtins diff --git a/libs/partners/mistralai/pyproject.toml b/libs/partners/mistralai/pyproject.toml index 0381cdc1b7c..d94a48bba1f 100644 --- a/libs/partners/mistralai/pyproject.toml +++ b/libs/partners/mistralai/pyproject.toml @@ -44,9 +44,6 @@ langchain-tests = { path = "../../standard-tests", editable = true } [tool.mypy] disallow_untyped_defs = "True" -[tool.ruff] -target-version = "py39" - [tool.ruff.lint] select = [ "A", # flake8-builtins diff --git a/libs/partners/nomic/pyproject.toml b/libs/partners/nomic/pyproject.toml index 0b42497a911..5d661378f6c 100644 --- a/libs/partners/nomic/pyproject.toml +++ b/libs/partners/nomic/pyproject.toml @@ -40,9 +40,6 @@ dev = ["langchain-core"] [tool.uv.sources] langchain-core = { path = "../../core", editable = true } -[tool.ruff] -target-version = "py39" - [tool.ruff.lint] select = [ "A", # flake8-builtins diff --git a/libs/partners/ollama/langchain_ollama/chat_models.py b/libs/partners/ollama/langchain_ollama/chat_models.py index 0301b55e5aa..ebccbef37b0 100644 --- a/libs/partners/ollama/langchain_ollama/chat_models.py +++ b/libs/partners/ollama/langchain_ollama/chat_models.py @@ -661,8 +661,10 @@ class ChatOllama(BaseChatModel): if isinstance(message.content, str): content = message.content else: - for content_part in cast(list[dict], message.content): - if content_part.get("type") == "text": + for content_part in message.content: + if isinstance(content_part, str): + content += f"\n{content_part}" + elif content_part.get("type") == "text": content += f"\n{content_part['text']}" elif content_part.get("type") == "tool_use": continue diff --git a/libs/partners/ollama/pyproject.toml b/libs/partners/ollama/pyproject.toml index 45ad9421b94..7e57cf45e00 100644 --- a/libs/partners/ollama/pyproject.toml +++ b/libs/partners/ollama/pyproject.toml @@ -42,9 +42,6 @@ langchain-tests = { path = "../../standard-tests", editable = true } [tool.mypy] disallow_untyped_defs = "True" -[tool.ruff] -target-version = "py39" - [tool.ruff.lint] select = [ "A", # flake8-builtins diff --git a/libs/partners/openai/langchain_openai/embeddings/azure.py b/libs/partners/openai/langchain_openai/embeddings/azure.py index be8ddcc8b9e..70e50b2dd08 100644 --- a/libs/partners/openai/langchain_openai/embeddings/azure.py +++ b/libs/partners/openai/langchain_openai/embeddings/azure.py @@ -173,13 +173,15 @@ class AzureOpenAIEmbeddings(OpenAIEmbeddings): # type: 
ignore[override] # between azure_endpoint and base_url (openai_api_base). openai_api_base = self.openai_api_base if openai_api_base and self.validate_base_url: - if "/openai" not in openai_api_base: - self.openai_api_base = cast(str, self.openai_api_base) + "/openai" - raise ValueError( - "As of openai>=1.0.0, Azure endpoints should be specified via " - "the `azure_endpoint` param not `openai_api_base` " - "(or alias `base_url`). " - ) + # Only validate openai_api_base if azure_endpoint is not provided + if not self.azure_endpoint: + if "/openai" not in openai_api_base: + self.openai_api_base = cast(str, self.openai_api_base) + "/openai" + raise ValueError( + "As of openai>=1.0.0, Azure endpoints should be specified via " + "the `azure_endpoint` param not `openai_api_base` " + "(or alias `base_url`). " + ) if self.deployment: raise ValueError( "As of openai>=1.0.0, if `deployment` (or alias " diff --git a/libs/partners/openai/pyproject.toml b/libs/partners/openai/pyproject.toml index 1970062c6a7..8f652161621 100644 --- a/libs/partners/openai/pyproject.toml +++ b/libs/partners/openai/pyproject.toml @@ -60,9 +60,6 @@ disallow_untyped_defs = "True" module = "transformers" ignore_missing_imports = true -[tool.ruff] -target-version = "py39" - [tool.ruff.lint] select = ["E", "F", "I", "T201", "UP", "S"] ignore = [ "UP007", "UP045" ] diff --git a/libs/partners/openai/uv.lock b/libs/partners/openai/uv.lock index 496ab4d337a..2164a75d69c 100644 --- a/libs/partners/openai/uv.lock +++ b/libs/partners/openai/uv.lock @@ -605,7 +605,8 @@ test = [{ name = "langchain-core", editable = "../../core" }] test-integration = [] typing = [ { name = "langchain-core", editable = "../../core" }, - { name = "mypy", specifier = ">=1.17.1,<2" }, + { name = "mypy", specifier = ">=1.17.1,<1.18" }, + { name = "types-pyyaml", specifier = ">=6.0.12.2,<7.0.0.0" }, ] [[package]] diff --git a/libs/partners/perplexity/pyproject.toml b/libs/partners/perplexity/pyproject.toml index 684023f94ba..bd3ba0dab11 100644 --- a/libs/partners/perplexity/pyproject.toml +++ b/libs/partners/perplexity/pyproject.toml @@ -55,9 +55,6 @@ plugins = ['pydantic.mypy'] module = "transformers" ignore_missing_imports = true -[tool.ruff] -target-version = "py39" - [tool.ruff.lint] select = ["E", "F", "I", "T201", "UP", "S"] ignore = [ "UP007", "UP045"] diff --git a/libs/partners/prompty/pyproject.toml b/libs/partners/prompty/pyproject.toml index e96b0480c97..18be3ad7dce 100644 --- a/libs/partners/prompty/pyproject.toml +++ b/libs/partners/prompty/pyproject.toml @@ -45,9 +45,6 @@ langchain-core = { path = "../../core", editable = true } langchain-text-splitters = { path = "../../text-splitters", editable = true } langchain = { path = "../../langchain", editable = true } -[tool.ruff] -target-version = "py39" - [tool.ruff.lint] select = ["E", "F", "I", "T201", "UP", "S"] ignore = [ "UP007", "UP045" ] diff --git a/libs/partners/qdrant/langchain_qdrant/qdrant.py b/libs/partners/qdrant/langchain_qdrant/qdrant.py index 38f8e76060e..d6be80dcc5e 100644 --- a/libs/partners/qdrant/langchain_qdrant/qdrant.py +++ b/libs/partners/qdrant/langchain_qdrant/qdrant.py @@ -237,21 +237,53 @@ class QdrantVectorStore(VectorStore): return self._client @property - def embeddings(self) -> Embeddings: + def embeddings(self) -> Optional[Embeddings]: """Get the dense embeddings instance that is being used. - Raises: - ValueError: If embeddings are ``None``. - Returns: - Embeddings: An instance of ``Embeddings``. + Embeddings: An instance of ``Embeddings``, or None for SPARSE mode. 
""" - if self._embeddings is None: - msg = "Embeddings are `None`. Please set using the `embedding` parameter." - raise ValueError(msg) return self._embeddings + def _get_retriever_tags(self) -> list[str]: + """Get tags for retriever. + + Override the base class method to handle SPARSE mode where embeddings can be + None. In SPARSE mode, embeddings is None, so we don't include embeddings class + name in tags. In DENSE/HYBRID modes, embeddings is not None, so we include + embeddings class name. + """ + tags = [self.__class__.__name__] + + # Handle different retrieval modes + if self.retrieval_mode == RetrievalMode.SPARSE: + # SPARSE mode: no dense embeddings, so no embeddings class name in tags + pass + else: + # DENSE/HYBRID modes: include embeddings class name if available + if self.embeddings is not None: + tags.append(self.embeddings.__class__.__name__) + + return tags + + def _require_embeddings(self, operation: str) -> Embeddings: + """Require embeddings for operations that need them. + + Args: + operation: Description of the operation requiring embeddings. + + Returns: + The embeddings instance. + + Raises: + ValueError: If embeddings are None and required for the operation. + """ + if self.embeddings is None: + msg = f"Embeddings are required for {operation}" + raise ValueError(msg) + return self.embeddings + @property def sparse_embeddings(self) -> SparseEmbeddings: """Get the sparse embeddings instance that is being used. @@ -517,7 +549,8 @@ class QdrantVectorStore(VectorStore): **kwargs, } if self.retrieval_mode == RetrievalMode.DENSE: - query_dense_embedding = self.embeddings.embed_query(query) + embeddings = self._require_embeddings("DENSE mode") + query_dense_embedding = embeddings.embed_query(query) results = self.client.query_points( query=query_dense_embedding, using=self.vector_name, @@ -536,7 +569,8 @@ class QdrantVectorStore(VectorStore): ).points elif self.retrieval_mode == RetrievalMode.HYBRID: - query_dense_embedding = self.embeddings.embed_query(query) + embeddings = self._require_embeddings("HYBRID mode") + query_dense_embedding = embeddings.embed_query(query) query_sparse_embedding = self.sparse_embeddings.embed_query(query) results = self.client.query_points( prefetch=[ @@ -690,7 +724,8 @@ class QdrantVectorStore(VectorStore): self.embeddings, ) - query_embedding = self.embeddings.embed_query(query) + embeddings = self._require_embeddings("max_marginal_relevance_search") + query_embedding = embeddings.embed_query(query) return self.max_marginal_relevance_search_by_vector( query_embedding, k=k, @@ -1041,7 +1076,8 @@ class QdrantVectorStore(VectorStore): texts: Iterable[str], ) -> list[models.VectorStruct]: if self.retrieval_mode == RetrievalMode.DENSE: - batch_embeddings = self.embeddings.embed_documents(list(texts)) + embeddings = self._require_embeddings("DENSE mode") + batch_embeddings = embeddings.embed_documents(list(texts)) return [ { self.vector_name: vector, @@ -1063,7 +1099,8 @@ class QdrantVectorStore(VectorStore): ] if self.retrieval_mode == RetrievalMode.HYBRID: - dense_embeddings = self.embeddings.embed_documents(list(texts)) + embeddings = self._require_embeddings("HYBRID mode") + dense_embeddings = embeddings.embed_documents(list(texts)) sparse_embeddings = self.sparse_embeddings.embed_documents(list(texts)) if len(dense_embeddings) != len(sparse_embeddings): diff --git a/libs/partners/qdrant/pyproject.toml b/libs/partners/qdrant/pyproject.toml index 85f659c0511..f180803e794 100644 --- a/libs/partners/qdrant/pyproject.toml +++ 
diff --git a/libs/partners/qdrant/pyproject.toml b/libs/partners/qdrant/pyproject.toml
index 85f659c0511..f180803e794 100644
--- a/libs/partners/qdrant/pyproject.toml
+++ b/libs/partners/qdrant/pyproject.toml
@@ -12,7 +12,7 @@ dependencies = [
     "langchain-core!=0.3.0,!=0.3.1,!=0.3.10,!=0.3.11,!=0.3.12,!=0.3.13,!=0.3.14,!=0.3.2,!=0.3.3,!=0.3.4,!=0.3.5,!=0.3.6,!=0.3.7,!=0.3.8,!=0.3.9,<0.4.0,>=0.2.43",
 ]
 name = "langchain-qdrant"
-version = "0.2.0"
+version = "0.2.1"
 description = "An integration package connecting Qdrant and LangChain"
 readme = "README.md"
@@ -47,9 +47,6 @@ typing = ["mypy<2.0,>=1.10", "simsimd<7.0.0,>=6.0.0", "langchain-core"]
 [tool.uv.sources]
 langchain-core = { path = "../../core", editable = true }

-[tool.ruff]
-target-version = "py39"
-
 [tool.ruff.lint]
 select = [
     "A",  # flake8-builtins
diff --git a/libs/partners/qdrant/tests/integration_tests/qdrant_vector_store/test_search.py b/libs/partners/qdrant/tests/integration_tests/qdrant_vector_store/test_search.py
index d95a923c726..d612c48dc95 100644
--- a/libs/partners/qdrant/tests/integration_tests/qdrant_vector_store/test_search.py
+++ b/libs/partners/qdrant/tests/integration_tests/qdrant_vector_store/test_search.py
@@ -309,3 +309,105 @@ def test_similarity_search_filters_with_qdrant_filters(
             )
         ],
     )
+
+
+@pytest.mark.parametrize("location", qdrant_locations())
+def test_embeddings_property_sparse_mode(location: str) -> None:
+    """Test that embeddings property returns None in SPARSE mode."""
+    # Use from_texts to create the vectorstore, which handles collection creation
+    texts = ["test document"]
+    vectorstore = QdrantVectorStore.from_texts(
+        texts,
+        embedding=None,  # No dense embedding for SPARSE mode
+        location=location,
+        retrieval_mode=RetrievalMode.SPARSE,
+        sparse_embedding=ConsistentFakeSparseEmbeddings(),
+        sparse_vector_name="sparse",
+    )
+
+    # In SPARSE mode, embeddings should return None
+    assert vectorstore.embeddings is None
+
+
+@pytest.mark.parametrize("location", qdrant_locations())
+def test_embeddings_property_dense_mode(location: str) -> None:
+    """Test that embeddings property returns embedding object in DENSE mode."""
+    # Use from_texts to create the vectorstore, which handles collection creation
+    texts = ["test document"]
+    embedding = ConsistentFakeEmbeddings()
+    vectorstore = QdrantVectorStore.from_texts(
+        texts,
+        embedding=embedding,
+        location=location,
+        retrieval_mode=RetrievalMode.DENSE,
+    )
+
+    # In DENSE mode, embeddings should return the embedding object
+    assert vectorstore.embeddings is embedding
+
+
+@pytest.mark.parametrize("location", qdrant_locations())
+def test_as_retriever_sparse_mode(location: str) -> None:
+    """Test that as_retriever() works in SPARSE mode."""
+    # Use from_texts to create the vectorstore, which handles collection creation
+    texts = ["test document"]
+    vectorstore = QdrantVectorStore.from_texts(
+        texts,
+        embedding=None,  # No dense embedding for SPARSE mode
+        location=location,
+        retrieval_mode=RetrievalMode.SPARSE,
+        sparse_embedding=ConsistentFakeSparseEmbeddings(),
+        sparse_vector_name="sparse",
+    )
+
+    # Add test documents
+    docs = [
+        Document(page_content="Python programming", metadata={"topic": "programming"}),
+        Document(page_content="Machine learning", metadata={"topic": "AI"}),
+        Document(page_content="Data analysis", metadata={"topic": "data"}),
+    ]
+    vectorstore.add_documents(docs)
+
+    # Test basic as_retriever() functionality
+    retriever = vectorstore.as_retriever()
+    results = retriever.invoke("programming")
+
+    # Should return documents
+    assert len(results) > 0
+    assert all(isinstance(doc, Document) for doc in results)
+
+    # Test that retriever has tags
+    assert hasattr(retriever, "tags")
+    assert isinstance(retriever.tags, list)
+    assert "QdrantVectorStore" in retriever.tags
+
+
+@pytest.mark.parametrize("location", qdrant_locations())
+def test_as_retriever_sparse_mode_with_search_kwargs(location: str) -> None:
+    """Test as_retriever() with custom search_kwargs in SPARSE mode."""
+    # Use from_texts to create the vectorstore, which handles collection creation
+    texts = ["test document"]
+    vectorstore = QdrantVectorStore.from_texts(
+        texts,
+        embedding=None,  # No dense embedding for SPARSE mode
+        location=location,
+        retrieval_mode=RetrievalMode.SPARSE,
+        sparse_embedding=ConsistentFakeSparseEmbeddings(),
+        sparse_vector_name="sparse",
+    )
+
+    # Add test documents
+    docs = [
+        Document(page_content="Python programming", metadata={"topic": "programming"}),
+        Document(page_content="Machine learning", metadata={"topic": "AI"}),
+        Document(page_content="Data analysis", metadata={"topic": "data"}),
+    ]
+    vectorstore.add_documents(docs)
+
+    # Test with custom search_kwargs
+    retriever = vectorstore.as_retriever(search_kwargs={"k": 1})
+    results = retriever.invoke("programming")
+
+    # Should return exactly 1 document
+    assert len(results) == 1
+    assert isinstance(results[0], Document)
diff --git a/libs/partners/qdrant/uv.lock b/libs/partners/qdrant/uv.lock
index f11635d4b86..13743838431 100644
--- a/libs/partners/qdrant/uv.lock
+++ b/libs/partners/qdrant/uv.lock
@@ -1,5 +1,5 @@
 version = 1
-revision = 3
+revision = 2
 requires-python = ">=3.9"
 resolution-markers = [
     "python_full_version >= '3.13'",
@@ -541,7 +541,7 @@

 [[package]]
 name = "langchain-core"
-version = "0.3.75"
+version = "0.3.76"
 source = { editable = "../../core" }
 dependencies = [
     { name = "jsonpatch" },
@@ -599,7 +599,7 @@

 [[package]]
 name = "langchain-qdrant"
-version = "0.2.0"
+version = "0.2.1"
 source = { editable = "." }
 dependencies = [
     { name = "langchain-core" },
diff --git a/libs/partners/xai/pyproject.toml b/libs/partners/xai/pyproject.toml
index 9f1776bd245..6353e3a0243 100644
--- a/libs/partners/xai/pyproject.toml
+++ b/libs/partners/xai/pyproject.toml
@@ -50,9 +50,6 @@ langchain-openai = { path = "../openai", editable = true }
 [tool.mypy]
 disallow_untyped_defs = "True"

-[tool.ruff]
-target-version = "py39"
-
 [tool.ruff.lint]
 select = [
     "A",  # flake8-builtins
diff --git a/uv.lock b/uv.lock
index b6a5e789af1..71a061a6fc1 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1,5 +1,5 @@
 version = 1
-revision = 2
+revision = 3
 requires-python = ">=3.9"
 resolution-markers = [
     "python_full_version >= '3.13' and platform_python_implementation == 'PyPy'",
@@ -922,7 +922,6 @@
     { name = "numpy", version = "2.2.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.13'" },
     { name = "packaging" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/91/1b/6fe5dbe5be0240cfd82b52bd7c186655c578d935c0ce2e713c100e6f8cce/faiss_cpu-1.10.0.tar.gz", hash = "sha256:5bdca555f24bc036f4d67f8a5a4d6cc91b8d2126d4e78de496ca23ccd46e479d", size = 69159, upload-time = "2025-01-31T07:45:49.305Z" }
 wheels = [
     { url = "https://files.pythonhosted.org/packages/8b/56/87eb506d8634f08fc7c63d1ca5631aeec7d6b9afbfabedf2cb7a2a804b13/faiss_cpu-1.10.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:6693474be296a7142ade1051ea18e7d85cedbfdee4b7eac9c52f83fed0467855", size = 7693034, upload-time = "2025-01-31T07:44:31.908Z" },
     { url = "https://files.pythonhosted.org/packages/51/46/f4d9de34ed1b06300b1a75b824d4857963216f5826de33f291af78088e39/faiss_cpu-1.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:70ebe60a560414dc8dd6cfe8fed105c8f002c0d11f765f5adfe8d63d42c0467f", size = 3234656, upload-time = "2025-01-31T07:44:34.418Z" },
@@ -2478,7 +2477,7 @@

 [[package]]
 name = "langchain-core"
-version = "0.3.75"
+version = "0.3.76"
 source = { editable = "libs/core" }
 dependencies = [
     { name = "jsonpatch" },
@@ -2613,7 +2612,7 @@

 [[package]]
 name = "langchain-groq"
-version = "0.3.7"
+version = "0.3.8"
 source = { editable = "libs/partners/groq" }
 dependencies = [
     { name = "groq" },
@@ -2814,7 +2813,7 @@ typing = []

 [[package]]
 name = "langchain-openai"
-version = "0.3.32"
+version = "0.3.33"
 source = { editable = "libs/partners/openai" }
 dependencies = [
     { name = "langchain-core" },
@@ -2825,7 +2824,7 @@
 [package.metadata]
 requires-dist = [
     { name = "langchain-core", editable = "libs/core" },
-    { name = "openai", specifier = ">=1.99.9,<2.0.0" },
+    { name = "openai", specifier = ">=1.104.2,<2.0.0" },
     { name = "tiktoken", specifier = ">=0.7,<1" },
 ]
@@ -2880,18 +2879,14 @@ wheels = [

 [[package]]
 name = "langchain-text-splitters"
-version = "0.3.10"
+version = "0.3.11"
 source = { editable = "libs/text-splitters" }
 dependencies = [
     { name = "langchain-core" },
-    { name = "pip" },
 ]

 [package.metadata]
-requires-dist = [
-    { name = "langchain-core", editable = "libs/core" },
-    { name = "pip", specifier = ">=25.2" },
-]
+requires-dist = [{ name = "langchain-core", editable = "libs/core" }]

 [package.metadata.requires-dev]
 dev = [
@@ -2913,6 +2908,7 @@
     { name = "pytest-xdist", specifier = ">=3.6.1,<4.0.0" },
 ]
 test-integration = [
+    { name = "en-core-web-sm", url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.8.0/en_core_web_sm-3.8.0-py3-none-any.whl" },
     { name = "nltk", specifier = ">=3.9.1,<4.0.0" },
     { name = "sentence-transformers", specifier = ">=3.0.1" },
     { name = "spacy", specifier = ">=3.8.7,<4.0.0" },
@@ -2921,9 +2917,10 @@
     { name = "transformers", specifier = ">=4.51.3,<5.0.0" },
 ]
 typing = [
+    { name = "beautifulsoup4", specifier = ">=4.13.5,<5.0.0" },
     { name = "lxml-stubs", specifier = ">=0.5.1,<1.0.0" },
     { name = "mypy", specifier = ">=1.17.1,<1.18" },
-    { name = "tiktoken", specifier = ">=0.8.0,<1.0.0" },
+    { name = "tiktoken", specifier = ">=0.11.0,<1.0.0" },
     { name = "types-requests", specifier = ">=2.31.0.20240218,<3.0.0.0" },
 ]
@@ -3971,7 +3968,7 @@

 [[package]]
 name = "openai"
-version = "1.99.9"
+version = "1.107.1"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "anyio" },
@@ -3983,9 +3980,9 @@
     { name = "tqdm" },
     { name = "typing-extensions" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/8a/d2/ef89c6f3f36b13b06e271d3cc984ddd2f62508a0972c1cbcc8485a6644ff/openai-1.99.9.tar.gz", hash = "sha256:f2082d155b1ad22e83247c3de3958eb4255b20ccf4a1de2e6681b6957b554e92", size = 506992, upload-time = "2025-08-12T02:31:10.054Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/f3/e0/a62daa7ff769df969cc1b782852cace79615039630b297005356f5fb46fb/openai-1.107.1.tar.gz", hash = "sha256:7c51b6b8adadfcf5cada08a613423575258b180af5ad4bc2954b36ebc0d3ad48", size = 563671, upload-time = "2025-09-10T15:04:40.288Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/e8/fb/df274ca10698ee77b07bff952f302ea627cc12dac6b85289485dd77db6de/openai-1.99.9-py3-none-any.whl", hash = "sha256:9dbcdb425553bae1ac5d947147bebbd630d91bbfc7788394d4c4f3a35682ab3a", size = 786816, upload-time = "2025-08-12T02:31:08.34Z" },
+    { url = "https://files.pythonhosted.org/packages/d4/12/32c19999a58eec4a695e8ce334442b6135df949f0bb61b2ceaa4fa60d3a9/openai-1.107.1-py3-none-any.whl", hash = "sha256:168f9885b1b70d13ada0868a0d0adfd538c16a02f7fd9fe063851a2c9a025e72", size = 945177, upload-time = "2025-09-10T15:04:37.782Z" },
 ]

 [[package]]
@@ -4438,15 +4435,6 @@
     { url = "https://files.pythonhosted.org/packages/41/67/936f9814bdd74b2dfd4822f1f7725ab5d8ff4103919a1664eb4874c58b2f/pillow-11.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:4637b88343166249fe8aa94e7c4a62a180c4b3898283bb5d3d2fd5fe10d8e4e0", size = 2626353, upload-time = "2025-01-02T08:13:52.725Z" },
 ]

-[[package]]
-name = "pip"
-version = "25.2"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/20/16/650289cd3f43d5a2fadfd98c68bd1e1e7f2550a1a5326768cddfbcedb2c5/pip-25.2.tar.gz", hash = "sha256:578283f006390f85bb6282dffb876454593d637f5d1be494b5202ce4877e71f2", size = 1840021, upload-time = "2025-07-30T21:50:15.401Z" }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/b7/3f/945ef7ab14dc4f9d7f40288d2df998d1837ee0888ec3659c813487572faa/pip-25.2-py3-none-any.whl", hash = "sha256:6d67a2b4e7f14d8b31b8b52648866fa717f45a1eb70e83002f4331d07e953717", size = 1752557, upload-time = "2025-07-30T21:50:13.323Z" },
-]
-
 [[package]]
 name = "platformdirs"
 version = "4.3.6"