From b7dac2c90b4ccb16387ce1351d31218a3665cf9f Mon Sep 17 00:00:00 2001 From: John Kennedy <65985482+jkennedyvz@users.noreply.github.com> Date: Fri, 30 Jan 2026 18:33:55 -0800 Subject: [PATCH] fix: cleanup unused imports, add Anthropic to extended tests - Remove unused SystemMessage import - Fix reference to non-existent e2e test file - Add anthropic_model to all parameterized security tests - Now tests both OpenAI (gpt-5.2) and Anthropic (claude-sonnet-4-5) --- .../middleware/prompt_injection_defense.py | 2 +- .../test_prompt_injection_defense.py | 2 +- .../test_prompt_injection_defense_extended.py | 37 ++++++++++--------- 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/libs/langchain_v1/langchain/agents/middleware/prompt_injection_defense.py b/libs/langchain_v1/langchain/agents/middleware/prompt_injection_defense.py index a7fcfd1e318..75144c81e98 100644 --- a/libs/langchain_v1/langchain/agents/middleware/prompt_injection_defense.py +++ b/libs/langchain_v1/langchain/agents/middleware/prompt_injection_defense.py @@ -17,7 +17,7 @@ from __future__ import annotations from abc import abstractmethod from typing import TYPE_CHECKING, Any, Protocol -from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage +from langchain_core.messages import AIMessage, HumanMessage, ToolMessage from typing_extensions import override from langchain.agents.middleware.types import AgentMiddleware diff --git a/libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_prompt_injection_defense.py b/libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_prompt_injection_defense.py index 609023ddf16..1a1f571718f 100644 --- a/libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_prompt_injection_defense.py +++ b/libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_prompt_injection_defense.py @@ -3,7 +3,7 @@ SECURITY TESTS: These tests verify defenses against indirect prompt injection attacks, where malicious instructions embedded in tool results attempt to hijack agent behavior. -See also: tests/e2e_prompt_injection_test.py for end-to-end tests with real LLMs. +See also: test_prompt_injection_defense_extended.py for tests with real LLMs. """ from unittest.mock import AsyncMock, MagicMock, patch diff --git a/libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_prompt_injection_defense_extended.py b/libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_prompt_injection_defense_extended.py index 4516aeac0b4..de764275e81 100644 --- a/libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_prompt_injection_defense_extended.py +++ b/libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_prompt_injection_defense_extended.py @@ -229,12 +229,11 @@ def assert_indirect_tool_trigger_blocked(output: str): class TestCriticalSecurity: """Critical security tests using real LLM models. - By default, only OpenAI tests run. To run Ollama tests: - pytest -k "ollama" --run-ollama + Tests run against both OpenAI and Anthropic models by default. 
""" - @pytest.mark.requires("langchain_openai") - @pytest.mark.parametrize("model_fixture", ["openai_model"]) + @pytest.mark.requires("langchain_openai", "langchain_anthropic") + @pytest.mark.parametrize("model_fixture", ["openai_model", "anthropic_model"]) def test_goal_hijacking_blocked(self, model_fixture, request): """CRITICAL: Test that 'ignore previous instructions' attacks are blocked.""" model = request.getfixturevalue(model_fixture) @@ -252,8 +251,8 @@ class TestCriticalSecurity: ) assert_goal_hijacking_blocked(str(result.content)) - @pytest.mark.requires("langchain_openai") - @pytest.mark.parametrize("model_fixture", ["openai_model"]) + @pytest.mark.requires("langchain_openai", "langchain_anthropic") + @pytest.mark.parametrize("model_fixture", ["openai_model", "anthropic_model"]) def test_privilege_escalation_blocked(self, model_fixture, request): """CRITICAL: Test that privilege escalation/jailbreak attempts are blocked.""" model = request.getfixturevalue(model_fixture) @@ -271,8 +270,8 @@ class TestCriticalSecurity: ) assert_privilege_escalation_blocked(str(result.content)) - @pytest.mark.requires("langchain_openai") - @pytest.mark.parametrize("model_fixture", ["openai_model"]) + @pytest.mark.requires("langchain_openai", "langchain_anthropic") + @pytest.mark.parametrize("model_fixture", ["openai_model", "anthropic_model"]) def test_indirect_tool_trigger_blocked(self, model_fixture, request): """CRITICAL: Test that indirect tool triggering attempts are blocked.""" model = request.getfixturevalue(model_fixture) @@ -290,8 +289,8 @@ class TestCriticalSecurity: ) assert_indirect_tool_trigger_blocked(str(result.content)) - @pytest.mark.requires("langchain_openai") - @pytest.mark.parametrize("model_fixture", ["openai_model"]) + @pytest.mark.requires("langchain_openai", "langchain_anthropic") + @pytest.mark.parametrize("model_fixture", ["openai_model", "anthropic_model"]) def test_multi_language_injection_blocked(self, model_fixture, request): """Test that multi-language injection is blocked.""" model = request.getfixturevalue(model_fixture) @@ -309,8 +308,8 @@ class TestCriticalSecurity: ) assert "hacker@malo.com" not in str(result.content).lower() - @pytest.mark.requires("langchain_openai") - @pytest.mark.parametrize("model_fixture", ["openai_model"]) + @pytest.mark.requires("langchain_openai", "langchain_anthropic") + @pytest.mark.parametrize("model_fixture", ["openai_model", "anthropic_model"]) def test_json_injection_blocked(self, model_fixture, request): """Test that JSON-embedded injection is blocked.""" model = request.getfixturevalue(model_fixture) @@ -389,16 +388,18 @@ class TestCriticalSecurityOllama: class TestAdditionalInjectionVectors: """Additional prompt injection tests for specific strategies.""" - @pytest.mark.requires("langchain_openai") - def test_parse_data_blocks_embedded_injection(self, openai_model): + @pytest.mark.requires("langchain_openai", "langchain_anthropic") + @pytest.mark.parametrize("model_fixture", ["openai_model", "anthropic_model"]) + def test_parse_data_blocks_embedded_injection(self, model_fixture, request): """Test ParseDataStrategy blocks injection hidden in legitimate content.""" - strategy = ParseDataStrategy(openai_model, use_full_conversation=True) + model = request.getfixturevalue(model_fixture) + strategy = ParseDataStrategy(model, use_full_conversation=True) tools = [search_email] - request = create_tool_request(tools) - request.state["messages"] = [] + req = create_tool_request(tools) + req.state["messages"] = [] result = 
strategy.process( - request, + req, ToolMessage( content="""Subject: Team Meeting From: boss@company.com
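
Note for reviewers: this patch assumes `openai_model` and `anthropic_model` fixtures already exist in the suite's conftest; their definitions are not part of the diff. Below is a minimal sketch of what the Anthropic side might look like, mirroring the OpenAI fixture. The model IDs are taken from the commit message; the `ChatOpenAI`/`ChatAnthropic` wiring and the skip-on-missing-key logic are assumptions for illustration, not the repo's actual conftest.

```python
# Hypothetical conftest.py sketch -- not part of this patch.
import os

import pytest


@pytest.fixture
def openai_model():
    # Skip rather than fail when no credentials are available locally.
    if not os.environ.get("OPENAI_API_KEY"):
        pytest.skip("OPENAI_API_KEY not set")
    # Import inside the fixture so collection succeeds without the package.
    from langchain_openai import ChatOpenAI

    return ChatOpenAI(model="gpt-5.2", temperature=0)


@pytest.fixture
def anthropic_model():
    # Mirrors openai_model so both backends plug into the same
    # @pytest.mark.parametrize("model_fixture", [...]) indirection.
    if not os.environ.get("ANTHROPIC_API_KEY"):
        pytest.skip("ANTHROPIC_API_KEY not set")
    from langchain_anthropic import ChatAnthropic

    return ChatAnthropic(model="claude-sonnet-4-5", temperature=0)
```

Parametrizing over fixture names as strings and resolving them with `request.getfixturevalue(model_fixture)` keeps the backends independent: a missing key or package skips only that backend's run. That indirection is also why `test_parse_data_blocks_embedded_injection` renames its local variable from `request` to `req`, so it no longer shadows pytest's built-in `request` fixture.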