From e4f106ea623598dc0c9856be4a9ebab28a9325e1 Mon Sep 17 00:00:00 2001
From: ccurme
Date: Thu, 13 Feb 2025 15:49:50 -0800
Subject: [PATCH] groq[patch]: remove xfails (#29794)

These appear to pass.
---
 .../integration_tests/test_chat_models.py    | 37 -------------------
 .../tests/integration_tests/test_standard.py | 23 ------------
 2 files changed, 60 deletions(-)

diff --git a/libs/partners/groq/tests/integration_tests/test_chat_models.py b/libs/partners/groq/tests/integration_tests/test_chat_models.py
index 9ee5bccca8a..9113c829871 100644
--- a/libs/partners/groq/tests/integration_tests/test_chat_models.py
+++ b/libs/partners/groq/tests/integration_tests/test_chat_models.py
@@ -13,7 +13,6 @@ from langchain_core.messages import (
     SystemMessage,
 )
 from langchain_core.outputs import ChatGeneration, LLMResult
-from langchain_core.tools import tool
 from pydantic import BaseModel, Field
 
 from langchain_groq import ChatGroq
@@ -394,42 +393,6 @@ def test_json_mode_structured_output() -> None:
     assert len(result.punchline) != 0
 
 
-def test_tool_calling_no_arguments() -> None:
-    # Note: this is a variant of a test in langchain_tests
-    # that as of 2024-08-19 fails with "Failed to call a function. Please
-    # adjust your prompt." when `tool_choice="any"` is specified, but
-    # passes when `tool_choice` is not specified.
-    model = ChatGroq(model="llama-3.3-70b-versatile", temperature=0)  # type: ignore[call-arg]
-
-    @tool
-    def magic_function_no_args() -> int:
-        """Calculates a magic function."""
-        return 5
-
-    model_with_tools = model.bind_tools([magic_function_no_args])
-    query = "What is the value of magic_function()? Use the tool."
-    result = model_with_tools.invoke(query)
-    assert isinstance(result, AIMessage)
-    assert len(result.tool_calls) == 1
-    tool_call = result.tool_calls[0]
-    assert tool_call["name"] == "magic_function_no_args"
-    assert tool_call["args"] == {}
-    assert tool_call["id"] is not None
-    assert tool_call["type"] == "tool_call"
-
-    # Test streaming
-    full: Optional[BaseMessageChunk] = None
-    for chunk in model_with_tools.stream(query):
-        full = chunk if full is None else full + chunk  # type: ignore
-    assert isinstance(full, AIMessage)
-    assert len(full.tool_calls) == 1
-    tool_call = full.tool_calls[0]
-    assert tool_call["name"] == "magic_function_no_args"
-    assert tool_call["args"] == {}
-    assert tool_call["id"] is not None
-    assert tool_call["type"] == "tool_call"
-
-
 # Groq does not currently support N > 1
 # @pytest.mark.scheduled
 # def test_chat_multiple_completions() -> None:
diff --git a/libs/partners/groq/tests/integration_tests/test_standard.py b/libs/partners/groq/tests/integration_tests/test_standard.py
index b62982e4ec9..fe91a614dd7 100644
--- a/libs/partners/groq/tests/integration_tests/test_standard.py
+++ b/libs/partners/groq/tests/integration_tests/test_standard.py
@@ -48,26 +48,3 @@ class TestGroqLlama(BaseTestGroq):
     @property
     def supports_json_mode(self) -> bool:
         return False  # Not supported in streaming mode
-
-    @pytest.mark.xfail(
-        reason=("Fails with 'Failed to call a function. Please adjust your prompt.'")
-    )
-    def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None:
-        super().test_tool_calling_with_no_arguments(model)
-
-    @pytest.mark.xfail(
-        reason=("Fails with 'Failed to call a function. Please adjust your prompt.'")
-    )
-    def test_tool_message_histories_string_content(
-        self, model: BaseChatModel, my_adder_tool: BaseTool
-    ) -> None:
-        super().test_tool_message_histories_string_content(model, my_adder_tool)
-
-    @pytest.mark.xfail(
-        reason=(
-            "Sometimes fails with 'Failed to call a function. "
-            "Please adjust your prompt.'"
-        )
-    )
-    def test_bind_runnables_as_tools(self, model: BaseChatModel) -> None:
-        super().test_bind_runnables_as_tools(model)
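
Context for the removal, not part of the patch: a minimal pytest sketch (illustrative test names, assumed to run under plain pytest, not taken from the Groq suite) showing why stale non-strict xfail markers like the ones deleted above are worth removing once the underlying tests pass. A passing xfail-marked test is reported as XPASS instead of PASS, whereas strict=True turns the unexpected pass into a hard failure.

# Minimal sketch, not part of the patch: pytest.mark.xfail behavior once a
# test starts passing. Test names are illustrative, not from the Groq suite.
import pytest


@pytest.mark.xfail(reason="Fails with 'Failed to call a function.'")
def test_marker_now_stale() -> None:
    # This assertion passes, so pytest reports XPASS rather than PASS;
    # the stale marker quietly hides that the failure no longer occurs.
    assert 1 + 1 == 2


@pytest.mark.xfail(reason="Known upstream issue", strict=True)
def test_strict_marker() -> None:
    # With strict=True an unexpected pass is reported as a failure,
    # so a stale marker cannot linger unnoticed.
    assert 1 + 1 == 2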