From 5bf89628bf82a5ddf9d0dd4bc0926e61ccc4657d Mon Sep 17 00:00:00 2001
From: ccurme
Date: Fri, 30 May 2025 13:27:12 -0400
Subject: [PATCH] groq[patch]: update model for integration tests (#31440)

Llama-3.1 started failing consistently with

> groq.BadRequestError: Error code: 400 - {'error': {'message': "Failed to
> call a function. Please adjust your prompt. See 'failed_generation' for
> more details.", 'type': 'invalid_request_error', 'code': 'tool_use_failed',
> 'failed_generation': '{"query": "Hello!"}'}}

---
 .../groq/tests/integration_tests/test_standard.py | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/libs/partners/groq/tests/integration_tests/test_standard.py b/libs/partners/groq/tests/integration_tests/test_standard.py
index 04e68fa567f..90b9f16a5e5 100644
--- a/libs/partners/groq/tests/integration_tests/test_standard.py
+++ b/libs/partners/groq/tests/integration_tests/test_standard.py
@@ -29,14 +29,10 @@ class BaseTestGroq(ChatModelIntegrationTests):
         return True
 
 
-class TestGroqLlama(BaseTestGroq):
+class TestGroqGemma(BaseTestGroq):
     @property
     def chat_model_params(self) -> dict:
-        return {
-            "model": "llama-3.1-8b-instant",
-            "temperature": 0,
-            "rate_limiter": rate_limiter,
-        }
+        return {"model": "gemma2-9b-it", "rate_limiter": rate_limiter}
 
     @property
     def supports_json_mode(self) -> bool:
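
A minimal sketch of the kind of tool-calling request the standard integration
tests run against the configured model, assuming the public
ChatGroq.bind_tools API; the 'search' tool and the "Hello!" prompt are
illustrative assumptions (the prompt mirrors the 'failed_generation' payload
quoted above), not code taken from the test suite:

    from langchain_core.tools import tool
    from langchain_groq import ChatGroq

    @tool
    def search(query: str) -> str:
        """Return search results for a query (illustrative stub)."""
        return f"results for {query}"

    # Bind the tool and send a trivial prompt. On llama-3.1-8b-instant this
    # kind of request began raising groq.BadRequestError with code
    # 'tool_use_failed'; the patch points the tests at gemma2-9b-it instead.
    llm = ChatGroq(model="gemma2-9b-it", temperature=0)
    msg = llm.bind_tools([search]).invoke("Hello!")
    print(msg.tool_calls)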