From ac9295761a673b21bd1ec2f151fd1168e60a3d53 Mon Sep 17 00:00:00 2001
From: Mason Daugherty
Date: Mon, 2 Mar 2026 12:29:05 -0500
Subject: [PATCH] fix(huggingface): resolve huggingface-hub 1.x compat (#35524)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Switch `TestHuggingFaceEndpoint` from `Llama-4-Maverick` + `fireworks-ai`
  to `Llama-3.3-70B-Instruct` + `sambanova` — Maverick is no longer routed
  to Fireworks in hub 1.x
- Switch `test_stream_usage` provider from `nebius` to `scaleway` for
  `gemma-3-27b-it` — same provider routing change
---
 .../huggingface/tests/integration_tests/test_chat_models.py | 2 +-
 .../huggingface/tests/integration_tests/test_standard.py    | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/libs/partners/huggingface/tests/integration_tests/test_chat_models.py b/libs/partners/huggingface/tests/integration_tests/test_chat_models.py
index 4a64f7f0b5a..2c546056a2a 100644
--- a/libs/partners/huggingface/tests/integration_tests/test_chat_models.py
+++ b/libs/partners/huggingface/tests/integration_tests/test_chat_models.py
@@ -8,7 +8,7 @@ def test_stream_usage() -> None:
     llm = HuggingFaceEndpoint(  # type: ignore[call-arg]
         # (model is inferred in class)
         repo_id="google/gemma-3-27b-it",
         task="conversational",
-        provider="nebius",
+        provider="scaleway",
     )
     model = ChatHuggingFace(llm=llm, stream_usage=True)

diff --git a/libs/partners/huggingface/tests/integration_tests/test_standard.py b/libs/partners/huggingface/tests/integration_tests/test_standard.py
index 892c0bc0d39..cba4d775392 100644
--- a/libs/partners/huggingface/tests/integration_tests/test_standard.py
+++ b/libs/partners/huggingface/tests/integration_tests/test_standard.py
@@ -18,9 +18,9 @@ class TestHuggingFaceEndpoint(ChatModelIntegrationTests):
     @property
     def chat_model_params(self) -> dict:
         llm = HuggingFaceEndpoint(  # type: ignore[call-arg]
-            repo_id="meta-llama/Llama-4-Maverick-17B-128E-Instruct",
+            repo_id="meta-llama/Llama-3.3-70B-Instruct",
             task="conversational",
-            provider="fireworks-ai",
+            provider="sambanova",
             temperature=0,
         )
         return {"llm": llm}