Mirror of https://github.com/hwchase17/langchain.git, synced 2026-03-18 11:07:36 +00:00.
fix(huggingface): resolve huggingface-hub 1.x compat (#35524)
- Switch `TestHuggingFaceEndpoint` from `Llama-4-Maverick` + `fireworks-ai` to `Llama-3.3-70B-Instruct` + `sambanova` — Maverick is no longer routed to Fireworks in hub 1.x - Switch `test_stream_usage` provider from `nebius` to `scaleway` for `gemma-3-27b-it` — same provider routing change
This commit is contained in:
@@ -8,7 +8,7 @@ def test_stream_usage() -> None:
     llm = HuggingFaceEndpoint(  # type: ignore[call-arg] # (model is inferred in class)
         repo_id="google/gemma-3-27b-it",
         task="conversational",
-        provider="nebius",
+        provider="scaleway",
     )

     model = ChatHuggingFace(llm=llm, stream_usage=True)
@@ -18,9 +18,9 @@ class TestHuggingFaceEndpoint(ChatModelIntegrationTests):
     @property
     def chat_model_params(self) -> dict:
         llm = HuggingFaceEndpoint(  # type: ignore[call-arg]
-            repo_id="meta-llama/Llama-4-Maverick-17B-128E-Instruct",
+            repo_id="meta-llama/Llama-3.3-70B-Instruct",
             task="conversational",
-            provider="fireworks-ai",
+            provider="sambanova",
             temperature=0,
         )
         return {"llm": llm}
Reference in New Issue
Block a user