diff --git a/.github/workflows/_integration_test.yml b/.github/workflows/_integration_test.yml
index 05971e1dfc5..40b0ec5344e 100644
--- a/.github/workflows/_integration_test.yml
+++ b/.github/workflows/_integration_test.yml
@@ -67,6 +67,7 @@ jobs:
           NVIDIA_API_KEY: ${{ secrets.NVIDIA_API_KEY }}
           GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }}
           GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
+          HUGGINGFACEHUB_API_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
           EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
           NOMIC_API_KEY: ${{ secrets.NOMIC_API_KEY }}
           WATSONX_APIKEY: ${{ secrets.WATSONX_APIKEY }}
diff --git a/.github/workflows/_release.yml b/.github/workflows/_release.yml
index 68da8c3a704..d3efddf67eb 100644
--- a/.github/workflows/_release.yml
+++ b/.github/workflows/_release.yml
@@ -273,6 +273,7 @@ jobs:
           GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }}
           GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
           GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
+          HUGGINGFACEHUB_API_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
           EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
           NOMIC_API_KEY: ${{ secrets.NOMIC_API_KEY }}
           WATSONX_APIKEY: ${{ secrets.WATSONX_APIKEY }}
diff --git a/.github/workflows/scheduled_test.yml b/.github/workflows/scheduled_test.yml
index 61e1b3d25a6..b317a48e515 100644
--- a/.github/workflows/scheduled_test.yml
+++ b/.github/workflows/scheduled_test.yml
@@ -90,6 +90,7 @@ jobs:
           AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
           FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
           GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
+          HUGGINGFACEHUB_API_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
           MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
           COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
           NVIDIA_API_KEY: ${{ secrets.NVIDIA_API_KEY }}
diff --git a/libs/partners/huggingface/tests/integration_tests/test_standard.py b/libs/partners/huggingface/tests/integration_tests/test_standard.py
new file mode 100644
index 00000000000..f9180b47711
--- /dev/null
+++ b/libs/partners/huggingface/tests/integration_tests/test_standard.py
@@ -0,0 +1,78 @@
+"""Standard LangChain interface tests"""
+
+from typing import Type
+
+import pytest
+from langchain_core.language_models import BaseChatModel
+from langchain_standard_tests.integration_tests import ChatModelIntegrationTests
+
+from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
+
+
+class TestHuggingFaceEndpoint(ChatModelIntegrationTests):
+    @property
+    def chat_model_class(self) -> Type[BaseChatModel]:
+        return ChatHuggingFace
+
+    @property
+    def chat_model_params(self) -> dict:
+        return {}
+
+    @pytest.fixture
+    def model(self) -> BaseChatModel:
+        llm = HuggingFaceEndpoint(  # type: ignore[call-arg]
+            repo_id="HuggingFaceH4/zephyr-7b-beta",
+            task="text-generation",
+            max_new_tokens=512,
+            do_sample=False,
+            repetition_penalty=1.03,
+        )
+        return self.chat_model_class(llm=llm)  # type: ignore[call-arg]
+
+    @pytest.mark.xfail(reason=("Not implemented"))
+    def test_stream(self, model: BaseChatModel) -> None:
+        super().test_stream(model)
+
+    @pytest.mark.xfail(reason=("Not implemented"))
+    async def test_astream(self, model: BaseChatModel) -> None:
+        await super().test_astream(model)
+
+    @pytest.mark.xfail(reason=("Not implemented"))
+    def test_usage_metadata(self, model: BaseChatModel) -> None:
+        super().test_usage_metadata(model)
+
+    @pytest.mark.xfail(reason=("Not implemented"))
+    def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
+        super().test_usage_metadata_streaming(model)
+
+    @pytest.mark.xfail(reason=("Not implemented"))
+    def test_stop_sequence(self, model: BaseChatModel) -> None:
+        super().test_stop_sequence(model)
+
+    @pytest.mark.xfail(reason=("Not implemented"))
+    def test_tool_calling(self, model: BaseChatModel) -> None:
+        super().test_tool_calling(model)
+
+    @pytest.mark.xfail(reason=("Not implemented"))
+    def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None:
+        super().test_tool_calling_with_no_arguments(model)
+
+    @pytest.mark.xfail(reason=("Not implemented"))
+    def test_bind_runnables_as_tools(self, model: BaseChatModel) -> None:
+        super().test_bind_runnables_as_tools(model)
+
+    @pytest.mark.xfail(reason=("Not implemented"))
+    def test_structured_output(self, model: BaseChatModel) -> None:
+        super().test_structured_output(model)
+
+    @pytest.mark.xfail(reason=("Not implemented"))
+    def test_structured_output_pydantic_2_v1(self, model: BaseChatModel) -> None:
+        super().test_structured_output_pydantic_2_v1(model)
+
+    @pytest.mark.xfail(reason=("Not implemented"))
+    def test_tool_message_histories_list_content(self, model: BaseChatModel) -> None:
+        super().test_tool_message_histories_list_content(model)
+
+    @pytest.mark.xfail(reason=("Not implemented"))
+    def test_structured_few_shot_examples(self, model: BaseChatModel) -> None:
+        super().test_structured_few_shot_examples(model)
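For reference, a minimal sketch of exercising the new fixture's model outside the test harness, assuming HUGGINGFACEHUB_API_TOKEN is exported locally (the same secret the workflows above now pass through) and langchain-huggingface is installed; the repo id and generation parameters are copied from the fixture, and the prompt is illustrative only.

import os

from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint

# Same token the CI workflows inject; HuggingFaceEndpoint reads it from the environment.
assert os.environ.get("HUGGINGFACEHUB_API_TOKEN"), "set HUGGINGFACEHUB_API_TOKEN first"

# Wrap the zephyr-7b-beta text-generation endpoint in the chat adapter,
# mirroring the `model` fixture in test_standard.py.
llm = HuggingFaceEndpoint(
    repo_id="HuggingFaceH4/zephyr-7b-beta",
    task="text-generation",
    max_new_tokens=512,
    do_sample=False,
    repetition_penalty=1.03,
)
chat = ChatHuggingFace(llm=llm)

# A single round trip; the standard integration tests drive the same object
# through streaming, tool-calling, and structured-output checks.
print(chat.invoke("Hello!").content)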