diff --git a/.github/workflows/scheduled_test.yml b/.github/workflows/scheduled_test.yml
index 7ce59d5b695..5087c719347 100644
--- a/.github/workflows/scheduled_test.yml
+++ b/.github/workflows/scheduled_test.yml
@@ -34,12 +34,19 @@ jobs:
           working-directory: libs/langchain
           cache-key: scheduled
 
+      - name: 'Authenticate to Google Cloud'
+        id: 'auth'
+        uses: 'google-github-actions/auth@v1'
+        with:
+          credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}'
+
       - name: Install dependencies
         working-directory: libs/langchain
         shell: bash
         run: |
           echo "Running scheduled tests, installing dependencies with poetry..."
           poetry install --with=test_integration
+          poetry run pip install google-cloud-aiplatform
 
       - name: Run tests
         shell: bash
diff --git a/libs/langchain/tests/integration_tests/chat_models/test_vertexai.py b/libs/langchain/tests/integration_tests/chat_models/test_vertexai.py
index cc5163e5251..7a50f70a5cb 100644
--- a/libs/langchain/tests/integration_tests/chat_models/test_vertexai.py
+++ b/libs/langchain/tests/integration_tests/chat_models/test_vertexai.py
@@ -18,6 +18,14 @@ from langchain.schema import LLMResult
 from langchain.schema.messages import AIMessage, HumanMessage, SystemMessage
 
 
+@pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"])
+def test_vertexai_instantiation(model_name: str) -> None:
+    model = ChatVertexAI(model_name=model_name)
+    assert model._llm_type == "vertexai"
+    assert model.model_name == model.client._model_id
+
+
+@pytest.mark.scheduled
 @pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"])
 def test_vertexai_single_call(model_name: str) -> None:
     if model_name:
@@ -28,10 +36,9 @@ def test_vertexai_single_call(model_name: str) -> None:
     response = model([message])
     assert isinstance(response, AIMessage)
     assert isinstance(response.content, str)
-    assert model._llm_type == "vertexai"
-    assert model.model_name == model.client._model_id
 
 
+@pytest.mark.scheduled
 @pytest.mark.asyncio
 async def test_vertexai_agenerate() -> None:
     model = ChatVertexAI(temperature=0)
@@ -44,6 +51,7 @@ async def test_vertexai_agenerate() -> None:
     assert response.generations[0][0] == sync_response.generations[0][0]
 
 
+@pytest.mark.scheduled
 def test_vertexai_single_call_with_context() -> None:
     model = ChatVertexAI()
     raw_context = (
@@ -60,6 +68,7 @@ def test_vertexai_single_call_with_context() -> None:
     assert isinstance(response.content, str)
 
 
+@pytest.mark.scheduled
 def test_vertexai_single_call_with_examples() -> None:
     model = ChatVertexAI()
     raw_context = "My name is Ned. You are my personal assistant."
@@ -74,6 +83,7 @@ def test_vertexai_single_call_with_examples() -> None:
     assert isinstance(response.content, str)
 
 
+@pytest.mark.scheduled
 @pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"])
 def test_vertexai_single_call_with_history(model_name: str) -> None:
     if model_name:
@@ -118,7 +128,7 @@ def test_parse_chat_history_correct() -> None:
     ]
 
 
-def test_vertexai_single_call_failes_no_message() -> None:
+def test_vertexai_single_call_fails_no_message() -> None:
     chat = ChatVertexAI()
     with pytest.raises(ValueError) as exc_info:
         _ = chat([])
diff --git a/libs/langchain/tests/integration_tests/llms/test_vertexai.py b/libs/langchain/tests/integration_tests/llms/test_vertexai.py
index 994f75a86d4..561e10a002c 100644
--- a/libs/langchain/tests/integration_tests/llms/test_vertexai.py
+++ b/libs/langchain/tests/integration_tests/llms/test_vertexai.py
@@ -15,20 +15,26 @@ from langchain.llms import VertexAI, VertexAIModelGarden
 from langchain.schema import LLMResult
 
 
-def test_vertex_call() -> None:
-    llm = VertexAI(temperature=0)
-    output = llm("Say foo:")
-    assert isinstance(output, str)
+def test_vertex_initialization() -> None:
+    llm = VertexAI()
     assert llm._llm_type == "vertexai"
     assert llm.model_name == llm.client._model_id
 
 
+def test_vertex_call() -> None:
+    llm = VertexAI(temperature=0)
+    output = llm("Say foo:")
+    assert isinstance(output, str)
+
+
+@pytest.mark.scheduled
 def test_vertex_generate() -> None:
     llm = VertexAI(temperate=0)
     output = llm.generate(["Please say foo:"])
     assert isinstance(output, LLMResult)
 
 
+@pytest.mark.scheduled
 @pytest.mark.asyncio
 async def test_vertex_agenerate() -> None:
     llm = VertexAI(temperate=0)
@@ -36,7 +42,8 @@ async def test_vertex_agenerate() -> None:
     assert isinstance(output, LLMResult)
 
 
-def test_vertext_stream() -> None:
+@pytest.mark.scheduled
+def test_vertex_stream() -> None:
     llm = VertexAI(temperate=0)
     outputs = list(llm.stream("Please say foo:"))
     assert isinstance(outputs[0], str)
@@ -63,7 +70,6 @@ def test_model_garden() -> None:
     project = os.environ["PROJECT"]
     llm = VertexAIModelGarden(endpoint_id=endpoint_id, project=project)
     output = llm("What is the meaning of life?")
-    print(output)
     assert isinstance(output, str)
     assert llm._llm_type == "vertexai_model_garden"
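
Two notes on the change above. First, the `temperate=0` argument visible in the untouched context of `test_vertex_generate`, `test_vertex_agenerate`, and `test_vertex_stream` looks like a typo for `temperature=0`, so those tests presumably run at the model's default temperature rather than a pinned one; it predates this diff but is worth fixing alongside it.

Second, the new `@pytest.mark.scheduled` marker has to be registered with pytest (in `[tool.pytest.ini_options]` of `pyproject.toml`, or via a `conftest.py` hook), otherwise every marked test emits a `PytestUnknownMarkWarning`. That registration is not part of the hunks shown here; a minimal `conftest.py` sketch, assuming no registration exists yet:

# conftest.py (hypothetical): registers the custom marker so pytest
# accepts `@pytest.mark.scheduled` without warnings.
def pytest_configure(config):
    config.addinivalue_line(
        "markers",
        "scheduled: integration tests that run only in the scheduled CI job",
    )

The scheduled workflow can then select these tests with something like `poetry run pytest -m scheduled tests/integration_tests`, while regular CI excludes them with `-m "not scheduled"`; the actual command belongs to the `Run tests` step, whose body falls outside the hunks shown here.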