Add vertex scheduled test (#10958)

parent: 8602a32b7e
commit: 040d436b3f
.github/workflows/scheduled_test.yml | 7 +++++++
@@ -34,12 +34,19 @@ jobs:
           working-directory: libs/langchain
           cache-key: scheduled
 
+      - name: 'Authenticate to Google Cloud'
+        id: 'auth'
+        uses: 'google-github-actions/auth@v1'
+        with:
+          credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}'
+
       - name: Install dependencies
         working-directory: libs/langchain
         shell: bash
         run: |
           echo "Running scheduled tests, installing dependencies with poetry..."
           poetry install --with=test_integration
+          poetry run pip install google-cloud-aiplatform
 
       - name: Run tests
         shell: bash
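The new auth step exposes the service-account key stored in the `GOOGLE_CREDENTIALS` secret to later steps, and the extra `pip install` pulls in the `google-cloud-aiplatform` SDK that the integration tests import. As a rough local equivalent, a pre-flight check along these lines (a sketch, not part of the commit; it assumes application-default credentials are configured) confirms both pieces are in place:

# Sketch only, not part of this commit: verify the Vertex AI SDK is installed
# and that application-default credentials resolve, mirroring what the CI auth
# step provides before the scheduled tests run.
import google.auth
from google.cloud import aiplatform

credentials, project = google.auth.default()  # honors GOOGLE_APPLICATION_CREDENTIALS
aiplatform.init(project=project, credentials=credentials)
print(f"Vertex AI SDK {aiplatform.__version__} ready for project {project!r}")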
@@ -18,6 +18,14 @@ from langchain.schema import LLMResult
 from langchain.schema.messages import AIMessage, HumanMessage, SystemMessage
 
 
+@pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"])
+def test_vertexai_instantiation(model_name: str) -> None:
+    model = ChatVertexAI(model_name=model_name)
+    assert model._llm_type == "vertexai"
+    assert model.model_name == model.client._model_id
+
+
+@pytest.mark.scheduled
 @pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"])
 def test_vertexai_single_call(model_name: str) -> None:
     if model_name:
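The `@pytest.mark.scheduled` marker added throughout these tests lets the scheduled workflow select only the network-bound cases (e.g. with `pytest -m scheduled`), while the new `test_vertexai_instantiation` still runs everywhere, since it needs no credentials. A minimal sketch of registering such a custom marker via a `conftest.py` hook (an assumption; the repository may declare it in its pytest configuration instead):

# conftest.py — hypothetical registration of the custom "scheduled" marker,
# so pytest does not warn about an unknown mark when the tests are collected.
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "scheduled: long-running integration tests run on a schedule"
    )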
@@ -28,10 +36,9 @@ def test_vertexai_single_call(model_name: str) -> None:
     response = model([message])
     assert isinstance(response, AIMessage)
     assert isinstance(response.content, str)
-    assert model._llm_type == "vertexai"
-    assert model.model_name == model.client._model_id
 
 
+@pytest.mark.scheduled
 @pytest.mark.asyncio
 async def test_vertexai_agenerate() -> None:
     model = ChatVertexAI(temperature=0)
@@ -44,6 +51,7 @@ async def test_vertexai_agenerate() -> None:
     assert response.generations[0][0] == sync_response.generations[0][0]
 
 
+@pytest.mark.scheduled
 def test_vertexai_single_call_with_context() -> None:
     model = ChatVertexAI()
     raw_context = (
@@ -60,6 +68,7 @@ def test_vertexai_single_call_with_context() -> None:
     assert isinstance(response.content, str)
 
 
+@pytest.mark.scheduled
 def test_vertexai_single_call_with_examples() -> None:
     model = ChatVertexAI()
     raw_context = "My name is Ned. You are my personal assistant."
@@ -74,6 +83,7 @@ def test_vertexai_single_call_with_examples() -> None:
     assert isinstance(response.content, str)
 
 
+@pytest.mark.scheduled
 @pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"])
 def test_vertexai_single_call_with_history(model_name: str) -> None:
     if model_name:
@@ -118,7 +128,7 @@ def test_parse_chat_history_correct() -> None:
     ]
 
 
-def test_vertexai_single_call_failes_no_message() -> None:
+def test_vertexai_single_call_fails_no_message() -> None:
     chat = ChatVertexAI()
     with pytest.raises(ValueError) as exc_info:
         _ = chat([])
@@ -15,20 +15,26 @@ from langchain.llms import VertexAI, VertexAIModelGarden
 from langchain.schema import LLMResult
 
 
-def test_vertex_call() -> None:
-    llm = VertexAI(temperature=0)
-    output = llm("Say foo:")
-    assert isinstance(output, str)
+def test_vertex_initialization() -> None:
+    llm = VertexAI()
     assert llm._llm_type == "vertexai"
     assert llm.model_name == llm.client._model_id
 
 
+def test_vertex_call() -> None:
+    llm = VertexAI(temperature=0)
+    output = llm("Say foo:")
+    assert isinstance(output, str)
+
+
+@pytest.mark.scheduled
 def test_vertex_generate() -> None:
     llm = VertexAI(temperate=0)
     output = llm.generate(["Please say foo:"])
     assert isinstance(output, LLMResult)
 
 
+@pytest.mark.scheduled
 @pytest.mark.asyncio
 async def test_vertex_agenerate() -> None:
     llm = VertexAI(temperate=0)
@@ -36,7 +42,8 @@ async def test_vertex_agenerate() -> None:
     assert isinstance(output, LLMResult)
 
 
-def test_vertext_stream() -> None:
+@pytest.mark.scheduled
+def test_vertex_stream() -> None:
     llm = VertexAI(temperate=0)
     outputs = list(llm.stream("Please say foo:"))
     assert isinstance(outputs[0], str)
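The renamed `test_vertex_stream` exercises token streaming; per its assertion, `VertexAI.stream` yields plain strings. A minimal usage sketch built on just what the test shows:

# Usage sketch based on the test above: consume streamed chunks as they
# arrive rather than waiting for the full completion. Each chunk is a str.
from langchain.llms import VertexAI

llm = VertexAI(temperature=0)
for chunk in llm.stream("Please say foo:"):
    print(chunk, end="", flush=True)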
@@ -63,7 +70,6 @@ def test_model_garden() -> None:
     project = os.environ["PROJECT"]
     llm = VertexAIModelGarden(endpoint_id=endpoint_id, project=project)
     output = llm("What is the meaning of life?")
-    print(output)
     assert isinstance(output, str)
     assert llm._llm_type == "vertexai_model_garden"
 
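`test_model_garden` reads `ENDPOINT_ID` and `PROJECT` straight from the environment, so it fails with a `KeyError` when they are unset. A hedged sketch of a skip guard one could add locally (hypothetical, not part of this commit):

# Hypothetical guard, not in this commit: skip the Model Garden test instead
# of crashing when the required environment variables are not configured.
import os
import pytest

requires_model_garden = pytest.mark.skipif(
    "ENDPOINT_ID" not in os.environ or "PROJECT" not in os.environ,
    reason="ENDPOINT_ID and PROJECT must be set for Model Garden tests",
)

Applied as `@requires_model_garden` above the test definition, pytest would then report the test as skipped rather than errored.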