fireworks scheduled integration tests (#12373)
This commit is contained in:
parent 01c5cd365b
commit 76230d2c08

.github/workflows/scheduled_test.yml (vendored): 7 changes
@@ -55,6 +55,10 @@ jobs:
           poetry install --with=test_integration
           poetry run pip install google-cloud-aiplatform
           poetry run pip install "boto3>=1.28.57"
+          if [[ ${{ matrix.python-version }} != "3.8" ]]
+          then
+            poetry run pip install fireworks-ai
+          fi

       - name: Run tests
         shell: bash
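The added step installs fireworks-ai only when the matrix Python version is not 3.8, since fireworks-ai requires Python 3.9 or newer (the test files below skip themselves on the same condition). A minimal sketch of the same version gate expressed in Python, purely illustrative and not part of this commit:

```python
# Hypothetical helper mirroring the workflow's bash gate: install
# fireworks-ai only on Python 3.9 or newer.
import subprocess
import sys

if sys.version_info >= (3, 9):
    subprocess.run(
        [sys.executable, "-m", "pip", "install", "fireworks-ai"],
        check=True,  # raise if the install fails, like a failing CI step
    )
```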
@@ -64,7 +68,8 @@ jobs:
           AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
           AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
           AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
           AZURE_OPENAI_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_DEPLOYMENT_NAME }}
+          FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
         run: |
           make scheduled_tests
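The tests below are tagged `@pytest.mark.scheduled` so that `make scheduled_tests` can select only this suite. For pytest to accept a custom marker without warnings it must be registered somewhere; a minimal sketch assuming registration lives in a `conftest.py` (the repo may register it in pytest config instead):

```python
# conftest.py (assumed location): register the custom "scheduled" marker
# so pytest recognizes @pytest.mark.scheduled without emitting warnings.
def pytest_configure(config) -> None:
    config.addinivalue_line(
        "markers", "scheduled: integration tests run by the scheduled CI job"
    )
```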
@@ -1,4 +1,5 @@
 """Test ChatFireworks wrapper."""
+import sys

 import pytest

@@ -6,25 +7,34 @@ from langchain.chat_models.fireworks import ChatFireworks
 from langchain.schema import ChatGeneration, ChatResult, LLMResult
 from langchain.schema.messages import BaseMessage, HumanMessage, SystemMessage

+if sys.version_info < (3, 9):
+    pytest.skip("fireworks-ai requires Python > 3.8", allow_module_level=True)
+

-def test_chat_fireworks() -> None:
+@pytest.fixture
+def chat() -> ChatFireworks:
+    return ChatFireworks(model_kwargs={"temperature": 0, "max_tokens": 512})
+
+
+@pytest.mark.scheduled
+def test_chat_fireworks(chat: ChatFireworks) -> None:
     """Test ChatFireworks wrapper."""
-    chat = ChatFireworks()
     message = HumanMessage(content="What is the weather in Redwood City, CA today")
     response = chat([message])
     assert isinstance(response, BaseMessage)
     assert isinstance(response.content, str)


+@pytest.mark.scheduled
 def test_chat_fireworks_model() -> None:
     """Test ChatFireworks wrapper handles model_name."""
     chat = ChatFireworks(model="foo")
     assert chat.model == "foo"


-def test_chat_fireworks_system_message() -> None:
+@pytest.mark.scheduled
+def test_chat_fireworks_system_message(chat: ChatFireworks) -> None:
     """Test ChatFireworks wrapper with system message."""
-    chat = ChatFireworks()
     system_message = SystemMessage(content="You are to chat with the user.")
     human_message = HumanMessage(content="Hello")
     response = chat([system_message, human_message])
@@ -32,6 +42,7 @@ def test_chat_fireworks_system_message() -> None:
     assert isinstance(response.content, str)


+@pytest.mark.scheduled
 def test_chat_fireworks_generate() -> None:
     """Test ChatFireworks wrapper with generate."""
     chat = ChatFireworks(model_kwargs={"n": 2})
@@ -47,6 +58,7 @@ def test_chat_fireworks_generate() -> None:
     assert generation.text == generation.message.content


+@pytest.mark.scheduled
 def test_chat_fireworks_multiple_completions() -> None:
     """Test ChatFireworks wrapper with multiple completions."""
     chat = ChatFireworks(model_kwargs={"n": 5})
@@ -59,35 +71,35 @@ def test_chat_fireworks_multiple_completions() -> None:
         assert isinstance(generation.message.content, str)


-def test_chat_fireworks_llm_output_contains_model_id() -> None:
+@pytest.mark.scheduled
+def test_chat_fireworks_llm_output_contains_model_id(chat: ChatFireworks) -> None:
     """Test llm_output contains model_id."""
-    chat = ChatFireworks()
     message = HumanMessage(content="Hello")
     llm_result = chat.generate([[message]])
     assert llm_result.llm_output is not None
     assert llm_result.llm_output["model"] == chat.model


-def test_fireworks_invoke() -> None:
+@pytest.mark.scheduled
+def test_fireworks_invoke(chat: ChatFireworks) -> None:
     """Tests chat completion with invoke"""
-    chat = ChatFireworks()
     result = chat.invoke("How is the weather in New York today?", stop=[","])
     assert isinstance(result.content, str)
     assert result.content[-1] == ","


+@pytest.mark.scheduled
 @pytest.mark.asyncio
-async def test_fireworks_ainvoke() -> None:
+async def test_fireworks_ainvoke(chat: ChatFireworks) -> None:
     """Tests chat completion with invoke"""
-    chat = ChatFireworks()
     result = await chat.ainvoke("How is the weather in New York today?", stop=[","])
     assert isinstance(result.content, str)
     assert result.content[-1] == ","


-def test_fireworks_batch() -> None:
+@pytest.mark.scheduled
+def test_fireworks_batch(chat: ChatFireworks) -> None:
     """Test batch tokens from ChatFireworks."""
-    chat = ChatFireworks()
     result = chat.batch(
         [
             "What is the weather in Redwood City, CA today",
@@ -105,10 +117,10 @@ def test_fireworks_batch() -> None:
         assert token.content[-1] == ","


+@pytest.mark.scheduled
 @pytest.mark.asyncio
-async def test_fireworks_abatch() -> None:
+async def test_fireworks_abatch(chat: ChatFireworks) -> None:
     """Test batch tokens from ChatFireworks."""
-    chat = ChatFireworks()
     result = await chat.abatch(
         [
             "What is the weather in Redwood City, CA today",
@@ -126,25 +138,26 @@ async def test_fireworks_abatch() -> None:
         assert token.content[-1] == ","


-def test_fireworks_streaming() -> None:
+@pytest.mark.scheduled
+def test_fireworks_streaming(chat: ChatFireworks) -> None:
     """Test streaming tokens from Fireworks."""
-    llm = ChatFireworks()
-
-    for token in llm.stream("I'm Pickle Rick"):
+    for token in chat.stream("I'm Pickle Rick"):
         assert isinstance(token.content, str)


-def test_fireworks_streaming_stop_words() -> None:
+@pytest.mark.scheduled
+def test_fireworks_streaming_stop_words(chat: ChatFireworks) -> None:
     """Test streaming tokens with stop words."""
-    llm = ChatFireworks()
-
     last_token = ""
-    for token in llm.stream("I'm Pickle Rick", stop=[","]):
+    for token in chat.stream("I'm Pickle Rick", stop=[","]):
         last_token = token.content
         assert isinstance(token.content, str)
     assert last_token[-1] == ","


+@pytest.mark.scheduled
 @pytest.mark.asyncio
 async def test_chat_fireworks_agenerate() -> None:
     """Test ChatFireworks wrapper with generate."""
@@ -161,13 +174,13 @@ async def test_chat_fireworks_agenerate() -> None:
     assert generation.text == generation.message.content


+@pytest.mark.scheduled
 @pytest.mark.asyncio
-async def test_fireworks_astream() -> None:
+async def test_fireworks_astream(chat: ChatFireworks) -> None:
     """Test streaming tokens from Fireworks."""
-    llm = ChatFireworks()
-
     last_token = ""
-    async for token in llm.astream(
+    async for token in chat.astream(
         "Who's the best quarterback in the NFL?", stop=[","]
     ):
         last_token = token.content
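The recurring edit in this file is mechanical: each per-test `chat = ChatFireworks()` is dropped in favor of a shared `chat` fixture pinned to `temperature=0` and `max_tokens=512`, so every scheduled test exercises one deterministic configuration. A self-contained sketch of that injection pattern, using a stand-in class so it runs without a Fireworks API key:

```python
# Illustrative only: FakeChat stands in for ChatFireworks so the sketch
# runs offline; the fixture-injection pattern is what the diff above adopts.
import pytest


class FakeChat:
    def invoke(self, prompt: str) -> str:
        return f"echo: {prompt}"


@pytest.fixture
def chat() -> FakeChat:
    # Single construction point shared by every test that names the fixture.
    return FakeChat()


def test_invoke(chat: FakeChat) -> None:
    assert chat.invoke("hi") == "echo: hi"
```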
@@ -1,4 +1,5 @@
 """Test Fireworks AI API Wrapper."""
+import sys
 from typing import Generator

 import pytest
@@ -12,14 +13,23 @@ from langchain.prompts.chat import (
 )
 from langchain.schema import LLMResult

+if sys.version_info < (3, 9):
+    pytest.skip("fireworks-ai requires Python > 3.8", allow_module_level=True)
+

-def test_fireworks_call() -> None:
+@pytest.fixture
+def llm() -> Fireworks:
+    return Fireworks(model_kwargs={"temperature": 0, "max_tokens": 512})
+
+
+@pytest.mark.scheduled
+def test_fireworks_call(llm: Fireworks) -> None:
     """Test valid call to fireworks."""
-    llm = Fireworks()
     output = llm("How is the weather in New York today?")
     assert isinstance(output, str)


+@pytest.mark.scheduled
 def test_fireworks_in_chain() -> None:
     """Tests fireworks AI in a Langchain chain"""
     human_message_prompt = HumanMessagePromptTemplate(
@@ -35,30 +45,32 @@ def test_fireworks_in_chain() -> None:
     assert isinstance(output, str)


+@pytest.mark.scheduled
 def test_fireworks_model_param() -> None:
     """Tests model parameters for Fireworks"""
     llm = Fireworks(model="foo")
     assert llm.model == "foo"


-def test_fireworks_invoke() -> None:
+@pytest.mark.scheduled
+def test_fireworks_invoke(llm: Fireworks) -> None:
     """Tests completion with invoke"""
-    llm = Fireworks()
     output = llm.invoke("How is the weather in New York today?", stop=[","])
     assert isinstance(output, str)
     assert output[-1] == ","


+@pytest.mark.scheduled
 @pytest.mark.asyncio
-async def test_fireworks_ainvoke() -> None:
+async def test_fireworks_ainvoke(llm: Fireworks) -> None:
     """Tests completion with invoke"""
-    llm = Fireworks()
     output = await llm.ainvoke("How is the weather in New York today?", stop=[","])
     assert isinstance(output, str)
     assert output[-1] == ","


-def test_fireworks_batch() -> None:
+@pytest.mark.scheduled
+def test_fireworks_batch(llm: Fireworks) -> None:
     """Tests completion with invoke"""
-    llm = Fireworks()
     output = llm.batch(
@@ -76,10 +88,10 @@ def test_fireworks_batch() -> None:
         assert token[-1] == ","


+@pytest.mark.scheduled
 @pytest.mark.asyncio
-async def test_fireworks_abatch() -> None:
+async def test_fireworks_abatch(llm: Fireworks) -> None:
     """Tests completion with invoke"""
-    llm = Fireworks()
     output = await llm.abatch(
         [
             "How is the weather in New York today?",
@@ -95,18 +107,20 @@ async def test_fireworks_abatch() -> None:
         assert token[-1] == ","


-def test_fireworks_multiple_prompts() -> None:
+@pytest.mark.scheduled
+def test_fireworks_multiple_prompts(
+    llm: Fireworks,
+) -> None:
     """Test completion with multiple prompts."""
-    llm = Fireworks()
     output = llm.generate(["How is the weather in New York today?", "I'm pickle rick"])
     assert isinstance(output, LLMResult)
     assert isinstance(output.generations, list)
     assert len(output.generations) == 2


-def test_fireworks_streaming() -> None:
+@pytest.mark.scheduled
+def test_fireworks_streaming(llm: Fireworks) -> None:
     """Test stream completion."""
-    llm = Fireworks()
     generator = llm.stream("Who's the best quarterback in the NFL?")
     assert isinstance(generator, Generator)

@@ -114,9 +128,9 @@ def test_fireworks_streaming() -> None:
         assert isinstance(token, str)


-def test_fireworks_streaming_stop_words() -> None:
+@pytest.mark.scheduled
+def test_fireworks_streaming_stop_words(llm: Fireworks) -> None:
     """Test stream completion with stop words."""
-    llm = Fireworks()
     generator = llm.stream("Who's the best quarterback in the NFL?", stop=[","])
     assert isinstance(generator, Generator)

@@ -127,10 +141,10 @@ def test_fireworks_streaming_stop_words() -> None:
     assert last_token[-1] == ","


+@pytest.mark.scheduled
 @pytest.mark.asyncio
-async def test_fireworks_streaming_async() -> None:
+async def test_fireworks_streaming_async(llm: Fireworks) -> None:
     """Test stream completion."""
-    llm = Fireworks()
-
     last_token = ""
     async for token in llm.astream(
@@ -141,17 +155,17 @@ async def test_fireworks_streaming_async() -> None:
     assert last_token[-1] == ","


+@pytest.mark.scheduled
 @pytest.mark.asyncio
-async def test_fireworks_async_agenerate() -> None:
+async def test_fireworks_async_agenerate(llm: Fireworks) -> None:
     """Test async."""
-    llm = Fireworks()
     output = await llm.agenerate(["What is the best city to live in California?"])
     assert isinstance(output, LLMResult)


+@pytest.mark.scheduled
 @pytest.mark.asyncio
-async def test_fireworks_multiple_prompts_async_agenerate() -> None:
-    llm = Fireworks()
+async def test_fireworks_multiple_prompts_async_agenerate(llm: Fireworks) -> None:
    output = await llm.agenerate(
         ["How is the weather in New York today?", "I'm pickle rick"]
     )
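The async variants rely on `@pytest.mark.asyncio` from the pytest-asyncio plugin, which runs each coroutine test on an event loop. A minimal runnable sketch of that mechanism, with a stand-in coroutine instead of a real Fireworks call:

```python
# Illustrative only: fake_agenerate stands in for Fireworks.agenerate.
import asyncio
from typing import List

import pytest


async def fake_agenerate(prompts: List[str]) -> List[str]:
    await asyncio.sleep(0)  # yield to the event loop, as a real API call would
    return [f"echo: {p}" for p in prompts]


@pytest.mark.asyncio
async def test_multiple_prompts() -> None:
    output = await fake_agenerate(["How is the weather?", "I'm pickle rick"])
    assert len(output) == 2
```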