mirror of https://github.com/hwchase17/langchain.git (synced 2025-08-24 03:52:08 +00:00)
Using `pyupgrade` to get all `partners` code up to 3.9 standards (mostly fixing old `typing` imports).
44 lines, 1.2 KiB, Python
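
The `pyupgrade` pass noted in the commit message above mostly replaces pre-3.9 `typing` imports with the built-in generics this file now uses. A minimal sketch of that kind of rewrite (hypothetical names, not the actual diff from this commit):

# Before: pre-3.9 style, importing generic aliases from typing.
from typing import Dict, Type

OldParams = Dict[str, int]
OldModelRef = Type[int]

# After pyupgrade targeting 3.9+: built-in generics, matching the
# `type[BaseChatModel]` and `-> dict` annotations in the file below.
NewParams = dict[str, int]
NewModelRef = type[int]
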
"""Standard LangChain interface tests"""
|
|
|
|
import pytest
|
|
from langchain_core.language_models import BaseChatModel
|
|
from langchain_core.rate_limiters import InMemoryRateLimiter
|
|
from langchain_core.tools import BaseTool
|
|
from langchain_tests.integration_tests import (
|
|
ChatModelIntegrationTests,
|
|
)
|
|
|
|
from langchain_groq import ChatGroq
|
|
|
|
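# Throttle live Groq calls: 0.2 requests/second allows at most one request
# every five seconds across the tests in this module.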
rate_limiter = InMemoryRateLimiter(requests_per_second=0.2)


class BaseTestGroq(ChatModelIntegrationTests):
    @property
    def chat_model_class(self) -> type[BaseChatModel]:
        return ChatGroq

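    # Overridden only to attach the xfail marker; the body defers to the
    # standard suite's implementation.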
    @pytest.mark.xfail(reason="Not yet implemented.")
    def test_tool_message_histories_list_content(
        self, model: BaseChatModel, my_adder_tool: BaseTool
    ) -> None:
        super().test_tool_message_histories_list_content(model, my_adder_tool)

    @property
    def supports_json_mode(self) -> bool:
        return True


class TestGroqLlama(BaseTestGroq):
    @property
    def chat_model_params(self) -> dict:
        return {
            "model": "llama-3.1-8b-instant",
            "temperature": 0,
            "rate_limiter": rate_limiter,
        }

    @property
    def supports_json_mode(self) -> bool:
        return True
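
For reference, the standard suite instantiates `chat_model_class` with `chat_model_params`, so `TestGroqLlama` exercises a model roughly equivalent to the following sketch (assumes `langchain-groq` is installed and a valid GROQ_API_KEY is set in the environment):

from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_groq import ChatGroq

# A minimal, hand-built equivalent of the model under test; illustrative only.
model = ChatGroq(
    model="llama-3.1-8b-instant",
    temperature=0,
    rate_limiter=InMemoryRateLimiter(requests_per_second=0.2),
)
print(model.invoke("Say hello in one word.").content)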