Mirror of https://github.com/hwchase17/langchain.git
anthropic[patch]: add benchmark (#31718)
Account for lazy loading of clients in init time benchmark
This commit is contained in:
parent 9164e6f906
commit e09abf8170
@@ -1,7 +1,9 @@
 """Standard LangChain interface tests"""
 
+import pytest
 from langchain_core.language_models import BaseChatModel
 from langchain_tests.unit_tests import ChatModelUnitTests
+from pytest_benchmark.fixture import BenchmarkFixture  # type: ignore[import-untyped]
 
 from langchain_anthropic import ChatAnthropic
 
@@ -14,3 +16,16 @@ class TestAnthropicStandard(ChatModelUnitTests):
     @property
     def chat_model_params(self) -> dict:
         return {"model": "claude-3-haiku-20240307"}
+
+
+@pytest.mark.benchmark
+def test_init_time_with_client(benchmark: BenchmarkFixture) -> None:
+    """Test initialization time, accounting for lazy loading of client."""
+
+    def _init_in_loop_with_clients() -> None:
+        for _ in range(10):
+            llm = ChatAnthropic(model="claude-3-5-haiku-latest")
+            _ = llm._client
+            _ = llm._async_client
+
+    benchmark(_init_in_loop_with_clients)
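The new test reads llm._client and llm._async_client because ChatAnthropic builds its underlying SDK clients lazily, so timing only ChatAnthropic(...) would miss most of the real initialization cost. Below is a minimal sketch of that lazy-client pattern for illustration; the class name and the cached_property implementation are assumptions made for the example, not ChatAnthropic's actual internals.

from functools import cached_property


class LazyClientChatModel:
    """Illustrative model whose SDK client is only built on first access."""

    def __init__(self, model: str) -> None:
        # Construction itself is cheap: no SDK client is created here.
        self.model = model

    @cached_property
    def _client(self) -> object:
        # The expensive work happens on first attribute access, so a benchmark
        # that never reads `_client` under-reports the cost of getting a
        # usable model instance.
        return object()  # stand-in for the real Anthropic SDK client


llm = LazyClientChatModel(model="claude-3-5-haiku-latest")
_ = llm._client  # force the lazy client to materialize, as the benchmark does

With pytest-benchmark installed, running this test under pytest reports timing statistics for _init_in_loop_with_clients; each measured call constructs ten models and forces both the sync and async clients.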