diff --git a/libs/core/langchain_core/callbacks/usage.py b/libs/core/langchain_core/callbacks/usage.py
index a83265d549e..ef1dd78e600 100644
--- a/libs/core/langchain_core/callbacks/usage.py
+++ b/libs/core/langchain_core/callbacks/usage.py
@@ -24,7 +24,7 @@ class UsageMetadataCallbackHandler(BaseCallbackHandler):
             from langchain_core.callbacks import UsageMetadataCallbackHandler

             llm_1 = init_chat_model(model="openai:gpt-4o-mini")
-            llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-20241022")
+            llm_2 = init_chat_model(model="anthropic:claude-haiku-4-5-20251001")

             callback = UsageMetadataCallbackHandler()
             result_1 = llm_1.invoke("Hello", config={"callbacks": [callback]})
@@ -38,7 +38,7 @@ class UsageMetadataCallbackHandler(BaseCallbackHandler):
               'total_tokens': 18,
               'input_token_details': {'audio': 0, 'cache_read': 0},
               'output_token_details': {'audio': 0, 'reasoning': 0}},
-             'claude-3-5-haiku-20241022': {'input_tokens': 8,
+             'claude-haiku-4-5-20251001': {'input_tokens': 8,
               'output_tokens': 21,
               'total_tokens': 29,
               'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
@@ -110,7 +110,7 @@ def get_usage_metadata_callback(
             from langchain_core.callbacks import get_usage_metadata_callback

             llm_1 = init_chat_model(model="openai:gpt-4o-mini")
-            llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-20241022")
+            llm_2 = init_chat_model(model="anthropic:claude-haiku-4-5-20251001")

             with get_usage_metadata_callback() as cb:
                 llm_1.invoke("Hello")
@@ -127,7 +127,7 @@ def get_usage_metadata_callback(
                     "input_token_details": {"audio": 0, "cache_read": 0},
                     "output_token_details": {"audio": 0, "reasoning": 0},
                 },
-                "claude-3-5-haiku-20241022": {
+                "claude-haiku-4-5-20251001": {
                     "input_tokens": 8,
                     "output_tokens": 21,
                     "total_tokens": 29,
diff --git a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
index b92f3a54aa0..74b624a94ce 100644
--- a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
+++ b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
@@ -35,7 +35,7 @@ from langchain_anthropic import ChatAnthropic
 from langchain_anthropic._compat import _convert_from_v1_to_anthropic
 from tests.unit_tests._utils import FakeCallbackHandler

-MODEL_NAME = "claude-3-5-haiku-20241022"
+MODEL_NAME = "claude-haiku-4-5-20251001"


 def test_stream() -> None:
@@ -454,7 +454,7 @@ async def test_astreaming() -> None:

 def test_tool_use() -> None:
     llm = ChatAnthropic(
-        model="claude-3-7-sonnet-20250219",  # type: ignore[call-arg]
+        model="claude-sonnet-4-5-20250929",  # type: ignore[call-arg]
         temperature=0,
     )
     tool_definition = {
@@ -487,7 +487,7 @@ def test_tool_use() -> None:

     # Test streaming
     llm = ChatAnthropic(
-        model="claude-3-7-sonnet-20250219",  # type: ignore[call-arg]
+        model="claude-sonnet-4-5-20250929",  # type: ignore[call-arg]
         temperature=0,
         # Add extra headers to also test token-efficient tools
         model_kwargs={
diff --git a/libs/partners/anthropic/tests/integration_tests/test_standard.py b/libs/partners/anthropic/tests/integration_tests/test_standard.py
index 95256eb6b34..b60d008a979 100644
--- a/libs/partners/anthropic/tests/integration_tests/test_standard.py
+++ b/libs/partners/anthropic/tests/integration_tests/test_standard.py
@@ -12,7 +12,7 @@ from langchain_anthropic import ChatAnthropic

 REPO_ROOT_DIR = Path(__file__).parents[5]

-MODEL = "claude-3-5-haiku-20241022"
+MODEL = "claude-haiku-4-5-20251001"


 class TestAnthropicStandard(ChatModelIntegrationTests):
diff --git a/libs/partners/anthropic/tests/unit_tests/test_standard.py b/libs/partners/anthropic/tests/unit_tests/test_standard.py
index bfcf563d732..badc27573ab 100644
--- a/libs/partners/anthropic/tests/unit_tests/test_standard.py
+++ b/libs/partners/anthropic/tests/unit_tests/test_standard.py
@@ -48,7 +48,7 @@ def test_init_time_with_client(benchmark: BenchmarkFixture) -> None:

     def _init_in_loop_with_clients() -> None:
         for _ in range(10):
-            llm = ChatAnthropic(model="claude-3-5-haiku-20241022")
+            llm = ChatAnthropic(model="claude-haiku-4-5-20251001")
             _ = llm._client
             _ = llm._async_client
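For reference, the two APIs whose docstrings change above can be exercised end to end as follows. This is a minimal sketch mirroring the docstring examples in usage.py with the updated model identifiers; it assumes the langchain, langchain-openai, and langchain-anthropic packages are installed and the corresponding API keys are set in the environment:

    from langchain.chat_models import init_chat_model
    from langchain_core.callbacks import (
        UsageMetadataCallbackHandler,
        get_usage_metadata_callback,
    )

    # Model identifiers as updated in this diff.
    llm_1 = init_chat_model(model="openai:gpt-4o-mini")
    llm_2 = init_chat_model(model="anthropic:claude-haiku-4-5-20251001")

    # Explicit handler: pass it per invocation via config. A single
    # handler aggregates usage metadata across models, keyed by model name.
    callback = UsageMetadataCallbackHandler()
    llm_1.invoke("Hello", config={"callbacks": [callback]})
    llm_2.invoke("Hello", config={"callbacks": [callback]})
    print(callback.usage_metadata)

    # Context-manager variant: the callback is registered for every
    # invocation inside the block, no per-call config needed.
    with get_usage_metadata_callback() as cb:
        llm_1.invoke("Hello")
        llm_2.invoke("Hello")
    print(cb.usage_metadata)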