release[anthropic]: 0.3.15 (#31479)

Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>
This commit is contained in:
Bagatur 2025-06-03 10:38:11 -04:00 committed by GitHub
parent e70ec3b9fa
commit 310e643842
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 29 additions and 10 deletions

View File

@@ -12,7 +12,7 @@ dependencies = [
"pydantic<3.0.0,>=2.7.4", "pydantic<3.0.0,>=2.7.4",
] ]
name = "langchain-anthropic" name = "langchain-anthropic"
version = "0.3.14" version = "0.3.15"
description = "An integration package connecting AnthropicMessages and LangChain" description = "An integration package connecting AnthropicMessages and LangChain"
readme = "README.md" readme = "README.md"

View File

@@ -3,7 +3,7 @@
import json import json
import os import os
from base64 import b64encode from base64 import b64encode
from typing import Optional from typing import Optional, cast
import httpx import httpx
import pytest import pytest
@@ -42,7 +42,10 @@ def test_stream() -> None:
chunks_with_model_name = 0 chunks_with_model_name = 0
for token in llm.stream("I'm Pickle Rick"): for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token.content, str) assert isinstance(token.content, str)
full = token if full is None else full + token if full is None:
full = cast(BaseMessageChunk, token)
else:
full = full + token
assert isinstance(token, AIMessageChunk) assert isinstance(token, AIMessageChunk)
if token.usage_metadata is not None: if token.usage_metadata is not None:
if token.usage_metadata.get("input_tokens"): if token.usage_metadata.get("input_tokens"):
@@ -81,7 +84,10 @@ async def test_astream() -> None:
chunks_with_output_token_counts = 0 chunks_with_output_token_counts = 0
async for token in llm.astream("I'm Pickle Rick"): async for token in llm.astream("I'm Pickle Rick"):
assert isinstance(token.content, str) assert isinstance(token.content, str)
full = token if full is None else full + token if full is None:
full = cast(BaseMessageChunk, token)
else:
full = full + token
assert isinstance(token, AIMessageChunk) assert isinstance(token, AIMessageChunk)
if token.usage_metadata is not None: if token.usage_metadata is not None:
if token.usage_metadata.get("input_tokens"): if token.usage_metadata.get("input_tokens"):
@@ -697,7 +703,10 @@ def test_citations() -> None:
# Test streaming # Test streaming
full: Optional[BaseMessageChunk] = None full: Optional[BaseMessageChunk] = None
for chunk in llm.stream(messages): for chunk in llm.stream(messages):
full = chunk if full is None else full + chunk if full is None:
full = cast(BaseMessageChunk, chunk)
else:
full = full + chunk
assert isinstance(full, AIMessageChunk) assert isinstance(full, AIMessageChunk)
assert isinstance(full.content, list) assert isinstance(full.content, list)
assert any("citations" in block for block in full.content) assert any("citations" in block for block in full.content)
@@ -722,7 +731,10 @@ def test_thinking() -> None:
# Test streaming # Test streaming
full: Optional[BaseMessageChunk] = None full: Optional[BaseMessageChunk] = None
for chunk in llm.stream("Hello"): for chunk in llm.stream("Hello"):
full = chunk if full is None else full + chunk if full is None:
full = cast(BaseMessageChunk, chunk)
else:
full = full + chunk
assert isinstance(full, AIMessageChunk) assert isinstance(full, AIMessageChunk)
assert isinstance(full.content, list) assert isinstance(full.content, list)
assert any("thinking" in block for block in full.content) assert any("thinking" in block for block in full.content)
@@ -756,7 +768,10 @@ def test_redacted_thinking() -> None:
# Test streaming # Test streaming
full: Optional[BaseMessageChunk] = None full: Optional[BaseMessageChunk] = None
for chunk in llm.stream(query): for chunk in llm.stream(query):
full = chunk if full is None else full + chunk if full is None:
full = cast(BaseMessageChunk, chunk)
else:
full = full + chunk
assert isinstance(full, AIMessageChunk) assert isinstance(full, AIMessageChunk)
assert isinstance(full.content, list) assert isinstance(full.content, list)
stream_has_reasoning = False stream_has_reasoning = False

View File

@@ -4,7 +4,7 @@ from pathlib import Path
from typing import Literal, cast from typing import Literal, cast
from langchain_core.language_models import BaseChatModel from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage from langchain_core.messages import AIMessage, BaseMessageChunk
from langchain_tests.integration_tests import ChatModelIntegrationTests from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_anthropic import ChatAnthropic from langchain_anthropic import ChatAnthropic
@@ -146,7 +146,10 @@ def _invoke(llm: ChatAnthropic, input_: list, stream: bool) -> AIMessage:
if stream: if stream:
full = None full = None
for chunk in llm.stream(input_): for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator] if full is None:
full = cast(BaseMessageChunk, chunk)
else:
full = full + chunk
return cast(AIMessage, full) return cast(AIMessage, full)
else: else:
return cast(AIMessage, llm.invoke(input_)) return cast(AIMessage, llm.invoke(input_))

View File

@@ -1,4 +1,5 @@
version = 1 version = 1
revision = 1
requires-python = ">=3.9" requires-python = ">=3.9"
resolution-markers = [ resolution-markers = [
"python_full_version >= '3.13' and platform_python_implementation == 'PyPy'", "python_full_version >= '3.13' and platform_python_implementation == 'PyPy'",
@@ -423,7 +424,7 @@ wheels = [
[[package]] [[package]]
name = "langchain-anthropic" name = "langchain-anthropic"
version = "0.3.14" version = "0.3.15"
source = { editable = "." } source = { editable = "." }
dependencies = [ dependencies = [
{ name = "anthropic" }, { name = "anthropic" },