fix(anthropic,openai): fix tests (#33257)

following https://github.com/langchain-ai/langchain/pull/33192
This commit is contained in:
ccurme
2025-10-03 13:41:37 -04:00
committed by GitHub
parent 7f5be6b65c
commit 010ed5d096
6 changed files with 13 additions and 13 deletions

View File

@@ -319,7 +319,7 @@ def test_anthropic_streaming_callback() -> None:
callback_manager = CallbackManager([callback_handler])
chat = ChatAnthropic(
model=MODEL_NAME, # type: ignore[call-arg]
-callback_manager=callback_manager,
+callbacks=callback_manager,
verbose=True,
)
message = HumanMessage(content="Write me a sentence with 10 words.")
@@ -335,7 +335,7 @@ async def test_anthropic_async_streaming_callback() -> None:
callback_manager = CallbackManager([callback_handler])
chat = ChatAnthropic(
model=MODEL_NAME, # type: ignore[call-arg]
-callback_manager=callback_manager,
+callbacks=callback_manager,
verbose=True,
)
chat_messages: list[BaseMessage] = [
@@ -379,7 +379,7 @@ def test_streaming() -> None:
llm = ChatAnthropic( # type: ignore[call-arg, call-arg]
model_name=MODEL_NAME,
streaming=True,
-callback_manager=callback_manager,
+callbacks=callback_manager,
)
response = llm.generate([[HumanMessage(content="I'm Pickle Rick")]])
@@ -395,7 +395,7 @@ async def test_astreaming() -> None:
llm = ChatAnthropic( # type: ignore[call-arg, call-arg]
model_name=MODEL_NAME,
streaming=True,
-callback_manager=callback_manager,
+callbacks=callback_manager,
)
response = await llm.agenerate([[HumanMessage(content="I'm Pickle Rick")]])

View File

@@ -49,7 +49,7 @@ def test_anthropic_streaming_callback() -> None:
llm = AnthropicLLM(
model=MODEL, # type: ignore[call-arg]
streaming=True,
-callback_manager=callback_manager,
+callbacks=callback_manager,
verbose=True,
)
llm.invoke("Write me a sentence with 100 words.")
@@ -70,7 +70,7 @@ async def test_anthropic_async_streaming_callback() -> None:
llm = AnthropicLLM(
model=MODEL, # type: ignore[call-arg]
streaming=True,
-callback_manager=callback_manager,
+callbacks=callback_manager,
verbose=True,
)
result = await llm.agenerate(["How many toes do dogs have?"])

View File

@@ -91,7 +91,7 @@ def test_chat_openai_streaming() -> None:
max_tokens=10,
streaming=True,
temperature=0,
-callback_manager=callback_manager,
+callbacks=callback_manager,
verbose=True,
)
message = HumanMessage(content="Hello")
@@ -113,7 +113,7 @@ def test_chat_openai_streaming_generation_info() -> None:
callback = _FakeCallback()
callback_manager = CallbackManager([callback])
-chat = _get_llm(max_tokens=2, temperature=0, callback_manager=callback_manager)
+chat = _get_llm(max_tokens=2, temperature=0, callbacks=callback_manager)
list(chat.stream("hi"))
generation = callback.saved_things["generation"]
# `Hello!` is two tokens, assert that that is what is returned
@@ -145,7 +145,7 @@ async def test_async_chat_openai_streaming() -> None:
max_tokens=10,
streaming=True,
temperature=0,
-callback_manager=callback_manager,
+callbacks=callback_manager,
verbose=True,
)
message = HumanMessage(content="Hello")

View File

@@ -115,7 +115,7 @@ def test_chat_openai_streaming(use_responses_api: bool) -> None:
max_tokens=MAX_TOKEN_COUNT, # type: ignore[call-arg]
streaming=True,
temperature=0,
-callback_manager=callback_manager,
+callbacks=callback_manager,
verbose=True,
use_responses_api=use_responses_api,
)
@@ -138,7 +138,7 @@ def test_chat_openai_streaming_generation_info() -> None:
callback = _FakeCallback()
callback_manager = CallbackManager([callback])
-chat = ChatOpenAI(max_tokens=2, temperature=0, callback_manager=callback_manager) # type: ignore[call-arg]
+chat = ChatOpenAI(max_tokens=2, temperature=0, callbacks=callback_manager) # type: ignore[call-arg]
list(chat.stream("hi"))
generation = callback.saved_things["generation"]
# `Hello!` is two tokens, assert that that is what is returned

View File

@@ -145,7 +145,7 @@ def test_openai_streaming_callback() -> None:
max_tokens=10,
streaming=True,
temperature=0,
-callback_manager=callback_manager,
+callbacks=callback_manager,
verbose=True,
)
llm.invoke("Write me a sentence with 100 words.")
@@ -168,7 +168,7 @@ async def test_openai_async_streaming_callback() -> None:
max_tokens=10,
streaming=True,
temperature=0,
-callback_manager=callback_manager,
+callbacks=callback_manager,
verbose=True,
)
result = await llm.agenerate(["Write me a sentence with 100 words."])