Mirror of https://github.com/hwchase17/langchain.git, synced 2025-07-01 02:43:37 +00:00
standard-tests[patch]: fix oai usage metadata test (#27122)
This commit is contained in:
parent 827bdf4f51 · commit bd5b335cb4
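Summary: this patch changes the `supported_usage_metadata_details` property from a flat list of feature flags to a mapping keyed by mode, so a chat-model integration can declare different usage-metadata support for the invoke path and the streaming path. A minimal sketch of the new shape, using only names that appear in the diff below (the `UsageDetail` alias is shorthand introduced here for readability, not part of the commit):

    from typing import Dict, List, Literal

    UsageDetail = Literal[
        "audio_input",
        "audio_output",
        "reasoning_output",
        "cache_read_input",
        "cache_creation_input",
    ]

    # Base-class default after this commit: no details verified in either mode.
    def supported_usage_metadata_details() -> Dict[Literal["invoke", "stream"], List[UsageDetail]]:
        return {"invoke": [], "stream": []}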
@@ -1,7 +1,7 @@
 """Standard LangChain interface tests"""
 
 from pathlib import Path
-from typing import List, Literal, Type, cast
+from typing import Dict, List, Literal, Type, cast
 
 from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage
@@ -36,16 +36,22 @@ class TestAnthropicStandard(ChatModelIntegrationTests):
     @property
     def supported_usage_metadata_details(
         self,
-    ) -> List[
-        Literal[
-            "audio_input",
-            "audio_output",
-            "reasoning_output",
-            "cache_read_input",
-            "cache_creation_input",
-        ]
-    ]:
-        return ["cache_read_input", "cache_creation_input"]
+    ) -> Dict[
+        Literal["invoke", "stream"],
+        List[
+            Literal[
+                "audio_input",
+                "audio_output",
+                "reasoning_output",
+                "cache_read_input",
+                "cache_creation_input",
+            ]
+        ],
+    ]:
+        return {
+            "invoke": ["cache_read_input", "cache_creation_input"],
+            "stream": ["cache_read_input", "cache_creation_input"],
+        }
 
     def invoke_with_cache_creation_input(self, *, stream: bool = False) -> AIMessage:
         llm = ChatAnthropic(
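With the dict shape, `TestAnthropicStandard` declares cache-read and cache-creation token details under both keys, so the suite exercises Anthropic's cache-token accounting on the invoke and streaming paths alike. A quick sketch of the membership test this enables (values copied from the return above):

    details = {
        "invoke": ["cache_read_input", "cache_creation_input"],
        "stream": ["cache_read_input", "cache_creation_input"],
    }
    assert "cache_read_input" in details["stream"]  # streamed cache reads are verified too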
@@ -238,30 +238,6 @@ async def test_async_chat_openai_bind_functions() -> None:
     assert isinstance(generation, AIMessage)
 
 
-def test_chat_openai_extra_kwargs() -> None:
-    """Test extra kwargs to chat openai."""
-    # Check that foo is saved in extra_kwargs.
-    llm = ChatOpenAI(foo=3, max_tokens=10)  # type: ignore[call-arg]
-    assert llm.max_tokens == 10
-    assert llm.model_kwargs == {"foo": 3}
-
-    # Test that if extra_kwargs are provided, they are added to it.
-    llm = ChatOpenAI(foo=3, model_kwargs={"bar": 2})  # type: ignore[call-arg]
-    assert llm.model_kwargs == {"foo": 3, "bar": 2}
-
-    # Test that if provided twice it errors
-    with pytest.raises(ValueError):
-        ChatOpenAI(foo=3, model_kwargs={"foo": 2})  # type: ignore[call-arg]
-
-    # Test that if explicit param is specified in kwargs it errors
-    with pytest.raises(ValueError):
-        ChatOpenAI(model_kwargs={"temperature": 0.2})
-
-    # Test that "model" cannot be specified in kwargs
-    with pytest.raises(ValueError):
-        ChatOpenAI(model_kwargs={"model": "gpt-3.5-turbo-instruct"})
-
-
 @pytest.mark.scheduled
 def test_openai_streaming() -> None:
     """Test streaming tokens from OpenAI."""
@@ -1,7 +1,7 @@
 """Standard LangChain interface tests"""
 
 from pathlib import Path
-from typing import List, Literal, Type, cast
+from typing import Dict, List, Literal, Type, cast
 
 from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage
@@ -28,16 +28,19 @@ class TestOpenAIStandard(ChatModelIntegrationTests):
     @property
     def supported_usage_metadata_details(
         self,
-    ) -> List[
-        Literal[
-            "audio_input",
-            "audio_output",
-            "reasoning_output",
-            "cache_read_input",
-            "cache_creation_input",
-        ]
-    ]:
-        return ["reasoning_output", "cache_read_input"]
+    ) -> Dict[
+        Literal["invoke", "stream"],
+        List[
+            Literal[
+                "audio_input",
+                "audio_output",
+                "reasoning_output",
+                "cache_read_input",
+                "cache_creation_input",
+            ]
+        ],
+    ]:
+        return {"invoke": ["reasoning_output", "cache_read_input"], "stream": []}
 
     def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
         with open(REPO_ROOT_DIR / "README.md", "r") as f:
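This hunk is the fix named in the commit title: `TestOpenAIStandard` now lists `reasoning_output` and `cache_read_input` under "invoke" only, with an empty "stream" list, presumably because those token details are not reported in OpenAI's streamed usage metadata. The effect in miniature:

    details = {"invoke": ["reasoning_output", "cache_read_input"], "stream": []}
    assert "reasoning_output" in details["invoke"]      # asserted on invoke
    assert "reasoning_output" not in details["stream"]  # skipped when streaming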
@@ -151,25 +151,25 @@ class ChatModelIntegrationTests(ChatModelTests):
         assert isinstance(result.usage_metadata["output_tokens"], int)
         assert isinstance(result.usage_metadata["total_tokens"], int)
 
-        if "audio_input" in self.supported_usage_metadata_details:
+        if "audio_input" in self.supported_usage_metadata_details["invoke"]:
             msg = self.invoke_with_audio_input()
             assert isinstance(msg.usage_metadata["input_token_details"]["audio"], int)  # type: ignore[index]
-        if "audio_output" in self.supported_usage_metadata_details:
+        if "audio_output" in self.supported_usage_metadata_details["invoke"]:
             msg = self.invoke_with_audio_output()
             assert isinstance(msg.usage_metadata["output_token_details"]["audio"], int)  # type: ignore[index]
-        if "reasoning_output" in self.supported_usage_metadata_details:
+        if "reasoning_output" in self.supported_usage_metadata_details["invoke"]:
             msg = self.invoke_with_reasoning_output()
             assert isinstance(
                 msg.usage_metadata["output_token_details"]["reasoning"],  # type: ignore[index]
                 int,
             )
-        if "cache_read_input" in self.supported_usage_metadata_details:
+        if "cache_read_input" in self.supported_usage_metadata_details["invoke"]:
             msg = self.invoke_with_cache_read_input()
             assert isinstance(
                 msg.usage_metadata["input_token_details"]["cache_read"],  # type: ignore[index]
                 int,
             )
-        if "cache_creation_input" in self.supported_usage_metadata_details:
+        if "cache_creation_input" in self.supported_usage_metadata_details["invoke"]:
             msg = self.invoke_with_cache_creation_input()
             assert isinstance(
                 msg.usage_metadata["input_token_details"]["cache_creation"],  # type: ignore[index]
@@ -189,25 +189,25 @@ class ChatModelIntegrationTests(ChatModelTests):
         assert isinstance(full.usage_metadata["output_tokens"], int)
         assert isinstance(full.usage_metadata["total_tokens"], int)
 
-        if "audio_input" in self.supported_usage_metadata_details:
+        if "audio_input" in self.supported_usage_metadata_details["stream"]:
             msg = self.invoke_with_audio_input(stream=True)
             assert isinstance(msg.usage_metadata["input_token_details"]["audio"], int)  # type: ignore[index]
-        if "audio_output" in self.supported_usage_metadata_details:
+        if "audio_output" in self.supported_usage_metadata_details["stream"]:
             msg = self.invoke_with_audio_output(stream=True)
             assert isinstance(msg.usage_metadata["output_token_details"]["audio"], int)  # type: ignore[index]
-        if "reasoning_output" in self.supported_usage_metadata_details:
+        if "reasoning_output" in self.supported_usage_metadata_details["stream"]:
             msg = self.invoke_with_reasoning_output(stream=True)
             assert isinstance(
                 msg.usage_metadata["output_token_details"]["reasoning"],  # type: ignore[index]
                 int,
             )
-        if "cache_read_input" in self.supported_usage_metadata_details:
+        if "cache_read_input" in self.supported_usage_metadata_details["stream"]:
             msg = self.invoke_with_cache_read_input(stream=True)
             assert isinstance(
                 msg.usage_metadata["input_token_details"]["cache_read"],  # type: ignore[index]
                 int,
             )
-        if "cache_creation_input" in self.supported_usage_metadata_details:
+        if "cache_creation_input" in self.supported_usage_metadata_details["stream"]:
             msg = self.invoke_with_cache_creation_input(stream=True)
             assert isinstance(
                 msg.usage_metadata["input_token_details"]["cache_creation"],  # type: ignore[index]
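The two hunks above make the invoke-path and streaming-path tests symmetric: each optional assertion is now gated on the list for its own mode, so a provider can support a detail when invoking without claiming it when streaming. A condensed sketch of the gating pattern (the helper name is illustrative, not from the diff):

    def is_supported(detail: str, mode: str, details: dict) -> bool:
        # Only assert on token details the provider claims for this mode.
        return detail in details[mode]

    details = {"invoke": ["cache_read_input"], "stream": []}
    assert is_supported("cache_read_input", "invoke", details)
    assert not is_supported("cache_read_input", "stream", details)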
@@ -2,7 +2,7 @@
 
 import os
 from abc import abstractmethod
-from typing import Any, List, Literal, Optional, Tuple, Type
+from typing import Any, Dict, List, Literal, Optional, Tuple, Type
 from unittest import mock
 
 import pytest
@@ -141,16 +141,19 @@ class ChatModelTests(BaseStandardTests):
     @property
     def supported_usage_metadata_details(
         self,
-    ) -> List[
-        Literal[
-            "audio_input",
-            "audio_output",
-            "reasoning_output",
-            "cache_read_input",
-            "cache_creation_input",
-        ]
-    ]:
-        return []
+    ) -> Dict[
+        Literal["invoke", "stream"],
+        List[
+            Literal[
+                "audio_input",
+                "audio_output",
+                "reasoning_output",
+                "cache_read_input",
+                "cache_creation_input",
+            ]
+        ],
+    ]:
+        return {"invoke": [], "stream": []}
 
 
 class ChatModelUnitTests(ChatModelTests):
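Downstream integration packages that subclass these standard tests and override the property will need the same migration. A hypothetical before/after (the class name and supported details are illustrative, not part of this commit):

    class TestMyProviderStandard(ChatModelIntegrationTests):
        @property
        def supported_usage_metadata_details(self):
            # Previously: return ["cache_read_input"]
            return {"invoke": ["cache_read_input"], "stream": []}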