clients -> private attributes
This commit is contained in:
parent
adcb5396d9
commit
4588e06794
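The diff below converts the OpenAI SDK client fields from public Pydantic fields into private attributes, exposed through lazily-initializing public properties. A minimal sketch of that pattern, assuming Pydantic v2 and httpx (the class and field names here are illustrative, not the langchain_openai code itself):

```python
from typing import Any, Optional

import httpx
from pydantic import BaseModel, PrivateAttr


class ClientHolder(BaseModel):
    openai_proxy: Optional[str] = None
    # Private storage: excluded from serialization and not part of the public schema.
    _http_client: Optional[httpx.Client] = PrivateAttr(default=None)

    def __init__(self, http_client: Optional[Any] = None, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        # An injected client bypasses lazy creation, mirroring the new __init__ below.
        self._http_client = http_client

    @property
    def http_client(self) -> Optional[httpx.Client]:
        # Build the client on first access and cache it on the private attribute.
        if self._http_client is not None:
            return self._http_client
        if not self.openai_proxy:
            return None
        self._http_client = httpx.Client(proxy=self.openai_proxy)
        return self._http_client
```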
@@ -660,37 +660,37 @@ class AzureChatOpenAI(BaseChatOpenAI):
         return self
 
     @property
-    def _root_client(self) -> openai.AzureOpenAI:
-        if self.root_client is not None:
-            return self.root_client
-        sync_specific = {"http_client": self._http_client}
-        self.root_client = openai.AzureOpenAI(**self._client_params, **sync_specific)  # type: ignore[call-overload]
-        return self.root_client
+    def root_client(self) -> openai.AzureOpenAI:
+        if self._root_client is not None:
+            return self._root_client
+        sync_specific = {"http_client": self.http_client}
+        self._root_client = openai.AzureOpenAI(**self._client_params, **sync_specific)  # type: ignore[call-overload]
+        return self._root_client
 
     @property
-    def _root_async_client(self) -> openai.AsyncAzureOpenAI:
-        if self.root_async_client is not None:
-            return self.root_async_client
-        async_specific = {"http_client": self._http_async_client}
-        self.root_async_client = openai.AsyncAzureOpenAI(
+    def root_async_client(self) -> openai.AsyncAzureOpenAI:
+        if self._root_async_client is not None:
+            return self._root_async_client
+        async_specific = {"http_client": self.http_async_client}
+        self._root_async_client = openai.AsyncAzureOpenAI(
             **self._client_params,
             **async_specific,  # type: ignore[call-overload]
         )
         return self._root_async_client
 
     @property
-    def _client(self) -> Any:
-        if self.client is not None:
-            return self.client
-        self.client = self._root_client.chat.completions
-        return self.client
+    def client(self) -> Any:
+        if self._client is not None:
+            return self._client
+        self._client = self.root_client.chat.completions
+        return self._client
 
     @property
-    def _async_client(self) -> Any:
-        if self.async_client is not None:
-            return self.async_client
-        self.async_client = self._root_async_client.chat.completions
-        return self.async_client
+    def async_client(self) -> Any:
+        if self._async_client is not None:
+            return self._async_client
+        self._async_client = self.root_async_client.chat.completions
+        return self._async_client
 
     @property
     def _identifying_params(self) -> Dict[str, Any]:
@@ -396,10 +396,10 @@ class _AllReturnType(TypedDict):
 
 
 class BaseChatOpenAI(BaseChatModel):
-    client: Any = Field(default=None, exclude=True)  #: :meta private:
-    async_client: Any = Field(default=None, exclude=True)  #: :meta private:
-    root_client: Any = Field(default=None, exclude=True)  #: :meta private:
-    root_async_client: Any = Field(default=None, exclude=True)  #: :meta private:
+    _client: Any = PrivateAttr(default=None)  #: :meta private:
+    _async_client: Any = PrivateAttr(default=None)  #: :meta private:
+    _root_client: Any = PrivateAttr(default=None)  #: :meta private:
+    _root_async_client: Any = PrivateAttr(default=None)  #: :meta private:
     model_name: str = Field(default="gpt-3.5-turbo", alias="model")
     """Model name to use."""
     temperature: Optional[float] = None
@@ -471,11 +471,11 @@ class BaseChatOpenAI(BaseChatModel):
     default_query: Union[Mapping[str, object], None] = None
     # Configure a custom httpx client. See the
     # [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
-    http_client: Union[Any, None] = Field(default=None, exclude=True)
+    _http_client: Union[Any, None] = PrivateAttr(default=None)
     """Optional httpx.Client. Only used for sync invocations. Must specify
     http_async_client as well if you'd like a custom client for async invocations.
     """
-    http_async_client: Union[Any, None] = Field(default=None, exclude=True)
+    _http_async_client: Union[Any, None] = PrivateAttr(default=None)
     """Optional httpx.AsyncClient. Only used for async invocations. Must specify
     http_client as well if you'd like a custom client for sync invocations."""
     stop: Optional[Union[List[str], str]] = Field(default=None, alias="stop_sequences")
@@ -523,6 +523,24 @@ class BaseChatOpenAI(BaseChatModel):
             values["temperature"] = 1
         return values
 
+    def __init__(
+        self,
+        client: Optional[Any] = None,
+        async_client: Optional[Any] = None,
+        root_client: Optional[Any] = None,
+        async_root_client: Optional[Any] = None,
+        http_client: Optional[Any] = None,
+        http_async_client: Optional[Any] = None,
+        **kwargs: Any,
+    ) -> None:
+        super().__init__(**kwargs)
+        self._client = client
+        self._async_client = async_client
+        self._root_client = root_client
+        self._async_root_client = async_root_client
+        self._http_client = http_client
+        self._http_async_client = http_async_client
+
     @model_validator(mode="after")
     def validate_environment(self) -> Self:
         """Validate that api key and python package exists in environment."""
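A hedged usage sketch of the constructor added above (the model name, timeout, and API key are placeholders): injected clients now land on the private attributes and surface through the public properties.

```python
import httpx
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="gpt-4o-mini",  # illustrative model name
    openai_api_key="test",  # placeholder key for the sketch
    http_client=httpx.Client(timeout=30.0),
)
assert llm.http_client is not None  # the property returns the injected client
```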
@@ -551,10 +569,10 @@ class BaseChatOpenAI(BaseChatModel):
         if self.max_retries is not None:
             self._client_params["max_retries"] = self.max_retries
 
-        if self.openai_proxy and (self.http_client or self.http_async_client):
+        if self.openai_proxy and (self._http_client or self._http_async_client):
             openai_proxy = self.openai_proxy
-            http_client = self.http_client
-            http_async_client = self.http_async_client
+            http_client = self._http_client
+            http_async_client = self._http_async_client
             raise ValueError(
                 "Cannot specify 'openai_proxy' if one of "
                 "'http_client'/'http_async_client' is already specified. Received:\n"
@@ -564,7 +582,7 @@ class BaseChatOpenAI(BaseChatModel):
         return self
 
     @property
-    def _http_client(self) -> Optional[httpx.Client]:
+    def http_client(self) -> Optional[httpx.Client]:
         """Optional httpx.Client. Only used for sync invocations.
 
         Must specify http_async_client as well if you'd like a custom client for
@@ -573,8 +591,8 @@ class BaseChatOpenAI(BaseChatModel):
         # Configure a custom httpx client. See the
         # [httpx documentation](https://www.python-httpx.org/api/#client) for more
         # details.
-        if self.http_client is not None:
-            return self.http_client
+        if self._http_client is not None:
+            return self._http_client
         if not self.openai_proxy:
             return None
         try:
@@ -584,18 +602,18 @@ class BaseChatOpenAI(BaseChatModel):
                 "Could not import httpx python package. "
                 "Please install it with `pip install httpx`."
             ) from e
-        self.http_client = httpx.Client(proxy=self.openai_proxy)
-        return self.http_client
+        self._http_client = httpx.Client(proxy=self.openai_proxy)
+        return self._http_client
 
     @property
-    def _http_async_client(self) -> Optional[httpx.AsyncClient]:
+    def http_async_client(self) -> Optional[httpx.AsyncClient]:
         """Optional httpx.AsyncClient. Only used for async invocations.
 
         Must specify http_client as well if you'd like a custom client for sync
         invocations.
         """
-        if self.http_async_client is not None:
-            return self.http_async_client
+        if self._http_async_client is not None:
+            return self._http_async_client
         if not self.openai_proxy:
             return None
         try:
@@ -605,41 +623,41 @@ class BaseChatOpenAI(BaseChatModel):
                 "Could not import httpx python package. "
                 "Please install it with `pip install httpx`."
             ) from e
-        self.http_async_client = httpx.AsyncClient(proxy=self.openai_proxy)
-        return self.http_async_client
+        self._http_async_client = httpx.AsyncClient(proxy=self.openai_proxy)
+        return self._http_async_client
 
     @property
-    def _root_client(self) -> openai.OpenAI:
-        if self.root_client is not None:
-            return self.root_client
-        sync_specific = {"http_client": self._http_client}
-        self.root_client = openai.OpenAI(**self._client_params, **sync_specific)  # type: ignore[arg-type]
-        return self.root_client
+    def root_client(self) -> openai.OpenAI:
+        if self._root_client is not None:
+            return self._root_client
+        sync_specific = {"http_client": self.http_client}
+        self._root_client = openai.OpenAI(**self._client_params, **sync_specific)  # type: ignore[arg-type]
+        return self._root_client
 
     @property
-    def _root_async_client(self) -> openai.AsyncOpenAI:
-        if self.root_async_client is not None:
-            return self.root_async_client
-        async_specific = {"http_client": self._http_async_client}
-        self.root_async_client = openai.AsyncOpenAI(
+    def root_async_client(self) -> openai.AsyncOpenAI:
+        if self._root_async_client is not None:
+            return self._root_async_client
+        async_specific = {"http_client": self.http_async_client}
+        self._root_async_client = openai.AsyncOpenAI(
             **self._client_params,
             **async_specific,  # type: ignore[arg-type]
         )
-        return self.root_async_client
+        return self._root_async_client
 
     @property
-    def _client(self) -> Any:
-        if self.client is not None:
-            return self.client
-        self.client = self._root_client.chat.completions
-        return self.client
+    def client(self) -> Any:
+        if self._client is not None:
+            return self._client
+        self._client = self.root_client.chat.completions
+        return self._client
 
     @property
-    def _async_client(self) -> Any:
-        if self.async_client is not None:
-            return self.async_client
-        self.async_client = self._root_async_client.chat.completions
-        return self.async_client
+    def async_client(self) -> Any:
+        if self._async_client is not None:
+            return self._async_client
+        self._async_client = self.root_async_client.chat.completions
+        return self._async_client
 
     @property
     def _default_params(self) -> Dict[str, Any]:
@@ -766,15 +784,15 @@ class BaseChatOpenAI(BaseChatModel):
                     "specified."
                 )
             payload.pop("stream")
-            response_stream = self._root_client.beta.chat.completions.stream(**payload)
+            response_stream = self.root_client.beta.chat.completions.stream(**payload)
             context_manager = response_stream
         else:
             if self.include_response_headers:
-                raw_response = self._client.with_raw_response.create(**payload)
+                raw_response = self.client.with_raw_response.create(**payload)
                 response = raw_response.parse()
                 base_generation_info = {"headers": dict(raw_response.headers)}
             else:
-                response = self._client.create(**payload)
+                response = self.client.create(**payload)
             context_manager = response
         try:
             with context_manager as response:
@@ -834,15 +852,15 @@ class BaseChatOpenAI(BaseChatModel):
                 )
             payload.pop("stream")
             try:
-                response = self._root_client.beta.chat.completions.parse(**payload)
+                response = self.root_client.beta.chat.completions.parse(**payload)
             except openai.BadRequestError as e:
                 _handle_openai_bad_request(e)
         elif self.include_response_headers:
-            raw_response = self._client.with_raw_response.create(**payload)
+            raw_response = self.client.with_raw_response.create(**payload)
             response = raw_response.parse()
             generation_info = {"headers": dict(raw_response.headers)}
         else:
-            response = self._client.create(**payload)
+            response = self.client.create(**payload)
         return self._create_chat_result(response, generation_info)
 
     def _get_request_payload(
@@ -930,19 +948,19 @@ class BaseChatOpenAI(BaseChatModel):
                     "specified."
                 )
             payload.pop("stream")
-            response_stream = self._root_async_client.beta.chat.completions.stream(
+            response_stream = self.root_async_client.beta.chat.completions.stream(
                 **payload
             )
             context_manager = response_stream
         else:
             if self.include_response_headers:
-                raw_response = await self._async_client.with_raw_response.create(
+                raw_response = await self.async_client.with_raw_response.create(
                     **payload
                 )
                 response = raw_response.parse()
                 base_generation_info = {"headers": dict(raw_response.headers)}
             else:
-                response = await self._async_client.create(**payload)
+                response = await self.async_client.create(**payload)
             context_manager = response
         try:
             async with context_manager as response:
@@ -1002,17 +1020,17 @@ class BaseChatOpenAI(BaseChatModel):
                 )
             payload.pop("stream")
             try:
-                response = await self._root_async_client.beta.chat.completions.parse(
+                response = await self.root_async_client.beta.chat.completions.parse(
                     **payload
                 )
             except openai.BadRequestError as e:
                 _handle_openai_bad_request(e)
         elif self.include_response_headers:
-            raw_response = await self._async_client.with_raw_response.create(**payload)
+            raw_response = await self.async_client.with_raw_response.create(**payload)
             response = raw_response.parse()
             generation_info = {"headers": dict(raw_response.headers)}
         else:
-            response = await self._async_client.create(**payload)
+            response = await self.async_client.create(**payload)
         return await run_in_executor(
             None, self._create_chat_result, response, generation_info
         )
@@ -2023,6 +2041,9 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
     max_tokens: Optional[int] = Field(default=None, alias="max_completion_tokens")
     """Maximum number of tokens to generate."""
 
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
     @property
     def lc_secrets(self) -> Dict[str, str]:
         return {"openai_api_key": "OPENAI_API_KEY"}
@@ -660,9 +660,6 @@ def test_openai_structured_output(model: str) -> None:
 def test_openai_proxy() -> None:
     """Test ChatOpenAI with proxy."""
     chat_openai = ChatOpenAI(openai_proxy="http://localhost:8080")
-    assert chat_openai.client is None
-    _ = chat_openai._client  # force client to instantiate
-    assert chat_openai.client is not None
     mounts = chat_openai.client._client._client._mounts
     assert len(mounts) == 1
     for key, value in mounts.items():
@@ -671,9 +668,6 @@ def test_openai_proxy() -> None:
     assert proxy.host == b"localhost"
     assert proxy.port == 8080
 
-    assert chat_openai.async_client is None
-    _ = chat_openai._async_client  # force client to instantiate
-    assert chat_openai.async_client is not None
     async_client_mounts = chat_openai.async_client._client._client._mounts
     assert len(async_client_mounts) == 1
     for key, value in async_client_mounts.items():
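The assertions deleted above relied on the client fields starting out as None and being instantiated explicitly; with this change the public `client` / `async_client` properties build the underlying SDK client on first access, so forcing instantiation is no longer needed. A hedged sketch of that behavior (placeholder API key assumed; the attribute chain comes from the test itself):

```python
from langchain_openai import ChatOpenAI

chat_openai = ChatOpenAI(openai_api_key="test", openai_proxy="http://localhost:8080")
# First access through the public property instantiates the client lazily.
mounts = chat_openai.client._client._client._mounts
assert len(mounts) == 1
```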
@@ -14,6 +14,7 @@ def test_initialize_azure_openai() -> None:
         azure_deployment="35-turbo-dev",
         openai_api_version="2023-05-15",
         azure_endpoint="my-base-url",
+        http_client=None,
     )
     assert llm.deployment_name == "35-turbo-dev"
     assert llm.openai_api_version == "2023-05-15"
@@ -298,7 +298,7 @@ async def test_glm4_astream(mock_glm4_completion: list) -> None:
     usage_chunk = mock_glm4_completion[-1]
 
     usage_metadata: Optional[UsageMetadata] = None
-    with patch.object(llm, "async_client", mock_client):
+    with patch.object(llm, "_async_client", mock_client):
         async for chunk in llm.astream("你的名字叫什么?只回答名字"):
             assert isinstance(chunk, AIMessageChunk)
             if chunk.usage_metadata is not None:
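The test updates here and below all make the same substitution: mocks are now patched onto the private attribute, which the public property returns whenever it is already set. A minimal sketch of that interaction (placeholder key assumed):

```python
from unittest.mock import MagicMock, patch

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(openai_api_key="test")  # placeholder key for the sketch
mock_client = MagicMock()
with patch.object(llm, "_client", mock_client):
    # The public property short-circuits to the patched private attribute.
    assert llm.client is mock_client
```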
@@ -323,7 +323,7 @@ def test_glm4_stream(mock_glm4_completion: list) -> None:
     usage_chunk = mock_glm4_completion[-1]
 
     usage_metadata: Optional[UsageMetadata] = None
-    with patch.object(llm, "client", mock_client):
+    with patch.object(llm, "_client", mock_client):
         for chunk in llm.stream("你的名字叫什么?只回答名字"):
             assert isinstance(chunk, AIMessageChunk)
             if chunk.usage_metadata is not None:
@@ -378,7 +378,7 @@ async def test_deepseek_astream(mock_deepseek_completion: list) -> None:
     mock_client.create = mock_create
     usage_chunk = mock_deepseek_completion[-1]
     usage_metadata: Optional[UsageMetadata] = None
-    with patch.object(llm, "async_client", mock_client):
+    with patch.object(llm, "_async_client", mock_client):
         async for chunk in llm.astream("你的名字叫什么?只回答名字"):
             assert isinstance(chunk, AIMessageChunk)
             if chunk.usage_metadata is not None:
@@ -402,7 +402,7 @@ def test_deepseek_stream(mock_deepseek_completion: list) -> None:
     mock_client.create = mock_create
     usage_chunk = mock_deepseek_completion[-1]
     usage_metadata: Optional[UsageMetadata] = None
-    with patch.object(llm, "client", mock_client):
+    with patch.object(llm, "_client", mock_client):
         for chunk in llm.stream("你的名字叫什么?只回答名字"):
             assert isinstance(chunk, AIMessageChunk)
             if chunk.usage_metadata is not None:
@@ -446,7 +446,7 @@ async def test_openai_astream(mock_openai_completion: list) -> None:
     mock_client.create = mock_create
     usage_chunk = mock_openai_completion[-1]
     usage_metadata: Optional[UsageMetadata] = None
-    with patch.object(llm, "async_client", mock_client):
+    with patch.object(llm, "_async_client", mock_client):
         async for chunk in llm.astream("你的名字叫什么?只回答名字"):
             assert isinstance(chunk, AIMessageChunk)
             if chunk.usage_metadata is not None:
@@ -470,7 +470,7 @@ def test_openai_stream(mock_openai_completion: list) -> None:
     mock_client.create = mock_create
     usage_chunk = mock_openai_completion[-1]
     usage_metadata: Optional[UsageMetadata] = None
-    with patch.object(llm, "client", mock_client):
+    with patch.object(llm, "_client", mock_client):
         for chunk in llm.stream("你的名字叫什么?只回答名字"):
             assert isinstance(chunk, AIMessageChunk)
             if chunk.usage_metadata is not None:
@@ -533,7 +533,7 @@ def mock_async_client(mock_completion: dict) -> AsyncMock:
 def test_openai_invoke(mock_client: MagicMock) -> None:
     llm = ChatOpenAI()
 
-    with patch.object(llm, "client", mock_client):
+    with patch.object(llm, "_client", mock_client):
         res = llm.invoke("bar")
         assert res.content == "Bar Baz"
 
@@ -541,13 +541,13 @@ def test_openai_invoke(mock_client: MagicMock) -> None:
         assert "headers" not in res.response_metadata
     assert mock_client.create.called
 
-    assert llm.async_client is None
+    assert llm._async_client is None
 
 
 async def test_openai_ainvoke(mock_async_client: AsyncMock) -> None:
     llm = ChatOpenAI()
 
-    with patch.object(llm, "async_client", mock_async_client):
+    with patch.object(llm, "_async_client", mock_async_client):
         res = await llm.ainvoke("bar")
         assert res.content == "Bar Baz"
 
@@ -575,7 +575,7 @@ def test__get_encoding_model(model: str) -> None:
 def test_openai_invoke_name(mock_client: MagicMock) -> None:
     llm = ChatOpenAI()
 
-    with patch.object(llm, "client", mock_client):
+    with patch.object(llm, "_client", mock_client):
         messages = [HumanMessage(content="Foo", name="Katie")]
         res = llm.invoke(messages)
         call_args, call_kwargs = mock_client.create.call_args