mirror of
https://github.com/hwchase17/langchain.git
synced 2025-08-08 20:41:52 +00:00
openai: cache httpx client (#31260)
 Co-authored-by: Sydney Runkle <54324534+sydney-runkle@users.noreply.github.com>
This commit is contained in:
parent
e6633a7efb
commit
afd349cc95
@ -0,0 +1,65 @@
|
||||
"""Helpers for creating OpenAI API clients.
|
||||
|
||||
This module allows for the caching of httpx clients to avoid creating new instances
|
||||
for each instance of ChatOpenAI.
|
||||
|
||||
Logic is largely replicated from openai._base_client.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
from functools import lru_cache
|
||||
from typing import Any, Optional
|
||||
|
||||
import openai
|
||||
|
||||
|
||||
class _SyncHttpxClientWrapper(openai.DefaultHttpxClient):
    """Sync httpx client that closes itself on garbage collection.

    Borrowed from ``openai._base_client``.
    """

    def __del__(self) -> None:
        # Best-effort cleanup: release pooled connections when the wrapper
        # is garbage collected, unless it was already closed explicitly.
        if not self.is_closed:
            try:
                self.close()
            except Exception:
                # Finalizers must never raise; swallow shutdown errors.
                pass
|
||||
|
||||
|
||||
class _AsyncHttpxClientWrapper(openai.DefaultAsyncHttpxClient):
    """Async httpx client that schedules its own cleanup on garbage collection.

    Borrowed from ``openai._base_client``.
    """

    def __del__(self) -> None:
        # Best-effort cleanup: schedule aclose() on the running event loop
        # when the wrapper is garbage collected, unless already closed.
        if not self.is_closed:
            try:
                # TODO(someday): support non asyncio runtimes here
                asyncio.get_running_loop().create_task(self.aclose())
            except Exception:
                # No running loop (or other shutdown error); finalizers
                # must never raise.
                pass
|
||||
|
||||
|
||||
@lru_cache
def _get_default_httpx_client(
    base_url: Optional[str], timeout: Any
) -> _SyncHttpxClientWrapper:
    """Return a cached sync httpx client for the given base URL and timeout.

    Caching means ``ChatOpenAI`` instances sharing the same settings also
    share one connection pool instead of each creating their own.
    """
    resolved_url = (
        base_url
        or os.environ.get("OPENAI_BASE_URL")
        or "https://api.openai.com/v1"
    )
    return _SyncHttpxClientWrapper(base_url=resolved_url, timeout=timeout)
|
||||
|
||||
|
||||
@lru_cache
def _get_default_async_httpx_client(
    base_url: Optional[str], timeout: Any
) -> _AsyncHttpxClientWrapper:
    """Return a cached async httpx client for the given base URL and timeout.

    Caching means ``ChatOpenAI`` instances sharing the same settings also
    share one connection pool instead of each creating their own.
    """
    resolved_url = (
        base_url
        or os.environ.get("OPENAI_BASE_URL")
        or "https://api.openai.com/v1"
    )
    return _AsyncHttpxClientWrapper(base_url=resolved_url, timeout=timeout)
|
@ -102,6 +102,11 @@ from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
|
||||
from pydantic.v1 import BaseModel as BaseModelV1
|
||||
from typing_extensions import Self
|
||||
|
||||
from langchain_openai.chat_models._client_utils import (
|
||||
_get_default_async_httpx_client,
|
||||
_get_default_httpx_client,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from openai.types.responses import Response
|
||||
|
||||
@ -621,7 +626,10 @@ class BaseChatOpenAI(BaseChatModel):
|
||||
self.http_client = httpx.Client(
|
||||
proxy=self.openai_proxy, verify=global_ssl_context
|
||||
)
|
||||
sync_specific = {"http_client": self.http_client}
|
||||
sync_specific = {
|
||||
"http_client": self.http_client
|
||||
or _get_default_httpx_client(self.openai_api_base, self.request_timeout)
|
||||
}
|
||||
self.root_client = openai.OpenAI(**client_params, **sync_specific) # type: ignore[arg-type]
|
||||
self.client = self.root_client.chat.completions
|
||||
if not self.async_client:
|
||||
@ -636,7 +644,12 @@ class BaseChatOpenAI(BaseChatModel):
|
||||
self.http_async_client = httpx.AsyncClient(
|
||||
proxy=self.openai_proxy, verify=global_ssl_context
|
||||
)
|
||||
async_specific = {"http_client": self.http_async_client}
|
||||
async_specific = {
|
||||
"http_client": self.http_async_client
|
||||
or _get_default_async_httpx_client(
|
||||
self.openai_api_base, self.request_timeout
|
||||
)
|
||||
}
|
||||
self.root_async_client = openai.AsyncOpenAI(
|
||||
**client_params,
|
||||
**async_specific, # type: ignore[arg-type]
|
||||
|
Loading…
Reference in New Issue
Block a user