From 4588e0679483d31375fbeb348caae769d38e03c2 Mon Sep 17 00:00:00 2001
From: Chester Curme
Date: Fri, 21 Feb 2025 19:08:41 -0500
Subject: [PATCH] clients -> private attributes

---
 .../langchain_openai/chat_models/azure.py     |  42 +++---
 .../langchain_openai/chat_models/base.py      | 127 ++++++++++--------
 .../chat_models/test_base.py                  |   6 -
 .../unit_tests/chat_models/test_azure.py      |   1 +
 .../tests/unit_tests/chat_models/test_base.py |  20 +--
 5 files changed, 106 insertions(+), 90 deletions(-)

diff --git a/libs/partners/openai/langchain_openai/chat_models/azure.py b/libs/partners/openai/langchain_openai/chat_models/azure.py
index 2b505129530..f4fe272c46d 100644
--- a/libs/partners/openai/langchain_openai/chat_models/azure.py
+++ b/libs/partners/openai/langchain_openai/chat_models/azure.py
@@ -660,37 +660,37 @@ class AzureChatOpenAI(BaseChatOpenAI):
         return self
 
     @property
-    def _root_client(self) -> openai.AzureOpenAI:
-        if self.root_client is not None:
-            return self.root_client
-        sync_specific = {"http_client": self._http_client}
-        self.root_client = openai.AzureOpenAI(**self._client_params, **sync_specific)  # type: ignore[call-overload]
-        return self.root_client
+    def root_client(self) -> openai.AzureOpenAI:
+        if self._root_client is not None:
+            return self._root_client
+        sync_specific = {"http_client": self.http_client}
+        self._root_client = openai.AzureOpenAI(**self._client_params, **sync_specific)  # type: ignore[call-overload]
+        return self._root_client
 
     @property
-    def _root_async_client(self) -> openai.AsyncAzureOpenAI:
-        if self.root_async_client is not None:
-            return self.root_async_client
-        async_specific = {"http_client": self._http_async_client}
-        self.root_async_client = openai.AsyncAzureOpenAI(
+    def root_async_client(self) -> openai.AsyncAzureOpenAI:
+        if self._root_async_client is not None:
+            return self._root_async_client
+        async_specific = {"http_client": self.http_async_client}
+        self._root_async_client = openai.AsyncAzureOpenAI(
             **self._client_params,
             **async_specific,  # type: ignore[call-overload]
         )
         return self._root_async_client
 
     @property
-    def _client(self) -> Any:
-        if self.client is not None:
-            return self.client
-        self.client = self._root_client.chat.completions
-        return self.client
+    def client(self) -> Any:
+        if self._client is not None:
+            return self._client
+        self._client = self.root_client.chat.completions
+        return self._client
 
     @property
-    def _async_client(self) -> Any:
-        if self.async_client is not None:
-            return self.async_client
-        self.async_client = self._root_async_client.chat.completions
-        return self.async_client
+    def async_client(self) -> Any:
+        if self._async_client is not None:
+            return self._async_client
+        self._async_client = self.root_async_client.chat.completions
+        return self._async_client
 
     @property
     def _identifying_params(self) -> Dict[str, Any]:
diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index 73cb63a5b87..7fe69cc712a 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -396,10 +396,10 @@ class _AllReturnType(TypedDict):
 
 
 class BaseChatOpenAI(BaseChatModel):
-    client: Any = Field(default=None, exclude=True)  #: :meta private:
-    async_client: Any = Field(default=None, exclude=True)  #: :meta private:
-    root_client: Any = Field(default=None, exclude=True)  #: :meta private:
-    root_async_client: Any = Field(default=None, exclude=True)  #: :meta private:
+    _client: Any = PrivateAttr(default=None)  #: :meta private:
+    _async_client: Any = PrivateAttr(default=None)  #: :meta private:
+    _root_client: Any = PrivateAttr(default=None)  #: :meta private:
+    _root_async_client: Any = PrivateAttr(default=None)  #: :meta private:
     model_name: str = Field(default="gpt-3.5-turbo", alias="model")
     """Model name to use."""
     temperature: Optional[float] = None
@@ -471,11 +471,11 @@ class BaseChatOpenAI(BaseChatModel):
     default_query: Union[Mapping[str, object], None] = None
     # Configure a custom httpx client. See the
     # [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
-    http_client: Union[Any, None] = Field(default=None, exclude=True)
+    _http_client: Union[Any, None] = PrivateAttr(default=None)
     """Optional httpx.Client. Only used for sync invocations. Must specify
         http_async_client as well if you'd like a custom client for async
        invocations.
     """
-    http_async_client: Union[Any, None] = Field(default=None, exclude=True)
+    _http_async_client: Union[Any, None] = PrivateAttr(default=None)
     """Optional httpx.AsyncClient. Only used for async invocations. Must specify
        http_client as well if you'd like a custom client for sync invocations."""
     stop: Optional[Union[List[str], str]] = Field(default=None, alias="stop_sequences")
@@ -523,6 +523,24 @@ class BaseChatOpenAI(BaseChatModel):
         values["temperature"] = 1
         return values
 
+    def __init__(
+        self,
+        client: Optional[Any] = None,
+        async_client: Optional[Any] = None,
+        root_client: Optional[Any] = None,
+        root_async_client: Optional[Any] = None,
+        http_client: Optional[Any] = None,
+        http_async_client: Optional[Any] = None,
+        **kwargs: Any,
+    ) -> None:
+        super().__init__(**kwargs)
+        self._client = client
+        self._async_client = async_client
+        self._root_client = root_client
+        self._root_async_client = root_async_client
+        self._http_client = http_client
+        self._http_async_client = http_async_client
+
     @model_validator(mode="after")
     def validate_environment(self) -> Self:
         """Validate that api key and python package exists in environment."""
@@ -551,10 +569,10 @@
         if self.max_retries is not None:
             self._client_params["max_retries"] = self.max_retries
 
-        if self.openai_proxy and (self.http_client or self.http_async_client):
+        if self.openai_proxy and (self._http_client or self._http_async_client):
             openai_proxy = self.openai_proxy
-            http_client = self.http_client
-            http_async_client = self.http_async_client
+            http_client = self._http_client
+            http_async_client = self._http_async_client
             raise ValueError(
                 "Cannot specify 'openai_proxy' if one of "
                 "'http_client'/'http_async_client' is already specified. Received:\n"
@@ -564,7 +582,7 @@
         return self
 
     @property
-    def _http_client(self) -> Optional[httpx.Client]:
+    def http_client(self) -> Optional[httpx.Client]:
         """Optional httpx.Client. Only used for sync invocations. Must specify
         http_async_client as well if you'd like a custom client for async
         invocations.
         """
         # Configure a custom httpx client. See the
         # [httpx documentation](https://www.python-httpx.org/api/#client) for more
         # details.
-        if self.http_client is not None:
-            return self.http_client
+        if self._http_client is not None:
+            return self._http_client
         if not self.openai_proxy:
             return None
         try:
             import httpx
         except ImportError as e:
             raise ImportError(
                 "Could not import httpx python package. "
                 "Please install it with `pip install httpx`."
             ) from e
-        self.http_client = httpx.Client(proxy=self.openai_proxy)
-        return self.http_client
+        self._http_client = httpx.Client(proxy=self.openai_proxy)
+        return self._http_client
 
     @property
-    def _http_async_client(self) -> Optional[httpx.AsyncClient]:
+    def http_async_client(self) -> Optional[httpx.AsyncClient]:
         """Optional httpx.AsyncClient. Only used for async invocations. Must specify
         http_client as well if you'd like a custom client for sync invocations.
         """
-        if self.http_async_client is not None:
-            return self.http_async_client
+        if self._http_async_client is not None:
+            return self._http_async_client
         if not self.openai_proxy:
             return None
         try:
             import httpx
         except ImportError as e:
             raise ImportError(
                 "Could not import httpx python package. "
                 "Please install it with `pip install httpx`."
             ) from e
-        self.http_async_client = httpx.AsyncClient(proxy=self.openai_proxy)
-        return self.http_async_client
+        self._http_async_client = httpx.AsyncClient(proxy=self.openai_proxy)
+        return self._http_async_client
@@ -605,41 +623,41 @@
 
     @property
-    def _root_client(self) -> openai.OpenAI:
-        if self.root_client is not None:
-            return self.root_client
-        sync_specific = {"http_client": self._http_client}
-        self.root_client = openai.OpenAI(**self._client_params, **sync_specific)  # type: ignore[arg-type]
-        return self.root_client
+    def root_client(self) -> openai.OpenAI:
+        if self._root_client is not None:
+            return self._root_client
+        sync_specific = {"http_client": self.http_client}
+        self._root_client = openai.OpenAI(**self._client_params, **sync_specific)  # type: ignore[arg-type]
+        return self._root_client
 
     @property
-    def _root_async_client(self) -> openai.AsyncOpenAI:
-        if self.root_async_client is not None:
-            return self.root_async_client
-        async_specific = {"http_client": self._http_async_client}
-        self.root_async_client = openai.AsyncOpenAI(
+    def root_async_client(self) -> openai.AsyncOpenAI:
+        if self._root_async_client is not None:
+            return self._root_async_client
+        async_specific = {"http_client": self.http_async_client}
+        self._root_async_client = openai.AsyncOpenAI(
             **self._client_params,
             **async_specific,  # type: ignore[arg-type]
         )
-        return self.root_async_client
+        return self._root_async_client
 
     @property
-    def _client(self) -> Any:
-        if self.client is not None:
-            return self.client
-        self.client = self._root_client.chat.completions
-        return self.client
+    def client(self) -> Any:
+        if self._client is not None:
+            return self._client
+        self._client = self.root_client.chat.completions
+        return self._client
 
     @property
-    def _async_client(self) -> Any:
-        if self.async_client is not None:
-            return self.async_client
-        self.async_client = self._root_async_client.chat.completions
-        return self.async_client
+    def async_client(self) -> Any:
+        if self._async_client is not None:
+            return self._async_client
+        self._async_client = self.root_async_client.chat.completions
+        return self._async_client
 
     @property
     def _default_params(self) -> Dict[str, Any]:
@@ -766,15 +784,15 @@
                     "specified."
                 )
             payload.pop("stream")
-            response_stream = self._root_client.beta.chat.completions.stream(**payload)
+            response_stream = self.root_client.beta.chat.completions.stream(**payload)
             context_manager = response_stream
         else:
             if self.include_response_headers:
-                raw_response = self._client.with_raw_response.create(**payload)
+                raw_response = self.client.with_raw_response.create(**payload)
                 response = raw_response.parse()
                 base_generation_info = {"headers": dict(raw_response.headers)}
             else:
-                response = self._client.create(**payload)
+                response = self.client.create(**payload)
             context_manager = response
         try:
             with context_manager as response:
@@ -834,15 +852,15 @@
             )
             payload.pop("stream")
             try:
-                response = self._root_client.beta.chat.completions.parse(**payload)
+                response = self.root_client.beta.chat.completions.parse(**payload)
             except openai.BadRequestError as e:
                 _handle_openai_bad_request(e)
         elif self.include_response_headers:
-            raw_response = self._client.with_raw_response.create(**payload)
+            raw_response = self.client.with_raw_response.create(**payload)
             response = raw_response.parse()
             generation_info = {"headers": dict(raw_response.headers)}
         else:
-            response = self._client.create(**payload)
+            response = self.client.create(**payload)
         return self._create_chat_result(response, generation_info)
 
     def _get_request_payload(
@@ -930,19 +948,19 @@
                     "specified."
                 )
             payload.pop("stream")
-            response_stream = self._root_async_client.beta.chat.completions.stream(
+            response_stream = self.root_async_client.beta.chat.completions.stream(
                 **payload
             )
             context_manager = response_stream
         else:
             if self.include_response_headers:
-                raw_response = await self._async_client.with_raw_response.create(
+                raw_response = await self.async_client.with_raw_response.create(
                     **payload
                 )
                 response = raw_response.parse()
                 base_generation_info = {"headers": dict(raw_response.headers)}
             else:
-                response = await self._async_client.create(**payload)
+                response = await self.async_client.create(**payload)
             context_manager = response
         try:
             async with context_manager as response:
@@ -1002,17 +1020,17 @@
             )
             payload.pop("stream")
             try:
-                response = await self._root_async_client.beta.chat.completions.parse(
+                response = await self.root_async_client.beta.chat.completions.parse(
                     **payload
                 )
             except openai.BadRequestError as e:
                 _handle_openai_bad_request(e)
         elif self.include_response_headers:
-            raw_response = await self._async_client.with_raw_response.create(**payload)
+            raw_response = await self.async_client.with_raw_response.create(**payload)
             response = raw_response.parse()
             generation_info = {"headers": dict(raw_response.headers)}
         else:
-            response = await self._async_client.create(**payload)
+            response = await self.async_client.create(**payload)
         return await run_in_executor(
             None, self._create_chat_result, response, generation_info
         )
@@ -2023,6 +2041,9 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
     max_tokens: Optional[int] = Field(default=None, alias="max_completion_tokens")
     """Maximum number of tokens to generate."""
 
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
     @property
     def lc_secrets(self) -> Dict[str, str]:
         return {"openai_api_key": "OPENAI_API_KEY"}
diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
index 5a7dfabaed0..09cae79520b 100644
---
 a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
+++ b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py
@@ -660,9 +660,6 @@ def test_openai_structured_output(model: str) -> None:
 def test_openai_proxy() -> None:
     """Test ChatOpenAI with proxy."""
     chat_openai = ChatOpenAI(openai_proxy="http://localhost:8080")
-    assert chat_openai.client is None
-    _ = chat_openai._client  # force client to instantiate
-    assert chat_openai.client is not None
     mounts = chat_openai.client._client._client._mounts
     assert len(mounts) == 1
     for key, value in mounts.items():
@@ -671,9 +668,6 @@ def test_openai_proxy() -> None:
         assert proxy.host == b"localhost"
         assert proxy.port == 8080
 
-    assert chat_openai.async_client is None
-    _ = chat_openai._async_client  # force client to instantiate
-    assert chat_openai.async_client is not None
     async_client_mounts = chat_openai.async_client._client._client._mounts
     assert len(async_client_mounts) == 1
     for key, value in async_client_mounts.items():
diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_azure.py b/libs/partners/openai/tests/unit_tests/chat_models/test_azure.py
index bee3f742a5e..64b2de8ba22 100644
--- a/libs/partners/openai/tests/unit_tests/chat_models/test_azure.py
+++ b/libs/partners/openai/tests/unit_tests/chat_models/test_azure.py
@@ -14,6 +14,7 @@ def test_initialize_azure_openai() -> None:
         azure_deployment="35-turbo-dev",
         openai_api_version="2023-05-15",
         azure_endpoint="my-base-url",
+        http_client=None,
     )
     assert llm.deployment_name == "35-turbo-dev"
     assert llm.openai_api_version == "2023-05-15"
diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
index 08d2ce395eb..4954cf8398c 100644
--- a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
+++ b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py
@@ -298,7 +298,7 @@ async def test_glm4_astream(mock_glm4_completion: list) -> None:
     usage_chunk = mock_glm4_completion[-1]
     usage_metadata: Optional[UsageMetadata] = None
 
-    with patch.object(llm, "async_client", mock_client):
+    with patch.object(llm, "_async_client", mock_client):
         async for chunk in llm.astream("你的名字叫什么?只回答名字"):
             assert isinstance(chunk, AIMessageChunk)
             if chunk.usage_metadata is not None:
@@ -323,7 +323,7 @@ def test_glm4_stream(mock_glm4_completion: list) -> None:
     usage_chunk = mock_glm4_completion[-1]
     usage_metadata: Optional[UsageMetadata] = None
 
-    with patch.object(llm, "client", mock_client):
+    with patch.object(llm, "_client", mock_client):
         for chunk in llm.stream("你的名字叫什么?只回答名字"):
             assert isinstance(chunk, AIMessageChunk)
             if chunk.usage_metadata is not None:
@@ -378,7 +378,7 @@ async def test_deepseek_astream(mock_deepseek_completion: list) -> None:
     mock_client.create = mock_create
     usage_chunk = mock_deepseek_completion[-1]
     usage_metadata: Optional[UsageMetadata] = None
-    with patch.object(llm, "async_client", mock_client):
+    with patch.object(llm, "_async_client", mock_client):
         async for chunk in llm.astream("你的名字叫什么?只回答名字"):
             assert isinstance(chunk, AIMessageChunk)
             if chunk.usage_metadata is not None:
@@ -402,7 +402,7 @@ def test_deepseek_stream(mock_deepseek_completion: list) -> None:
     mock_client.create = mock_create
     usage_chunk = mock_deepseek_completion[-1]
     usage_metadata: Optional[UsageMetadata] = None
-    with patch.object(llm, "client", mock_client):
+    with patch.object(llm, "_client", mock_client):
         for chunk in llm.stream("你的名字叫什么?只回答名字"):
             assert isinstance(chunk, AIMessageChunk)
             if chunk.usage_metadata is not None:
@@ -446,7 +446,7 @@ async def test_openai_astream(mock_openai_completion: list) -> None:
     mock_client.create = mock_create
     usage_chunk = mock_openai_completion[-1]
     usage_metadata: Optional[UsageMetadata] = None
-    with patch.object(llm, "async_client", mock_client):
+    with patch.object(llm, "_async_client", mock_client):
         async for chunk in llm.astream("你的名字叫什么?只回答名字"):
             assert isinstance(chunk, AIMessageChunk)
             if chunk.usage_metadata is not None:
@@ -470,7 +470,7 @@ def test_openai_stream(mock_openai_completion: list) -> None:
     mock_client.create = mock_create
     usage_chunk = mock_openai_completion[-1]
     usage_metadata: Optional[UsageMetadata] = None
-    with patch.object(llm, "client", mock_client):
+    with patch.object(llm, "_client", mock_client):
         for chunk in llm.stream("你的名字叫什么?只回答名字"):
             assert isinstance(chunk, AIMessageChunk)
             if chunk.usage_metadata is not None:
@@ -533,7 +533,7 @@ def mock_async_client(mock_completion: dict) -> AsyncMock:
 
 def test_openai_invoke(mock_client: MagicMock) -> None:
     llm = ChatOpenAI()
 
-    with patch.object(llm, "client", mock_client):
+    with patch.object(llm, "_client", mock_client):
         res = llm.invoke("bar")
         assert res.content == "Bar Baz"
@@ -541,13 +541,13 @@
 
         assert "headers" not in res.response_metadata
     assert mock_client.create.called
 
-    assert llm.async_client is None
+    assert llm._async_client is None
 
 
 async def test_openai_ainvoke(mock_async_client: AsyncMock) -> None:
     llm = ChatOpenAI()
 
-    with patch.object(llm, "async_client", mock_async_client):
+    with patch.object(llm, "_async_client", mock_async_client):
         res = await llm.ainvoke("bar")
         assert res.content == "Bar Baz"
@@ -575,7 +575,7 @@ def test__get_encoding_model(model: str) -> None:
 def test_openai_invoke_name(mock_client: MagicMock) -> None:
     llm = ChatOpenAI()
 
-    with patch.object(llm, "client", mock_client):
+    with patch.object(llm, "_client", mock_client):
         messages = [HumanMessage(content="Foo", name="Katie")]
         res = llm.invoke(messages)
         call_args, call_kwargs = mock_client.create.call_args
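
For review, the pattern this patch adopts is easier to see outside the diff. The
sketch below is illustrative only, not code from the patch: the class
LazyClientModel and its single cached client are invented stand-ins, and it
assumes Pydantic v2 (PrivateAttr) and a recent httpx with the `proxy=` keyword,
both of which langchain-openai already depends on. It combines the three moving
parts of the change: a private attribute caching the client, an explicit
__init__ that keeps the old public constructor arguments working, and a public
property that builds the client lazily on first access.

    from typing import Any, Optional

    import httpx
    from pydantic import BaseModel, PrivateAttr


    class LazyClientModel(BaseModel):
        """Toy stand-in for the client handling in BaseChatOpenAI."""

        openai_proxy: Optional[str] = None

        # Private attributes are excluded from pydantic validation and
        # serialization, so the cached client no longer leaks into model_dump().
        _http_client: Optional[httpx.Client] = PrivateAttr(default=None)

        def __init__(
            self, http_client: Optional[httpx.Client] = None, **kwargs: Any
        ) -> None:
            # An explicit __init__ keeps the old public constructor argument
            # working by routing it into the private attribute.
            super().__init__(**kwargs)
            self._http_client = http_client

        @property
        def http_client(self) -> Optional[httpx.Client]:
            # The public property builds the client lazily on first access
            # and caches it in the private attribute.
            if self._http_client is not None:
                return self._http_client
            if not self.openai_proxy:
                return None
            self._http_client = httpx.Client(proxy=self.openai_proxy)
            return self._http_client


    model = LazyClientModel(openai_proxy="http://localhost:8080")
    assert "_http_client" not in model.model_dump()  # private attr stays private
    assert model.http_client is not None  # client is created on first access

Because Pydantic treats private attributes as implementation detail, this also
removes the need for the `exclude=True` markers the old public Field clients
carried.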
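The unit-test churn above follows from the same move: `client`, `async_client`
and friends are now read-only properties, so tests can no longer assign the
public name and instead patch the private attribute that backs it, i.e.
`patch.object(llm, "_client", mock_client)` rather than
`patch.object(llm, "client", mock_client)`. Continuing the toy sketch above
(assuming unittest.mock):

    from unittest.mock import MagicMock, patch

    model = LazyClientModel(openai_proxy="http://localhost:8080")
    mock_client = MagicMock()

    # The property consults the private attribute first, so patching
    # `_http_client` swaps in the mock for the duration of the context.
    with patch.object(model, "_http_client", mock_client):
        assert model.http_client is mock_client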