From 691ff67096e7fd79cff86513bd10f8fa013126b4 Mon Sep 17 00:00:00 2001
From: Savvas Mantzouranidis
Date: Wed, 21 Feb 2024 00:57:34 +0000
Subject: [PATCH] partners/openai: fix deprecation errors of pydantic's
 .dict() function (reopen #16629) (#17404)

---------

Co-authored-by: Bagatur
---
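Note for reviewers: a minimal sketch of the deprecation this patch addresses,
assuming pydantic>=2 is installed (the openai v1 SDK builds its response
models on pydantic v2). The `Response` class below is a hypothetical stand-in
for an SDK response model, not a class from either library.

    # .dict() still works in pydantic v2 but emits PydanticDeprecatedSince20;
    # .model_dump() is the v2 replacement this patch switches to.
    import warnings

    from pydantic import BaseModel


    class Response(BaseModel):  # hypothetical stand-in for an SDK response model
        id: str
        choices: list


    resp = Response(id="cmpl-1", choices=[])
    print(resp.model_dump())  # {'id': 'cmpl-1', 'choices': []}

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        resp.dict()  # deprecated spelling of the same dump
    print(caught[0].category.__name__)  # PydanticDeprecatedSince20
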
 .../openai/langchain_openai/chat_models/azure.py   |  8 +++++---
 .../openai/langchain_openai/chat_models/base.py    | 10 ++++++----
 .../openai/langchain_openai/embeddings/base.py     |  8 ++++----
 libs/partners/openai/langchain_openai/llms/base.py |  8 ++++----
 4 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/libs/partners/openai/langchain_openai/chat_models/azure.py b/libs/partners/openai/langchain_openai/chat_models/azure.py
index 64b0ebc6ca3..066cda7eace 100644
--- a/libs/partners/openai/langchain_openai/chat_models/azure.py
+++ b/libs/partners/openai/langchain_openai/chat_models/azure.py
@@ -7,7 +7,7 @@ from typing import Any, Callable, Dict, List, Optional, Union
 
 import openai
 from langchain_core.outputs import ChatResult
-from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator
+from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
 from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
 
 from langchain_openai.chat_models.base import ChatOpenAI
@@ -209,9 +209,11 @@ class AzureChatOpenAI(ChatOpenAI):
             "openai_api_version": self.openai_api_version,
         }
 
-    def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
+    def _create_chat_result(
+        self, response: Union[dict, openai.BaseModel]
+    ) -> ChatResult:
         if not isinstance(response, dict):
-            response = response.dict()
+            response = response.model_dump()
         for res in response["choices"]:
             if res.get("finish_reason", None) == "content_filter":
                 raise ValueError(
diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index 57e265a6ef4..57dbfa71a02 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -394,7 +394,7 @@ class ChatOpenAI(BaseChatModel):
         default_chunk_class = AIMessageChunk
         for chunk in self.client.create(messages=message_dicts, **params):
             if not isinstance(chunk, dict):
-                chunk = chunk.dict()
+                chunk = chunk.model_dump()
             if len(chunk["choices"]) == 0:
                 continue
             choice = chunk["choices"][0]
@@ -449,10 +449,12 @@ class ChatOpenAI(BaseChatModel):
         message_dicts = [_convert_message_to_dict(m) for m in messages]
         return message_dicts, params
 
-    def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
+    def _create_chat_result(
+        self, response: Union[dict, openai.BaseModel]
+    ) -> ChatResult:
         generations = []
         if not isinstance(response, dict):
-            response = response.dict()
+            response = response.model_dump()
         for res in response["choices"]:
             message = _convert_dict_to_message(res["message"])
             generation_info = dict(finish_reason=res.get("finish_reason"))
@@ -486,7 +488,7 @@ class ChatOpenAI(BaseChatModel):
             messages=message_dicts, **params
         ):
             if not isinstance(chunk, dict):
-                chunk = chunk.dict()
+                chunk = chunk.model_dump()
             if len(chunk["choices"]) == 0:
                 continue
             choice = chunk["choices"][0]
diff --git a/libs/partners/openai/langchain_openai/embeddings/base.py b/libs/partners/openai/langchain_openai/embeddings/base.py
index d0b815358e3..8baa4c1884c 100644
--- a/libs/partners/openai/langchain_openai/embeddings/base.py
+++ b/libs/partners/openai/langchain_openai/embeddings/base.py
@@ -324,7 +324,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                 input=tokens[i : i + _chunk_size], **self._invocation_params
             )
             if not isinstance(response, dict):
-                response = response.dict()
+                response = response.model_dump()
             batched_embeddings.extend(r["embedding"] for r in response["data"])
 
         results: List[List[List[float]]] = [[] for _ in range(len(texts))]
@@ -343,7 +343,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                     input="", **self._invocation_params
                 )
                 if not isinstance(average_embedded, dict):
-                    average_embedded = average_embedded.dict()
+                    average_embedded = average_embedded.model_dump()
                 average = average_embedded["data"][0]["embedding"]
             else:
                 average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
@@ -436,7 +436,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
             )
 
             if not isinstance(response, dict):
-                response = response.dict()
+                response = response.model_dump()
             batched_embeddings.extend(r["embedding"] for r in response["data"])
 
         results: List[List[List[float]]] = [[] for _ in range(len(texts))]
@@ -453,7 +453,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                    input="", **self._invocation_params
                 )
                 if not isinstance(average_embedded, dict):
-                    average_embedded = average_embedded.dict()
+                    average_embedded = average_embedded.model_dump()
                 average = average_embedded["data"][0]["embedding"]
             else:
                 average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
diff --git a/libs/partners/openai/langchain_openai/llms/base.py b/libs/partners/openai/langchain_openai/llms/base.py
index a298b048d0e..ec8ff643fa2 100644
--- a/libs/partners/openai/langchain_openai/llms/base.py
+++ b/libs/partners/openai/langchain_openai/llms/base.py
@@ -251,7 +251,7 @@ class BaseOpenAI(BaseLLM):
         self.get_sub_prompts(params, [prompt], stop)  # this mutates params
         for stream_resp in self.client.create(prompt=prompt, **params):
             if not isinstance(stream_resp, dict):
-                stream_resp = stream_resp.dict()
+                stream_resp = stream_resp.model_dump()
             chunk = _stream_response_to_generation_chunk(stream_resp)
             yield chunk
             if run_manager:
@@ -279,7 +279,7 @@ class BaseOpenAI(BaseLLM):
             prompt=prompt, **params
         ):
             if not isinstance(stream_resp, dict):
-                stream_resp = stream_resp.dict()
+                stream_resp = stream_resp.model_dump()
             chunk = _stream_response_to_generation_chunk(stream_resp)
             yield chunk
             if run_manager:
@@ -357,7 +357,7 @@ class BaseOpenAI(BaseLLM):
                if not isinstance(response, dict):
                    # V1 client returns the response in an PyDantic object instead of
                    # dict. For the transition period, we deep convert it to dict.
-                    response = response.dict()
+                    response = response.model_dump()
                choices.extend(response["choices"])
                _update_token_usage(_keys, response, token_usage)
 
@@ -420,7 +420,7 @@ class BaseOpenAI(BaseLLM):
            else:
                response = await self.async_client.create(prompt=_prompts, **params)
            if not isinstance(response, dict):
-                response = response.dict()
+                response = response.model_dump()
            choices.extend(response["choices"])
            _update_token_usage(_keys, response, token_usage)
        return self.create_llm_result(
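
A companion sketch of the transition-period guard this patch applies at every
call site, assuming openai>=1 (whose `openai.BaseModel` is the pydantic v2
base class of SDK response objects); `_as_dict` is an illustrative helper
name, not part of langchain_openai or the openai SDK.

    from typing import Union

    import openai  # openai.BaseModel: pydantic v2 base of v1-SDK responses


    def _as_dict(response: Union[dict, openai.BaseModel]) -> dict:
        # The v1 SDK returns pydantic objects, but plain dicts may still
        # flow in from older code paths, so convert only when necessary.
        if not isinstance(response, dict):
            return response.model_dump()
        return response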