commit 691ff67096
parent bebe401b1a

Co-authored-by: Bagatur <baskaryan@gmail.com>
@@ -7,7 +7,7 @@ from typing import Any, Callable, Dict, List, Optional, Union
 
 import openai
 from langchain_core.outputs import ChatResult
-from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator
+from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
 from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
 
 from langchain_openai.chat_models.base import ChatOpenAI
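
Note on the import change above: with the openai>=1.0 SDK, response objects are Pydantic v2 models derived from openai.BaseModel, so the annotations below no longer need BaseModel from langchain_core.pydantic_v1. A minimal, illustrative check (the client construction and model name are assumptions for illustration, not part of this diff):

import openai

client = openai.OpenAI()  # assumes OPENAI_API_KEY is set in the environment
resp = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hi"}],
)
# The v1 SDK returns a Pydantic v2 model, not a plain dict.
assert isinstance(resp, openai.BaseModel)
assert isinstance(resp.model_dump(), dict)
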
@@ -209,9 +209,11 @@ class AzureChatOpenAI(ChatOpenAI):
             "openai_api_version": self.openai_api_version,
         }
 
-    def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
+    def _create_chat_result(
+        self, response: Union[dict, openai.BaseModel]
+    ) -> ChatResult:
         if not isinstance(response, dict):
-            response = response.dict()
+            response = response.model_dump()
         for res in response["choices"]:
             if res.get("finish_reason", None) == "content_filter":
                 raise ValueError(
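
The change in _create_chat_result is the pattern repeated throughout this commit: the Pydantic v1 style .dict() call, deprecated on the v2 models returned by the openai>=1.0 client, is replaced with model_dump(), which deep-converts the response object into a plain dict before the choices are inspected. A hedged sketch of that normalization step as a standalone helper (the helper name is illustrative, not LangChain API):

from typing import Any, Dict, Union

import openai


def _to_plain_dict(response: Union[dict, openai.BaseModel]) -> Dict[str, Any]:
    # openai>=1.0 responses are Pydantic v2 models; model_dump() recursively
    # converts nested objects (choices, message, usage, ...) to dicts.
    if isinstance(response, dict):
        return response
    return response.model_dump()


# Usage mirroring the method body above:
# payload = _to_plain_dict(raw_response)
# for res in payload["choices"]:
#     if res.get("finish_reason") == "content_filter":
#         ...
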
@@ -394,7 +394,7 @@ class ChatOpenAI(BaseChatModel):
         default_chunk_class = AIMessageChunk
         for chunk in self.client.create(messages=message_dicts, **params):
             if not isinstance(chunk, dict):
-                chunk = chunk.dict()
+                chunk = chunk.model_dump()
             if len(chunk["choices"]) == 0:
                 continue
             choice = chunk["choices"][0]
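
The streaming path applies the same conversion chunk by chunk before checking choices. A rough standalone sketch of consuming a streamed chat completion with the v1 SDK (client construction and model name are assumptions for illustration):

import openai

client = openai.OpenAI()
stream = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Tell me a joke"}],
    stream=True,
)
for chunk in stream:
    # Each chunk is a Pydantic v2 model; normalize it like the loop above does.
    data = chunk if isinstance(chunk, dict) else chunk.model_dump()
    if len(data["choices"]) == 0:
        continue  # skip chunks that carry no choices, as guarded in the diff
    delta = data["choices"][0].get("delta") or {}
    print(delta.get("content") or "", end="")
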
@@ -449,10 +449,12 @@ class ChatOpenAI(BaseChatModel):
         message_dicts = [_convert_message_to_dict(m) for m in messages]
         return message_dicts, params
 
-    def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
+    def _create_chat_result(
+        self, response: Union[dict, openai.BaseModel]
+    ) -> ChatResult:
         generations = []
         if not isinstance(response, dict):
-            response = response.dict()
+            response = response.model_dump()
         for res in response["choices"]:
             message = _convert_dict_to_message(res["message"])
             generation_info = dict(finish_reason=res.get("finish_reason"))
@@ -486,7 +488,7 @@ class ChatOpenAI(BaseChatModel):
             messages=message_dicts, **params
         ):
             if not isinstance(chunk, dict):
-                chunk = chunk.dict()
+                chunk = chunk.model_dump()
             if len(chunk["choices"]) == 0:
                 continue
             choice = chunk["choices"][0]
@@ -324,7 +324,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                 input=tokens[i : i + _chunk_size], **self._invocation_params
             )
             if not isinstance(response, dict):
-                response = response.dict()
+                response = response.model_dump()
             batched_embeddings.extend(r["embedding"] for r in response["data"])
 
         results: List[List[List[float]]] = [[] for _ in range(len(texts))]
@@ -343,7 +343,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                     input="", **self._invocation_params
                 )
                 if not isinstance(average_embedded, dict):
-                    average_embedded = average_embedded.dict()
+                    average_embedded = average_embedded.model_dump()
                 average = average_embedded["data"][0]["embedding"]
             else:
                 average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
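
The embeddings path follows the same rule: normalize the response, pull the vectors out of response["data"], and, for inputs split across several token windows, combine the per-chunk vectors with a token-weighted average as in the np.average call above. A hedged standalone sketch (client construction, model name, and the token counts are placeholders for illustration):

import numpy as np
import openai

client = openai.OpenAI()
response = client.embeddings.create(
    model="text-embedding-ada-002",
    input=["first chunk of a long text", "second chunk of a long text"],
)
# Same guard as in the diff: the v1 client returns a Pydantic v2 model.
data = response if isinstance(response, dict) else response.model_dump()
chunk_embeddings = [row["embedding"] for row in data["data"]]

# Token-weighted average of the per-chunk vectors; counts are illustrative.
num_tokens_per_chunk = [120, 80]
average = np.average(chunk_embeddings, axis=0, weights=num_tokens_per_chunk)
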
@@ -436,7 +436,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
             )
 
             if not isinstance(response, dict):
-                response = response.dict()
+                response = response.model_dump()
             batched_embeddings.extend(r["embedding"] for r in response["data"])
 
         results: List[List[List[float]]] = [[] for _ in range(len(texts))]
@@ -453,7 +453,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                     input="", **self._invocation_params
                 )
                 if not isinstance(average_embedded, dict):
-                    average_embedded = average_embedded.dict()
+                    average_embedded = average_embedded.model_dump()
                 average = average_embedded["data"][0]["embedding"]
             else:
                 average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
@@ -251,7 +251,7 @@ class BaseOpenAI(BaseLLM):
         self.get_sub_prompts(params, [prompt], stop)  # this mutates params
         for stream_resp in self.client.create(prompt=prompt, **params):
             if not isinstance(stream_resp, dict):
-                stream_resp = stream_resp.dict()
+                stream_resp = stream_resp.model_dump()
             chunk = _stream_response_to_generation_chunk(stream_resp)
             yield chunk
             if run_manager:
@@ -279,7 +279,7 @@ class BaseOpenAI(BaseLLM):
             prompt=prompt, **params
         ):
             if not isinstance(stream_resp, dict):
-                stream_resp = stream_resp.dict()
+                stream_resp = stream_resp.model_dump()
             chunk = _stream_response_to_generation_chunk(stream_resp)
             yield chunk
             if run_manager:
@@ -357,7 +357,7 @@ class BaseOpenAI(BaseLLM):
                 if not isinstance(response, dict):
                     # V1 client returns the response in an PyDantic object instead of
                     # dict. For the transition period, we deep convert it to dict.
-                    response = response.dict()
+                    response = response.model_dump()
 
                 choices.extend(response["choices"])
                 _update_token_usage(_keys, response, token_usage)
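
The deep conversion noted in the comment above matters because the lines that follow index the response as a dict: choices.extend(response["choices"]) and _update_token_usage(_keys, response, token_usage) both assume plain dict access. The diff only shows the call site of _update_token_usage; a plausible sketch of what such a usage accumulator does, offered as an assumption rather than the library's actual implementation:

from typing import Any, Dict, Set


def _update_token_usage(
    keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
    # Plausible sketch: sum per-request usage counters (prompt_tokens,
    # completion_tokens, total_tokens) across batched calls. The real helper
    # may differ; this diff only shows where it is called.
    for key in keys.intersection(response.get("usage", {})):
        token_usage[key] = token_usage.get(key, 0) + response["usage"][key]
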
@@ -420,7 +420,7 @@ class BaseOpenAI(BaseLLM):
             else:
                 response = await self.async_client.create(prompt=_prompts, **params)
                 if not isinstance(response, dict):
-                    response = response.dict()
+                    response = response.model_dump()
                 choices.extend(response["choices"])
                 _update_token_usage(_keys, response, token_usage)
         return self.create_llm_result(