Mirror of https://github.com/hwchase17/langchain.git, synced 2025-07-17 10:13:29 +00:00.

Commit 8e0cb2eb84 (parent 52d0055a91): ChatOpenAI and AzureChatOpenAI openai>=1 compatible (#12948)
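In short: the chat-model validators now detect which major version of the openai SDK is installed and wire up either the legacy module-level openai.ChatCompletion API or the client objects introduced in openai 1.0. The LangChain-facing interface is unchanged; a minimal usage sketch (model name and key are placeholders, not part of the diff):

from langchain.chat_models import ChatOpenAI
from langchain.schema.messages import HumanMessage

# Works the same whether openai<1 or openai>=1 is installed.
chat = ChatOpenAI(model="gpt-3.5-turbo", api_key="sk-...")  # the api_key alias is new in this commit
print(chat([HumanMessage(content="Hello!")]).content)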
@@ -2,10 +2,10 @@
 from __future__ import annotations

 import logging
-from typing import Any, Dict, Mapping
+from typing import Any, Dict, Union

-from langchain.chat_models.openai import ChatOpenAI
-from langchain.pydantic_v1 import root_validator
+from langchain.chat_models.openai import ChatOpenAI, _is_openai_v1
+from langchain.pydantic_v1 import BaseModel, Field, root_validator
 from langchain.schema import ChatResult
 from langchain.utils import get_from_dict_or_env

@@ -51,13 +51,13 @@ class AzureChatOpenAI(ChatOpenAI):
     in, even if not explicitly saved on this class.
     """

-    deployment_name: str = ""
+    deployment_name: str = Field(default="", alias="azure_deployment")
     model_version: str = ""
     openai_api_type: str = ""
-    openai_api_base: str = ""
-    openai_api_version: str = ""
-    openai_api_key: str = ""
-    openai_organization: str = ""
+    openai_api_base: str = Field(default="", alias="azure_endpoint")
+    openai_api_version: str = Field(default="", alias="api_version")
+    openai_api_key: str = Field(default="", alias="api_key")
+    openai_organization: str = Field(default="", alias="organization")
     openai_proxy: str = ""

     @root_validator()
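The Field aliases above let AzureChatOpenAI be constructed with the parameter names the openai>=1 SDK itself uses. A hypothetical construction under that assumption (all values are placeholders):

from langchain.chat_models import AzureChatOpenAI

llm = AzureChatOpenAI(
    azure_deployment="my-deployment",                    # alias for deployment_name
    azure_endpoint="https://example.openai.azure.com/",  # alias for openai_api_base
    api_version="2023-05-15",                            # alias for openai_api_version
    api_key="...",                                       # alias for openai_api_key
)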
@@ -101,14 +101,27 @@ class AzureChatOpenAI(ChatOpenAI):
                 "Could not import openai python package. "
                 "Please install it with `pip install openai`."
             )
-        try:
+        if _is_openai_v1():
+            values["client"] = openai.AzureOpenAI(
+                azure_endpoint=values["openai_api_base"],
+                api_key=values["openai_api_key"],
+                timeout=values["request_timeout"],
+                max_retries=values["max_retries"],
+                organization=values["openai_organization"],
+                api_version=values["openai_api_version"],
+                azure_deployment=values["deployment_name"],
+            ).chat.completions
+            values["async_client"] = openai.AsyncAzureOpenAI(
+                azure_endpoint=values["openai_api_base"],
+                api_key=values["openai_api_key"],
+                timeout=values["request_timeout"],
+                max_retries=values["max_retries"],
+                organization=values["openai_organization"],
+                api_version=values["openai_api_version"],
+                azure_deployment=values["deployment_name"],
+            ).chat.completions
+        else:
             values["client"] = openai.ChatCompletion
-        except AttributeError:
-            raise ValueError(
-                "`openai` has no `ChatCompletion` attribute, this is likely "
-                "due to an old version of the openai package. Try upgrading it "
-                "with `pip install --upgrade openai`."
-            )
         if values["n"] < 1:
             raise ValueError("n must be at least 1.")
         if values["n"] > 1 and values["streaming"]:
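Note that the validator stores not the client itself but its chat-completions resource, so downstream code can call .create(...) on it directly. A standalone sketch of the equivalent direct SDK use (credentials and names are placeholders):

import openai  # assumes openai>=1

completions = openai.AzureOpenAI(
    azure_endpoint="https://example.openai.azure.com/",
    api_key="...",
    api_version="2023-05-15",
    azure_deployment="my-deployment",
).chat.completions

# model is still required by create(); on Azure, the deployment configured
# above determines the actual route.
response = completions.create(
    model="gpt-35-turbo",
    messages=[{"role": "user", "content": "Hi"}],
)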
@@ -118,10 +131,13 @@ class AzureChatOpenAI(ChatOpenAI):
     @property
     def _default_params(self) -> Dict[str, Any]:
         """Get the default parameters for calling OpenAI API."""
-        return {
-            **super()._default_params,
-            "engine": self.deployment_name,
-        }
+        if _is_openai_v1():
+            return super()._default_params
+        else:
+            return {
+                **super()._default_params,
+                "engine": self.deployment_name,
+            }

     @property
     def _identifying_params(self) -> Dict[str, Any]:
@@ -131,11 +147,14 @@ class AzureChatOpenAI(ChatOpenAI):
     @property
     def _client_params(self) -> Dict[str, Any]:
         """Get the config params used for the openai client."""
-        return {
-            **super()._client_params,
-            "api_type": self.openai_api_type,
-            "api_version": self.openai_api_version,
-        }
+        if _is_openai_v1():
+            return super()._client_params
+        else:
+            return {
+                **super()._client_params,
+                "api_type": self.openai_api_type,
+                "api_version": self.openai_api_version,
+            }

     @property
     def _llm_type(self) -> str:
@@ -148,7 +167,9 @@ class AzureChatOpenAI(ChatOpenAI):
             "openai_api_version": self.openai_api_version,
         }

-    def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
+    def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
+        if not isinstance(response, dict):
+            response = response.dict()
         for res in response["choices"]:
             if res.get("finish_reason", None) == "content_filter":
                 raise ValueError(
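The normalization at the top of _create_chat_result exists because openai>=1 returns pydantic model objects where the pre-1.0 SDK returned plain dicts. A hypothetical helper capturing the pattern (as_dict is illustrative, not part of the diff):

from typing import Union

from langchain.pydantic_v1 import BaseModel

def as_dict(response: Union[dict, BaseModel]) -> dict:
    # openai>=1 responses are pydantic models; pre-1.0 responses are dicts.
    return response if isinstance(response, dict) else response.dict()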
@@ -21,8 +21,8 @@ from langchain.adapters.openai import convert_dict_to_message, convert_message_to_dict
 from langchain.callbacks.manager import (
     CallbackManagerForLLMRun,
 )
-from langchain.chat_models.base import _generate_from_stream
-from langchain.chat_models.openai import ChatOpenAI, _convert_delta_to_message_chunk
+from langchain.chat_models.base import BaseChatModel, _generate_from_stream
+from langchain.chat_models.openai import _convert_delta_to_message_chunk
 from langchain.pydantic_v1 import Field, root_validator
 from langchain.schema import ChatGeneration, ChatResult
 from langchain.schema.messages import AIMessageChunk, BaseMessage
@@ -35,7 +35,7 @@ DEFAULT_MODEL = "meta-llama/Llama-2-13b-chat-hf"
 logger = logging.getLogger(__name__)


-class ChatKonko(ChatOpenAI):
+class ChatKonko(BaseChatModel):
     """`ChatKonko` Chat large language models API.

     To use, you should have the ``konko`` python package installed, and the
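Reparenting ChatKonko from ChatOpenAI onto BaseChatModel is presumably defensive: it stops Konko from inheriting ChatOpenAI's validator, which would now try to build an openai>=1 client on Konko's behalf.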
@@ -3,6 +3,7 @@ from __future__ import annotations

 import logging
 import sys
+from importlib.metadata import version
 from typing import (
     TYPE_CHECKING,
     Any,
@@ -19,6 +20,8 @@ from typing import (
     Union,
 )

+from packaging.version import Version, parse
+
 from langchain.adapters.openai import convert_dict_to_message, convert_message_to_dict
 from langchain.callbacks.manager import (
     AsyncCallbackManagerForLLMRun,
@@ -44,9 +47,13 @@ from langchain.schema.messages import (
 )
 from langchain.schema.output import ChatGenerationChunk
 from langchain.schema.runnable import Runnable
-from langchain.utils import get_from_dict_or_env, get_pydantic_field_names
+from langchain.utils import (
+    get_from_dict_or_env,
+    get_pydantic_field_names,
+)

 if TYPE_CHECKING:
+    import httpx
     import tiktoken

@@ -91,6 +98,9 @@ async def acompletion_with_retry(
     **kwargs: Any,
 ) -> Any:
     """Use tenacity to retry the async completion call."""
+    if _is_openai_v1():
+        return await llm.async_client.create(**kwargs)
+
     retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)

     @retry_decorator
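The early return bypasses the tenacity retry decorator: the validators in this commit construct the v1 clients with max_retries, so under openai>=1 retry handling moves into the SDK itself.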
@@ -108,6 +118,11 @@ def _convert_delta_to_message_chunk(
     content = _dict.get("content") or ""
     if _dict.get("function_call"):
         additional_kwargs = {"function_call": dict(_dict["function_call"])}
+        if (
+            "name" in additional_kwargs["function_call"]
+            and additional_kwargs["function_call"]["name"] is None
+        ):
+            additional_kwargs["function_call"]["name"] = ""
     else:
         additional_kwargs = {}

@@ -125,6 +140,11 @@
         return default_class(content=content)


+def _is_openai_v1() -> bool:
+    _version = parse(version("openai"))
+    return _version >= Version("1.0.0")
+
+
 class ChatOpenAI(BaseChatModel):
     """`OpenAI` Chat large language models API.

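_is_openai_v1 relies only on installed-package metadata, so it works without importing openai. The same pattern generalizes to any distribution; a runnable sketch (is_at_least is illustrative, not part of the diff):

from importlib.metadata import version
from packaging.version import Version, parse

def is_at_least(dist: str, minimum: str) -> bool:
    # Compare an installed distribution's version against a floor.
    return parse(version(dist)) >= Version(minimum)

print(is_at_least("packaging", "1.0"))  # True wherever packaging is installed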
@@ -166,6 +186,7 @@ class ChatOpenAI(BaseChatModel):
         return True

     client: Any = None  #: :meta private:
+    async_client: Any = None  #: :meta private:
     model_name: str = Field(default="gpt-3.5-turbo", alias="model")
     """Model name to use."""
     temperature: float = 0.7
@@ -175,16 +196,18 @@
     # When updating this to use a SecretStr
     # Check for classes that derive from this class (as some of them
     # may assume openai_api_key is a str)
-    openai_api_key: Optional[str] = None
+    openai_api_key: Optional[str] = Field(default=None, alias="api_key")
     """Base URL path for API requests,
     leave blank if not using a proxy or service emulator."""
-    openai_api_base: Optional[str] = None
-    openai_organization: Optional[str] = None
+    openai_api_base: Optional[str] = Field(default=None, alias="base_url")
+    openai_organization: Optional[str] = Field(default=None, alias="organization")
     # to support explicit proxy for OpenAI
     openai_proxy: Optional[str] = None
-    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
+    request_timeout: Union[float, Tuple[float, float], httpx.Timeout, None] = Field(
+        default=None, alias="timeout"
+    )
     """Timeout for requests to OpenAI completion API. Default is 600 seconds."""
-    max_retries: int = 6
+    max_retries: int = 2
     """Maximum number of retries to make when generating."""
     streaming: bool = False
     """Whether to stream the results or not."""
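Besides the new aliases (api_key, base_url, organization, timeout), note the behavioral change: the default max_retries drops from 6 to 2, matching the openai>=1 client default. A hypothetical construction using the aliases (values are placeholders):

from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(
    model="gpt-3.5-turbo",  # alias for model_name (pre-existing)
    api_key="sk-...",       # alias for openai_api_key
    timeout=60,             # alias for request_timeout
    max_retries=2,
)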
@@ -266,14 +289,24 @@
                 "Could not import openai python package. "
                 "Please install it with `pip install openai`."
             )
-        try:
+        if _is_openai_v1():
+            values["client"] = openai.OpenAI(
+                api_key=values["openai_api_key"],
+                timeout=values["request_timeout"],
+                max_retries=values["max_retries"],
+                organization=values["openai_organization"],
+                base_url=values["openai_api_base"] or None,
+            ).chat.completions
+            values["async_client"] = openai.AsyncOpenAI(
+                api_key=values["openai_api_key"],
+                timeout=values["request_timeout"],
+                max_retries=values["max_retries"],
+                organization=values["openai_organization"],
+                base_url=values["openai_api_base"] or None,
+            ).chat.completions
+        else:
             values["client"] = openai.ChatCompletion
-        except AttributeError:
-            raise ValueError(
-                "`openai` has no `ChatCompletion` attribute, this is likely "
-                "due to an old version of the openai package. Try upgrading it "
-                "with `pip install --upgrade openai`."
-            )
         if values["n"] < 1:
             raise ValueError("n must be at least 1.")
         if values["n"] > 1 and values["streaming"]:
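As on the Azure side, values["client"] ends up holding the chat.completions resource, which is why completion_with_retry below can return self.client.create(**kwargs) directly. A minimal standalone sketch of the same v1 call (key is a placeholder):

import openai  # assumes openai>=1

completions = openai.OpenAI(api_key="sk-...", max_retries=2).chat.completions
resp = completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
print(resp.choices[0].message.content)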
@@ -285,7 +318,6 @@
         """Get the default parameters for calling OpenAI API."""
         return {
             "model": self.model_name,
-            "request_timeout": self.request_timeout,
             "max_tokens": self.max_tokens,
             "stream": self.streaming,
             "n": self.n,
@@ -297,6 +329,9 @@
         self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
     ) -> Any:
         """Use tenacity to retry the completion call."""
+        if _is_openai_v1():
+            return self.client.create(**kwargs)
+
         retry_decorator = _create_retry_decorator(self, run_manager=run_manager)

         @retry_decorator
@@ -333,6 +368,8 @@
         for chunk in self.completion_with_retry(
             messages=message_dicts, run_manager=run_manager, **params
         ):
+            if not isinstance(chunk, dict):
+                chunk = chunk.dict()
             if len(chunk["choices"]) == 0:
                 continue
             choice = chunk["choices"][0]
@@ -381,8 +418,10 @@
         message_dicts = [convert_message_to_dict(m) for m in messages]
         return message_dicts, params

-    def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
+    def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
         generations = []
+        if not isinstance(response, dict):
+            response = response.dict()
         for res in response["choices"]:
             message = convert_dict_to_message(res["message"])
             gen = ChatGeneration(
@@ -408,6 +447,8 @@
         async for chunk in await acompletion_with_retry(
             self, messages=message_dicts, run_manager=run_manager, **params
         ):
+            if not isinstance(chunk, dict):
+                chunk = chunk.dict()
             if len(chunk["choices"]) == 0:
                 continue
             choice = chunk["choices"][0]
@@ -455,11 +496,16 @@
     def _client_params(self) -> Dict[str, Any]:
         """Get the parameters used for the openai client."""
         openai_creds: Dict[str, Any] = {
-            "api_key": self.openai_api_key,
-            "api_base": self.openai_api_base,
-            "organization": self.openai_organization,
             "model": self.model_name,
         }
+        if not _is_openai_v1():
+            openai_creds.update(
+                {
+                    "api_key": self.openai_api_key,
+                    "api_base": self.openai_api_base,
+                    "organization": self.openai_organization,
+                }
+            )
         if self.openai_proxy:
             import openai

|
@ -1,6 +1,5 @@
|
|||||||
import json
|
import json
|
||||||
import os
|
import os
|
||||||
from typing import Any, Mapping, cast
|
|
||||||
from unittest import mock
|
from unittest import mock
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
@ -48,9 +47,8 @@ def test_model_name_set_on_chat_result_when_present_in_response(
|
|||||||
"""
|
"""
|
||||||
# convert sample_response_text to instance of Mapping[str, Any]
|
# convert sample_response_text to instance of Mapping[str, Any]
|
||||||
sample_response = json.loads(sample_response_text)
|
sample_response = json.loads(sample_response_text)
|
||||||
mock_response = cast(Mapping[str, Any], sample_response)
|
|
||||||
mock_chat = AzureChatOpenAI()
|
mock_chat = AzureChatOpenAI()
|
||||||
chat_result = mock_chat._create_chat_result(mock_response)
|
chat_result = mock_chat._create_chat_result(sample_response)
|
||||||
assert (
|
assert (
|
||||||
chat_result.llm_output is not None
|
chat_result.llm_output is not None
|
||||||
and chat_result.llm_output["model_name"] == model_name
|
and chat_result.llm_output["model_name"] == model_name
|
||||||