Integrating the Yi family of models. (#24491)

Thank you for contributing to LangChain!

- [x] **PR title**: "community: add Yi LLM", "docs: add Yi documentation"

- [x] **PR message**:
    - **Description:** This PR adds support for 01.AI's Yi family of models to LangChain: a `ChatYi` chat model and a `YiLLM` completion-style LLM, each usable against the domestic (api.lingyiwanwu.com) or international (api.01.ai) endpoint.
    - **Dependencies:** `langchain-core`, `langchain-community`, `requests`; `httpx` and `httpx-sse` are imported lazily for the async chat paths (`contextlib`, `typing`, `logging`, and `json` are standard-library modules).
    - **Twitter handle:** 01.AI


- [x] **Add tests and docs**: I've added the corresponding documentation to the relevant paths.
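
A quick smoke test of the two new entry points (a minimal sketch; it assumes a valid `YI_API_KEY` in the environment and the default `yi-large` model):

```python
from langchain_community.chat_models import ChatYi
from langchain_community.llms import YiLLM

print(ChatYi().invoke("Hello!").content)  # chat-completions endpoint
print(YiLLM().invoke("Hello!"))  # completion-style wrapper with region fallback
```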

---------

Co-authored-by: Bagatur <baskaryan@gmail.com>
Co-authored-by: isaac hershenson <ihershenson@hmc.edu>
Commit cda3025ee1 (parent ad7581751f), committed via GitHub
Author: Haijian Wang
Date: 2024-07-27 01:57:33 +08:00
9 changed files with 856 additions and 13 deletions

File: libs/community/langchain_community/chat_models/__init__.py

@@ -165,13 +165,15 @@ if TYPE_CHECKING:
     from langchain_community.chat_models.yandex import (
         ChatYandexGPT,
     )
+    from langchain_community.chat_models.yi import (
+        ChatYi,
+    )
     from langchain_community.chat_models.yuan2 import (
         ChatYuan2,
     )
     from langchain_community.chat_models.zhipuai import (
         ChatZhipuAI,
     )
 __all__ = [
     "AzureChatOpenAI",
     "BedrockChat",
@@ -225,6 +227,7 @@ __all__ = [
     "QianfanChatEndpoint",
     "SolarChat",
     "VolcEngineMaasChat",
+    "ChatYi",
 ]
@@ -281,6 +284,7 @@ _module_lookup = {
     "VolcEngineMaasChat": "langchain_community.chat_models.volcengine_maas",
     "ChatPremAI": "langchain_community.chat_models.premai",
     "ChatLlamaCpp": "langchain_community.chat_models.llamacpp",
+    "ChatYi": "langchain_community.chat_models.yi",
 }
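
Because `ChatYi` is registered in `_module_lookup`, the import below resolves lazily: `langchain_community.chat_models.yi` is only loaded on first access. A quick sanity check of the lazy loading:

```python
from langchain_community.chat_models import ChatYi  # triggers the lazy _module_lookup path

print(ChatYi.__module__)  # -> langchain_community.chat_models.yi
```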

File: libs/community/langchain_community/chat_models/yi.py (new file)

@@ -0,0 +1,339 @@
import json
import logging
from contextlib import asynccontextmanager
from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional, Type

import requests
from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
    BaseChatModel,
    agenerate_from_stream,
    generate_from_stream,
)
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    BaseMessageChunk,
    ChatMessage,
    ChatMessageChunk,
    HumanMessage,
    HumanMessageChunk,
    SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.pydantic_v1 import Field, SecretStr
from langchain_core.utils import (
    convert_to_secret_str,
    get_from_dict_or_env,
    get_pydantic_field_names,
)

logger = logging.getLogger(__name__)

DEFAULT_API_BASE_CN = "https://api.lingyiwanwu.com/v1/chat/completions"
DEFAULT_API_BASE_GLOBAL = "https://api.01.ai/v1/chat/completions"


def _convert_message_to_dict(message: BaseMessage) -> dict:
    message_dict: Dict[str, Any]
    if isinstance(message, ChatMessage):
        message_dict = {"role": message.role, "content": message.content}
    elif isinstance(message, HumanMessage):
        message_dict = {"role": "user", "content": message.content}
    elif isinstance(message, AIMessage):
        message_dict = {"role": "assistant", "content": message.content}
    elif isinstance(message, SystemMessage):
        # System prompts are sent with the "system" role.
        message_dict = {"role": "system", "content": message.content}
    else:
        raise TypeError(f"Got unknown type {message}")
    return message_dict


def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
    role = _dict["role"]
    if role == "user":
        return HumanMessage(content=_dict["content"])
    elif role == "assistant":
        return AIMessage(content=_dict.get("content", "") or "")
    elif role == "system":
        # A "system" role maps back to SystemMessage, not AIMessage.
        return SystemMessage(content=_dict["content"])
    else:
        return ChatMessage(content=_dict["content"], role=role)


def _convert_delta_to_message_chunk(
    _dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
    # Streaming deltas after the first may omit "role", so avoid a KeyError.
    role: str = _dict.get("role", "")
    content = _dict.get("content") or ""
    if role == "user" or default_class == HumanMessageChunk:
        return HumanMessageChunk(content=content)
    elif role == "assistant" or default_class == AIMessageChunk:
        return AIMessageChunk(content=content)
    elif role or default_class == ChatMessageChunk:
        return ChatMessageChunk(content=content, role=role)
    else:
        return default_class(content=content, type=role)


@asynccontextmanager
async def aconnect_httpx_sse(
    client: Any, method: str, url: str, **kwargs: Any
) -> AsyncIterator:
    """Stream a request with httpx and wrap the response as an SSE event source."""
    from httpx_sse import EventSource

    async with client.stream(method, url, **kwargs) as response:
        yield EventSource(response)


class ChatYi(BaseChatModel):
    """Yi chat models API."""

    @property
    def lc_secrets(self) -> Dict[str, str]:
        return {
            "yi_api_key": "YI_API_KEY",
        }

    @property
    def lc_serializable(self) -> bool:
        return True

    yi_api_base: str = Field(default=DEFAULT_API_BASE_CN)
    yi_api_key: SecretStr = Field(alias="api_key")
    region: str = Field(default="cn")  # defaults to the China (CN) endpoint
    streaming: bool = False
    request_timeout: int = Field(default=60, alias="timeout")
    model: str = "yi-large"
    temperature: Optional[float] = Field(default=0.7)
    top_p: float = 0.7
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)

    class Config:
        allow_population_by_field_name = True

    def __init__(self, **kwargs: Any) -> None:
        kwargs["yi_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(
                kwargs,
                ["yi_api_key", "api_key"],
                "YI_API_KEY",
            )
        )
        if kwargs.get("yi_api_base") is None:
            region = kwargs.get("region", "cn").lower()
            if region == "global":
                kwargs["yi_api_base"] = DEFAULT_API_BASE_GLOBAL
            else:
                kwargs["yi_api_base"] = DEFAULT_API_BASE_CN

        all_required_field_names = get_pydantic_field_names(self.__class__)
        extra = kwargs.get("model_kwargs", {})
        for field_name in list(kwargs):
            if field_name in extra:
                raise ValueError(f"Found {field_name} supplied twice.")
            if field_name not in all_required_field_names:
                extra[field_name] = kwargs.pop(field_name)

        invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
        if invalid_model_kwargs:
            raise ValueError(
                f"Parameters {invalid_model_kwargs} should be specified explicitly. "
                f"Instead they were passed in as part of `model_kwargs` parameter."
            )

        kwargs["model_kwargs"] = extra
        super().__init__(**kwargs)

    @property
    def _default_params(self) -> Dict[str, Any]:
        return {
            "model": self.model,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "stream": self.streaming,
        }

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        if self.streaming:
            stream_iter = self._stream(
                messages=messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return generate_from_stream(stream_iter)

        res = self._chat(messages, **kwargs)
        if res.status_code != 200:
            raise ValueError(f"Error from Yi api response: {res}")
        response = res.json()
        return self._create_chat_result(response)

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        res = self._chat(messages, stream=True, **kwargs)
        if res.status_code != 200:
            raise ValueError(f"Error from Yi api response: {res}")

        default_chunk_class = AIMessageChunk
        for chunk in res.iter_lines():
            # Each SSE line looks like b"data: {...}"; skip anything else.
            chunk = chunk.decode("utf-8").strip("\r\n")
            parts = chunk.split("data: ", 1)
            chunk = parts[1] if len(parts) > 1 else None
            if chunk is None:
                continue
            if chunk == "[DONE]":
                break
            response = json.loads(chunk)
            for m in response.get("choices"):
                chunk = _convert_delta_to_message_chunk(
                    m.get("delta"), default_chunk_class
                )
                default_chunk_class = chunk.__class__
                cg_chunk = ChatGenerationChunk(message=chunk)
                if run_manager:
                    run_manager.on_llm_new_token(chunk.content, chunk=cg_chunk)
                yield cg_chunk

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        stream: Optional[bool] = None,
        **kwargs: Any,
    ) -> ChatResult:
        should_stream = stream if stream is not None else self.streaming
        if should_stream:
            stream_iter = self._astream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return await agenerate_from_stream(stream_iter)

        headers = self._create_headers_parameters(**kwargs)
        payload = self._create_payload_parameters(messages, **kwargs)

        import httpx

        async with httpx.AsyncClient(
            headers=headers, timeout=self.request_timeout
        ) as client:
            response = await client.post(self.yi_api_base, json=payload)
            response.raise_for_status()
        return self._create_chat_result(response.json())

    async def _astream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        headers = self._create_headers_parameters(**kwargs)
        payload = self._create_payload_parameters(messages, stream=True, **kwargs)

        import httpx

        async with httpx.AsyncClient(
            headers=headers, timeout=self.request_timeout
        ) as client:
            async with aconnect_httpx_sse(
                client, "POST", self.yi_api_base, json=payload
            ) as event_source:
                async for sse in event_source.aiter_sse():
                    chunk = json.loads(sse.data)
                    if len(chunk["choices"]) == 0:
                        continue
                    choice = chunk["choices"][0]
                    chunk = _convert_delta_to_message_chunk(
                        choice["delta"], AIMessageChunk
                    )
                    finish_reason = choice.get("finish_reason", None)
                    generation_info = (
                        {"finish_reason": finish_reason}
                        if finish_reason is not None
                        else None
                    )
                    chunk = ChatGenerationChunk(
                        message=chunk, generation_info=generation_info
                    )
                    if run_manager:
                        await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
                    yield chunk
                    if finish_reason is not None:
                        break

    def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> requests.Response:
        payload = self._create_payload_parameters(messages, **kwargs)
        url = self.yi_api_base
        headers = self._create_headers_parameters(**kwargs)
        res = requests.post(
            url=url,
            timeout=self.request_timeout,
            headers=headers,
            json=payload,
            stream=self.streaming,
        )
        return res

    def _create_payload_parameters(
        self, messages: List[BaseMessage], **kwargs: Any
    ) -> Dict[str, Any]:
        parameters = {**self._default_params, **kwargs}
        temperature = parameters.pop("temperature", 0.7)
        top_p = parameters.pop("top_p", 0.7)
        model = parameters.pop("model")
        stream = parameters.pop("stream", False)

        payload = {
            "model": model,
            "messages": [_convert_message_to_dict(m) for m in messages],
            "top_p": top_p,
            "temperature": temperature,
            "stream": stream,
        }
        return payload

    def _create_headers_parameters(self, **kwargs: Any) -> Dict[str, Any]:
        parameters = {**self._default_params, **kwargs}
        default_headers = parameters.pop("headers", {})
        api_key = ""
        if self.yi_api_key:
            api_key = self.yi_api_key.get_secret_value()

        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
            **default_headers,
        }
        return headers

    def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
        generations = []
        for c in response["choices"]:
            message = _convert_dict_to_message(c["message"])
            gen = ChatGeneration(message=message)
            generations.append(gen)
        token_usage = response["usage"]
        llm_output = {"token_usage": token_usage, "model": self.model}
        return ChatResult(generations=generations, llm_output=llm_output)

    @property
    def _llm_type(self) -> str:
        return "yi-chat"

File: libs/community/langchain_community/llms/__init__.py

@@ -640,12 +640,6 @@ def _import_yuan2() -> Type[BaseLLM]:
     return Yuan2


-def _import_you() -> Type[BaseLLM]:
-    from langchain_community.llms.you import You
-
-    return You
-
-
 def _import_volcengine_maas() -> Type[BaseLLM]:
     from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM
@@ -658,6 +652,18 @@ def _import_sparkllm() -> Type[BaseLLM]:
     return SparkLLM


+def _import_you() -> Type[BaseLLM]:
+    from langchain_community.llms.you import You
+
+    return You
+
+
+def _import_yi() -> Type[BaseLLM]:
+    from langchain_community.llms.yi import YiLLM
+
+    return YiLLM
+
+
 def __getattr__(name: str) -> Any:
     if name == "AI21":
         return _import_ai21()
@@ -853,18 +859,20 @@ def __getattr__(name: str) -> Any:
         return _import_yandex_gpt()
     elif name == "Yuan2":
         return _import_yuan2()
-    elif name == "You":
-        return _import_you()
     elif name == "VolcEngineMaasLLM":
         return _import_volcengine_maas()
+    elif name == "SparkLLM":
+        return _import_sparkllm()
+    elif name == "YiLLM":
+        return _import_yi()
+    elif name == "You":
+        return _import_you()
     elif name == "type_to_cls_dict":
         # for backwards compatibility
         type_to_cls_dict: Dict[str, Type[BaseLLM]] = {
             k: v() for k, v in get_type_to_cls_dict().items()
         }
         return type_to_cls_dict
-    elif name == "SparkLLM":
-        return _import_sparkllm()
     else:
         raise AttributeError(f"Could not find: {name}")
@@ -967,8 +975,9 @@ __all__ = [
     "Writer",
     "Xinference",
     "YandexGPT",
-    "You",
     "Yuan2",
+    "YiLLM",
+    "You",
 ]
@@ -1065,7 +1074,8 @@ def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
         "qianfan_endpoint": _import_baidu_qianfan_endpoint,
         "yandex_gpt": _import_yandex_gpt,
         "yuan2": _import_yuan2,
-        "you": _import_you,
         "VolcEngineMaasLLM": _import_volcengine_maas,
         "SparkLLM": _import_sparkllm,
+        "yi": _import_yi,
+        "you": _import_you,
     }
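
The lowercase `"yi"` key also makes the model reachable through the registry returned by `get_type_to_cls_dict()`; a quick sketch of how it resolves:

```python
from langchain_community.llms import get_type_to_cls_dict

yi_cls = get_type_to_cls_dict()["yi"]()  # invokes _import_yi lazily
assert yi_cls.__name__ == "YiLLM"
```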

File: libs/community/langchain_community/llms/yi.py (new file)

@@ -0,0 +1,104 @@
from __future__ import annotations

import json
import logging
from typing import Any, Dict, List, Literal, Optional

import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.pydantic_v1 import Field, SecretStr
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env

from langchain_community.llms.utils import enforce_stop_tokens

logger = logging.getLogger(__name__)


class YiLLM(LLM):
    """Yi large language models."""

    model: str = "yi-large"
    temperature: float = 0.3
    top_p: float = 0.95
    timeout: int = 60
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    yi_api_key: Optional[SecretStr] = None
    region: Literal["auto", "domestic", "international"] = "auto"
    yi_api_url_domestic: str = "https://api.lingyiwanwu.com/v1/chat/completions"
    yi_api_url_international: str = "https://api.01.ai/v1/chat/completions"

    def __init__(self, **kwargs: Any):
        kwargs["yi_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(kwargs, "yi_api_key", "YI_API_KEY")
        )
        super().__init__(**kwargs)

    @property
    def _default_params(self) -> Dict[str, Any]:
        return {
            "model": self.model,
            "temperature": self.temperature,
            "top_p": self.top_p,
            **self.model_kwargs,
        }

    def _post(self, request: Any) -> Any:
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.yi_api_key.get_secret_value()}",  # type: ignore
        }

        # Build the candidate URL list from the configured region; "auto" tries
        # the domestic endpoint first, then falls back to the international one.
        if self.region == "domestic":
            urls = [self.yi_api_url_domestic]
        elif self.region == "international":
            urls = [self.yi_api_url_international]
        else:  # auto
            urls = [self.yi_api_url_domestic, self.yi_api_url_international]

        for url in urls:
            try:
                response = requests.post(
                    url,
                    headers=headers,
                    json=request,
                    timeout=self.timeout,
                )
                if response.status_code == 200:
                    parsed_json = json.loads(response.text)
                    return parsed_json["choices"][0]["message"]["content"]
                elif response.status_code != 403:
                    # Anything other than a permission error fails immediately;
                    # a 403 falls through to the next URL.
                    response.raise_for_status()
            except requests.RequestException as e:
                if url == urls[-1]:  # last URL to try: give up
                    raise ValueError(f"An error has occurred: {e}")
                logger.warning(f"Failed to connect to {url}, trying next URL")
                continue

        raise ValueError("Failed to connect to all available URLs")

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        request = self._default_params
        request["messages"] = [{"role": "user", "content": prompt}]
        request.update(kwargs)
        text = self._post(request)
        if stop is not None:
            text = enforce_stop_tokens(text, stop)
        return text

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "yi-llm"