Mirror of https://github.com/hwchase17/langchain.git, synced 2025-06-25 16:13:25 +00:00.
Upgrades the Tongyi LLM and ChatTongyi Model (#14793)
- **Description:** Fixes and upgrades for the Tongyi LLM and the ChatTongyi model:
  - Fixed typos; it should be `Tongyi`, not `OpenAI`.
  - Fixed a bug in `stream_generate_with_retry`; it is a real stream generator now.
  - Fixed a bug in `validate_environment`; `dashscope_api_key` is now handled properly whether it is set via an environment variable or an initialization parameter.
  - Switched the `dashscope` response to incremental output by setting the `incremental_output` parameter, which removes the need for the prefix-removal trick.
  - Removed some unused parameters, such as `n` and `prefix_messages`.
  - Added a `_stream` method.
  - Added async support: `_astream`, `_agenerate`, `_abatch`.
- **Dependencies:** No new dependencies.
- **Tag maintainer:** @hwchase17

> PS: Some may be confused by the terms `dashscope`, `tongyi`, and `Qwen`:
> - `dashscope`: a platform that deploys LLMs and provides APIs to invoke them.
> - `tongyi`: a brand name, or an umbrella term for Alibaba Cloud's LLM/AI offerings.
> - `Qwen`: an LLM that is open-sourced and deployed on `dashscope`.
>
> We use the `dashscope` SDK to interact with the `tongyi`-`Qwen` LLM.

---------

Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
This commit is contained in:
parent 6f15cc64b8 · commit 4b53440e70
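Not part of the commit, but for orientation: a minimal usage sketch of what the upgraded classes support after this change. It is hedged, not authoritative; it assumes the `dashscope` package is installed and `DASHSCOPE_API_KEY` is exported, and it only uses the standard LangChain entry points (`stream`, `astream`) that the new `_stream`/`_astream`/`_agenerate` methods back.

```python
# Illustrative sketch only (not from the diff); assumes `dashscope` is installed
# and DASHSCOPE_API_KEY is set in the environment.
import asyncio

from langchain_community.chat_models.tongyi import ChatTongyi
from langchain_community.llms.tongyi import Tongyi
from langchain_core.messages import HumanMessage, SystemMessage

llm = Tongyi(model_name="qwen-plus")
chat = ChatTongyi()  # defaults to qwen-turbo

# Sync streaming: with incremental_output enabled, each chunk is only the new
# text, so chunks can be printed (or concatenated) directly.
for text in llm.stream("Tell me a joke."):
    print(text, end="", flush=True)
print()


async def main() -> None:
    # Async chat streaming, backed by the new `_astream` implementation.
    messages = [
        SystemMessage(content="You are a helpful assistant."),
        HumanMessage(content="Say hi in one short sentence."),
    ]
    async for chunk in chat.astream(messages):
        print(chunk.content, end="", flush=True)
    print()


asyncio.run(main())
```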
@@ -46,6 +46,7 @@ from langchain_community.chat_models.ollama import ChatOllama
 from langchain_community.chat_models.openai import ChatOpenAI
 from langchain_community.chat_models.pai_eas_endpoint import PaiEasChatEndpoint
 from langchain_community.chat_models.promptlayer_openai import PromptLayerChatOpenAI
+from langchain_community.chat_models.tongyi import ChatTongyi
 from langchain_community.chat_models.vertexai import ChatVertexAI
 from langchain_community.chat_models.volcengine_maas import VolcEngineMaasChat
 from langchain_community.chat_models.yandex import ChatYandexGPT
@@ -76,6 +77,7 @@ __all__ = [
     "ChatKonko",
     "PaiEasChatEndpoint",
     "QianfanChatEndpoint",
+    "ChatTongyi",
     "ChatFireworks",
     "ChatYandexGPT",
     "ChatBaichuan",
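With the registration above, the new chat model becomes importable from the package root, which is exactly what the updated unit test at the end of this diff asserts via `EXPECTED_ALL`:

```python
from langchain_community.chat_models import ChatTongyi
```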
@@ -1,23 +1,25 @@
 from __future__ import annotations

+import asyncio
+import functools
 import logging
 from typing import (
     Any,
+    AsyncIterator,
     Callable,
     Dict,
     Iterator,
     List,
     Mapping,
     Optional,
-    Tuple,
-    Type,
+    Union,
 )

-from langchain_core.callbacks import CallbackManagerForLLMRun
-from langchain_core.language_models.chat_models import (
-    BaseChatModel,
-    generate_from_stream,
+from langchain_core.callbacks import (
+    AsyncCallbackManagerForLLMRun,
+    CallbackManagerForLLMRun,
 )
+from langchain_core.language_models.chat_models import BaseChatModel
 from langchain_core.messages import (
     AIMessage,
     AIMessageChunk,
@@ -25,8 +27,6 @@ from langchain_core.messages import (
     BaseMessageChunk,
     ChatMessage,
     ChatMessageChunk,
-    FunctionMessage,
-    FunctionMessageChunk,
     HumanMessage,
     HumanMessageChunk,
     SystemMessage,
@@ -36,41 +36,63 @@ from langchain_core.outputs import (
     ChatGeneration,
     ChatGenerationChunk,
     ChatResult,
-    GenerationChunk,
 )
 from langchain_core.pydantic_v1 import Field, root_validator
 from langchain_core.utils import get_from_dict_or_env
 from requests.exceptions import HTTPError
 from tenacity import (
-    RetryCallState,
+    before_sleep_log,
     retry,
     retry_if_exception_type,
     stop_after_attempt,
     wait_exponential,
 )

+from langchain_community.llms.tongyi import check_response
+
 logger = logging.getLogger(__name__)


-def convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
-    """Convert a dict to a message."""
+def convert_dict_to_message(
+    _dict: Mapping[str, Any], is_chunk: bool = False
+) -> Union[BaseMessage, BaseMessageChunk]:
     role = _dict["role"]
+    content = _dict["content"]
     if role == "user":
-        return HumanMessage(content=_dict["content"])
+        return (
+            HumanMessageChunk(content=content)
+            if is_chunk
+            else HumanMessage(content=content)
+        )
     elif role == "assistant":
-        content = _dict.get("content", "") or ""
-        if _dict.get("function_call"):
-            additional_kwargs = {"function_call": dict(_dict["function_call"])}
-        else:
-            additional_kwargs = {}
-        return AIMessage(content=content, additional_kwargs=additional_kwargs)
+        return (
+            AIMessageChunk(content=content) if is_chunk else AIMessage(content=content)
+        )
     elif role == "system":
-        return SystemMessage(content=_dict["content"])
-    elif role == "function":
-        return FunctionMessage(content=_dict["content"], name=_dict["name"])
+        return (
+            SystemMessageChunk(content=content)
+            if is_chunk
+            else SystemMessage(content=content)
+        )
     else:
-        return ChatMessage(content=_dict["content"], role=role)
+        return (
+            ChatMessageChunk(role=role, content=content)
+            if is_chunk
+            else ChatMessage(role=role, content=content)
+        )
+
+
+def convert_message_chunk_to_message(message_chunk: BaseMessageChunk) -> BaseMessage:
+    if isinstance(message_chunk, HumanMessageChunk):
+        return HumanMessage(content=message_chunk.content)
+    elif isinstance(message_chunk, AIMessageChunk):
+        return AIMessage(content=message_chunk.content)
+    elif isinstance(message_chunk, SystemMessageChunk):
+        return SystemMessage(content=message_chunk.content)
+    elif isinstance(message_chunk, ChatMessageChunk):
+        return ChatMessage(role=message_chunk.role, content=message_chunk.content)
+    else:
+        raise TypeError(f"Got unknown type {message_chunk}")


 def convert_message_to_dict(message: BaseMessage) -> dict:
@@ -83,109 +105,27 @@ def convert_message_to_dict(message: BaseMessage) -> dict:
         message_dict = {"role": "user", "content": message.content}
     elif isinstance(message, AIMessage):
         message_dict = {"role": "assistant", "content": message.content}
-        if "function_call" in message.additional_kwargs:
-            message_dict["function_call"] = message.additional_kwargs["function_call"]
-            # If function call only, content is None not empty string
-            if message_dict["content"] == "":
-                message_dict["content"] = None
     elif isinstance(message, SystemMessage):
         message_dict = {"role": "system", "content": message.content}
-    elif isinstance(message, FunctionMessage):
-        message_dict = {
-            "role": "function",
-            "content": message.content,
-            "name": message.name,
-        }
     else:
         raise TypeError(f"Got unknown type {message}")
-    if "name" in message.additional_kwargs:
-        message_dict["name"] = message.additional_kwargs["name"]
     return message_dict


-def _stream_response_to_generation_chunk(
-    stream_response: Dict[str, Any],
-    length: int,
-) -> GenerationChunk:
-    """Convert a stream response to a generation chunk.
-
-    As the low level API implement is different from openai and other llm.
-    Stream response of Tongyi is not split into chunks, but all data generated before.
-    For example, the answer 'Hi Pickle Rick! How can I assist you today?'
-    Other llm will stream answer:
-    'Hi Pickle',
-    ' Rick!',
-    ' How can I assist you today?'.
-
-    Tongyi answer:
-    'Hi Pickle',
-    'Hi Pickle Rick!',
-    'Hi Pickle Rick! How can I assist you today?'.
-
-    As the GenerationChunk is implemented with chunks. Only return full_text[length:]
-    for new chunk.
-    """
-    full_text = stream_response["output"]["text"]
-    text = full_text[length:]
-    finish_reason = stream_response["output"].get("finish_reason", None)
-
-    return GenerationChunk(
-        text=text,
-        generation_info=dict(
-            finish_reason=finish_reason,
-        ),
-    )
-
-
-def _create_retry_decorator(
-    llm: ChatTongyi,
-    run_manager: Optional[CallbackManagerForLLMRun] = None,
-) -> Callable[[Any], Any]:
-    def _before_sleep(retry_state: RetryCallState) -> None:
-        if run_manager:
-            run_manager.on_retry(retry_state)
-        return None
-
+def _create_retry_decorator(llm: ChatTongyi) -> Callable[[Any], Any]:
     min_seconds = 1
     max_seconds = 4
     # Wait 2^x * 1 second between each retry starting with
-    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
+    # 4 seconds, then up to 10 seconds, then 10 seconds afterward
     return retry(
         reraise=True,
         stop=stop_after_attempt(llm.max_retries),
         wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
         retry=(retry_if_exception_type(HTTPError)),
-        before_sleep=_before_sleep,
+        before_sleep=before_sleep_log(logger, logging.WARNING),
     )


-def _convert_delta_to_message_chunk(
-    _dict: Mapping[str, Any],
-    default_class: Type[BaseMessageChunk],
-    length: int,
-) -> BaseMessageChunk:
-    role = _dict.get("role")
-    full_content = _dict.get("content") or ""
-    content = full_content[length:]
-    if _dict.get("function_call"):
-        additional_kwargs = {"function_call": dict(_dict["function_call"])}
-    else:
-        additional_kwargs = {}
-
-    if role == "user" or default_class == HumanMessageChunk:
-        return HumanMessageChunk(content=content)
-    elif role == "assistant" or default_class == AIMessageChunk:
-        return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
-    elif role == "system" or default_class == SystemMessageChunk:
-        return SystemMessageChunk(content=content)
-    elif role == "function" or default_class == FunctionMessageChunk:
-        return FunctionMessageChunk(content=content, name=_dict["name"])
-    elif role or default_class == ChatMessageChunk:
-        return ChatMessageChunk(content=content, role=role)
-    else:
-        return default_class(content=content)
-
-
 class ChatTongyi(BaseChatModel):
     """Alibaba Tongyi Qwen chat models API.

@@ -204,10 +144,6 @@ class ChatTongyi(BaseChatModel):
     def lc_secrets(self) -> Dict[str, str]:
         return {"dashscope_api_key": "DASHSCOPE_API_KEY"}

-    @property
-    def lc_serializable(self) -> bool:
-        return True
-
     client: Any  #: :meta private:
     model_name: str = Field(default="qwen-turbo", alias="model")

@@ -218,10 +154,7 @@
     """Total probability mass of tokens to consider at each step."""

     dashscope_api_key: Optional[str] = None
-    """Dashscope api key provide by alicloud."""
+    """Dashscope api key provide by Alibaba Cloud."""

-    n: int = 1
-    """How many completions to generate for each prompt."""
-
     streaming: bool = False
     """Whether to stream the results or not."""
@@ -229,12 +162,6 @@
     max_retries: int = 10
     """Maximum number of retries to make when generating."""

-    prefix_messages: List = Field(default_factory=list)
-    """Series of messages for Chat input."""
-
-    result_format: str = Field(default="message")
-    """Return result format"""
-
     @property
     def _llm_type(self) -> str:
         """Return type of llm."""
@@ -243,7 +170,9 @@
     @root_validator()
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
-        get_from_dict_or_env(values, "dashscope_api_key", "DASHSCOPE_API_KEY")
+        values["dashscope_api_key"] = get_from_dict_or_env(
+            values, "dashscope_api_key", "DASHSCOPE_API_KEY"
+        )
         try:
             import dashscope
         except ImportError:
@@ -264,81 +193,141 @@

     @property
     def _default_params(self) -> Dict[str, Any]:
-        """Get the default parameters for calling OpenAI API."""
+        """Get the default parameters for calling Tongyi Qwen API."""
         return {
             "model": self.model_name,
             "top_p": self.top_p,
-            "stream": self.streaming,
-            "n": self.n,
-            "result_format": self.result_format,
+            "api_key": self.dashscope_api_key,
+            "result_format": "message",
             **self.model_kwargs,
         }

-    def completion_with_retry(
-        self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
-    ) -> Any:
+    def completion_with_retry(self, **kwargs: Any) -> Any:
         """Use tenacity to retry the completion call."""
-        retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
+        retry_decorator = _create_retry_decorator(self)

         @retry_decorator
         def _completion_with_retry(**_kwargs: Any) -> Any:
             resp = self.client.call(**_kwargs)
-            if resp.status_code == 200:
-                return resp
-            elif resp.status_code in [400, 401]:
-                raise ValueError(
-                    f"status_code: {resp.status_code} \n "
-                    f"code: {resp.code} \n message: {resp.message}"
-                )
-            else:
-                raise HTTPError(
-                    f"HTTP error occurred: status_code: {resp.status_code} \n "
-                    f"code: {resp.code} \n message: {resp.message}",
-                    response=resp,
-                )
+            return check_response(resp)

         return _completion_with_retry(**kwargs)

-    def stream_completion_with_retry(
-        self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
-    ) -> Any:
+    def stream_completion_with_retry(self, **kwargs: Any) -> Any:
         """Use tenacity to retry the completion call."""
-        retry_decorator = _create_retry_decorator(self, run_manager=run_manager)
+        retry_decorator = _create_retry_decorator(self)

         @retry_decorator
         def _stream_completion_with_retry(**_kwargs: Any) -> Any:
-            return self.client.call(**_kwargs)
+            responses = self.client.call(**_kwargs)
+            for resp in responses:
+                yield check_response(resp)

         return _stream_completion_with_retry(**kwargs)

+    async def astream_completion_with_retry(self, **kwargs: Any) -> Any:
+        """Because the dashscope SDK doesn't provide an async API,
+        we wrap `stream_generate_with_retry` with an async generator."""
+
+        class _AioTongyiGenerator:
+            def __init__(self, generator: Any):
+                self.generator = generator
+
+            def __aiter__(self) -> AsyncIterator[Any]:
+                return self
+
+            async def __anext__(self) -> Any:
+                value = await asyncio.get_running_loop().run_in_executor(
+                    None, self._safe_next
+                )
+                if value is not None:
+                    return value
+                else:
+                    raise StopAsyncIteration
+
+            def _safe_next(self) -> Any:
+                try:
+                    return next(self.generator)
+                except StopIteration:
+                    return None
+
+        async for chunk in _AioTongyiGenerator(
+            generator=self.stream_completion_with_retry(**kwargs)
+        ):
+            yield chunk
+
     def _generate(
         self,
         messages: List[BaseMessage],
         stop: Optional[List[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
-        stream: Optional[bool] = None,
         **kwargs: Any,
     ) -> ChatResult:
-        should_stream = stream if stream is not None else self.streaming
-        if should_stream:
-            stream_iter = self._stream(
-                messages, stop=stop, run_manager=run_manager, **kwargs
-            )
-            return generate_from_stream(stream_iter)
-
-        if not messages:
-            raise ValueError("No messages provided.")
-
-        message_dicts, params = self._create_message_dicts(messages, stop)
-
-        if message_dicts[-1]["role"] != "user":
-            raise ValueError("Last message should be user message.")
-
-        params = {**params, **kwargs}
-        response = self.completion_with_retry(
-            messages=message_dicts, run_manager=run_manager, **params
-        )
-        return self._create_chat_result(response)
+        generations = []
+        if self.streaming:
+            generation: Optional[ChatGenerationChunk] = None
+            for chunk in self._stream(
+                messages, stop=stop, run_manager=run_manager, **kwargs
+            ):
+                if generation is None:
+                    generation = chunk
+                else:
+                    generation += chunk
+            assert generation is not None
+            generations.append(self._chunk_to_generation(generation))
+        else:
+            params: Dict[str, Any] = self._invocation_params(
+                messages=messages, stop=stop, **kwargs
+            )
+            resp = self.completion_with_retry(**params)
+            generations.append(
+                ChatGeneration(**self._chat_generation_from_qwen_resp(resp))
+            )
+        return ChatResult(
+            generations=generations,
+            llm_output={
+                "model_name": self.model_name,
+            },
+        )
+
+    async def _agenerate(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> ChatResult:
+        generations = []
+        if self.streaming:
+            generation: Optional[ChatGenerationChunk] = None
+            async for chunk in self._astream(
+                messages, stop=stop, run_manager=run_manager, **kwargs
+            ):
+                if generation is None:
+                    generation = chunk
+                else:
+                    generation += chunk
+            assert generation is not None
+            generations.append(self._chunk_to_generation(generation))
+        else:
+            params: Dict[str, Any] = self._invocation_params(
+                messages=messages, stop=stop, **kwargs
+            )
+            resp = await asyncio.get_running_loop().run_in_executor(
+                None,
+                functools.partial(
+                    self.completion_with_retry, **{"run_manager": run_manager, **params}
+                ),
+            )
+            generations.append(
+                ChatGeneration(**self._chat_generation_from_qwen_resp(resp))
+            )
+        return ChatResult(
+            generations=generations,
+            llm_output={
+                "model_name": self.model_name,
+            },
+        )

     def _stream(
         self,
@@ -347,62 +336,83 @@
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
-        message_dicts, params = self._create_message_dicts(messages, stop)
-        params = {**params, **kwargs, "stream": True}
-        # Mark current chunk total length
-        length = 0
-        default_chunk_class = AIMessageChunk
-        for chunk in self.stream_completion_with_retry(
-            messages=message_dicts, run_manager=run_manager, **params
-        ):
-            if len(chunk["output"]["choices"]) == 0:
-                continue
-            choice = chunk["output"]["choices"][0]
-
-            chunk = _convert_delta_to_message_chunk(
-                choice["message"], default_chunk_class, length
-            )
-            finish_reason = choice.get("finish_reason")
-            generation_info = (
-                dict(finish_reason=finish_reason) if finish_reason is not None else None
+        params: Dict[str, Any] = self._invocation_params(
+            messages=messages, stop=stop, stream=True, **kwargs
+        )
+        for stream_resp in self.stream_completion_with_retry(**params):
+            chunk = ChatGenerationChunk(
+                **self._chat_generation_from_qwen_resp(stream_resp, is_chunk=True)
             )
-            default_chunk_class = chunk.__class__
-            chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
             yield chunk
             if run_manager:
                 run_manager.on_llm_new_token(chunk.text, chunk=chunk)
-            length = len(choice["message"]["content"])

-    def _create_message_dicts(
-        self, messages: List[BaseMessage], stop: Optional[List[str]]
-    ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
-        params = self._client_params()
-
-        # Ensure `stop` is a list of strings
-        if stop is not None:
-            if "stop" in params:
-                raise ValueError("`stop` found in both the input and default params.")
-            params["stop"] = stop
-
-        message_dicts = [convert_message_to_dict(m) for m in messages]
-        return message_dicts, params
-
-    def _client_params(self) -> Dict[str, Any]:
-        """Get the parameters used for the openai client."""
-        creds: Dict[str, Any] = {
-            "api_key": self.dashscope_api_key,
-        }
-        return {**self._default_params, **creds}
-
-    def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
-        generations = []
-        for res in response["output"]["choices"]:
-            message = convert_dict_to_message(res["message"])
-            gen = ChatGeneration(
-                message=message,
-                generation_info=dict(finish_reason=res.get("finish_reason")),
-            )
-            generations.append(gen)
-        token_usage = response.get("usage", {})
-        llm_output = {"token_usage": token_usage, "model_name": self.model_name}
-        return ChatResult(generations=generations, llm_output=llm_output)
+    async def _astream(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[ChatGenerationChunk]:
+        params: Dict[str, Any] = self._invocation_params(
+            messages=messages, stop=stop, stream=True, **kwargs
+        )
+        async for stream_resp in self.astream_completion_with_retry(**params):
+            chunk = ChatGenerationChunk(
+                **self._chat_generation_from_qwen_resp(stream_resp, is_chunk=True)
+            )
+            yield chunk
+            if run_manager:
+                await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
+
+    def _invocation_params(
+        self, messages: List[BaseMessage], stop: Any, **kwargs: Any
+    ) -> Dict[str, Any]:
+        params = {**self._default_params, **kwargs}
+        if stop is not None:
+            params["stop"] = stop
+        if params.get("stream"):
+            params["incremental_output"] = True
+
+        message_dicts = [convert_message_to_dict(m) for m in messages]
+
+        # According to the docs, the last message should be a `user` message
+        if message_dicts[-1]["role"] != "user":
+            raise ValueError("Last message should be user message.")
+        # And the `system` message should be the first message if present
+        system_message_indices = [
+            i for i, m in enumerate(message_dicts) if m["role"] == "system"
+        ]
+        if len(system_message_indices) != 1 or system_message_indices[0] != 0:
+            raise ValueError("System message can only be the first message.")
+
+        params["messages"] = message_dicts
+
+        return params
+
+    def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
+        if llm_outputs[0] is None:
+            return {}
+        return llm_outputs[0]
+
+    @staticmethod
+    def _chat_generation_from_qwen_resp(
+        resp: Any, is_chunk: bool = False
+    ) -> Dict[str, Any]:
+        choice = resp["output"]["choices"][0]
+        message = convert_dict_to_message(choice["message"], is_chunk=is_chunk)
+        return dict(
+            message=message,
+            generation_info=dict(
+                finish_reason=choice["finish_reason"],
+                request_id=resp["request_id"],
+                token_usage=dict(resp["usage"]),
+            ),
+        )
+
+    @staticmethod
+    def _chunk_to_generation(chunk: ChatGenerationChunk) -> ChatGeneration:
+        return ChatGeneration(
+            message=convert_message_chunk_to_message(chunk.message),
+            generation_info=chunk.generation_info,
+        )
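One pattern in the additions above deserves a note: `astream_completion_with_retry` (and its twin `astream_generate_with_retry` in the LLM diff below) exposes the blocking `dashscope` stream as an async iterator by driving the sync generator from the default thread-pool executor. A stripped-down, self-contained sketch of that idea follows; the names `sync_chunks` and `as_async` are illustrative and not part of the diff.

```python
import asyncio
from typing import Any, AsyncIterator, Iterator


def sync_chunks() -> Iterator[str]:
    """Stand-in for a blocking SDK stream (what stream_completion_with_retry yields)."""
    yield from ("Hello", ", ", "world")


async def as_async(generator: Iterator[Any]) -> AsyncIterator[Any]:
    """Drive a sync generator on the default executor so the event loop is not blocked."""
    loop = asyncio.get_running_loop()

    def _safe_next() -> Any:
        # Mirror of the `_safe_next` trick in the diff: StopIteration does not travel
        # cleanly across the executor/await boundary, so exhaustion becomes a sentinel.
        try:
            return next(generator)
        except StopIteration:
            return None

    while True:
        value = await loop.run_in_executor(None, _safe_next)
        if value is None:  # sentinel: the underlying generator is exhausted
            break
        yield value


async def main() -> None:
    async for chunk in as_async(sync_chunks()):
        print(chunk, end="")
    print()


asyncio.run(main())
```

The trade-off, as in the diff, is that each chunk costs a thread-pool hop, and a chunk that is literally `None` would be mistaken for end-of-stream; both are acceptable here because the SDK responses are always dict-like objects.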
@@ -1,11 +1,25 @@
 from __future__ import annotations

+import asyncio
+import functools
 import logging
-from typing import Any, Callable, Dict, List, Optional
+from typing import (
+    Any,
+    AsyncIterator,
+    Callable,
+    Dict,
+    Iterator,
+    List,
+    Mapping,
+    Optional,
+)

-from langchain_core.callbacks import CallbackManagerForLLMRun
-from langchain_core.language_models.llms import LLM
-from langchain_core.outputs import Generation, LLMResult
+from langchain_core.callbacks import (
+    AsyncCallbackManagerForLLMRun,
+    CallbackManagerForLLMRun,
+)
+from langchain_core.language_models.llms import BaseLLM
+from langchain_core.outputs import Generation, GenerationChunk, LLMResult
 from langchain_core.pydantic_v1 import Field, root_validator
 from langchain_core.utils import get_from_dict_or_env
 from requests.exceptions import HTTPError
@@ -24,7 +38,7 @@ def _create_retry_decorator(llm: Tongyi) -> Callable[[Any], Any]:
     min_seconds = 1
     max_seconds = 4
     # Wait 2^x * 1 second between each retry starting with
-    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
+    # 4 seconds, then up to 10 seconds, then 10 seconds afterward
     return retry(
         reraise=True,
         stop=stop_after_attempt(llm.max_retries),
@@ -34,13 +48,8 @@ def _create_retry_decorator(llm: Tongyi) -> Callable[[Any], Any]:
     )


-def generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:
-    """Use tenacity to retry the completion call."""
-    retry_decorator = _create_retry_decorator(llm)
-
-    @retry_decorator
-    def _generate_with_retry(**_kwargs: Any) -> Any:
-        resp = llm.client.call(**_kwargs)
-        if resp.status_code == 200:
-            return resp
-        elif resp.status_code in [400, 401]:
+def check_response(resp: Any) -> Any:
+    """Check the response from the completion call."""
+    if resp.status_code == 200:
+        return resp
+    elif resp.status_code in [400, 401]:
@@ -55,6 +64,16 @@ def generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:
                 response=resp,
             )

+
+def generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:
+    """Use tenacity to retry the completion call."""
+    retry_decorator = _create_retry_decorator(llm)
+
+    @retry_decorator
+    def _generate_with_retry(**_kwargs: Any) -> Any:
+        resp = llm.client.call(**_kwargs)
+        return check_response(resp)
+
     return _generate_with_retry(**kwargs)

@@ -64,28 +83,44 @@ def stream_generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:

     @retry_decorator
     def _stream_generate_with_retry(**_kwargs: Any) -> Any:
-        stream_resps = []
-        resps = llm.client.call(**_kwargs)
-        for resp in resps:
-            if resp.status_code == 200:
-                stream_resps.append(resp)
-            elif resp.status_code in [400, 401]:
-                raise ValueError(
-                    f"status_code: {resp.status_code} \n "
-                    f"code: {resp.code} \n message: {resp.message}"
-                )
-            else:
-                raise HTTPError(
-                    f"HTTP error occurred: status_code: {resp.status_code} \n "
-                    f"code: {resp.code} \n message: {resp.message}",
-                    response=resp,
-                )
-        return stream_resps
+        responses = llm.client.call(**_kwargs)
+        for resp in responses:
+            yield check_response(resp)

     return _stream_generate_with_retry(**kwargs)


-class Tongyi(LLM):
+async def astream_generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:
+    """Because the dashscope SDK doesn't provide an async API,
+    we wrap `stream_generate_with_retry` with an async generator."""
+
+    class _AioTongyiGenerator:
+        def __init__(self, _llm: Tongyi, **_kwargs: Any):
+            self.generator = stream_generate_with_retry(_llm, **_kwargs)
+
+        def __aiter__(self) -> AsyncIterator[Any]:
+            return self
+
+        async def __anext__(self) -> Any:
+            value = await asyncio.get_running_loop().run_in_executor(
+                None, self._safe_next
+            )
+            if value is not None:
+                return value
+            else:
+                raise StopAsyncIteration
+
+        def _safe_next(self) -> Any:
+            try:
+                return next(self.generator)
+            except StopIteration:
+                return None
+
+    async for chunk in _AioTongyiGenerator(llm, **kwargs):
+        yield chunk
+
+
+class Tongyi(BaseLLM):
     """Tongyi Qwen large language models.

     To use, you should have the ``dashscope`` python package installed, and the
@@ -96,17 +131,13 @@ class Tongyi(LLM):
         .. code-block:: python

             from langchain_community.llms import Tongyi
-            Tongyi = tongyi()
+            tongyi = tongyi()
     """

     @property
     def lc_secrets(self) -> Dict[str, str]:
         return {"dashscope_api_key": "DASHSCOPE_API_KEY"}

-    @classmethod
-    def is_lc_serializable(cls) -> bool:
-        return False
-
     client: Any  #: :meta private:
     model_name: str = "qwen-plus"

@@ -117,10 +148,7 @@
     """Total probability mass of tokens to consider at each step."""

     dashscope_api_key: Optional[str] = None
-    """Dashscope api key provide by alicloud."""
+    """Dashscope api key provide by Alibaba Cloud."""

-    n: int = 1
-    """How many completions to generate for each prompt."""
-
     streaming: bool = False
     """Whether to stream the results or not."""
@@ -128,9 +156,6 @@
     max_retries: int = 10
     """Maximum number of retries to make when generating."""

-    prefix_messages: List = Field(default_factory=list)
-    """Series of messages for Chat input."""
-
     @property
     def _llm_type(self) -> str:
         """Return type of llm."""
@@ -139,7 +164,9 @@
     @root_validator()
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
-        get_from_dict_or_env(values, "dashscope_api_key", "DASHSCOPE_API_KEY")
+        values["dashscope_api_key"] = get_from_dict_or_env(
+            values, "dashscope_api_key", "DASHSCOPE_API_KEY"
+        )
         try:
             import dashscope
         except ImportError:
@@ -160,45 +187,18 @@

     @property
     def _default_params(self) -> Dict[str, Any]:
-        """Get the default parameters for calling OpenAI API."""
+        """Get the default parameters for calling Tongyi Qwen API."""
         normal_params = {
+            "model": self.model_name,
             "top_p": self.top_p,
+            "api_key": self.dashscope_api_key,
         }

         return {**normal_params, **self.model_kwargs}

-    def _call(
-        self,
-        prompt: str,
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> str:
-        """Call out to Tongyi's generate endpoint.
-
-        Args:
-            prompt: The prompt to pass into the model.
-
-        Returns:
-            The string generated by the model.
-
-        Example:
-            .. code-block:: python
-
-                response = tongyi("Tell me a joke.")
-        """
-        params: Dict[str, Any] = {
-            **{"model": self.model_name},
-            **self._default_params,
-            **kwargs,
-        }
-
-        completion = generate_with_retry(
-            self,
-            prompt=prompt,
-            **params,
-        )
-        return completion["output"]["text"]
+    @property
+    def _identifying_params(self) -> Mapping[str, Any]:
+        return {"model_name": self.model_name, **super()._identifying_params}

     def _generate(
         self,
@@ -208,70 +208,136 @@
         **kwargs: Any,
     ) -> LLMResult:
         generations = []
-        params: Dict[str, Any] = {
-            **{"model": self.model_name},
-            **self._default_params,
-            **kwargs,
-        }
         if self.streaming:
             if len(prompts) > 1:
                 raise ValueError("Cannot stream results with multiple prompts.")
-            params["stream"] = True
-            temp = ""
-            for stream_resp in stream_generate_with_retry(
-                self, prompt=prompts[0], **params
-            ):
-                if run_manager:
-                    stream_resp_text = stream_resp["output"]["text"]
-                    stream_resp_text = stream_resp_text.replace(temp, "")
-                    # Ali Cloud's streaming transmission interface, each return content
-                    # will contain the output
-                    # of the previous round(as of September 20, 2023, future updates to
-                    # the Alibaba Cloud API may vary)
-                    run_manager.on_llm_new_token(stream_resp_text)
-                    # The implementation of streaming transmission primarily relies on
-                    # the "on_llm_new_token" method
-                    # of the streaming callback.
-                    temp = stream_resp["output"]["text"]
-
-                generations.append(
-                    [
-                        Generation(
-                            text=stream_resp["output"]["text"],
-                            generation_info=dict(
-                                finish_reason=stream_resp["output"]["finish_reason"],
-                            ),
-                        )
-                    ]
-                )
-                generations.reverse()
-                # In the official implementation of the OpenAI API,
-                # the "generations" parameter passed to LLMResult seems to be a 1*1*1
-                # two-dimensional list
-                # (including in non-streaming mode).
-                # Considering that Alibaba Cloud's streaming transmission
-                # (as of September 20, 2023, future updates to the Alibaba Cloud API may
-                # vary)
-                # includes the output of the previous round in each return,
-                # reversing this "generations" list should suffice
-                # (This is the solution with the least amount of changes to the source code,
-                # while still allowing for convenient modifications in the future,
-                # although it may result in slightly more memory consumption).
+            generation: Optional[GenerationChunk] = None
+            for chunk in self._stream(prompts[0], stop, run_manager, **kwargs):
+                if generation is None:
+                    generation = chunk
+                else:
+                    generation += chunk
+            assert generation is not None
+            generations.append([self._chunk_to_generation(generation)])
         else:
+            params: Dict[str, Any] = self._invocation_params(stop=stop, **kwargs)
             for prompt in prompts:
-                completion = generate_with_retry(
-                    self,
-                    prompt=prompt,
-                    **params,
-                )
+                completion = generate_with_retry(self, prompt=prompt, **params)
                 generations.append(
-                    [
-                        Generation(
-                            text=completion["output"]["text"],
-                            generation_info=dict(
-                                finish_reason=completion["output"]["finish_reason"],
-                            ),
-                        )
-                    ]
+                    [Generation(**self._generation_from_qwen_resp(completion))]
                 )
-        return LLMResult(generations=generations)
+        return LLMResult(
+            generations=generations,
+            llm_output={
+                "model_name": self.model_name,
+            },
+        )
+
+    async def _agenerate(
+        self,
+        prompts: List[str],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> LLMResult:
+        generations = []
+        if self.streaming:
+            if len(prompts) > 1:
+                raise ValueError("Cannot stream results with multiple prompts.")
+            generation: Optional[GenerationChunk] = None
+            async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs):
+                if generation is None:
+                    generation = chunk
+                else:
+                    generation += chunk
+            assert generation is not None
+            generations.append([self._chunk_to_generation(generation)])
+        else:
+            params: Dict[str, Any] = self._invocation_params(stop=stop, **kwargs)
+            for prompt in prompts:
+                completion = await asyncio.get_running_loop().run_in_executor(
+                    None,
+                    functools.partial(
+                        generate_with_retry, **{"llm": self, "prompt": prompt, **params}
+                    ),
+                )
+                generations.append(
+                    [Generation(**self._generation_from_qwen_resp(completion))]
+                )
+        return LLMResult(
+            generations=generations,
+            llm_output={
+                "model_name": self.model_name,
+            },
+        )
+
+    def _stream(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> Iterator[GenerationChunk]:
+        params: Dict[str, Any] = self._invocation_params(
+            stop=stop, stream=True, **kwargs
+        )
+        for stream_resp in stream_generate_with_retry(self, prompt=prompt, **params):
+            chunk = GenerationChunk(**self._generation_from_qwen_resp(stream_resp))
+            yield chunk
+            if run_manager:
+                run_manager.on_llm_new_token(
+                    chunk.text,
+                    chunk=chunk,
+                    verbose=self.verbose,
+                )
+
+    async def _astream(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[GenerationChunk]:
+        params: Dict[str, Any] = self._invocation_params(
+            stop=stop, stream=True, **kwargs
+        )
+        async for stream_resp in astream_generate_with_retry(
+            self, prompt=prompt, **params
+        ):
+            chunk = GenerationChunk(**self._generation_from_qwen_resp(stream_resp))
+            yield chunk
+            if run_manager:
+                await run_manager.on_llm_new_token(
+                    chunk.text,
+                    chunk=chunk,
+                    verbose=self.verbose,
+                )
+
+    def _invocation_params(self, stop: Any, **kwargs: Any) -> Dict[str, Any]:
+        params = {
+            **self._default_params,
+            **kwargs,
+        }
+        if stop is not None:
+            params["stop"] = stop
+        if params.get("stream"):
+            params["incremental_output"] = True
+        return params
+
+    @staticmethod
+    def _generation_from_qwen_resp(resp: Any) -> Dict[str, Any]:
+        return dict(
+            text=resp["output"]["text"],
+            generation_info=dict(
+                finish_reason=resp["output"]["finish_reason"],
+                request_id=resp["request_id"],
+                token_usage=dict(resp["usage"]),
+            ),
+        )
+
+    @staticmethod
+    def _chunk_to_generation(chunk: GenerationChunk) -> Generation:
+        return Generation(
+            text=chunk.text,
+            generation_info=chunk.generation_info,
+        )
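The `incremental_output` flag set in `_invocation_params` above is what retires the old prefix-removal trick: before this change each streamed dashscope event repeated everything generated so far, while with `incremental_output=True` each event carries only the new delta. A toy sketch of the two shapes, using made-up event lists that reuse the "Hi Pickle Rick!" example from the removed docstring:

```python
from typing import Iterable


def join_cumulative(events: Iterable[str]) -> str:
    # Old behavior: every event repeats the full text so far, so strip the seen prefix.
    text = ""
    for event in events:
        text += event[len(text):]
    return text


def join_incremental(events: Iterable[str]) -> str:
    # New behavior with incremental_output=True: events are already deltas.
    return "".join(events)


cumulative = [
    "Hi Pickle",
    "Hi Pickle Rick!",
    "Hi Pickle Rick! How can I assist you today?",
]
incremental = ["Hi Pickle", " Rick!", " How can I assist you today?"]

assert join_cumulative(cumulative) == join_incremental(incremental)
print(join_incremental(incremental))
```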
@@ -26,6 +26,7 @@ EXPECTED_ALL = [
     "ChatKonko",
     "PaiEasChatEndpoint",
     "QianfanChatEndpoint",
+    "ChatTongyi",
     "ChatFireworks",
     "ChatYandexGPT",
     "ChatBaichuan",