Mirror of https://github.com/hwchase17/langchain.git (synced 2025-07-05 20:58:25 +00:00)
Modify Anyscale integration to work with Anyscale Endpoint (#11569)
**Description:** Modify the Anyscale integration to work with [Anyscale Endpoint](https://docs.endpoints.anyscale.com/), adding support for the invoke, async invoke, stream, and async stream features.

Co-authored-by: Bagatur <baskaryan@gmail.com>
This commit is contained in: commit 467b082c34 (parent 51193309ea)
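For orientation, here is a minimal usage sketch of the integration after this change (not part of the diff). It assumes `ANYSCALE_API_BASE`/`ANYSCALE_API_KEY` point at a valid Anyscale Endpoint account, that `meta-llama/Llama-2-7b-chat-hf` is one of the models served there, and that LangChain's generic `invoke`/`stream` LLM interface is available in this version.

```python
# Sketch only: exercise the reworked Anyscale integration.
# The credential values below are placeholders; set real ones before running.
import os

from langchain.llms import Anyscale

os.environ["ANYSCALE_API_BASE"] = "<your Anyscale Endpoint base URL>"
os.environ["ANYSCALE_API_KEY"] = "<your Anyscale Endpoint API key>"

llm = Anyscale(model_name="meta-llama/Llama-2-7b-chat-hf")

# Plain synchronous invoke.
print(llm.invoke("When was George Washington president?"))

# Token-by-token streaming (served by the new _stream implementation).
for token in llm.stream("Tell me a joke."):
    print(token, end="", flush=True)
```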
Changes to the Anyscale documentation notebook:

```diff
@@ -1,7 +1,6 @@
 {
  "cells": [
   {
-   "attachments": {},
    "cell_type": "markdown",
    "id": "9597802c",
    "metadata": {},
@@ -10,9 +9,7 @@
     "\n",
     "[Anyscale](https://www.anyscale.com/) is a fully-managed [Ray](https://www.ray.io/) platform, on which you can build, deploy, and manage scalable AI and Python applications\n",
     "\n",
-    "This example goes over how to use LangChain to interact with `Anyscale` [service](https://docs.anyscale.com/productionize/services-v2/get-started). \n",
-    "\n",
-    "It will send the requests to Anyscale Service endpoint, which is concatenate `ANYSCALE_SERVICE_URL` and `ANYSCALE_SERVICE_ROUTE`, with a token defined in `ANYSCALE_SERVICE_TOKEN`"
+    "This example goes over how to use LangChain to interact with [Anyscale Endpoint](https://app.endpoints.anyscale.com/). "
    ]
   },
   {
@@ -26,9 +23,8 @@
    "source": [
     "import os\n",
     "\n",
-    "os.environ[\"ANYSCALE_SERVICE_URL\"] = ANYSCALE_SERVICE_URL\n",
-    "os.environ[\"ANYSCALE_SERVICE_ROUTE\"] = ANYSCALE_SERVICE_ROUTE\n",
-    "os.environ[\"ANYSCALE_SERVICE_TOKEN\"] = ANYSCALE_SERVICE_TOKEN"
+    "os.environ[\"ANYSCALE_API_BASE\"] = ANYSCALE_API_BASE\n",
+    "os.environ[\"ANYSCALE_API_KEY\"] = ANYSCALE_API_KEY"
    ]
   },
   {
@@ -41,7 +37,8 @@
    "outputs": [],
    "source": [
     "from langchain.llms import Anyscale\n",
-    "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
+    "from langchain.prompts import PromptTemplate\n",
+    "from langchain.chains import LLMChain"
    ]
   },
   {
@@ -69,7 +66,7 @@
    },
    "outputs": [],
    "source": [
-    "llm = Anyscale()"
+    "llm = Anyscale(model_name=ANYSCALE_MODEL_NAME)"
    ]
   },
   {
@@ -99,7 +96,6 @@
    ]
   },
   {
-   "attachments": {},
    "cell_type": "markdown",
    "id": "42f05b34-1a44-4cbd-8342-35c1572b6765",
    "metadata": {},
@@ -136,13 +132,11 @@
    "source": [
     "import ray\n",
     "\n",
-    "\n",
-    "@ray.remote\n",
+    "@ray.remote(num_cpus=0.1)\n",
     "def send_query(llm, prompt):\n",
     "    resp = llm(prompt)\n",
     "    return resp\n",
     "\n",
-    "\n",
     "futures = [send_query.remote(llm, prompt) for prompt in prompt_list]\n",
     "results = ray.get(futures)"
    ]
```
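The notebook cells that build the prompt and chain sit outside the hunks above; the sketch below shows one plausible way the imports are wired together. The template text and the `ANYSCALE_MODEL_NAME` value are placeholders, not taken from the notebook.

```python
# Sketch, not verbatim notebook content: combine the imports shown above.
from langchain.chains import LLMChain
from langchain.llms import Anyscale
from langchain.prompts import PromptTemplate

ANYSCALE_MODEL_NAME = "meta-llama/Llama-2-7b-chat-hf"  # placeholder model name

template = """Question: {question}

Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])

llm = Anyscale(model_name=ANYSCALE_MODEL_NAME)
llm_chain = LLMChain(prompt=prompt, llm=llm)

print(llm_chain.run("When was George Washington president?"))
```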
The `Anyscale` LLM implementation in `langchain.llms` is rewritten in full (126 lines removed, 272 added): the requests-based Anyscale Service wrapper is replaced by a `BaseOpenAI` subclass that targets Anyscale Endpoint through the `openai` client.

Removed implementation:

```python
from typing import Any, Dict, List, Mapping, Optional

import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utils import get_from_dict_or_env


class Anyscale(LLM):
    """Anyscale Service models.

    To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``,
    ``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale
    Service, or pass it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.llms import Anyscale
            anyscale = Anyscale(anyscale_service_url="SERVICE_URL",
                                anyscale_service_route="SERVICE_ROUTE",
                                anyscale_service_token="SERVICE_TOKEN")

            # Use Ray for distributed processing
            import ray
            prompt_list=[]
            @ray.remote
            def send_query(llm, prompt):
                resp = llm(prompt)
                return resp
            futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list]
            results = ray.get(futures)
    """

    model_kwargs: Optional[dict] = None
    """Keyword arguments to pass to the model. Reserved for future use"""

    anyscale_service_url: Optional[str] = None
    anyscale_service_route: Optional[str] = None
    anyscale_service_token: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        anyscale_service_url = get_from_dict_or_env(
            values, "anyscale_service_url", "ANYSCALE_SERVICE_URL"
        )
        anyscale_service_route = get_from_dict_or_env(
            values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE"
        )
        anyscale_service_token = get_from_dict_or_env(
            values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN"
        )
        if anyscale_service_url.endswith("/"):
            anyscale_service_url = anyscale_service_url[:-1]
        if not anyscale_service_route.startswith("/"):
            anyscale_service_route = "/" + anyscale_service_route
        try:
            anyscale_service_endpoint = f"{anyscale_service_url}/-/routes"
            headers = {"Authorization": f"Bearer {anyscale_service_token}"}
            requests.get(anyscale_service_endpoint, headers=headers)
        except requests.exceptions.RequestException as e:
            raise ValueError(e)

        values["anyscale_service_url"] = anyscale_service_url
        values["anyscale_service_route"] = anyscale_service_route
        values["anyscale_service_token"] = anyscale_service_token
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            "anyscale_service_url": self.anyscale_service_url,
            "anyscale_service_route": self.anyscale_service_route,
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "anyscale"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to Anyscale Service endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = anyscale("Tell me a joke.")
        """

        anyscale_service_endpoint = (
            f"{self.anyscale_service_url}{self.anyscale_service_route}"
        )
        headers = {"Authorization": f"Bearer {self.anyscale_service_token}"}
        body = {"prompt": prompt}
        resp = requests.post(anyscale_service_endpoint, headers=headers, json=body)

        if resp.status_code != 200:
            raise ValueError(
                f"Error returned by service, status code {resp.status_code}"
            )
        text = resp.text

        if stop is not None:
            # This is a bit hacky, but I can't figure out a better way to enforce
            # stop tokens when making calls to huggingface_hub.
            text = enforce_stop_tokens(text, stop)
        return text
```

New implementation:

```python
"""Wrapper around Anyscale Endpoint"""
from typing import (
    Any,
    AsyncIterator,
    Dict,
    Iterator,
    List,
    Mapping,
    Optional,
    Set,
    Tuple,
)

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.llms.openai import (
    BaseOpenAI,
    acompletion_with_retry,
    completion_with_retry,
)
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import Generation, LLMResult
from langchain.schema.output import GenerationChunk
from langchain.utils import get_from_dict_or_env


def update_token_usage(
    keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
    """Update token usage."""
    _keys_to_use = keys.intersection(response["usage"])
    for _key in _keys_to_use:
        if _key not in token_usage:
            token_usage[_key] = response["usage"][_key]
        else:
            token_usage[_key] += response["usage"][_key]


def create_llm_result(
    choices: Any, prompts: List[str], token_usage: Dict[str, int], model_name: str
) -> LLMResult:
    """Create the LLMResult from the choices and prompts."""
    generations = []
    for i, _ in enumerate(prompts):
        choice = choices[i]
        generations.append(
            [
                Generation(
                    text=choice["message"]["content"],
                    generation_info=dict(
                        finish_reason=choice.get("finish_reason"),
                        logprobs=choice.get("logprobs"),
                    ),
                )
            ]
        )
    llm_output = {"token_usage": token_usage, "model_name": model_name}
    return LLMResult(generations=generations, llm_output=llm_output)


class Anyscale(BaseOpenAI):
    """Wrapper around Anyscale Endpoint.
    To use, you should have the environment variable ``ANYSCALE_API_BASE`` and
    ``ANYSCALE_API_KEY``set with your Anyscale Endpoint, or pass it as a named
    parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.llms import Anyscale
            anyscalellm = Anyscale(anyscale_api_base="ANYSCALE_API_BASE",
                           anyscale_api_key="ANYSCALE_API_KEY",
                           model_name="meta-llama/Llama-2-7b-chat-hf")
            # To leverage Ray for parallel processing
            @ray.remote(num_cpus=1)
            def send_query(llm, text):
                resp = llm(text)
                return resp
            futures = [send_query.remote(anyscalellm, text) for text in texts]
            results = ray.get(futures)
    """

    """Key word arguments to pass to the model."""
    anyscale_api_base: Optional[str] = None
    anyscale_api_key: Optional[str] = None

    prefix_messages: List = Field(default_factory=list)

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        values["anyscale_api_base"] = get_from_dict_or_env(
            values, "anyscale_api_base", "ANYSCALE_API_BASE"
        )
        values["anyscale_api_key"] = get_from_dict_or_env(
            values, "anyscale_api_key", "ANYSCALE_API_KEY"
        )
        try:
            import openai

            ## Always create ChatComplete client, replacing the legacy Complete client
            values["client"] = openai.ChatCompletion
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        if values["streaming"] and values["n"] > 1:
            raise ValueError("Cannot stream results when n > 1.")
        if values["streaming"] and values["best_of"] > 1:
            raise ValueError("Cannot stream results when best_of > 1.")

        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            **{"model_name": self.model_name},
            **super()._identifying_params,
        }

    @property
    def _invocation_params(self) -> Dict[str, Any]:
        """Get the parameters used to invoke the model."""
        openai_creds: Dict[str, Any] = {
            "api_key": self.anyscale_api_key,
            "api_base": self.anyscale_api_base,
        }
        return {**openai_creds, **{"model": self.model_name}, **super()._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "Anyscale LLM"

    def _get_chat_messages(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> Tuple:
        if len(prompts) > 1:
            raise ValueError(
                f"Anyscale currently only supports single prompt, got {prompts}"
            )
        messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}]
        params: Dict[str, Any] = self._invocation_params
        if stop is not None:
            if "stop" in params:
                raise ValueError("`stop` found in both the input and default params.")
            params["stop"] = stop
        if params.get("max_tokens") == -1:
            # for Chat api, omitting max_tokens is equivalent to having no limit
            del params["max_tokens"]
        return messages, params

    def _stream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        messages, params = self._get_chat_messages([prompt], stop)
        params = {**params, **kwargs, "stream": True}
        for stream_resp in completion_with_retry(
            self, messages=messages, run_manager=run_manager, **params
        ):
            token = stream_resp["choices"][0]["delta"].get("content", "")
            chunk = GenerationChunk(text=token)
            yield chunk
            if run_manager:
                run_manager.on_llm_new_token(token, chunk=chunk)

    async def _astream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[GenerationChunk]:
        messages, params = self._get_chat_messages([prompt], stop)
        params = {**params, **kwargs, "stream": True}
        async for stream_resp in await acompletion_with_retry(
            self, messages=messages, run_manager=run_manager, **params
        ):
            token = stream_resp["choices"][0]["delta"].get("content", "")
            chunk = GenerationChunk(text=token)
            yield chunk
            if run_manager:
                await run_manager.on_llm_new_token(token, chunk=chunk)

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        choices = []
        token_usage: Dict[str, int] = {}
        _keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
        for prompt in prompts:
            if self.streaming:
                generation: Optional[GenerationChunk] = None
                for chunk in self._stream(prompt, stop, run_manager, **kwargs):
                    if generation is None:
                        generation = chunk
                    else:
                        generation += chunk
                assert generation is not None
                choices.append(
                    {
                        "message": {"content": generation.text},
                        "finish_reason": generation.generation_info.get("finish_reason")
                        if generation.generation_info
                        else None,
                        "logprobs": generation.generation_info.get("logprobs")
                        if generation.generation_info
                        else None,
                    }
                )
            else:
                messages, params = self._get_chat_messages([prompt], stop)
                params = {**params, **kwargs}
                response = completion_with_retry(
                    self, messages=messages, run_manager=run_manager, **params
                )
                choices.extend(response["choices"])
                update_token_usage(_keys, response, token_usage)
        return create_llm_result(choices, prompts, token_usage, self.model_name)

    async def _agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        choices = []
        token_usage: Dict[str, int] = {}
        _keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
        for prompt in prompts:
            messages = self.prefix_messages + [{"role": "user", "content": prompt}]
            if self.streaming:
                generation: Optional[GenerationChunk] = None
                async for chunk in self._astream(prompt, stop, run_manager, **kwargs):
                    if generation is None:
                        generation = chunk
                    else:
                        generation += chunk
                assert generation is not None
                choices.append(
                    {
                        "message": {"content": generation.text},
                        "finish_reason": generation.generation_info.get("finish_reason")
                        if generation.generation_info
                        else None,
                        "logprobs": generation.generation_info.get("logprobs")
                        if generation.generation_info
                        else None,
                    }
                )
            else:
                messages, params = self._get_chat_messages([prompt], stop)
                params = {**params, **kwargs}
                response = await acompletion_with_retry(
                    self, messages=messages, run_manager=run_manager, **params
                )
                choices.extend(response["choices"])
                update_token_usage(_keys, response, token_usage)
        return create_llm_result(choices, prompts, token_usage, self.model_name)
```