community[patch]: Remove model limitation on Anyscale LLM (#17662)

**Description:** Llama Guard is deprecated from Anyscale public
endpoint.
**Issue:** Change the default model and remove the limitation that only
Llama Guard can be used with Anyscale LLMs.
The Anyscale LLM also works with all other chat models hosted on
Anyscale.
Also added `async_client` for Anyscale LLM
This commit is contained in:
kYLe 2024-02-25 20:21:19 -06:00 committed by GitHub
parent cc69976860
commit 17ecf6e119
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -24,10 +24,7 @@ from langchain_community.llms.openai import (
from langchain_community.utils.openai import is_openai_v1
DEFAULT_BASE_URL = "https://api.endpoints.anyscale.com/v1"
DEFAULT_MODEL = "Meta-Llama/Llama-Guard-7b"
# Completion models support by Anyscale Endpoints
COMPLETION_MODELS = ["Meta-Llama/Llama-Guard-7b"]
DEFAULT_MODEL = "mistralai/Mixtral-8x7B-Instruct-v0.1"
def update_token_usage(
@ -113,12 +110,6 @@ class Anyscale(BaseOpenAI):
"MODEL_NAME",
default=DEFAULT_MODEL,
)
if values["model_name"] not in COMPLETION_MODELS:
raise ValueError(
"langchain_community.llm.Anyscale ONLY works \
with completions models.For Chat models, please use \
langchain_community.chat_model.ChatAnyscale"
)
try:
import openai
@ -135,7 +126,12 @@ class Anyscale(BaseOpenAI):
# "default_query": values["default_query"],
# "http_client": values["http_client"],
}
values["client"] = openai.OpenAI(**client_params).completions
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).completions
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(
**client_params
).completions
else:
values["openai_api_base"] = values["anyscale_api_base"]
values["openai_api_key"] = values["anyscale_api_key"].get_secret_value()