mirror of
https://github.com/hwchase17/langchain.git
synced 2025-08-13 14:50:00 +00:00
feat(llms): support more tasks in HuggingFaceHub LLM and remove deprecated dep (#14406)
- **Description:** this PR upgrades the `HuggingFaceHub` LLM: * supports more tasks (`translation` and `conversational`) * replaces the deprecated `InferenceApi` with `InferenceClient` * adjusts the overall logic to use the "recommended" model for each task when no model is provided, and vice versa. - **Tag maintainer(s)**: @baskaryan @hwchase17
This commit is contained in:
parent
afb25eeec4
commit
e529939c54
@ -1,3 +1,4 @@
|
|||||||
|
import json
|
||||||
from typing import Any, Dict, List, Mapping, Optional
|
from typing import Any, Dict, List, Mapping, Optional
|
||||||
|
|
||||||
from langchain_core.callbacks import CallbackManagerForLLMRun
|
from langchain_core.callbacks import CallbackManagerForLLMRun
|
||||||
@ -7,8 +8,15 @@ from langchain_core.utils import get_from_dict_or_env
|
|||||||
|
|
||||||
from langchain_community.llms.utils import enforce_stop_tokens
|
from langchain_community.llms.utils import enforce_stop_tokens
|
||||||
|
|
||||||
DEFAULT_REPO_ID = "gpt2"
|
# key: task
|
||||||
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
|
# value: key in the output dictionary
|
||||||
|
VALID_TASKS_DICT = {
|
||||||
|
"translation": "translation_text",
|
||||||
|
"summarization": "summary_text",
|
||||||
|
"conversational": "generated_text",
|
||||||
|
"text-generation": "generated_text",
|
||||||
|
"text2text-generation": "generated_text",
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
class HuggingFaceHub(LLM):
|
class HuggingFaceHub(LLM):
|
||||||
@ -18,7 +26,8 @@ class HuggingFaceHub(LLM):
|
|||||||
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
|
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
|
||||||
it as a named parameter to the constructor.
|
it as a named parameter to the constructor.
|
||||||
|
|
||||||
Only supports `text-generation`, `text2text-generation` and `summarization` for now.
|
Supports `text-generation`, `text2text-generation`, `conversational`, `translation`,
|
||||||
|
and `summarization`.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
@ -28,11 +37,13 @@ class HuggingFaceHub(LLM):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
client: Any #: :meta private:
|
client: Any #: :meta private:
|
||||||
repo_id: str = DEFAULT_REPO_ID
|
repo_id: Optional[str] = None
|
||||||
"""Model name to use."""
|
"""Model name to use.
|
||||||
|
If not provided, the default model for the chosen task will be used."""
|
||||||
task: Optional[str] = None
|
task: Optional[str] = None
|
||||||
"""Task to call the model with.
|
"""Task to call the model with.
|
||||||
Should be a task that returns `generated_text` or `summary_text`."""
|
Should be a task that returns `generated_text`, `summary_text`,
|
||||||
|
or `translation_text`."""
|
||||||
model_kwargs: Optional[dict] = None
|
model_kwargs: Optional[dict] = None
|
||||||
"""Keyword arguments to pass to the model."""
|
"""Keyword arguments to pass to the model."""
|
||||||
|
|
||||||
@ -50,18 +61,27 @@ class HuggingFaceHub(LLM):
|
|||||||
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
|
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
|
||||||
)
|
)
|
||||||
try:
|
try:
|
||||||
from huggingface_hub.inference_api import InferenceApi
|
from huggingface_hub import HfApi, InferenceClient
|
||||||
|
|
||||||
repo_id = values["repo_id"]
|
repo_id = values["repo_id"]
|
||||||
client = InferenceApi(
|
client = InferenceClient(
|
||||||
repo_id=repo_id,
|
model=repo_id,
|
||||||
token=huggingfacehub_api_token,
|
token=huggingfacehub_api_token,
|
||||||
task=values.get("task"),
|
|
||||||
)
|
)
|
||||||
if client.task not in VALID_TASKS:
|
if not values["task"]:
|
||||||
|
if not repo_id:
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
f"Got invalid task {client.task}, "
|
"Must specify either `repo_id` or `task`, or both."
|
||||||
f"currently only {VALID_TASKS} are supported"
|
)
|
||||||
|
# Use the recommended task for the chosen model
|
||||||
|
model_info = HfApi(token=huggingfacehub_api_token).model_info(
|
||||||
|
repo_id=repo_id
|
||||||
|
)
|
||||||
|
values["task"] = model_info.pipeline_tag
|
||||||
|
if values["task"] not in VALID_TASKS_DICT:
|
||||||
|
raise ValueError(
|
||||||
|
f"Got invalid task {values['task']}, "
|
||||||
|
f"currently only {VALID_TASKS_DICT.keys()} are supported"
|
||||||
)
|
)
|
||||||
values["client"] = client
|
values["client"] = client
|
||||||
except ImportError:
|
except ImportError:
|
||||||
@ -108,23 +128,20 @@ class HuggingFaceHub(LLM):
|
|||||||
"""
|
"""
|
||||||
_model_kwargs = self.model_kwargs or {}
|
_model_kwargs = self.model_kwargs or {}
|
||||||
params = {**_model_kwargs, **kwargs}
|
params = {**_model_kwargs, **kwargs}
|
||||||
response = self.client(inputs=prompt, params=params)
|
|
||||||
|
response = self.client.post(
|
||||||
|
json={"inputs": prompt, "params": params}, task=self.task
|
||||||
|
)
|
||||||
|
response = json.loads(response.decode())
|
||||||
if "error" in response:
|
if "error" in response:
|
||||||
raise ValueError(f"Error raised by inference API: {response['error']}")
|
raise ValueError(f"Error raised by inference API: {response['error']}")
|
||||||
if self.client.task == "text-generation":
|
|
||||||
# Text generation sometimes return includes the starter text.
|
response_key = VALID_TASKS_DICT[self.task] # type: ignore
|
||||||
text = response[0]["generated_text"]
|
if isinstance(response, list):
|
||||||
if text.startswith(prompt):
|
text = response[0][response_key]
|
||||||
text = response[0]["generated_text"][len(prompt) :]
|
|
||||||
elif self.client.task == "text2text-generation":
|
|
||||||
text = response[0]["generated_text"]
|
|
||||||
elif self.client.task == "summarization":
|
|
||||||
text = response[0]["summary_text"]
|
|
||||||
else:
|
else:
|
||||||
raise ValueError(
|
text = response[response_key]
|
||||||
f"Got invalid task {self.client.task}, "
|
|
||||||
f"currently only {VALID_TASKS} are supported"
|
|
||||||
)
|
|
||||||
if stop is not None:
|
if stop is not None:
|
||||||
# This is a bit hacky, but I can't figure out a better way to enforce
|
# This is a bit hacky, but I can't figure out a better way to enforce
|
||||||
# stop tokens when making calls to huggingface_hub.
|
# stop tokens when making calls to huggingface_hub.
|
||||||
|
Loading…
Reference in New Issue
Block a user