mirror of
https://github.com/hwchase17/langchain.git
synced 2025-09-13 13:36:15 +00:00
Add summarization task type for HuggingFace APIs (#4721)
# Add summarization task type for HuggingFace APIs. This task type is described in the [HuggingFace Inference API documentation](https://huggingface.co/docs/api-inference/detailed_parameters#summarization-task). My project uses LangChain to connect multiple LLMs, including various HuggingFace models that support the summarization task; integrating this task type is highly convenient and beneficial. Fixes #4720
This commit is contained in:
@@ -33,6 +33,16 @@ def test_huggingface_endpoint_text2text_generation() -> None:
|
||||
assert output == "Albany"
|
||||
|
||||
|
||||
@unittest.skip(
    "This test requires an inference endpoint. Tested with Hugging Face endpoints"
)
def test_huggingface_endpoint_summarization() -> None:
    """Test valid call to HuggingFace summarization model."""
    # endpoint_url is intentionally blank here; supply a live inference
    # endpoint URL to actually exercise this (hence the skip above).
    endpoint_llm = HuggingFaceEndpoint(endpoint_url="", task="summarization")
    result = endpoint_llm("Say foo:")
    assert isinstance(result, str)
|
||||
|
||||
|
||||
def test_huggingface_endpoint_call_error() -> None:
|
||||
"""Test valid call to HuggingFace that errors."""
|
||||
llm = HuggingFaceEndpoint(model_kwargs={"max_new_tokens": -1})
|
||||
|
@@ -23,6 +23,13 @@ def test_huggingface_text2text_generation() -> None:
|
||||
assert output == "Albany"
|
||||
|
||||
|
||||
def test_huggingface_summarization() -> None:
    """Test valid call to HuggingFace summarization model."""
    # facebook/bart-large-cnn is a Hub model served under the
    # summarization task, so no explicit task argument is needed.
    hub_llm = HuggingFaceHub(repo_id="facebook/bart-large-cnn")
    generated = hub_llm("Say foo:")
    assert isinstance(generated, str)
|
||||
|
||||
|
||||
def test_huggingface_call_error() -> None:
|
||||
"""Test valid call to HuggingFace that errors."""
|
||||
llm = HuggingFaceHub(model_kwargs={"max_new_tokens": -1})
|
||||
|
@@ -27,6 +27,15 @@ def test_huggingface_pipeline_text2text_generation() -> None:
|
||||
assert isinstance(output, str)
|
||||
|
||||
|
||||
def test_huggingface_pipeline_summarization() -> None:
    """Test valid call to HuggingFace summarization model.

    Bug fix: the function was originally named
    ``text_huggingface_pipeline_summarization`` — pytest only collects
    functions whose names start with ``test_``, so the typo silently
    prevented this test from ever running. Renamed so it is collected.
    """
    # Load a local pipeline for the summarization task.
    llm = HuggingFacePipeline.from_model_id(
        model_id="facebook/bart-large-cnn", task="summarization"
    )
    output = llm("Say foo:")
    assert isinstance(output, str)
|
||||
|
||||
|
||||
def test_saving_loading_llm(tmp_path: Path) -> None:
|
||||
"""Test saving/loading an HuggingFaceHub LLM."""
|
||||
llm = HuggingFacePipeline.from_model_id(
|
||||
|
@@ -43,6 +43,19 @@ def test_self_hosted_huggingface_pipeline_text2text_generation() -> None:
|
||||
assert isinstance(output, str)
|
||||
|
||||
|
||||
def test_self_hosted_huggingface_pipeline_summarization() -> None:
    """Test valid call to self-hosted HuggingFace summarization model."""
    # Provision remote hardware, then run the summarization model on it.
    remote_gpu = get_remote_instance()
    hosted_llm = SelfHostedHuggingFaceLLM(
        model_id="facebook/bart-large-cnn",
        task="summarization",
        hardware=remote_gpu,
        model_reqs=model_reqs,
    )
    summary = hosted_llm("Say foo:")
    assert isinstance(summary, str)
|
||||
|
||||
|
||||
def load_pipeline() -> Any:
|
||||
"""Load pipeline for testing."""
|
||||
model_id = "gpt2"
|
||||
|
Reference in New Issue
Block a user