[Community] add option to delete the prompt from HF output (#22225)

This adds a `skip_prompt` option that removes the prompt from the generated text, which helps resolve the pattern-mismatch issue when parsing the pipeline output in an Agent (text-generation pipelines echo the prompt at the start of their output).

https://github.com/langchain-ai/langchain/issues/21912
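
A minimal usage sketch of the new option (assumptions: the `langchain_community` import path, a small local model such as `gpt2` chosen only for illustration, and that extra keyword arguments passed to `invoke` are forwarded to `_generate` via `**kwargs`):

from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline

# Build a local text-generation pipeline; gpt2 is only an illustrative choice.
llm = HuggingFacePipeline.from_model_id(
    model_id="gpt2",
    task="text-generation",
    pipeline_kwargs={"max_new_tokens": 32},
)

prompt = "Question: What is 2 + 2?\nAnswer:"

# Without skip_prompt, the returned text still starts with the prompt itself.
with_prompt = llm.invoke(prompt)

# With skip_prompt=True, only the completion is returned, so Agent output
# parsers no longer trip over the echoed prompt.
completion_only = llm.invoke(prompt, skip_prompt=True)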
Ethan Yang
2024-06-06 06:38:54 +08:00
committed by GitHub
parent c040dc7017
commit 29064848f9
4 changed files with 53 additions and 4 deletions


@@ -261,6 +261,7 @@ class HuggingFacePipeline(BaseLLM):
         # List to hold all results
         text_generations: List[str] = []
         pipeline_kwargs = kwargs.get("pipeline_kwargs", {})
+        skip_prompt = kwargs.get("skip_prompt", False)
         for i in range(0, len(prompts), self.batch_size):
             batch_prompts = prompts[i : i + self.batch_size]
@@ -290,7 +291,8 @@ class HuggingFacePipeline(BaseLLM):
                         f"Got invalid task {self.pipeline.task}, "
                         f"currently only {VALID_TASKS} are supported"
                     )
+                if skip_prompt:
+                    text = text[len(batch_prompts[j]) :]
                 # Append the processed text to results
                 text_generations.append(text)
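
For context on why the slice added above works, here is a standalone sketch (assumptions: the `transformers` package and the illustrative `gpt2` model) showing that Hugging Face text-generation pipelines echo the prompt at the start of `generated_text`, so dropping the first `len(prompt)` characters leaves only the completion:

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
prompt = "The capital of France is"

# The pipeline returns the prompt followed by the newly generated tokens.
full_text = generator(prompt, max_new_tokens=8)[0]["generated_text"]

# Same idea as the diff above: slice off the prompt to keep only the completion.
completion = full_text[len(prompt):]
print(repr(completion))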