mirror of
https://github.com/hwchase17/langchain.git
synced 2025-09-09 15:03:21 +00:00
[Community] Add option to delete the prompt from HF output (#22225)
This will help solve the pattern-mismatch issue when parsing the output in the Agent. https://github.com/langchain-ai/langchain/issues/21912
This commit is contained in:
@@ -261,6 +261,7 @@ class HuggingFacePipeline(BaseLLM):
|
||||
# List to hold all results
|
||||
text_generations: List[str] = []
|
||||
pipeline_kwargs = kwargs.get("pipeline_kwargs", {})
|
||||
skip_prompt = kwargs.get("skip_prompt", False)
|
||||
|
||||
for i in range(0, len(prompts), self.batch_size):
|
||||
batch_prompts = prompts[i : i + self.batch_size]
|
||||
@@ -290,7 +291,8 @@ class HuggingFacePipeline(BaseLLM):
|
||||
f"Got invalid task {self.pipeline.task}, "
|
||||
f"currently only {VALID_TASKS} are supported"
|
||||
)
|
||||
|
||||
if skip_prompt:
|
||||
text = text[len(batch_prompts[j]) :]
|
||||
# Append the processed text to results
|
||||
text_generations.append(text)
|
||||
|
||||
|
Reference in New Issue
Block a user