partners: Fixed the procedure of initializing pad_token_id (#29500)
- **Description:** Add a check of `pad_token_id` and `eos_token_id` on the model config when initializing the tokenizer's pad token. This appears to be the same bug as the HuggingFace TGI bug, and the same bug as #29434.
- **Issue:** #29431
- **Dependencies:** none
- **Twitter handle:** tell14

Example code:

```python
from langchain_core.prompts import PromptTemplate
from langchain_huggingface.llms import HuggingFacePipeline

hf = HuggingFacePipeline.from_model_id(
    model_id="meta-llama/Llama-3.2-3B-Instruct",
    task="text-generation",
    pipeline_kwargs={"max_new_tokens": 10},
)

template = """Question: {question}

Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
chain = prompt | hf

question = "What is electroencephalography?"
print(chain.invoke({"question": question}))
```
This commit is contained in:
parent e8b91283ef
commit aeb42dc900
```diff
@@ -202,7 +202,16 @@ class HuggingFacePipeline(BaseLLM):
             model = model_cls.from_pretrained(model_id, **_model_kwargs)
 
         if tokenizer.pad_token is None:
-            tokenizer.pad_token_id = model.config.eos_token_id
+            if model.config.pad_token_id is not None:
+                tokenizer.pad_token_id = model.config.pad_token_id
+            elif model.config.eos_token_id is not None and isinstance(
+                model.config.eos_token_id, int
+            ):
+                tokenizer.pad_token_id = model.config.eos_token_id
+            elif tokenizer.eos_token_id is not None:
+                tokenizer.pad_token_id = tokenizer.eos_token_id
+            else:
+                tokenizer.add_special_tokens({"pad_token": "[PAD]"})
 
         if (
             (
```
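To trace the new fallback order outside the diff, here is a minimal standalone sketch of the same logic. The `resolve_pad_token_id` helper and the `SimpleNamespace` stand-ins for the tokenizer and model config are illustrative only, not part of `langchain_huggingface` or `transformers`; the list-valued `eos_token_id` mimics the Llama-3.2-style config that triggered the original issue.

```python
from types import SimpleNamespace


def resolve_pad_token_id(tokenizer, model_config):
    """Apply the fallback order from the hunk above: config pad_token_id,
    then an *integer* config eos_token_id, then the tokenizer's own
    eos_token_id, and finally a newly registered [PAD] special token."""
    if tokenizer.pad_token is not None:
        return tokenizer.pad_token_id
    if model_config.pad_token_id is not None:
        tokenizer.pad_token_id = model_config.pad_token_id
    elif model_config.eos_token_id is not None and isinstance(
        model_config.eos_token_id, int
    ):
        tokenizer.pad_token_id = model_config.eos_token_id
    elif tokenizer.eos_token_id is not None:
        tokenizer.pad_token_id = tokenizer.eos_token_id
    else:
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})
    return tokenizer.pad_token_id


# Stand-in objects: the config reports eos_token_id as a list, so the
# isinstance(..., int) guard skips it and the tokenizer's eos_token_id is
# used instead of assigning a list to pad_token_id.
tokenizer = SimpleNamespace(
    pad_token=None,
    pad_token_id=None,
    eos_token_id=128009,
    add_special_tokens=lambda tokens: None,
)
config = SimpleNamespace(pad_token_id=None, eos_token_id=[128001, 128008, 128009])

print(resolve_pad_token_id(tokenizer, config))  # -> 128009
```

The previous code assigned `model.config.eos_token_id` unconditionally, so a list-valued config left the pipeline with an invalid `pad_token_id`; the guarded chain above only ever assigns an integer or registers a `[PAD]` token.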
```diff
@@ -67,12 +67,14 @@ class TestHuggingFaceEndpoint(ChatModelIntegrationTests):
         super().test_bind_runnables_as_tools(model)
 
     @pytest.mark.xfail(reason=("Not implemented"))
-    def test_structured_output(self, model: BaseChatModel) -> None:
-        super().test_structured_output(model)
+    def test_structured_output(self, model: BaseChatModel, schema_type: str) -> None:
+        super().test_structured_output(model, schema_type)
 
     @pytest.mark.xfail(reason=("Not implemented"))
-    def test_structured_output_async(self, model: BaseChatModel) -> None:  # type: ignore[override]
-        super().test_structured_output(model)
+    async def test_structured_output_async(
+        self, model: BaseChatModel, schema_type: str
+    ) -> None:  # type: ignore[override]
+        super().test_structured_output(model, schema_type)
 
     @pytest.mark.xfail(reason=("Not implemented"))
     def test_structured_output_pydantic_2_v1(self, model: BaseChatModel) -> None:
```
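These test edits just bring the overrides in line with the shared suite's parametrized signature (the added `schema_type` argument) while keeping the xfail markers. A minimal sketch of that override pattern follows; the base class and the `schema_type` values are stand-ins, not the real `ChatModelIntegrationTests` API.

```python
import pytest


class BaseSuite:
    """Stand-in for a shared suite such as ChatModelIntegrationTests."""

    def test_structured_output(self, schema_type: str) -> None:
        raise NotImplementedError(f"structured output ({schema_type}) not wired up")


class TestMyEndpoint(BaseSuite):
    # Re-declare the parametrization so the override keeps the same
    # signature as the parent method it forwards to.
    @pytest.mark.parametrize("schema_type", ["pydantic", "typeddict", "json_schema"])
    @pytest.mark.xfail(reason="Not implemented")
    def test_structured_output(self, schema_type: str) -> None:
        super().test_structured_output(schema_type)
```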
```diff
@@ -248,10 +248,13 @@ def test_bind_tools_errors(
 
 def test_bind_tools(chat_hugging_face: Any) -> None:
     tools = [MagicMock(spec=BaseTool)]
-    with patch(
-        "langchain_huggingface.chat_models.huggingface.convert_to_openai_tool",
-        side_effect=lambda x: x,
-    ), patch("langchain_core.runnables.base.Runnable.bind") as mock_super_bind:
+    with (
+        patch(
+            "langchain_huggingface.chat_models.huggingface.convert_to_openai_tool",
+            side_effect=lambda x: x,
+        ),
+        patch("langchain_core.runnables.base.Runnable.bind") as mock_super_bind,
+    ):
         chat_hugging_face.bind_tools(tools, tool_choice="auto")
         mock_super_bind.assert_called_once()
         _, kwargs = mock_super_bind.call_args
```
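This last hunk is a pure syntax change: the comma-joined `with patch(...), patch(...)` is rewritten with parenthesized context managers, which require Python 3.10 or newer. A small self-contained sketch of that form, using arbitrary stdlib targets chosen purely for illustration:

```python
import json
from unittest.mock import patch


def demo() -> None:
    # Parenthesized context managers (Python 3.10+) let each patch sit on
    # its own line, so long dotted targets stay within line-length limits.
    with (
        patch("json.dumps", side_effect=lambda obj: str(obj)) as mock_dumps,
        patch("json.loads", return_value={}) as mock_loads,
    ):
        json.dumps({"answer": 42})
        mock_dumps.assert_called_once()
        mock_loads.assert_not_called()


demo()
```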