improves huggingface_hub example (#988)
The provided example uses the default `max_length` of `20` tokens, which causes the generated answer to get cut off mid-sentence. 20 tokens is far too short to show chain-of-thought (CoT) reasoning, so I boosted it to `64`. Without knowing the Hugging Face Hub API well, it can be hard to figure out where those `model_kwargs` end up, and `max_length` is a critical one.
parent c2d1d903fa
commit e9799d6821
@@ -20,7 +20,7 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "The Seattle Seahawks won the Super Bowl in 2010. Justin Beiber was born in 2010. The\n"
+     "The Seattle Seahawks won the Super Bowl in 2010. Justin Beiber was born in 2010. The final answer: Seattle Seahawks.\n"
     ]
    }
   ],
@@ -31,7 +31,7 @@
    "\n",
    "Answer: Let's think step by step.\"\"\"\n",
    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
-   "llm_chain = LLMChain(prompt=prompt, llm=HuggingFaceHub(repo_id=\"google/flan-t5-xl\", model_kwargs={\"temperature\":1e-10}))\n",
+   "llm_chain = LLMChain(prompt=prompt, llm=HuggingFaceHub(repo_id=\"google/flan-t5-xl\", model_kwargs={\"temperature\":0, \"max_length\":64}))\n",
    "\n",
    "question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
    "\n",
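For context, here is the fixed notebook cell as a standalone script — a minimal sketch assuming the langchain API of this era (`LLMChain` and `PromptTemplate` importable from the top-level package, `HuggingFaceHub` from `langchain.llms`) and a Hugging Face token available in the `HUGGINGFACEHUB_API_TOKEN` environment variable:

```python
# Minimal sketch of the fixed notebook cell. Assumes the early-2023 langchain
# API and a valid token in the HUGGINGFACEHUB_API_TOKEN environment variable.
from langchain import LLMChain, PromptTemplate
from langchain.llms import HuggingFaceHub

template = """Question: {question}

Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])

# model_kwargs are forwarded to the hosted inference endpoint's generation
# parameters; max_length=64 leaves room for the full chain-of-thought answer,
# whereas the default of 20 tokens truncates it mid-sentence.
llm_chain = LLMChain(
    prompt=prompt,
    llm=HuggingFaceHub(
        repo_id="google/flan-t5-xl",
        model_kwargs={"temperature": 0, "max_length": 64},
    ),
)

question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
print(llm_chain.run(question))
```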