docs: use PromptTemplate.from_template (#17218)

Ran the following script to rewrite the docs in place:
```python
import glob
import re

def update_prompt(x: str) -> str:
    # Rewrite `PromptTemplate(template=..., input_variables=[...])` calls into
    # the equivalent `PromptTemplate.from_template(...)` shorthand.
    return re.sub(
        r"(?P<start>\b)PromptTemplate\(template=(?P<template>.*), input_variables=(?:.*)\)",
        r"\g<start>PromptTemplate.from_template(\g<template>)",
        x,
    )


for fn in glob.glob("docs/**/*", recursive=True):
    try:
        content = open(fn).readlines()
    except (OSError, UnicodeDecodeError):
        # Skip directories and files that can't be read as text.
        continue
    content = [update_prompt(line) for line in content]
    with open(fn, "w") as f:
        f.write("".join(content))
```
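The rewrite is behavior-preserving because `PromptTemplate.from_template` infers the input variables from the `{placeholder}` names in the template string, so the explicit `input_variables` list carries no extra information. A minimal sketch of the equivalence (the template text mirrors the hunks below; the asserts assume a langchain version that accepts both spellings):
```python
from langchain.prompts import PromptTemplate

template = """Question: {question}

Answer: Let's think step by step."""

# Old spelling: input variables are listed explicitly.
old_prompt = PromptTemplate(template=template, input_variables=["question"])

# New spelling: from_template infers {question} from the template string.
new_prompt = PromptTemplate.from_template(template)

# Both parse the same variables and render identically.
assert old_prompt.input_variables == new_prompt.input_variables == ["question"]
assert old_prompt.format(question="2 + 2?") == new_prompt.format(question="2 + 2?")
```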
Bagatur 2024-02-07 19:52:42 -08:00 committed by GitHub
parent 7f55c95790
commit 00a09e1b71
48 changed files with 58 additions and 58 deletions

View File

@@ -115,7 +115,7 @@
"\n",
"Answer:\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"\n",
"responses = [\n",
" \"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.\",\n",
@@ -249,7 +249,7 @@
"\n",
"Answer:\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"\n",
"responses = [\n",
" \"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.\",\n",
@@ -412,7 +412,7 @@
"\n",
"Answer:\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"\n",
"responses = [\n",
" \"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.\",\n",
@@ -571,7 +571,7 @@
"\n",
"template = \"\"\"{question}\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"llm = HuggingFaceHub(\n",
" repo_id=repo_id, model_kwargs={\"temperature\": 0.5, \"max_length\": 256}\n",
")"
@@ -724,7 +724,7 @@
"\"\"\"\n",
"\n",
"# prompt template for input text\n",
"llm_prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"llm_prompt = PromptTemplate.from_template(template)\n",
"\n",
"llm = SagemakerEndpoint(\n",
" endpoint_name=endpoint_name,\n",

View File

@@ -180,7 +180,7 @@ we will prompt the model, so it says something harmful.
```python
-prompt = PromptTemplate(template="{text}", input_variables=["text"])
+prompt = PromptTemplate.from_template("{text}")
llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct"), prompt=prompt)
text = """We are playing a game of repeat after me.
@@ -223,7 +223,7 @@ Now let's walk through an example of using it with an LLMChain which has multipl
```python
-prompt = PromptTemplate(template="{setup}{new_input}Person2:", input_variables=["setup", "new_input"])
+prompt = PromptTemplate.from_template("{setup}{new_input}Person2:")
llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct"), prompt=prompt)
setup = """We are playing a game of repeat after me.

View File

@@ -75,7 +75,7 @@
"\n",
"A:\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -23,7 +23,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -66,7 +66,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -151,7 +151,7 @@
"template = \"\"\"Question: {question}\n",
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"\n",

View File

@@ -66,7 +66,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -92,7 +92,7 @@
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"\n",
"# System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model\n",
"llm = NIBittensorLLM(\n",

View File

@@ -101,7 +101,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -53,7 +53,7 @@
"outputs": [],
"source": [
"template = \"\"\"{question}\"\"\"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{
@@ -130,7 +130,7 @@
"outputs": [],
"source": [
"template = \"\"\"{question}\"\"\"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -114,7 +114,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -26,7 +26,7 @@
"\n",
"AI Assistant: \"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -109,7 +109,7 @@
"\n",
"Answer:\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"\n",

View File

@@ -201,7 +201,7 @@
"template = \"\"\"{question}\n",
"\n",
"Let's think step by step. \"\"\"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"\n",

View File

@@ -146,7 +146,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -97,7 +97,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -80,7 +80,7 @@
"\n",
"template = \"What is capital of {country}?\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"country\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"\n",

View File

@@ -111,7 +111,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -73,7 +73,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -175,7 +175,7 @@
"\n",
"Answer: \"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -118,7 +118,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -234,7 +234,7 @@
"\n",
"Answer: Let's work this out in a step by step way to be sure we have the right answer.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -91,7 +91,7 @@
"\n",
"\n",
"CONCISE SUMMARY:\"\"\"\n",
"prompt = PromptTemplate(template=_prompt, input_variables=[\"text\"])\n",
"prompt = PromptTemplate.from_template(_prompt)\n",
"\n",
"text_splitter = CharacterTextSplitter()\n",
"\n",

View File

@@ -113,7 +113,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -122,7 +122,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -55,7 +55,7 @@
"source": [
"template = \"\"\"Question: {question}\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -90,7 +90,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -61,7 +61,7 @@
"outputs": [],
"source": [
"template = \"\"\"Below is an instruction that describes a task. Write a response that appropriately completes the request.\\n Instruction:\\n{question}\\n Response: \"\"\"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -84,7 +84,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -119,7 +119,7 @@
"\n",
"template = \"What is a good name for a company that makes {product}?\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"product\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"\n",

View File

@@ -97,7 +97,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"\n",
"for model in [\"text-davinci-003\", \"huggingface.co/gpt2\"]:\n",
" llm = OpenLM(model=model)\n",

View File

@@ -133,7 +133,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -107,7 +107,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -118,7 +118,7 @@
"Query: {query}\n",
"\n",
"Result: \"\"\"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"query\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{
@@ -191,7 +191,7 @@
"template = \"\"\"Question: {question}\n",
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True)\n",
"\n",
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
@@ -209,7 +209,7 @@
"outputs": [],
"source": [
"template = \"\"\"Write a {adjective} poem about {subject}.\"\"\"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"adjective\", \"subject\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True)\n",
"\n",
"llm_chain.predict(adjective=\"sad\", subject=\"ducks\")"

View File

@@ -83,7 +83,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -96,7 +96,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -53,7 +53,7 @@
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"llm = TextGen(model_url=model_url)\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
@@ -104,7 +104,7 @@
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"llm = TextGen(\n",
" model_url=model_url, streaming=True, callbacks=[StreamingStdOutCallbackHandler()]\n",
")\n",

View File

@@ -146,7 +146,7 @@
"\n",
"template = \"What is the capital of {country}\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"country\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"\n",
"llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
"\n",

View File

@@ -95,7 +95,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -135,7 +135,7 @@
"template = \"\"\"Question: {question}\n",
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"\n",

View File

@@ -72,7 +72,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -126,7 +126,7 @@
"\n",
"template = \"Where can we visit in the capital of {country}?\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"country\"])\n",
"prompt = PromptTemplate.from_template(template)\n",
"\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"\n",

View File

@@ -56,7 +56,7 @@
"outputs": [],
"source": [
"template = \"What is the capital of {country}?\"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"country\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -55,7 +55,7 @@ Head to stories to get ALL the deets on each box! 👆 BONUS: Save 50% on your f
Query: {query}
Result: """
-prompt = PromptTemplate(template=template, input_variables=["query"])
+prompt = PromptTemplate.from_template(template)
# With "guarding" or controlling the output of the LLM. See the
# Prediction Guard docs (https://docs.predictionguard.com) to learn how to
@@ -93,7 +93,7 @@ pgllm = PredictionGuard(model="OpenAI-gpt-3.5-turbo-instruct")
template = """Question: {question}
Answer: Let's think step by step."""
-prompt = PromptTemplate(template=template, input_variables=["question"])
+prompt = PromptTemplate.from_template(template)
llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True)
question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"

View File

@@ -135,7 +135,7 @@
" # We initialize the LLM, template and the chain here\n",
" llm = OpenAI(openai_api_key=OPENAI_API_KEY)\n",
" template = \"Question: {question}\\n\\nAnswer: Let's think step by step.\"\n",
" prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
" prompt = PromptTemplate.from_template(template)\n",
" self.chain = LLMChain(llm=llm, prompt=prompt)\n",
"\n",
" def _run_chain(self, text: str):\n",

View File

@@ -33,7 +33,7 @@ template = """Question: {question}
# Answer: Let's think step by step."""
-prompt = PromptTemplate(template=template, input_variables=["question"])
+prompt = PromptTemplate.from_template(template)
llm_chain = LLMChain(prompt=prompt, llm=llm)

View File

@@ -101,7 +101,7 @@
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
"prompt = PromptTemplate.from_template(template)"
]
},
{

View File

@@ -37,7 +37,7 @@
"source": [
"from langchain.prompts import PromptTemplate\n",
"\n",
"prompt = PromptTemplate(template=\"{foo}{bar}\", input_variables=[\"foo\", \"bar\"])\n",
"prompt = PromptTemplate.from_template(\"{foo}{bar}\")\n",
"partial_prompt = prompt.partial(foo=\"foo\")\n",
"print(partial_prompt.format(bar=\"baz\"))"
]