diff --git a/docs/docs/guides/safety/amazon_comprehend_chain.ipynb b/docs/docs/guides/safety/amazon_comprehend_chain.ipynb
index 0a509cf337d..b9548546ef9 100644
--- a/docs/docs/guides/safety/amazon_comprehend_chain.ipynb
+++ b/docs/docs/guides/safety/amazon_comprehend_chain.ipynb
@@ -115,7 +115,7 @@
     "\n",
     "Answer:\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "\n",
     "responses = [\n",
     "    \"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.\",\n",
@@ -249,7 +249,7 @@
     "\n",
     "Answer:\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "\n",
     "responses = [\n",
     "    \"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.\",\n",
@@ -412,7 +412,7 @@
     "\n",
     "Answer:\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "\n",
     "responses = [\n",
     "    \"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.\",\n",
@@ -571,7 +571,7 @@
     "\n",
     "template = \"\"\"{question}\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "llm = HuggingFaceHub(\n",
     "    repo_id=repo_id, model_kwargs={\"temperature\": 0.5, \"max_length\": 256}\n",
     ")"
@@ -724,7 +724,7 @@
     "\"\"\"\n",
     "\n",
     "# prompt template for input text\n",
-    "llm_prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
+    "llm_prompt = PromptTemplate.from_template(template)\n",
     "\n",
     "llm = SagemakerEndpoint(\n",
     "    endpoint_name=endpoint_name,\n",
diff --git a/docs/docs/guides/safety/moderation.mdx b/docs/docs/guides/safety/moderation.mdx
index 94b6a7dc642..a43579dfef5 100644
--- a/docs/docs/guides/safety/moderation.mdx
+++ b/docs/docs/guides/safety/moderation.mdx
@@ -180,7 +180,7 @@ we will prompt the model, so it says something harmful.
 
 
 ```python
-prompt = PromptTemplate(template="{text}", input_variables=["text"])
+prompt = PromptTemplate.from_template("{text}")
 llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct"), prompt=prompt)
 
 text = """We are playing a game of repeat after me.
@@ -223,7 +223,7 @@ Now let's walk through an example of using it with an LLMChain which has multipl
 
 
 ```python
-prompt = PromptTemplate(template="{setup}{new_input}Person2:", input_variables=["setup", "new_input"])
+prompt = PromptTemplate.from_template("{setup}{new_input}Person2:")
 llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo-instruct"), prompt=prompt)
 
 setup = """We are playing a game of repeat after me.
diff --git a/docs/docs/integrations/llms/aleph_alpha.ipynb b/docs/docs/integrations/llms/aleph_alpha.ipynb
index 7657324d439..3d7fb662338 100644
--- a/docs/docs/integrations/llms/aleph_alpha.ipynb
+++ b/docs/docs/integrations/llms/aleph_alpha.ipynb
@@ -75,7 +75,7 @@
     "\n",
     "A:\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/alibabacloud_pai_eas_endpoint.ipynb b/docs/docs/integrations/llms/alibabacloud_pai_eas_endpoint.ipynb
index db4d8d588ac..0c51a6a4f9f 100644
--- a/docs/docs/integrations/llms/alibabacloud_pai_eas_endpoint.ipynb
+++ b/docs/docs/integrations/llms/alibabacloud_pai_eas_endpoint.ipynb
@@ -23,7 +23,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/anyscale.ipynb b/docs/docs/integrations/llms/anyscale.ipynb
index e297cfd9492..37f26c66ad2 100644
--- a/docs/docs/integrations/llms/anyscale.ipynb
+++ b/docs/docs/integrations/llms/anyscale.ipynb
@@ -66,7 +66,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/aphrodite.ipynb b/docs/docs/integrations/llms/aphrodite.ipynb
index 85f2a1c4572..5cbbfb1ce84 100644
--- a/docs/docs/integrations/llms/aphrodite.ipynb
+++ b/docs/docs/integrations/llms/aphrodite.ipynb
@@ -151,7 +151,7 @@
     "template = \"\"\"Question: {question}\n",
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "\n",
     "llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
     "\n",
diff --git a/docs/docs/integrations/llms/banana.ipynb b/docs/docs/integrations/llms/banana.ipynb
index f66f15c0a8e..7fbdc2921d5 100644
--- a/docs/docs/integrations/llms/banana.ipynb
+++ b/docs/docs/integrations/llms/banana.ipynb
@@ -66,7 +66,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/bittensor.ipynb b/docs/docs/integrations/llms/bittensor.ipynb
index 191a84df7ca..92ebb9b7ac6 100644
--- a/docs/docs/integrations/llms/bittensor.ipynb
+++ b/docs/docs/integrations/llms/bittensor.ipynb
@@ -92,7 +92,7 @@
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "\n",
     "# System parameter in NIBittensorLLM is optional but you can set whatever you want to perform with model\n",
     "llm = NIBittensorLLM(\n",
diff --git a/docs/docs/integrations/llms/cerebriumai.ipynb b/docs/docs/integrations/llms/cerebriumai.ipynb
index a557a7c2a7c..e062e4ad496 100644
--- a/docs/docs/integrations/llms/cerebriumai.ipynb
+++ b/docs/docs/integrations/llms/cerebriumai.ipynb
@@ -101,7 +101,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/chatglm.ipynb b/docs/docs/integrations/llms/chatglm.ipynb
index 12de26dacfe..c004219061f 100644
--- a/docs/docs/integrations/llms/chatglm.ipynb
+++ b/docs/docs/integrations/llms/chatglm.ipynb
@@ -53,7 +53,7 @@
    "outputs": [],
    "source": [
     "template = \"\"\"{question}\"\"\"\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
@@ -130,7 +130,7 @@
    "outputs": [],
    "source": [
     "template = \"\"\"{question}\"\"\"\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/clarifai.ipynb b/docs/docs/integrations/llms/clarifai.ipynb
index 023bd3ac501..952263de025 100644
--- a/docs/docs/integrations/llms/clarifai.ipynb
+++ b/docs/docs/integrations/llms/clarifai.ipynb
@@ -114,7 +114,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/cloudflare_workersai.ipynb b/docs/docs/integrations/llms/cloudflare_workersai.ipynb
index a4cf57440a0..030b192d093 100644
--- a/docs/docs/integrations/llms/cloudflare_workersai.ipynb
+++ b/docs/docs/integrations/llms/cloudflare_workersai.ipynb
@@ -26,7 +26,7 @@
     "\n",
     "AI Assistant: \"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/ctransformers.ipynb b/docs/docs/integrations/llms/ctransformers.ipynb
index fa9d604cd16..6231c0a2e1b 100644
--- a/docs/docs/integrations/llms/ctransformers.ipynb
+++ b/docs/docs/integrations/llms/ctransformers.ipynb
@@ -109,7 +109,7 @@
     "\n",
     "Answer:\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "\n",
     "llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
     "\n",
diff --git a/docs/docs/integrations/llms/ctranslate2.ipynb b/docs/docs/integrations/llms/ctranslate2.ipynb
index 33d41d4233b..f80d320bf22 100644
--- a/docs/docs/integrations/llms/ctranslate2.ipynb
+++ b/docs/docs/integrations/llms/ctranslate2.ipynb
@@ -201,7 +201,7 @@
     "template = \"\"\"{question}\n",
     "\n",
     "Let's think step by step. \"\"\"\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "\n",
     "llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
     "\n",
diff --git a/docs/docs/integrations/llms/deepinfra.ipynb b/docs/docs/integrations/llms/deepinfra.ipynb
index 14d2730a093..871a0f4d9de 100644
--- a/docs/docs/integrations/llms/deepinfra.ipynb
+++ b/docs/docs/integrations/llms/deepinfra.ipynb
@@ -146,7 +146,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/forefrontai.ipynb b/docs/docs/integrations/llms/forefrontai.ipynb
index 075436f7d8b..eef4fcb8e98 100644
--- a/docs/docs/integrations/llms/forefrontai.ipynb
+++ b/docs/docs/integrations/llms/forefrontai.ipynb
@@ -97,7 +97,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/gigachat.ipynb b/docs/docs/integrations/llms/gigachat.ipynb
index 20a70872688..8e1e4a43d07 100644
--- a/docs/docs/integrations/llms/gigachat.ipynb
+++ b/docs/docs/integrations/llms/gigachat.ipynb
@@ -80,7 +80,7 @@
     "\n",
     "template = \"What is capital of {country}?\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"country\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "\n",
     "llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
     "\n",
diff --git a/docs/docs/integrations/llms/gooseai.ipynb b/docs/docs/integrations/llms/gooseai.ipynb
index e417f613657..b665106ebe7 100644
--- a/docs/docs/integrations/llms/gooseai.ipynb
+++ b/docs/docs/integrations/llms/gooseai.ipynb
@@ -111,7 +111,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/gpt4all.ipynb b/docs/docs/integrations/llms/gpt4all.ipynb
index 3fd8b51a76a..a1593774587 100644
--- a/docs/docs/integrations/llms/gpt4all.ipynb
+++ b/docs/docs/integrations/llms/gpt4all.ipynb
@@ -73,7 +73,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/gradient.ipynb b/docs/docs/integrations/llms/gradient.ipynb
index 8d46fa08968..d1bfe21e658 100644
--- a/docs/docs/integrations/llms/gradient.ipynb
+++ b/docs/docs/integrations/llms/gradient.ipynb
@@ -175,7 +175,7 @@
     "\n",
     "Answer: \"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/huggingface_hub.ipynb b/docs/docs/integrations/llms/huggingface_hub.ipynb
index b05e3b15ad6..67dbe3c41f3 100644
--- a/docs/docs/integrations/llms/huggingface_hub.ipynb
+++ b/docs/docs/integrations/llms/huggingface_hub.ipynb
@@ -118,7 +118,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/llamacpp.ipynb b/docs/docs/integrations/llms/llamacpp.ipynb
index a3a22acb7bb..9868fab6ae9 100644
--- a/docs/docs/integrations/llms/llamacpp.ipynb
+++ b/docs/docs/integrations/llms/llamacpp.ipynb
@@ -234,7 +234,7 @@
     "\n",
     "Answer: Let's work this out in a step by step way to be sure we have the right answer.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/manifest.ipynb b/docs/docs/integrations/llms/manifest.ipynb
index a4a09fe1804..005141cf9ad 100644
--- a/docs/docs/integrations/llms/manifest.ipynb
+++ b/docs/docs/integrations/llms/manifest.ipynb
@@ -91,7 +91,7 @@
     "\n",
     "\n",
     "CONCISE SUMMARY:\"\"\"\n",
-    "prompt = PromptTemplate(template=_prompt, input_variables=[\"text\"])\n",
+    "prompt = PromptTemplate.from_template(_prompt)\n",
     "\n",
     "text_splitter = CharacterTextSplitter()\n",
     "\n",
diff --git a/docs/docs/integrations/llms/minimax.ipynb b/docs/docs/integrations/llms/minimax.ipynb
index efb3a924b5d..b4ed78c2e17 100644
--- a/docs/docs/integrations/llms/minimax.ipynb
+++ b/docs/docs/integrations/llms/minimax.ipynb
@@ -113,7 +113,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/modal.ipynb b/docs/docs/integrations/llms/modal.ipynb
index f81abf6f434..de601cf8e60 100644
--- a/docs/docs/integrations/llms/modal.ipynb
+++ b/docs/docs/integrations/llms/modal.ipynb
@@ -122,7 +122,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/mosaicml.ipynb b/docs/docs/integrations/llms/mosaicml.ipynb
index c20bf8c4f2c..48307b409d0 100644
--- a/docs/docs/integrations/llms/mosaicml.ipynb
+++ b/docs/docs/integrations/llms/mosaicml.ipynb
@@ -55,7 +55,7 @@
    "source": [
     "template = \"\"\"Question: {question}\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/nlpcloud.ipynb b/docs/docs/integrations/llms/nlpcloud.ipynb
index 342e8df591f..dd93614efbd 100644
--- a/docs/docs/integrations/llms/nlpcloud.ipynb
+++ b/docs/docs/integrations/llms/nlpcloud.ipynb
@@ -90,7 +90,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/octoai.ipynb b/docs/docs/integrations/llms/octoai.ipynb
index 589880f293f..242588d07f8 100644
--- a/docs/docs/integrations/llms/octoai.ipynb
+++ b/docs/docs/integrations/llms/octoai.ipynb
@@ -61,7 +61,7 @@
    "outputs": [],
    "source": [
     "template = \"\"\"Below is an instruction that describes a task. Write a response that appropriately completes the request.\n Instruction:\n{question}\n Response: \"\"\"\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/openai.ipynb b/docs/docs/integrations/llms/openai.ipynb
index cbaab6002f7..8e072675373 100644
--- a/docs/docs/integrations/llms/openai.ipynb
+++ b/docs/docs/integrations/llms/openai.ipynb
@@ -84,7 +84,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/openllm.ipynb b/docs/docs/integrations/llms/openllm.ipynb
index 6fc14b3d469..0bcd3a9bb14 100644
--- a/docs/docs/integrations/llms/openllm.ipynb
+++ b/docs/docs/integrations/llms/openllm.ipynb
@@ -119,7 +119,7 @@
     "\n",
     "template = \"What is a good name for a company that makes {product}?\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"product\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "\n",
     "llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
     "\n",
diff --git a/docs/docs/integrations/llms/openlm.ipynb b/docs/docs/integrations/llms/openlm.ipynb
index 766a4419c3b..5d800e130f6 100644
--- a/docs/docs/integrations/llms/openlm.ipynb
+++ b/docs/docs/integrations/llms/openlm.ipynb
@@ -97,7 +97,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "\n",
     "for model in [\"text-davinci-003\", \"huggingface.co/gpt2\"]:\n",
     "    llm = OpenLM(model=model)\n",
diff --git a/docs/docs/integrations/llms/petals.ipynb b/docs/docs/integrations/llms/petals.ipynb
index 4d0900a53ee..779a8d9e2bf 100644
--- a/docs/docs/integrations/llms/petals.ipynb
+++ b/docs/docs/integrations/llms/petals.ipynb
@@ -133,7 +133,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/pipelineai.ipynb b/docs/docs/integrations/llms/pipelineai.ipynb
index 7ed35946563..ed97a58e00c 100644
--- a/docs/docs/integrations/llms/pipelineai.ipynb
+++ b/docs/docs/integrations/llms/pipelineai.ipynb
@@ -107,7 +107,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/predictionguard.ipynb b/docs/docs/integrations/llms/predictionguard.ipynb
index c6daa2d5185..1200680cd9b 100644
--- a/docs/docs/integrations/llms/predictionguard.ipynb
+++ b/docs/docs/integrations/llms/predictionguard.ipynb
@@ -118,7 +118,7 @@
     "Query: {query}\n",
     "\n",
     "Result: \"\"\"\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"query\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
@@ -191,7 +191,7 @@
     "template = \"\"\"Question: {question}\n",
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True)\n",
     "\n",
     "question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
@@ -209,7 +209,7 @@
    "outputs": [],
    "source": [
     "template = \"\"\"Write a {adjective} poem about {subject}.\"\"\"\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"adjective\", \"subject\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True)\n",
     "\n",
     "llm_chain.predict(adjective=\"sad\", subject=\"ducks\")"
diff --git a/docs/docs/integrations/llms/runhouse.ipynb b/docs/docs/integrations/llms/runhouse.ipynb
index 30fb5654946..fe44389d52a 100644
--- a/docs/docs/integrations/llms/runhouse.ipynb
+++ b/docs/docs/integrations/llms/runhouse.ipynb
@@ -83,7 +83,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/stochasticai.ipynb b/docs/docs/integrations/llms/stochasticai.ipynb
index 1f3ecd98d0d..6a58aae7361 100644
--- a/docs/docs/integrations/llms/stochasticai.ipynb
+++ b/docs/docs/integrations/llms/stochasticai.ipynb
@@ -96,7 +96,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/textgen.ipynb b/docs/docs/integrations/llms/textgen.ipynb
index b7bf0941c05..1b4aed8320a 100644
--- a/docs/docs/integrations/llms/textgen.ipynb
+++ b/docs/docs/integrations/llms/textgen.ipynb
@@ -53,7 +53,7 @@
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "llm = TextGen(model_url=model_url)\n",
     "llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
     "question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
@@ -104,7 +104,7 @@
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "llm = TextGen(\n",
     "    model_url=model_url, streaming=True, callbacks=[StreamingStdOutCallbackHandler()]\n",
     ")\n",
diff --git a/docs/docs/integrations/llms/titan_takeoff.ipynb b/docs/docs/integrations/llms/titan_takeoff.ipynb
index ce2ad317974..b7df1bb0016 100644
--- a/docs/docs/integrations/llms/titan_takeoff.ipynb
+++ b/docs/docs/integrations/llms/titan_takeoff.ipynb
@@ -146,7 +146,7 @@
     "\n",
     "template = \"What is the capital of {country}\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"country\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "\n",
     "llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
     "\n",
diff --git a/docs/docs/integrations/llms/tongyi.ipynb b/docs/docs/integrations/llms/tongyi.ipynb
index 1e3da253910..7b57e4c462d 100644
--- a/docs/docs/integrations/llms/tongyi.ipynb
+++ b/docs/docs/integrations/llms/tongyi.ipynb
@@ -95,7 +95,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/vllm.ipynb b/docs/docs/integrations/llms/vllm.ipynb
index 6abf9a1376b..4d88a2714fa 100644
--- a/docs/docs/integrations/llms/vllm.ipynb
+++ b/docs/docs/integrations/llms/vllm.ipynb
@@ -135,7 +135,7 @@
     "template = \"\"\"Question: {question}\n",
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "\n",
     "llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
     "\n",
diff --git a/docs/docs/integrations/llms/writer.ipynb b/docs/docs/integrations/llms/writer.ipynb
index fed4af16690..5c2206d1f56 100644
--- a/docs/docs/integrations/llms/writer.ipynb
+++ b/docs/docs/integrations/llms/writer.ipynb
@@ -72,7 +72,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/xinference.ipynb b/docs/docs/integrations/llms/xinference.ipynb
index a2907446705..5643750a4b9 100644
--- a/docs/docs/integrations/llms/xinference.ipynb
+++ b/docs/docs/integrations/llms/xinference.ipynb
@@ -126,7 +126,7 @@
     "\n",
     "template = \"Where can we visit in the capital of {country}?\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"country\"])\n",
+    "prompt = PromptTemplate.from_template(template)\n",
     "\n",
     "llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
     "\n",
diff --git a/docs/docs/integrations/llms/yandex.ipynb b/docs/docs/integrations/llms/yandex.ipynb
index 0dfa53bd61c..42093fec896 100644
--- a/docs/docs/integrations/llms/yandex.ipynb
+++ b/docs/docs/integrations/llms/yandex.ipynb
@@ -56,7 +56,7 @@
    "outputs": [],
    "source": [
     "template = \"What is the capital of {country}?\"\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"country\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/integrations/providers/predictionguard.mdx b/docs/docs/integrations/providers/predictionguard.mdx
index 72d36bc0193..fdb0f0a397c 100644
--- a/docs/docs/integrations/providers/predictionguard.mdx
+++ b/docs/docs/integrations/providers/predictionguard.mdx
@@ -55,7 +55,7 @@ Head to stories to get ALL the deets on each box! 👆 BONUS: Save 50% on your f
 Query: {query}
 
 Result: """
-prompt = PromptTemplate(template=template, input_variables=["query"])
+prompt = PromptTemplate.from_template(template)
 
 # With "guarding" or controlling the output of the LLM. See the
 # Prediction Guard docs (https://docs.predictionguard.com) to learn how to
@@ -93,7 +93,7 @@ pgllm = PredictionGuard(model="OpenAI-gpt-3.5-turbo-instruct")
 template = """Question: {question}
 
 Answer: Let's think step by step."""
-prompt = PromptTemplate(template=template, input_variables=["question"])
+prompt = PromptTemplate.from_template(template)
 llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True)
 
 question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
diff --git a/docs/docs/integrations/providers/ray_serve.ipynb b/docs/docs/integrations/providers/ray_serve.ipynb
index 144fb572334..b48e76710d0 100644
--- a/docs/docs/integrations/providers/ray_serve.ipynb
+++ b/docs/docs/integrations/providers/ray_serve.ipynb
@@ -135,7 +135,7 @@
     "        # We initialize the LLM, template and the chain here\n",
     "        llm = OpenAI(openai_api_key=OPENAI_API_KEY)\n",
     "        template = \"Question: {question}\\n\\nAnswer: Let's think step by step.\"\n",
-    "        prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
+    "        prompt = PromptTemplate.from_template(template)\n",
     "        self.chain = LLMChain(llm=llm, prompt=prompt)\n",
     "\n",
     "    def _run_chain(self, text: str):\n",
diff --git a/docs/docs/integrations/providers/shaleprotocol.md b/docs/docs/integrations/providers/shaleprotocol.md
index 2aced9cb638..dbdd3caa6cf 100644
--- a/docs/docs/integrations/providers/shaleprotocol.md
+++ b/docs/docs/integrations/providers/shaleprotocol.md
@@ -33,7 +33,7 @@ template = """Question: {question}
 
 # Answer: Let's think step by step."""
 
-prompt = PromptTemplate(template=template, input_variables=["question"])
+prompt = PromptTemplate.from_template(template)
 
 llm_chain = LLMChain(prompt=prompt, llm=llm)
 
diff --git a/docs/docs/integrations/text_embedding/clarifai.ipynb b/docs/docs/integrations/text_embedding/clarifai.ipynb
index ab006c48b35..f10a9a463a6 100644
--- a/docs/docs/integrations/text_embedding/clarifai.ipynb
+++ b/docs/docs/integrations/text_embedding/clarifai.ipynb
@@ -101,7 +101,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "\n",
-    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+    "prompt = PromptTemplate.from_template(template)"
    ]
   },
   {
diff --git a/docs/docs/modules/model_io/prompts/partial.ipynb b/docs/docs/modules/model_io/prompts/partial.ipynb
index 4c937ba7286..274cc6fb720 100644
--- a/docs/docs/modules/model_io/prompts/partial.ipynb
+++ b/docs/docs/modules/model_io/prompts/partial.ipynb
@@ -37,7 +37,7 @@
    "source": [
     "from langchain.prompts import PromptTemplate\n",
     "\n",
-    "prompt = PromptTemplate(template=\"{foo}{bar}\", input_variables=[\"foo\", \"bar\"])\n",
+    "prompt = PromptTemplate.from_template(\"{foo}{bar}\")\n",
     "partial_prompt = prompt.partial(foo=\"foo\")\n",
     "print(partial_prompt.format(bar=\"baz\"))"
    ]