diff --git a/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb b/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb
index 259affbd2b2..f96db6137ef 100644
--- a/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb
+++ b/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb
@@ -294,7 +294,7 @@
    },
    "outputs": [],
    "source": [
-    "from langchain.prompts import PromptTemplate\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "prompt_template = PromptTemplate.from_template(\n",
     "    \"\"\"Given the input context, which do you prefer: A or B?\n",
diff --git a/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb b/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb
index 9754094d4ff..d061fece4ae 100644
--- a/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb
+++ b/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb
@@ -380,7 +380,7 @@
    },
    "outputs": [],
    "source": [
-    "from langchain.prompts import PromptTemplate\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "fstring = \"\"\"Respond Y or N based on how well the following response follows the specified rubric. Grade only based on the rubric and expected response:\n",
     "\n",
diff --git a/docs/docs/guides/fallbacks.ipynb b/docs/docs/guides/fallbacks.ipynb
index ff992316736..36762d4bbcb 100644
--- a/docs/docs/guides/fallbacks.ipynb
+++ b/docs/docs/guides/fallbacks.ipynb
@@ -216,7 +216,7 @@
    "outputs": [],
    "source": [
     "# Now lets create a chain with the normal OpenAI model\n",
-    "from langchain.prompts import PromptTemplate\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "from langchain_openai import OpenAI\n",
     "\n",
     "prompt_template = \"\"\"Instructions: You should always include a compliment in your response.\n",
diff --git a/docs/docs/guides/local_llms.ipynb b/docs/docs/guides/local_llms.ipynb
index 23253640b02..fef32b9785b 100644
--- a/docs/docs/guides/local_llms.ipynb
+++ b/docs/docs/guides/local_llms.ipynb
@@ -546,7 +546,7 @@
    "source": [
     "from langchain.chains import LLMChain\n",
     "from langchain.chains.prompt_selector import ConditionalPromptSelector\n",
-    "from langchain.prompts import PromptTemplate\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "DEFAULT_LLAMA_SEARCH_PROMPT = PromptTemplate(\n",
     "    input_variables=[\"question\"],\n",
diff --git a/docs/docs/guides/model_laboratory.ipynb b/docs/docs/guides/model_laboratory.ipynb
index 540bc4023fb..e36fe1b0bd3 100644
--- a/docs/docs/guides/model_laboratory.ipynb
+++ b/docs/docs/guides/model_laboratory.ipynb
@@ -30,8 +30,8 @@
    "outputs": [],
    "source": [
     "from langchain.model_laboratory import ModelLaboratory\n",
-    "from langchain.prompts import PromptTemplate\n",
     "from langchain_community.llms import Cohere, HuggingFaceHub\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "from langchain_openai import OpenAI"
    ]
   },
diff --git a/docs/docs/guides/safety/amazon_comprehend_chain.ipynb b/docs/docs/guides/safety/amazon_comprehend_chain.ipynb
index b9548546ef9..256bc334d0d 100644
--- a/docs/docs/guides/safety/amazon_comprehend_chain.ipynb
+++ b/docs/docs/guides/safety/amazon_comprehend_chain.ipynb
@@ -105,8 +105,8 @@
    },
    "outputs": [],
    "source": [
-    "from langchain.prompts import PromptTemplate\n",
     "from langchain_community.llms.fake import FakeListLLM\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (\n",
     "    ModerationPiiError,\n",
     ")\n",
@@ -242,8 +242,8 @@
    },
    "outputs": [],
[], "source": [ - "from langchain.prompts import PromptTemplate\n", "from langchain_community.llms.fake import FakeListLLM\n", + "from langchain_core.prompts import PromptTemplate\n", "\n", "template = \"\"\"Question: {question}\n", "\n", @@ -405,8 +405,8 @@ }, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", "from langchain_community.llms.fake import FakeListLLM\n", + "from langchain_core.prompts import PromptTemplate\n", "\n", "template = \"\"\"Question: {question}\n", "\n", @@ -566,8 +566,8 @@ }, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", "from langchain_community.llms import HuggingFaceHub\n", + "from langchain_core.prompts import PromptTemplate\n", "\n", "template = \"\"\"{question}\"\"\"\n", "\n", @@ -696,9 +696,9 @@ "source": [ "import json\n", "\n", - "from langchain.prompts import PromptTemplate\n", "from langchain_community.llms import SagemakerEndpoint\n", "from langchain_community.llms.sagemaker_endpoint import LLMContentHandler\n", + "from langchain_core.prompts import PromptTemplate\n", "\n", "\n", "class ContentHandler(LLMContentHandler):\n", diff --git a/docs/docs/guides/safety/constitutional_chain.mdx b/docs/docs/guides/safety/constitutional_chain.mdx index 4b982501315..e81c5ca6094 100644 --- a/docs/docs/guides/safety/constitutional_chain.mdx +++ b/docs/docs/guides/safety/constitutional_chain.mdx @@ -13,7 +13,7 @@ content that may violate guidelines, be offensive, or deviate from the desired c ```python # Imports from langchain_openai import OpenAI -from langchain.prompts import PromptTemplate +from langchain_core.prompts import PromptTemplate from langchain.chains.llm import LLMChain from langchain.chains.constitutional_ai.base import ConstitutionalChain ``` diff --git a/docs/docs/guides/safety/logical_fallacy_chain.mdx b/docs/docs/guides/safety/logical_fallacy_chain.mdx index d25dd37cd3a..dc87a94fffe 100644 --- a/docs/docs/guides/safety/logical_fallacy_chain.mdx +++ b/docs/docs/guides/safety/logical_fallacy_chain.mdx @@ -22,7 +22,7 @@ Therefore, it is crucial that model developers proactively address logical falla ```python # Imports from langchain_openai import OpenAI -from langchain.prompts import PromptTemplate +from langchain_core.prompts import PromptTemplate from langchain.chains.llm import LLMChain from langchain_experimental.fallacy_removal.base import FallacyChain ``` diff --git a/docs/docs/guides/safety/moderation.mdx b/docs/docs/guides/safety/moderation.mdx index a43579dfef5..4afda1556f5 100644 --- a/docs/docs/guides/safety/moderation.mdx +++ b/docs/docs/guides/safety/moderation.mdx @@ -24,7 +24,7 @@ We'll show: ```python from langchain_openai import OpenAI from langchain.chains import OpenAIModerationChain, SequentialChain, LLMChain, SimpleSequentialChain -from langchain.prompts import PromptTemplate +from langchain_core.prompts import PromptTemplate ``` ## How to use the moderation chain diff --git a/docs/docs/integrations/llms/aleph_alpha.ipynb b/docs/docs/integrations/llms/aleph_alpha.ipynb index 3d7fb662338..95351992ed6 100644 --- a/docs/docs/integrations/llms/aleph_alpha.ipynb +++ b/docs/docs/integrations/llms/aleph_alpha.ipynb @@ -58,8 +58,8 @@ }, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import AlephAlpha" + "from langchain_community.llms import AlephAlpha\n", + "from langchain_core.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/anyscale.ipynb b/docs/docs/integrations/llms/anyscale.ipynb 
index ed035219698..105746779c5 100644
--- a/docs/docs/integrations/llms/anyscale.ipynb
+++ b/docs/docs/integrations/llms/anyscale.ipynb
@@ -49,8 +49,8 @@
    "outputs": [],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
-    "from langchain_community.llms import Anyscale"
+    "from langchain_community.llms import Anyscale\n",
+    "from langchain_core.prompts import PromptTemplate"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/aphrodite.ipynb b/docs/docs/integrations/llms/aphrodite.ipynb
index 5cbbfb1ce84..f90c4fae25e 100644
--- a/docs/docs/integrations/llms/aphrodite.ipynb
+++ b/docs/docs/integrations/llms/aphrodite.ipynb
@@ -146,7 +146,7 @@
    ],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "template = \"\"\"Question: {question}\n",
     "\n",
diff --git a/docs/docs/integrations/llms/azure_ml.ipynb b/docs/docs/integrations/llms/azure_ml.ipynb
index b2adb40a84b..bfee9ed3cb1 100644
--- a/docs/docs/integrations/llms/azure_ml.ipynb
+++ b/docs/docs/integrations/llms/azure_ml.ipynb
@@ -228,8 +228,8 @@
    "outputs": [],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
     "from langchain_community.llms.azureml_endpoint import DollyContentFormatter\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "formatter_template = \"Write a {word_count} word essay about {topic}.\"\n",
     "\n",
diff --git a/docs/docs/integrations/llms/banana.ipynb b/docs/docs/integrations/llms/banana.ipynb
index 7fbdc2921d5..a9836f09220 100644
--- a/docs/docs/integrations/llms/banana.ipynb
+++ b/docs/docs/integrations/llms/banana.ipynb
@@ -52,8 +52,8 @@
    "outputs": [],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
-    "from langchain_community.llms import Banana"
+    "from langchain_community.llms import Banana\n",
+    "from langchain_core.prompts import PromptTemplate"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/baseten.ipynb b/docs/docs/integrations/llms/baseten.ipynb
index c15b2ecd7f3..e8c92bee9cf 100644
--- a/docs/docs/integrations/llms/baseten.ipynb
+++ b/docs/docs/integrations/llms/baseten.ipynb
@@ -94,7 +94,7 @@
    "source": [
     "from langchain.chains import LLMChain\n",
     "from langchain.memory import ConversationBufferWindowMemory\n",
-    "from langchain.prompts import PromptTemplate\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "template = \"\"\"Assistant is a large language model trained by OpenAI.\n",
     "\n",
diff --git a/docs/docs/integrations/llms/bittensor.ipynb b/docs/docs/integrations/llms/bittensor.ipynb
index 92ebb9b7ac6..b5c9bc2b5a1 100644
--- a/docs/docs/integrations/llms/bittensor.ipynb
+++ b/docs/docs/integrations/llms/bittensor.ipynb
@@ -82,8 +82,8 @@
    "source": [
     "from langchain.chains import LLMChain\n",
     "from langchain.globals import set_debug\n",
-    "from langchain.prompts import PromptTemplate\n",
     "from langchain_community.llms import NIBittensorLLM\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "set_debug(True)\n",
     "\n",
@@ -142,8 +142,8 @@
     ")\n",
     "from langchain.chains import LLMChain\n",
     "from langchain.memory import ConversationBufferMemory\n",
-    "from langchain.prompts import PromptTemplate\n",
     "from langchain_community.llms import NIBittensorLLM\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
"\n", diff --git a/docs/docs/integrations/llms/cerebriumai.ipynb b/docs/docs/integrations/llms/cerebriumai.ipynb index e062e4ad496..524c678921b 100644 --- a/docs/docs/integrations/llms/cerebriumai.ipynb +++ b/docs/docs/integrations/llms/cerebriumai.ipynb @@ -45,8 +45,8 @@ "import os\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import CerebriumAI" + "from langchain_community.llms import CerebriumAI\n", + "from langchain_core.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/chatglm.ipynb b/docs/docs/integrations/llms/chatglm.ipynb index c004219061f..645f880b3a2 100644 --- a/docs/docs/integrations/llms/chatglm.ipynb +++ b/docs/docs/integrations/llms/chatglm.ipynb @@ -41,9 +41,9 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", "from langchain.schema.messages import AIMessage\n", - "from langchain_community.llms.chatglm3 import ChatGLM3" + "from langchain_community.llms.chatglm3 import ChatGLM3\n", + "from langchain_core.prompts import PromptTemplate" ] }, { @@ -117,8 +117,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", "from langchain_community.llms import ChatGLM\n", + "from langchain_core.prompts import PromptTemplate\n", "\n", "# import os" ] diff --git a/docs/docs/integrations/llms/clarifai.ipynb b/docs/docs/integrations/llms/clarifai.ipynb index 952263de025..e982d550dec 100644 --- a/docs/docs/integrations/llms/clarifai.ipynb +++ b/docs/docs/integrations/llms/clarifai.ipynb @@ -87,8 +87,8 @@ "source": [ "# Import the required modules\n", "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import Clarifai" + "from langchain_community.llms import Clarifai\n", + "from langchain_core.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/cloudflare_workersai.ipynb b/docs/docs/integrations/llms/cloudflare_workersai.ipynb index 030b192d093..b90bfb31369 100644 --- a/docs/docs/integrations/llms/cloudflare_workersai.ipynb +++ b/docs/docs/integrations/llms/cloudflare_workersai.ipynb @@ -19,8 +19,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", "from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI\n", + "from langchain_core.prompts import PromptTemplate\n", "\n", "template = \"\"\"Human: {question}\n", "\n", diff --git a/docs/docs/integrations/llms/ctransformers.ipynb b/docs/docs/integrations/llms/ctransformers.ipynb index 6231c0a2e1b..7c6248c0136 100644 --- a/docs/docs/integrations/llms/ctransformers.ipynb +++ b/docs/docs/integrations/llms/ctransformers.ipynb @@ -103,7 +103,7 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import PromptTemplate\n", "\n", "template = \"\"\"Question: {question}\n", "\n", diff --git a/docs/docs/integrations/llms/ctranslate2.ipynb b/docs/docs/integrations/llms/ctranslate2.ipynb index f80d320bf22..c13a4d76255 100644 --- a/docs/docs/integrations/llms/ctranslate2.ipynb +++ b/docs/docs/integrations/llms/ctranslate2.ipynb @@ -196,7 +196,7 @@ ], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import 
PromptTemplate\n", "\n", "template = \"\"\"{question}\n", "\n", diff --git a/docs/docs/integrations/llms/deepinfra.ipynb b/docs/docs/integrations/llms/deepinfra.ipynb index 871a0f4d9de..e57ff22659c 100644 --- a/docs/docs/integrations/llms/deepinfra.ipynb +++ b/docs/docs/integrations/llms/deepinfra.ipynb @@ -140,7 +140,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import PromptTemplate\n", "\n", "template = \"\"\"Question: {question}\n", "\n", diff --git a/docs/docs/integrations/llms/edenai.ipynb b/docs/docs/integrations/llms/edenai.ipynb index b42fc5d9875..b6231654df5 100644 --- a/docs/docs/integrations/llms/edenai.ipynb +++ b/docs/docs/integrations/llms/edenai.ipynb @@ -98,7 +98,7 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import PromptTemplate\n", "\n", "llm = EdenAI(\n", " feature=\"text\",\n", @@ -220,7 +220,7 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain, SimpleSequentialChain\n", - "from langchain.prompts import PromptTemplate" + "from langchain_core.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/fireworks.ipynb b/docs/docs/integrations/llms/fireworks.ipynb index 57e772f760c..fc84b59a838 100644 --- a/docs/docs/integrations/llms/fireworks.ipynb +++ b/docs/docs/integrations/llms/fireworks.ipynb @@ -182,7 +182,7 @@ } ], "source": [ - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import PromptTemplate\n", "from langchain_fireworks import Fireworks\n", "\n", "llm = Fireworks(\n", diff --git a/docs/docs/integrations/llms/forefrontai.ipynb b/docs/docs/integrations/llms/forefrontai.ipynb index eef4fcb8e98..34dec0be5ed 100644 --- a/docs/docs/integrations/llms/forefrontai.ipynb +++ b/docs/docs/integrations/llms/forefrontai.ipynb @@ -28,8 +28,8 @@ "import os\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import ForefrontAI" + "from langchain_community.llms import ForefrontAI\n", + "from langchain_core.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/gigachat.ipynb b/docs/docs/integrations/llms/gigachat.ipynb index 7e92a38aa73..19400be4474 100644 --- a/docs/docs/integrations/llms/gigachat.ipynb +++ b/docs/docs/integrations/llms/gigachat.ipynb @@ -80,7 +80,7 @@ ], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import PromptTemplate\n", "\n", "template = \"What is capital of {country}?\"\n", "\n", diff --git a/docs/docs/integrations/llms/google_ai.ipynb b/docs/docs/integrations/llms/google_ai.ipynb index 2aa49753c1d..c248d6f5fbe 100644 --- a/docs/docs/integrations/llms/google_ai.ipynb +++ b/docs/docs/integrations/llms/google_ai.ipynb @@ -180,7 +180,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate" + "from langchain_core.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/gooseai.ipynb b/docs/docs/integrations/llms/gooseai.ipynb index b665106ebe7..66fc43bcdd0 100644 --- a/docs/docs/integrations/llms/gooseai.ipynb +++ b/docs/docs/integrations/llms/gooseai.ipynb @@ -44,8 +44,8 @@ "import os\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import 
GooseAI" + "from langchain_community.llms import GooseAI\n", + "from langchain_core.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/gpt4all.ipynb b/docs/docs/integrations/llms/gpt4all.ipynb index a1593774587..3a1b084e2f3 100644 --- a/docs/docs/integrations/llms/gpt4all.ipynb +++ b/docs/docs/integrations/llms/gpt4all.ipynb @@ -49,8 +49,8 @@ "source": [ "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import GPT4All" + "from langchain_community.llms import GPT4All\n", + "from langchain_core.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/gradient.ipynb b/docs/docs/integrations/llms/gradient.ipynb index d1bfe21e658..c46a2d1e39c 100644 --- a/docs/docs/integrations/llms/gradient.ipynb +++ b/docs/docs/integrations/llms/gradient.ipynb @@ -25,8 +25,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import GradientLLM" + "from langchain_community.llms import GradientLLM\n", + "from langchain_core.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/huggingface_pipelines.ipynb b/docs/docs/integrations/llms/huggingface_pipelines.ipynb index 2d74d445421..c47beae6642 100644 --- a/docs/docs/integrations/llms/huggingface_pipelines.ipynb +++ b/docs/docs/integrations/llms/huggingface_pipelines.ipynb @@ -107,7 +107,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import PromptTemplate\n", "\n", "template = \"\"\"Question: {question}\n", "\n", diff --git a/docs/docs/integrations/llms/javelin.ipynb b/docs/docs/integrations/llms/javelin.ipynb index c5bcc247d2d..fe479bdba7f 100644 --- a/docs/docs/integrations/llms/javelin.ipynb +++ b/docs/docs/integrations/llms/javelin.ipynb @@ -92,8 +92,8 @@ ], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", "from langchain_community.llms import JavelinAIGateway\n", + "from langchain_core.prompts import PromptTemplate\n", "\n", "route_completions = \"eng_dept03\"\n", "\n", diff --git a/docs/docs/integrations/llms/manifest.ipynb b/docs/docs/integrations/llms/manifest.ipynb index 8ac42dc524e..fbd2c4aedfb 100644 --- a/docs/docs/integrations/llms/manifest.ipynb +++ b/docs/docs/integrations/llms/manifest.ipynb @@ -81,7 +81,7 @@ "source": [ "# Map reduce example\n", "from langchain.chains.mapreduce import MapReduceChain\n", - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import PromptTemplate\n", "from langchain_text_splitters import CharacterTextSplitter\n", "\n", "_prompt = \"\"\"Write a concise summary of the following:\n", diff --git a/docs/docs/integrations/llms/minimax.ipynb b/docs/docs/integrations/llms/minimax.ipynb index b4ed78c2e17..a43cf79f93e 100644 --- a/docs/docs/integrations/llms/minimax.ipynb +++ b/docs/docs/integrations/llms/minimax.ipynb @@ -97,8 +97,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import Minimax" + "from langchain_community.llms import Minimax\n", + "from langchain_core.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/modal.ipynb b/docs/docs/integrations/llms/modal.ipynb index 
index de601cf8e60..ecbb37efbf6 100644
--- a/docs/docs/integrations/llms/modal.ipynb
+++ b/docs/docs/integrations/llms/modal.ipynb
@@ -108,8 +108,8 @@
    "outputs": [],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
-    "from langchain_community.llms import Modal"
+    "from langchain_community.llms import Modal\n",
+    "from langchain_core.prompts import PromptTemplate"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/mosaicml.ipynb b/docs/docs/integrations/llms/mosaicml.ipynb
index 48307b409d0..47114d3e20d 100644
--- a/docs/docs/integrations/llms/mosaicml.ipynb
+++ b/docs/docs/integrations/llms/mosaicml.ipynb
@@ -43,8 +43,8 @@
    "outputs": [],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
-    "from langchain_community.llms import MosaicML"
+    "from langchain_community.llms import MosaicML\n",
+    "from langchain_core.prompts import PromptTemplate"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/nlpcloud.ipynb b/docs/docs/integrations/llms/nlpcloud.ipynb
index dd93614efbd..66262a092a9 100644
--- a/docs/docs/integrations/llms/nlpcloud.ipynb
+++ b/docs/docs/integrations/llms/nlpcloud.ipynb
@@ -73,8 +73,8 @@
    "outputs": [],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
-    "from langchain_community.llms import NLPCloud"
+    "from langchain_community.llms import NLPCloud\n",
+    "from langchain_core.prompts import PromptTemplate"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/octoai.ipynb b/docs/docs/integrations/llms/octoai.ipynb
index a47016a8586..d54e52e8a42 100644
--- a/docs/docs/integrations/llms/octoai.ipynb
+++ b/docs/docs/integrations/llms/octoai.ipynb
@@ -40,8 +40,8 @@
    "outputs": [],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
-    "from langchain_community.llms.octoai_endpoint import OctoAIEndpoint"
+    "from langchain_community.llms.octoai_endpoint import OctoAIEndpoint\n",
+    "from langchain_core.prompts import PromptTemplate"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/opaqueprompts.ipynb b/docs/docs/integrations/llms/opaqueprompts.ipynb
index e154348ed38..272ebfd89ae 100644
--- a/docs/docs/integrations/llms/opaqueprompts.ipynb
+++ b/docs/docs/integrations/llms/opaqueprompts.ipynb
@@ -62,8 +62,8 @@
     "from langchain.chains import LLMChain\n",
     "from langchain.globals import set_debug, set_verbose\n",
     "from langchain.memory import ConversationBufferWindowMemory\n",
-    "from langchain.prompts import PromptTemplate\n",
     "from langchain_community.llms import OpaquePrompts\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "from langchain_openai import OpenAI\n",
     "\n",
     "set_debug(True)\n",
diff --git a/docs/docs/integrations/llms/openai.ipynb b/docs/docs/integrations/llms/openai.ipynb
index 8e072675373..458505c30ec 100644
--- a/docs/docs/integrations/llms/openai.ipynb
+++ b/docs/docs/integrations/llms/openai.ipynb
@@ -67,7 +67,7 @@
    "outputs": [],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "from langchain_openai import OpenAI"
    ]
   },
diff --git a/docs/docs/integrations/llms/openllm.ipynb b/docs/docs/integrations/llms/openllm.ipynb
index 0bcd3a9bb14..279ba6435bc 100644
--- a/docs/docs/integrations/llms/openllm.ipynb
+++ b/docs/docs/integrations/llms/openllm.ipynb
@@ -115,7 +115,7 @@
    ],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "template = \"What is a good name for a company that makes {product}?\"\n",
     "\n",
diff --git a/docs/docs/integrations/llms/openlm.ipynb b/docs/docs/integrations/llms/openlm.ipynb
index 5d800e130f6..19da8753236 100644
--- a/docs/docs/integrations/llms/openlm.ipynb
+++ b/docs/docs/integrations/llms/openlm.ipynb
@@ -69,8 +69,8 @@
    "outputs": [],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
-    "from langchain_community.llms import OpenLM"
+    "from langchain_community.llms import OpenLM\n",
+    "from langchain_core.prompts import PromptTemplate"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/petals.ipynb b/docs/docs/integrations/llms/petals.ipynb
index 779a8d9e2bf..3cbe68ce4e4 100644
--- a/docs/docs/integrations/llms/petals.ipynb
+++ b/docs/docs/integrations/llms/petals.ipynb
@@ -46,8 +46,8 @@
     "import os\n",
     "\n",
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
-    "from langchain_community.llms import Petals"
+    "from langchain_community.llms import Petals\n",
+    "from langchain_core.prompts import PromptTemplate"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/pipelineai.ipynb b/docs/docs/integrations/llms/pipelineai.ipynb
index ed97a58e00c..142d72d3e6f 100644
--- a/docs/docs/integrations/llms/pipelineai.ipynb
+++ b/docs/docs/integrations/llms/pipelineai.ipynb
@@ -51,8 +51,8 @@
     "import os\n",
     "\n",
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
-    "from langchain_community.llms import PipelineAI"
+    "from langchain_community.llms import PipelineAI\n",
+    "from langchain_core.prompts import PromptTemplate"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/predibase.ipynb b/docs/docs/integrations/llms/predibase.ipynb
index 8f5441f496a..750cbf90387 100644
--- a/docs/docs/integrations/llms/predibase.ipynb
+++ b/docs/docs/integrations/llms/predibase.ipynb
@@ -96,7 +96,7 @@
    "outputs": [],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate"
+    "from langchain_core.prompts import PromptTemplate"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/predictionguard.ipynb b/docs/docs/integrations/llms/predictionguard.ipynb
index 1200680cd9b..e5a632257d0 100644
--- a/docs/docs/integrations/llms/predictionguard.ipynb
+++ b/docs/docs/integrations/llms/predictionguard.ipynb
@@ -32,8 +32,8 @@
     "import os\n",
     "\n",
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
-    "from langchain_community.llms import PredictionGuard"
+    "from langchain_community.llms import PredictionGuard\n",
+    "from langchain_core.prompts import PromptTemplate"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/replicate.ipynb b/docs/docs/integrations/llms/replicate.ipynb
index 46f34a7825c..d0339570e76 100644
--- a/docs/docs/integrations/llms/replicate.ipynb
+++ b/docs/docs/integrations/llms/replicate.ipynb
@@ -104,8 +104,8 @@
    "outputs": [],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
-    "from langchain_community.llms import Replicate"
+    "from langchain_community.llms import Replicate\n",
+    "from langchain_core.prompts import PromptTemplate"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/runhouse.ipynb b/docs/docs/integrations/llms/runhouse.ipynb
index fe44389d52a..c086f9c5d4a 100644
--- a/docs/docs/integrations/llms/runhouse.ipynb
+++ b/docs/docs/integrations/llms/runhouse.ipynb
@@ -45,8 +45,8 @@
    "source": [
     "import runhouse as rh\n",
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
-    "from langchain_community.llms import SelfHostedHuggingFaceLLM, SelfHostedPipeline"
+    "from langchain_community.llms import SelfHostedHuggingFaceLLM, SelfHostedPipeline\n",
+    "from langchain_core.prompts import PromptTemplate"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/sagemaker.ipynb b/docs/docs/integrations/llms/sagemaker.ipynb
index 594f39e186f..4f418039dc7 100644
--- a/docs/docs/integrations/llms/sagemaker.ipynb
+++ b/docs/docs/integrations/llms/sagemaker.ipynb
@@ -104,9 +104,9 @@
     "\n",
     "import boto3\n",
     "from langchain.chains.question_answering import load_qa_chain\n",
-    "from langchain.prompts import PromptTemplate\n",
     "from langchain_community.llms import SagemakerEndpoint\n",
     "from langchain_community.llms.sagemaker_endpoint import LLMContentHandler\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "query = \"\"\"How long was Elizabeth hospitalized?\n",
     "\"\"\"\n",
@@ -174,9 +174,9 @@
     "from typing import Dict\n",
     "\n",
     "from langchain.chains.question_answering import load_qa_chain\n",
-    "from langchain.prompts import PromptTemplate\n",
     "from langchain_community.llms import SagemakerEndpoint\n",
     "from langchain_community.llms.sagemaker_endpoint import LLMContentHandler\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "query = \"\"\"How long was Elizabeth hospitalized?\n",
     "\"\"\"\n",
diff --git a/docs/docs/integrations/llms/stochasticai.ipynb b/docs/docs/integrations/llms/stochasticai.ipynb
index 6a58aae7361..f3a58f7c79c 100644
--- a/docs/docs/integrations/llms/stochasticai.ipynb
+++ b/docs/docs/integrations/llms/stochasticai.ipynb
@@ -80,8 +80,8 @@
    "outputs": [],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
-    "from langchain_community.llms import StochasticAI"
+    "from langchain_community.llms import StochasticAI\n",
+    "from langchain_core.prompts import PromptTemplate"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/symblai_nebula.ipynb b/docs/docs/integrations/llms/symblai_nebula.ipynb
index cbea2f6e9cb..fdf70bfba3c 100644
--- a/docs/docs/integrations/llms/symblai_nebula.ipynb
+++ b/docs/docs/integrations/llms/symblai_nebula.ipynb
@@ -59,7 +59,7 @@
    "outputs": [],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "conversation = \"\"\"Sam: Good morning, team! Let's keep this standup concise. We'll go in the usual order: what you did yesterday, what you plan to do today, and any blockers. Alex, kick us off.\n",
     "Alex: Morning! Yesterday, I wrapped up the UI for the user dashboard. The new charts and widgets are now responsive. I also had a sync with the design team to ensure the final touchups are in line with the brand guidelines. Today, I'll start integrating the frontend with the new API endpoints Rhea was working on. The only blocker is waiting for some final API documentation, but I guess Rhea can update on that.\n",
diff --git a/docs/docs/integrations/llms/textgen.ipynb b/docs/docs/integrations/llms/textgen.ipynb
index 1b4aed8320a..1a31298e16d 100644
--- a/docs/docs/integrations/llms/textgen.ipynb
+++ b/docs/docs/integrations/llms/textgen.ipynb
@@ -43,8 +43,8 @@
    "source": [
     "from langchain.chains import LLMChain\n",
     "from langchain.globals import set_debug\n",
-    "from langchain.prompts import PromptTemplate\n",
     "from langchain_community.llms import TextGen\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "set_debug(True)\n",
     "\n",
@@ -94,8 +94,8 @@
     "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
     "from langchain.chains import LLMChain\n",
     "from langchain.globals import set_debug\n",
-    "from langchain.prompts import PromptTemplate\n",
     "from langchain_community.llms import TextGen\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "set_debug(True)\n",
     "\n",
diff --git a/docs/docs/integrations/llms/titan_takeoff.ipynb b/docs/docs/integrations/llms/titan_takeoff.ipynb
index b7df1bb0016..5611210c3bf 100644
--- a/docs/docs/integrations/llms/titan_takeoff.ipynb
+++ b/docs/docs/integrations/llms/titan_takeoff.ipynb
@@ -140,7 +140,7 @@
    "outputs": [],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "llm = TitanTakeoff()\n",
     "\n",
diff --git a/docs/docs/integrations/llms/titan_takeoff_pro.ipynb b/docs/docs/integrations/llms/titan_takeoff_pro.ipynb
index 37d108fe3b3..b728556eed2 100644
--- a/docs/docs/integrations/llms/titan_takeoff_pro.ipynb
+++ b/docs/docs/integrations/llms/titan_takeoff_pro.ipynb
@@ -32,8 +32,8 @@
    "source": [
     "from langchain.callbacks.manager import CallbackManager\n",
     "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
-    "from langchain.prompts import PromptTemplate\n",
     "from langchain_community.llms import TitanTakeoffPro\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "# Example 1: Basic use\n",
     "llm = TitanTakeoffPro()\n",
diff --git a/docs/docs/integrations/llms/vllm.ipynb b/docs/docs/integrations/llms/vllm.ipynb
index 4d88a2714fa..6d45b102dc1 100644
--- a/docs/docs/integrations/llms/vllm.ipynb
+++ b/docs/docs/integrations/llms/vllm.ipynb
@@ -130,7 +130,7 @@
    ],
    "source": [
     "from langchain.chains import LLMChain\n",
-    "from langchain.prompts import PromptTemplate\n",
+    "from langchain_core.prompts import PromptTemplate\n",
     "\n",
     "template = \"\"\"Question: {question}\n",
     "\n",
diff --git a/docs/docs/integrations/llms/volcengine_maas.ipynb b/docs/docs/integrations/llms/volcengine_maas.ipynb
index cd0d37dd2f5..813f63e93e5 100644
--- a/docs/docs/integrations/llms/volcengine_maas.ipynb
+++ b/docs/docs/integrations/llms/volcengine_maas.ipynb
@@ -38,9 +38,9 @@
    },
    "outputs": [],
    "source": [
-    "from langchain.prompts import PromptTemplate\n",
     "from langchain_community.llms import VolcEngineMaasLLM\n",
-    "from langchain_core.output_parsers import StrOutputParser"
+    "from langchain_core.output_parsers import StrOutputParser\n",
+    "from langchain_core.prompts import PromptTemplate"
    ]
   },
   {
diff --git a/docs/docs/integrations/llms/writer.ipynb b/docs/docs/integrations/llms/writer.ipynb
index 5c2206d1f56..7488eff3efe 100644
--- a/docs/docs/integrations/llms/writer.ipynb
+++ b/docs/docs/integrations/llms/writer.ipynb
@@ -56,8 +56,8 @@
    "outputs": [],
    "source": [
"from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import Writer" + "from langchain_community.llms import Writer\n", + "from langchain_core.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/xinference.ipynb b/docs/docs/integrations/llms/xinference.ipynb index 5643750a4b9..714db52a43b 100644 --- a/docs/docs/integrations/llms/xinference.ipynb +++ b/docs/docs/integrations/llms/xinference.ipynb @@ -122,7 +122,7 @@ ], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import PromptTemplate\n", "\n", "template = \"Where can we visit in the capital of {country}?\"\n", "\n", diff --git a/docs/docs/integrations/llms/yandex.ipynb b/docs/docs/integrations/llms/yandex.ipynb index 2a91a7f7f42..d0d93b0146a 100644 --- a/docs/docs/integrations/llms/yandex.ipynb +++ b/docs/docs/integrations/llms/yandex.ipynb @@ -45,8 +45,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import YandexGPT" + "from langchain_community.llms import YandexGPT\n", + "from langchain_core.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/modules/callbacks/filecallbackhandler.ipynb b/docs/docs/modules/callbacks/filecallbackhandler.ipynb index 0223ef6d54d..0ca7f1e81a9 100644 --- a/docs/docs/modules/callbacks/filecallbackhandler.ipynb +++ b/docs/docs/modules/callbacks/filecallbackhandler.ipynb @@ -47,7 +47,7 @@ "source": [ "from langchain.callbacks import FileCallbackHandler\n", "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import PromptTemplate\n", "from langchain_openai import OpenAI\n", "from loguru import logger\n", "\n", diff --git a/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb b/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb index 814aba352a0..1b38be518e0 100644 --- a/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb +++ b/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb @@ -129,7 +129,7 @@ "\n", "from langchain.chains import LLMChain\n", "from langchain.output_parsers import PydanticOutputParser\n", - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import PromptTemplate\n", "from pydantic import BaseModel, Field\n", "\n", "\n", diff --git a/docs/docs/modules/memory/adding_memory.ipynb b/docs/docs/modules/memory/adding_memory.ipynb index bbfe51344a6..ba994224b07 100644 --- a/docs/docs/modules/memory/adding_memory.ipynb +++ b/docs/docs/modules/memory/adding_memory.ipynb @@ -25,7 +25,7 @@ "source": [ "from langchain.chains import LLMChain\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import PromptTemplate\n", "from langchain_openai import OpenAI" ] }, diff --git a/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb b/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb index 2806cf2f800..ddc4b5c4b8f 100644 --- a/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb +++ b/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb @@ -78,7 +78,7 @@ "source": [ "from langchain.chains.question_answering import load_qa_chain\n", "from langchain.memory import 
ConversationBufferMemory\n", - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import PromptTemplate\n", "from langchain_openai import OpenAI" ] }, diff --git a/docs/docs/modules/memory/index.mdx b/docs/docs/modules/memory/index.mdx index f9da94819e2..455dbd85209 100644 --- a/docs/docs/modules/memory/index.mdx +++ b/docs/docs/modules/memory/index.mdx @@ -164,7 +164,7 @@ We'll use an `LLMChain`, and show working with both an LLM and a ChatModel. ```python from langchain_openai import OpenAI -from langchain.prompts import PromptTemplate +from langchain_core.prompts import PromptTemplate from langchain.chains import LLMChain from langchain.memory import ConversationBufferMemory diff --git a/docs/docs/modules/memory/multiple_memory.ipynb b/docs/docs/modules/memory/multiple_memory.ipynb index 9a1e420c2c5..72281f6c7d0 100644 --- a/docs/docs/modules/memory/multiple_memory.ipynb +++ b/docs/docs/modules/memory/multiple_memory.ipynb @@ -23,7 +23,7 @@ " ConversationBufferMemory,\n", " ConversationSummaryMemory,\n", ")\n", - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import PromptTemplate\n", "from langchain_openai import OpenAI\n", "\n", "conv_memory = ConversationBufferMemory(\n", diff --git a/docs/docs/modules/memory/types/vectorstore_retriever_memory.mdx b/docs/docs/modules/memory/types/vectorstore_retriever_memory.mdx index a4d82133128..14c7ad0bdca 100644 --- a/docs/docs/modules/memory/types/vectorstore_retriever_memory.mdx +++ b/docs/docs/modules/memory/types/vectorstore_retriever_memory.mdx @@ -12,7 +12,7 @@ from langchain_openai import OpenAIEmbeddings from langchain_openai import OpenAI from langchain.memory import VectorStoreRetrieverMemory from langchain.chains import ConversationChain -from langchain.prompts import PromptTemplate +from langchain_core.prompts import PromptTemplate ``` ### Initialize your vector store diff --git a/docs/docs/use_cases/apis.ipynb b/docs/docs/use_cases/apis.ipynb index bdd063505a3..02415c48bcf 100644 --- a/docs/docs/use_cases/apis.ipynb +++ b/docs/docs/use_cases/apis.ipynb @@ -380,7 +380,7 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain, LLMRequestsChain\n", - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import PromptTemplate\n", "from langchain_openai import OpenAI" ] }, diff --git a/docs/docs/use_cases/data_generation.ipynb b/docs/docs/use_cases/data_generation.ipynb index 3c394bce6d8..5ac66c60818 100644 --- a/docs/docs/use_cases/data_generation.ipynb +++ b/docs/docs/use_cases/data_generation.ipynb @@ -491,7 +491,7 @@ "\n", "from langchain.chains import create_extraction_chain_pydantic\n", "from langchain.output_parsers import PydanticOutputParser\n", - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import PromptTemplate\n", "from langchain_openai import OpenAI\n", "from pydantic import BaseModel, Field" ] diff --git a/docs/docs/use_cases/summarization.ipynb b/docs/docs/use_cases/summarization.ipynb index bf45545875b..c637e0fa87e 100644 --- a/docs/docs/use_cases/summarization.ipynb +++ b/docs/docs/use_cases/summarization.ipynb @@ -246,7 +246,7 @@ "source": [ "from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n", "from langchain.chains.llm import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", + "from langchain_core.prompts import PromptTemplate\n", "\n", "# Define prompt\n", "prompt_template = \"\"\"Write a concise summary of the following:\n",