docs: PromptTemplate import from core (#19616)

Changed the import of `PromptTemplate` from `langchain` to `langchain_core` in all examples (notebooks).
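In short, each notebook swaps the legacy re-export for the canonical `langchain_core` location. A minimal before/after sketch (the template text is an illustrative placeholder, not taken from any of the touched notebooks):

```python
# Before: the legacy re-export via the `langchain` umbrella package
# from langchain.prompts import PromptTemplate

# After: the canonical location in langchain-core
from langchain_core.prompts import PromptTemplate

# Illustrative usage; behavior should be unchanged by the new import path
prompt = PromptTemplate.from_template("Question: {question}\n\nAnswer:")
print(prompt.format(question="What does this commit change?"))
```
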
Leonid Ganeline 2024-03-26 17:03:36 -07:00 committed by GitHub
parent 3dc0f3c371
commit 4d85485e71
70 changed files with 104 additions and 104 deletions

View File

@@ -294,7 +294,7 @@
},
"outputs": [],
"source": [
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"prompt_template = PromptTemplate.from_template(\n",
" \"\"\"Given the input context, which do you prefer: A or B?\n",

View File

@@ -380,7 +380,7 @@
},
"outputs": [],
"source": [
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"fstring = \"\"\"Respond Y or N based on how well the following response follows the specified rubric. Grade only based on the rubric and expected response:\n",
"\n",

View File

@@ -216,7 +216,7 @@
"outputs": [],
"source": [
"# Now lets create a chain with the normal OpenAI model\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI\n",
"\n",
"prompt_template = \"\"\"Instructions: You should always include a compliment in your response.\n",

View File

@@ -546,7 +546,7 @@
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.chains.prompt_selector import ConditionalPromptSelector\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"DEFAULT_LLAMA_SEARCH_PROMPT = PromptTemplate(\n",
" input_variables=[\"question\"],\n",

View File

@@ -30,8 +30,8 @@
"outputs": [],
"source": [
"from langchain.model_laboratory import ModelLaboratory\n",
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import Cohere, HuggingFaceHub\n",
+"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI"
]
},

View File

@@ -105,8 +105,8 @@
},
"outputs": [],
"source": [
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms.fake import FakeListLLM\n",
+"from langchain_core.prompts import PromptTemplate\n",
"from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (\n",
" ModerationPiiError,\n",
")\n",
@@ -242,8 +242,8 @@
},
"outputs": [],
"source": [
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms.fake import FakeListLLM\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"Question: {question}\n",
"\n",
@@ -405,8 +405,8 @@
},
"outputs": [],
"source": [
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms.fake import FakeListLLM\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"Question: {question}\n",
"\n",
@@ -566,8 +566,8 @@
},
"outputs": [],
"source": [
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import HuggingFaceHub\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"{question}\"\"\"\n",
"\n",
@@ -696,9 +696,9 @@
"source": [
"import json\n",
"\n",
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import SagemakerEndpoint\n",
"from langchain_community.llms.sagemaker_endpoint import LLMContentHandler\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"\n",
"class ContentHandler(LLMContentHandler):\n",

View File

@@ -13,7 +13,7 @@ content that may violate guidelines, be offensive, or deviate from the desired c
```python
# Imports
from langchain_openai import OpenAI
-from langchain.prompts import PromptTemplate
+from langchain_core.prompts import PromptTemplate
from langchain.chains.llm import LLMChain
from langchain.chains.constitutional_ai.base import ConstitutionalChain
```

View File

@@ -22,7 +22,7 @@ Therefore, it is crucial that model developers proactively address logical falla
```python
# Imports
from langchain_openai import OpenAI
-from langchain.prompts import PromptTemplate
+from langchain_core.prompts import PromptTemplate
from langchain.chains.llm import LLMChain
from langchain_experimental.fallacy_removal.base import FallacyChain
```

View File

@@ -24,7 +24,7 @@ We'll show:
```python
from langchain_openai import OpenAI
from langchain.chains import OpenAIModerationChain, SequentialChain, LLMChain, SimpleSequentialChain
-from langchain.prompts import PromptTemplate
+from langchain_core.prompts import PromptTemplate
```
## How to use the moderation chain

View File

@@ -58,8 +58,8 @@
},
"outputs": [],
"source": [
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import AlephAlpha"
+"from langchain_community.llms import AlephAlpha\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -49,8 +49,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import Anyscale"
+"from langchain_community.llms import Anyscale\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -146,7 +146,7 @@
],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"Question: {question}\n",
"\n",

View File

@@ -228,8 +228,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms.azureml_endpoint import DollyContentFormatter\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"formatter_template = \"Write a {word_count} word essay about {topic}.\"\n",
"\n",

View File

@@ -52,8 +52,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import Banana"
+"from langchain_community.llms import Banana\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -94,7 +94,7 @@
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.memory import ConversationBufferWindowMemory\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"Assistant is a large language model trained by OpenAI.\n",
"\n",

View File

@@ -82,8 +82,8 @@
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.globals import set_debug\n",
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import NIBittensorLLM\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"set_debug(True)\n",
"\n",
@@ -142,8 +142,8 @@
")\n",
"from langchain.chains import LLMChain\n",
"from langchain.memory import ConversationBufferMemory\n",
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import NIBittensorLLM\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
"\n",

View File

@@ -45,8 +45,8 @@
"import os\n",
"\n",
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import CerebriumAI"
+"from langchain_community.llms import CerebriumAI\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -41,9 +41,9 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
"from langchain.schema.messages import AIMessage\n",
-"from langchain_community.llms.chatglm3 import ChatGLM3"
+"from langchain_community.llms.chatglm3 import ChatGLM3\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{
@@ -117,8 +117,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import ChatGLM\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"# import os"
]

View File

@@ -87,8 +87,8 @@
"source": [
"# Import the required modules\n",
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import Clarifai"
+"from langchain_community.llms import Clarifai\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -19,8 +19,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"Human: {question}\n",
"\n",

View File

@@ -103,7 +103,7 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"Question: {question}\n",
"\n",

View File

@@ -196,7 +196,7 @@
],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"{question}\n",
"\n",

View File

@@ -140,7 +140,7 @@
"metadata": {},
"outputs": [],
"source": [
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"Question: {question}\n",
"\n",

View File

@@ -98,7 +98,7 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"llm = EdenAI(\n",
" feature=\"text\",\n",
@@ -220,7 +220,7 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain, SimpleSequentialChain\n",
-"from langchain.prompts import PromptTemplate"
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -182,7 +182,7 @@
}
],
"source": [
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"from langchain_fireworks import Fireworks\n",
"\n",
"llm = Fireworks(\n",

View File

@@ -28,8 +28,8 @@
"import os\n",
"\n",
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import ForefrontAI"
+"from langchain_community.llms import ForefrontAI\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -80,7 +80,7 @@
],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"What is capital of {country}?\"\n",
"\n",

View File

@@ -180,7 +180,7 @@
"metadata": {},
"outputs": [],
"source": [
-"from langchain.prompts import PromptTemplate"
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -44,8 +44,8 @@
"import os\n",
"\n",
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import GooseAI"
+"from langchain_community.llms import GooseAI\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -49,8 +49,8 @@
"source": [
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import GPT4All"
+"from langchain_community.llms import GPT4All\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -25,8 +25,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import GradientLLM"
+"from langchain_community.llms import GradientLLM\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -107,7 +107,7 @@
"metadata": {},
"outputs": [],
"source": [
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"Question: {question}\n",
"\n",

View File

@@ -92,8 +92,8 @@
],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import JavelinAIGateway\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"route_completions = \"eng_dept03\"\n",
"\n",

View File

@@ -81,7 +81,7 @@
"source": [
"# Map reduce example\n",
"from langchain.chains.mapreduce import MapReduceChain\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"from langchain_text_splitters import CharacterTextSplitter\n",
"\n",
"_prompt = \"\"\"Write a concise summary of the following:\n",

View File

@@ -97,8 +97,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import Minimax"
+"from langchain_community.llms import Minimax\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -108,8 +108,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import Modal"
+"from langchain_community.llms import Modal\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -43,8 +43,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import MosaicML"
+"from langchain_community.llms import MosaicML\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -73,8 +73,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import NLPCloud"
+"from langchain_community.llms import NLPCloud\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -40,8 +40,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms.octoai_endpoint import OctoAIEndpoint"
+"from langchain_community.llms.octoai_endpoint import OctoAIEndpoint\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -62,8 +62,8 @@
"from langchain.chains import LLMChain\n",
"from langchain.globals import set_debug, set_verbose\n",
"from langchain.memory import ConversationBufferWindowMemory\n",
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import OpaquePrompts\n",
+"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI\n",
"\n",
"set_debug(True)\n",

View File

@@ -67,7 +67,7 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI"
]
},

View File

@@ -115,7 +115,7 @@
],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"What is a good name for a company that makes {product}?\"\n",
"\n",

View File

@@ -69,8 +69,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import OpenLM"
+"from langchain_community.llms import OpenLM\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -46,8 +46,8 @@
"import os\n",
"\n",
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import Petals"
+"from langchain_community.llms import Petals\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -51,8 +51,8 @@
"import os\n",
"\n",
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import PipelineAI"
+"from langchain_community.llms import PipelineAI\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -96,7 +96,7 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate"
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -32,8 +32,8 @@
"import os\n",
"\n",
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import PredictionGuard"
+"from langchain_community.llms import PredictionGuard\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -104,8 +104,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import Replicate"
+"from langchain_community.llms import Replicate\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -45,8 +45,8 @@
"source": [
"import runhouse as rh\n",
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import SelfHostedHuggingFaceLLM, SelfHostedPipeline"
+"from langchain_community.llms import SelfHostedHuggingFaceLLM, SelfHostedPipeline\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -104,9 +104,9 @@
"\n",
"import boto3\n",
"from langchain.chains.question_answering import load_qa_chain\n",
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import SagemakerEndpoint\n",
"from langchain_community.llms.sagemaker_endpoint import LLMContentHandler\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"query = \"\"\"How long was Elizabeth hospitalized?\n",
"\"\"\"\n",
@@ -174,9 +174,9 @@
"from typing import Dict\n",
"\n",
"from langchain.chains.question_answering import load_qa_chain\n",
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import SagemakerEndpoint\n",
"from langchain_community.llms.sagemaker_endpoint import LLMContentHandler\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"query = \"\"\"How long was Elizabeth hospitalized?\n",
"\"\"\"\n",

View File

@@ -80,8 +80,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import StochasticAI"
+"from langchain_community.llms import StochasticAI\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -59,7 +59,7 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"conversation = \"\"\"Sam: Good morning, team! Let's keep this standup concise. We'll go in the usual order: what you did yesterday, what you plan to do today, and any blockers. Alex, kick us off.\n",
"Alex: Morning! Yesterday, I wrapped up the UI for the user dashboard. The new charts and widgets are now responsive. I also had a sync with the design team to ensure the final touchups are in line with the brand guidelines. Today, I'll start integrating the frontend with the new API endpoints Rhea was working on. The only blocker is waiting for some final API documentation, but I guess Rhea can update on that.\n",

View File

@@ -43,8 +43,8 @@
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.globals import set_debug\n",
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import TextGen\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"set_debug(True)\n",
"\n",
@@ -94,8 +94,8 @@
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain.chains import LLMChain\n",
"from langchain.globals import set_debug\n",
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import TextGen\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"set_debug(True)\n",
"\n",

View File

@@ -140,7 +140,7 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"llm = TitanTakeoff()\n",
"\n",

View File

@@ -32,8 +32,8 @@
"source": [
"from langchain.callbacks.manager import CallbackManager\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import TitanTakeoffPro\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"# Example 1: Basic use\n",
"llm = TitanTakeoffPro()\n",

View File

@@ -130,7 +130,7 @@
],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"Question: {question}\n",
"\n",

View File

@@ -38,9 +38,9 @@
},
"outputs": [],
"source": [
-"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import VolcEngineMaasLLM\n",
-"from langchain_core.output_parsers import StrOutputParser"
+"from langchain_core.output_parsers import StrOutputParser\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -56,8 +56,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import Writer"
+"from langchain_community.llms import Writer\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -122,7 +122,7 @@
],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"Where can we visit in the capital of {country}?\"\n",
"\n",

View File

@@ -45,8 +45,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
-"from langchain_community.llms import YandexGPT"
+"from langchain_community.llms import YandexGPT\n",
+"from langchain_core.prompts import PromptTemplate"
]
},
{

View File

@@ -47,7 +47,7 @@
"source": [
"from langchain.callbacks import FileCallbackHandler\n",
"from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI\n",
"from loguru import logger\n",
"\n",

View File

@@ -129,7 +129,7 @@
"\n",
"from langchain.chains import LLMChain\n",
"from langchain.output_parsers import PydanticOutputParser\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"from pydantic import BaseModel, Field\n",
"\n",
"\n",

View File

@@ -25,7 +25,7 @@
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.memory import ConversationBufferMemory\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI"
]
},

View File

@@ -78,7 +78,7 @@
"source": [
"from langchain.chains.question_answering import load_qa_chain\n",
"from langchain.memory import ConversationBufferMemory\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI"
]
},

View File

@@ -164,7 +164,7 @@ We'll use an `LLMChain`, and show working with both an LLM and a ChatModel.
```python
from langchain_openai import OpenAI
-from langchain.prompts import PromptTemplate
+from langchain_core.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory

View File

@@ -23,7 +23,7 @@
" ConversationBufferMemory,\n",
" ConversationSummaryMemory,\n",
")\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI\n",
"\n",
"conv_memory = ConversationBufferMemory(\n",

View File

@@ -12,7 +12,7 @@ from langchain_openai import OpenAIEmbeddings
from langchain_openai import OpenAI
from langchain.memory import VectorStoreRetrieverMemory
from langchain.chains import ConversationChain
-from langchain.prompts import PromptTemplate
+from langchain_core.prompts import PromptTemplate
```
### Initialize your vector store

View File

@@ -380,7 +380,7 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain, LLMRequestsChain\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI"
]
},

View File

@@ -491,7 +491,7 @@
"\n",
"from langchain.chains import create_extraction_chain_pydantic\n",
"from langchain.output_parsers import PydanticOutputParser\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI\n",
"from pydantic import BaseModel, Field"
]

View File

@@ -246,7 +246,7 @@
"source": [
"from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n",
"from langchain.chains.llm import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
"\n",
"# Define prompt\n",
"prompt_template = \"\"\"Write a concise summary of the following:\n",