Mirror of https://github.com/hwchase17/langchain.git, synced 2025-06-22 14:49:29 +00:00
docs: imports update (#20625)
Updated imports in docs

Co-authored-by: Erick Friis <erick@langchain.dev>
parent 53ae77b13e
commit 27a4682415
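Every hunk below applies the same mechanical change: imports that previously went through the `langchain` umbrella package (e.g. `langchain.prompts`) now point at their canonical homes in `langchain_core`, `langchain_community`, or partner packages such as `langchain_openai`. A minimal before/after sketch of the pattern, assuming `langchain-core` and `langchain-openai` are installed; the prompt text and model usage are illustrative, not taken from the diff:

```python
# Before (deprecated re-export path):
# from langchain.prompts import PromptTemplate

# After (canonical locations):
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

prompt = PromptTemplate.from_template("Tell me a joke about {topic}")
llm = ChatOpenAI(model="gpt-3.5-turbo")  # illustrative model name

# Compose with the LCEL pipe operator and invoke.
chain = prompt | llm
print(chain.invoke({"topic": "bears"}).content)
```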
@@ -63,7 +63,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
 "from langchain_core.runnables import ConfigurableField\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
@@ -285,8 +285,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts import PromptTemplate\n",
 "from langchain_community.chat_models import ChatAnthropic\n",
+"from langchain_core.prompts import PromptTemplate\n",
 "from langchain_core.runnables import ConfigurableField\n",
 "from langchain_openai import ChatOpenAI"
 ]
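The two hunks above touch a notebook built around `ConfigurableField`. A hedged sketch of what that import is used for, with the updated paths; the field id, name, and description are illustrative assumptions, not taken from the diff:

```python
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI

# Expose temperature as a runtime-configurable field on the model.
llm = ChatOpenAI(temperature=0).configurable_fields(
    temperature=ConfigurableField(
        id="llm_temperature",  # illustrative id
        name="LLM Temperature",
        description="Sampling temperature of the LLM",
    )
)

# Override the configurable field for a single invocation.
result = llm.with_config(configurable={"llm_temperature": 0.9}).invoke(
    "Pick a random number"
)
print(result.content)
```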
@@ -137,7 +137,7 @@
 }
 ],
 "source": [
-"from langchain.prompts.prompt import PromptTemplate\n",
+"from langchain_core.prompts.prompt import PromptTemplate\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
 "anonymizer = PresidioAnonymizer()\n",
@@ -878,8 +878,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts.prompt import PromptTemplate\n",
 "from langchain_core.prompts import format_document\n",
+"from langchain_core.prompts.prompt import PromptTemplate\n",
 "\n",
 "DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template=\"{page_content}\")\n",
 "\n",
@@ -207,7 +207,7 @@
 }
 ],
 "source": [
-"from langchain.prompts.prompt import PromptTemplate\n",
+"from langchain_core.prompts.prompt import PromptTemplate\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
 "anonymizer = PresidioReversibleAnonymizer()\n",
@@ -151,7 +151,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts import (\n",
+"from langchain_core.prompts import (\n",
 "    ChatPromptTemplate,\n",
 "    FewShotChatMessagePromptTemplate,\n",
 ")\n",
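The hunk above relocates the few-shot chat imports. A minimal sketch of how those two classes fit together under the new path, assuming `langchain-core` is installed; the arithmetic examples are illustrative:

```python
from langchain_core.prompts import (
    ChatPromptTemplate,
    FewShotChatMessagePromptTemplate,
)

# Template for rendering a single example as a human/AI message pair.
example_prompt = ChatPromptTemplate.from_messages(
    [("human", "{input}"), ("ai", "{output}")]
)

# Wrap a fixed set of examples into a reusable few-shot block.
few_shot_prompt = FewShotChatMessagePromptTemplate(
    example_prompt=example_prompt,
    examples=[
        {"input": "2+2", "output": "4"},
        {"input": "2+3", "output": "5"},
    ],
)
print(few_shot_prompt.format())
```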
@@ -60,9 +60,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts.chat import ChatPromptTemplate\n",
 "from langchain_community.chat_models import ChatMaritalk\n",
 "from langchain_core.output_parsers import StrOutputParser\n",
+"from langchain_core.prompts.chat import ChatPromptTemplate\n",
 "\n",
 "llm = ChatMaritalk(\n",
 "    model=\"sabia-2-medium\",  # Available models: sabia-2-small and sabia-2-medium\n",
@@ -79,8 +79,8 @@
 "\n",
 "from gqlalchemy import Memgraph\n",
 "from langchain.chains import GraphCypherQAChain\n",
-"from langchain.prompts import PromptTemplate\n",
 "from langchain_community.graphs import MemgraphGraph\n",
+"from langchain_core.prompts import PromptTemplate\n",
 "from langchain_openai import ChatOpenAI"
 ]
 },
@@ -389,7 +389,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts.prompt import PromptTemplate\n",
+"from langchain_core.prompts.prompt import PromptTemplate\n",
 "\n",
 "CYPHER_GENERATION_TEMPLATE = \"\"\"Task:Generate Cypher statement to query a graph database.\n",
 "Instructions:\n",
@@ -16,8 +16,8 @@
 "outputs": [],
 "source": [
 "from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
 "from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint\n",
+"from langchain_core.prompts import PromptTemplate\n",
 "\n",
 "template = \"\"\"Question: {question}\n",
 "\n",
@@ -210,7 +210,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
 "\n",
 "template = \"Generate a random question about {topic}: Question: \"\n",
 "prompt = PromptTemplate.from_template(template)"
@@ -103,7 +103,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
 "\n",
 "template = \"\"\"Question: {question}\n",
 "\n",
@@ -116,7 +116,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
 "\n",
 "template = \"\"\"Question: {question}\n",
 "\n",
@@ -31,8 +31,8 @@
 "outputs": [],
 "source": [
 "from langchain.chains import LLMChain\n",
-"from langchain.prompts import PromptTemplate\n",
 "from langchain_community.llms.solar import Solar\n",
+"from langchain_core.prompts import PromptTemplate\n",
 "\n",
 "template = \"\"\"Question: {question}\n",
 "\n",
@@ -115,7 +115,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
 "\n",
 "template = \"\"\"Question: {question}\n",
 "\n",
@@ -415,7 +415,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
 "\n",
 "prompt_template = \"\"\"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n",
 "\n",
@@ -403,7 +403,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts import PromptTemplate\n",
+"from langchain_core.prompts import PromptTemplate\n",
 "\n",
 "prompt_template = \"\"\"\n",
 "You are an expert in state of the union topics. You are provided multiple context items that are related to the prompt you have to answer.\n",
@@ -17,8 +17,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n",
-"from langchain.prompts.example_selector import LengthBasedExampleSelector\n",
+"from langchain_core.example_selectors import LengthBasedExampleSelector\n",
+"from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate\n",
 "\n",
 "# Examples of a pretend task of creating antonyms.\n",
 "examples = [\n",
@@ -17,12 +17,12 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n",
-"from langchain.prompts.example_selector import (\n",
+"from langchain_community.vectorstores import FAISS\n",
+"from langchain_core.example_selectors import (\n",
 "    MaxMarginalRelevanceExampleSelector,\n",
 "    SemanticSimilarityExampleSelector,\n",
 ")\n",
-"from langchain_community.vectorstores import FAISS\n",
+"from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate\n",
 "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
 "example_prompt = PromptTemplate(\n",
@@ -19,8 +19,10 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n",
-"from langchain.prompts.example_selector.ngram_overlap import NGramOverlapExampleSelector\n",
+"from langchain_community.example_selector.ngram_overlap import (\n",
+"    NGramOverlapExampleSelector,\n",
+")\n",
+"from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate\n",
 "\n",
 "example_prompt = PromptTemplate(\n",
 "    input_variables=[\"input\", \"output\"],\n",
@@ -17,9 +17,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n",
-"from langchain.prompts.example_selector import SemanticSimilarityExampleSelector\n",
 "from langchain_chroma import Chroma\n",
+"from langchain_core.example_selectors import SemanticSimilarityExampleSelector\n",
+"from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate\n",
 "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
 "example_prompt = PromptTemplate(\n",
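The example-selector hunks above all move selectors to `langchain_core.example_selectors`. A hedged sketch of the semantic-similarity variant under the new paths, assuming `langchain-core`, `langchain-chroma`, and `langchain-openai` are installed and `OPENAI_API_KEY` is set; the antonym examples mirror the "pretend task of creating antonyms" in the diff, and the prefix/suffix text is illustrative:

```python
from langchain_chroma import Chroma
from langchain_core.example_selectors import SemanticSimilarityExampleSelector
from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate
from langchain_openai import OpenAIEmbeddings

example_prompt = PromptTemplate(
    input_variables=["input", "output"],
    template="Input: {input}\nOutput: {output}",
)
examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
]

# Embed the examples and pick the k most similar ones to the query input.
example_selector = SemanticSimilarityExampleSelector.from_examples(
    examples, OpenAIEmbeddings(), Chroma, k=1
)
few_shot = FewShotPromptTemplate(
    example_selector=example_selector,
    example_prompt=example_prompt,
    prefix="Give the antonym of every input.",
    suffix="Input: {adjective}\nOutput:",
    input_variables=["adjective"],
)
print(few_shot.format(adjective="big"))
```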
@@ -183,7 +183,7 @@ Each `ChatMessageTemplate` contains instructions for how to format that `ChatMessage`.
 Let's take a look at this below:
 
 ```python
-from langchain.prompts.chat import ChatPromptTemplate
+from langchain_core.prompts.chat import ChatPromptTemplate
 
 template = "You are a helpful assistant that translates {input_language} to {output_language}."
 human_template = "{text}"
@@ -287,8 +287,8 @@
 "from langchain.agents import AgentExecutor\n",
 "from langchain.agents.format_scratchpad import format_to_openai_function_messages\n",
 "from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n",
-"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
 "from langchain_core.messages import AIMessage, HumanMessage\n",
+"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
 "from langchain_core.utils.function_calling import convert_to_openai_function\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
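The agent hunk above relocates `ChatPromptTemplate` and `MessagesPlaceholder` to `langchain_core.prompts`. A minimal sketch of how those relocated imports compose, assuming only `langchain-core` is installed; the system message and chat history are illustrative:

```python
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

# MessagesPlaceholder reserves a slot for an arbitrary list of messages,
# such as an agent scratchpad or prior chat history.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{input}"),
    ]
)

messages = prompt.format_messages(
    chat_history=[HumanMessage(content="hi"), AIMessage(content="hello!")],
    input="What did I just say?",
)
for m in messages:
    print(type(m).__name__, ":", m.content)
```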