docs[patch], templates[patch]: Import from core (#14575)

Update imports to use `langchain_core` for the low-hanging fruit changes. Ran the following:

```bash
git grep -l 'langchain.schema.runnable' {docs,templates,cookbook}  | xargs sed -i '' 's/langchain\.schema\.runnable/langchain_core.runnables/g'
git grep -l 'langchain.schema.output_parser' {docs,templates,cookbook} | xargs sed -i '' 's/langchain\.schema\.output_parser/langchain_core.output_parsers/g'
git grep -l 'langchain.schema.messages' {docs,templates,cookbook} | xargs sed -i '' 's/langchain\.schema\.messages/langchain_core.messages/g'
git grep -l 'langchain.schema.chat_history' {docs,templates,cookbook} | xargs sed -i '' 's/langchain\.schema\.chat_history/langchain_core.chat_history/g'
git grep -l 'langchain.schema.prompt_template' {docs,templates,cookbook} | xargs sed -i '' 's/langchain\.schema\.prompt_template/langchain_core.prompts/g'
git grep -l 'from langchain.pydantic_v1' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.pydantic_v1/from langchain_core.pydantic_v1/g'
git grep -l 'from langchain.tools.base' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.tools\.base/from langchain_core.tools/g'
git grep -l 'from langchain.chat_models.base' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.chat_models\.base/from langchain_core.language_models.chat_models/g'
git grep -l 'from langchain.llms.base' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.llms\.base /from langchain_core.language_models.llms /g'
git grep -l 'from langchain.embeddings.base' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.embeddings\.base/from langchain_core.embeddings/g'
git grep -l 'from langchain.vectorstores.base' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.vectorstores\.base/from langchain_core.vectorstores/g'
git grep -l 'from langchain.agents.tools' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.agents\.tools/from langchain_core.tools/g'
git grep -l 'from langchain.schema.output' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.output /from langchain_core.outputs /g'
git grep -l 'from langchain.schema.embeddings' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.embeddings/from langchain_core.embeddings/g'
git grep -l 'from langchain.schema.document' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.document/from langchain_core.documents/g'
git grep -l 'from langchain.schema.agent' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.agent/from langchain_core.agents/g'
git grep -l 'from langchain.schema.prompt ' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.prompt /from langchain_core.prompt_values /g'
git grep -l 'from langchain.schema.language_model' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.language_model/from langchain_core.language_models/g'
```
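
Note: `sed -i ''` is the BSD/macOS form of in-place editing; GNU sed takes a bare `-i`. A spot-check like the sketch below (assuming the same checkout, and covering only a few of the migrated modules) should come back empty once the rewrite has run:

```bash
# Hypothetical spot-check: any hits here are imports the rewrite missed.
git grep -n 'langchain\.schema\.runnable\|langchain\.schema\.output_parser\|langchain\.schema\.messages' docs templates cookbook
```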
Bagatur 2023-12-11 16:49:10 -08:00 committed by GitHub
parent 0a9d933bb2
commit 9ffca3b92a
163 changed files with 368 additions and 369 deletions
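
To reproduce the change summary above from a local clone, a minimal sketch (assuming the parent and commit hashes shown in the metadata):

```bash
# Per-file additions/deletions for this commit.
git show --stat 9ffca3b92a

# The same stat, restricted to the directories the migration touched.
git diff --stat 0a9d933bb2 9ffca3b92a -- docs templates cookbook
```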

View File

@@ -164,8 +164,8 @@
")\n",
"\n",
"# Chain to query\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"sql_response = (\n",
" RunnablePassthrough.assign(schema=get_schema)\n",
@@ -293,7 +293,7 @@
"memory = ConversationBufferMemory(return_messages=True)\n",
"\n",
"# Chain to query with memory\n",
"from langchain.schema.runnable import RunnableLambda\n",
"from langchain_core.runnables import RunnableLambda\n",
"\n",
"sql_chain = (\n",
" RunnablePassthrough.assign(\n",

View File

@@ -200,7 +200,7 @@
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"\n",
"# Generate summaries of text elements\n",
@@ -270,7 +270,7 @@
"import base64\n",
"import os\n",
"\n",
"from langchain.schema.messages import HumanMessage\n",
"from langchain_core.messages import HumanMessage\n",
"\n",
"\n",
"def encode_image(image_path):\n",
@@ -355,9 +355,9 @@
"\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.schema.document import Document\n",
"from langchain.storage import InMemoryStore\n",
"from langchain.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",
"def create_multi_vector_retriever(\n",
@@ -442,7 +442,7 @@
"import re\n",
"\n",
"from IPython.display import HTML, display\n",
"from langchain.schema.runnable import RunnableLambda, RunnablePassthrough\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"from PIL import Image\n",
"\n",
"\n",

View File

@@ -237,7 +237,7 @@
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser"
"from langchain_core.output_parsers import StrOutputParser"
]
},
{
@@ -320,9 +320,9 @@
"\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.schema.document import Document\n",
"from langchain.storage import InMemoryStore\n",
"from langchain.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"\n",
"# The vectorstore to use to index the child chunks\n",
"vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n",
@@ -374,7 +374,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"# Prompt template\n",
"template = \"\"\"Answer the question based only on the following context, which can include text and tables:\n",

View File

@@ -213,7 +213,7 @@
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser"
"from langchain_core.output_parsers import StrOutputParser"
]
},
{
@@ -375,9 +375,9 @@
"\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.schema.document import Document\n",
"from langchain.storage import InMemoryStore\n",
"from langchain.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"\n",
"# The vectorstore to use to index the child chunks\n",
"vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n",
@@ -646,7 +646,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"# Prompt template\n",
"template = \"\"\"Answer the question based only on the following context, which can include text and tables:\n",

View File

@@ -211,7 +211,7 @@
"source": [
"from langchain.chat_models import ChatOllama\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser"
"from langchain_core.output_parsers import StrOutputParser"
]
},
{
@@ -378,9 +378,9 @@
"\n",
"from langchain.embeddings import GPT4AllEmbeddings\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.schema.document import Document\n",
"from langchain.storage import InMemoryStore\n",
"from langchain.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"\n",
"# The vectorstore to use to index the child chunks\n",
"vectorstore = Chroma(\n",
@@ -532,7 +532,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"# Prompt template\n",
"template = \"\"\"Answer the question based only on the following context, which can include text and tables:\n",

View File

@@ -162,7 +162,7 @@
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"# Prompt\n",
"prompt_text = \"\"\"You are an assistant tasked with summarizing tables and text for retrieval. \\\n",
@@ -202,7 +202,7 @@
"import os\n",
"from io import BytesIO\n",
"\n",
"from langchain.schema.messages import HumanMessage\n",
"from langchain_core.messages import HumanMessage\n",
"from PIL import Image\n",
"\n",
"\n",
@@ -273,8 +273,8 @@
"from base64 import b64decode\n",
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.schema.document import Document\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",
"def create_multi_vector_retriever(\n",
@@ -475,7 +475,7 @@
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"# Prompt\n",
"template = \"\"\"Answer the question based only on the following context, which can include text and tables:\n",
@@ -521,7 +521,7 @@
"import re\n",
"\n",
"from langchain.schema import Document\n",
"from langchain.schema.runnable import RunnableLambda\n",
"from langchain_core.runnables import RunnableLambda\n",
"\n",
"\n",
"def looks_like_base64(sb):\n",

View File

@@ -476,7 +476,7 @@
" HumanMessagePromptTemplate,\n",
" SystemMessagePromptTemplate,\n",
")\n",
"from langchain.schema.output_parser import StrOutputParser"
"from langchain_core.output_parsers import StrOutputParser"
]
},
{
@@ -547,9 +547,9 @@
"\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.schema.document import Document\n",
"from langchain.storage import InMemoryStore\n",
"from langchain.vectorstores.chroma import Chroma\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",
"def build_retriever(text_elements, tables, table_summaries):\n",
@@ -605,7 +605,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"system_prompt = SystemMessagePromptTemplate.from_template(\n",
" \"You are a helpful assistant that answers questions based on provided context. Your provided context can include text or tables, \"\n",

View File

@@ -23,7 +23,7 @@
"\n",
"from langchain.chains.openai_tools import create_extraction_chain_pydantic\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.pydantic_v1 import BaseModel"
"from langchain_core.pydantic_v1 import BaseModel"
]
},
{
@@ -151,11 +151,11 @@
"\n",
"from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
"from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n",
"from langchain.schema.runnable import Runnable\n",
"from langchain.pydantic_v1 import BaseModel\n",
"from langchain_core.runnables import Runnable\n",
"from langchain_core.pydantic_v1 import BaseModel\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.messages import SystemMessage\n",
"from langchain.schema.language_model import BaseLanguageModel\n",
"from langchain_core.messages import SystemMessage\n",
"from langchain_core.language_models import BaseLanguageModel\n",
"\n",
"_EXTRACTION_TEMPLATE = \"\"\"Extract and save the relevant entities mentioned \\\n",
"in the following passage together with their properties.\n",

View File

@@ -92,7 +92,7 @@
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.schema.messages import HumanMessage, SystemMessage"
"from langchain_core.messages import HumanMessage, SystemMessage"
]
},
{

View File

@@ -316,9 +316,9 @@
"from operator import itemgetter\n",
"\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.schema.messages import HumanMessage, SystemMessage\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnableLambda, RunnablePassthrough\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"\n",
"\n",
"def prompt_func(data_dict):\n",

View File

@@ -29,7 +29,7 @@
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.schema.messages import HumanMessage, SystemMessage"
"from langchain_core.messages import HumanMessage, SystemMessage"
]
},
{
@@ -252,7 +252,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.agent import AgentFinish\n",
"from langchain_core.agents import AgentFinish\n",
"\n",
"\n",
"def execute_agent(agent, tools, input):\n",
@@ -457,8 +457,8 @@
"\n",
"from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.pydantic_v1 import BaseModel, Field\n",
"from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class GetCurrentWeather(BaseModel):\n",

View File

@@ -29,11 +29,11 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.tools import Tool\n",
"from langchain.chains import LLMMathChain\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.llms import OpenAI\n",
"from langchain.utilities import DuckDuckGoSearchAPIWrapper\n",
"from langchain_core.tools import Tool\n",
"from langchain_experimental.plan_and_execute import (\n",
" PlanAndExecute,\n",
" load_agent_executor,\n",

View File

@@ -87,7 +87,7 @@
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.schema.output_parser import StrOutputParser"
"from langchain_core.output_parsers import StrOutputParser"
]
},
{

View File

@@ -268,8 +268,8 @@
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"db = SQLDatabase.from_uri(\n",
" CONNECTION_STRING\n",
@@ -324,7 +324,7 @@
"source": [
"import re\n",
"\n",
"from langchain.schema.runnable import RunnableLambda\n",
"from langchain_core.runnables import RunnableLambda\n",
"\n",
"\n",
"def replace_brackets(match):\n",

View File

@@ -33,9 +33,9 @@
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain.utilities import DuckDuckGoSearchAPIWrapper"
"from langchain.utilities import DuckDuckGoSearchAPIWrapper\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough"
]
},
{

View File

@@ -19,8 +19,8 @@
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.prompt import PromptValue"
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompt_values import PromptValue"
]
},
{

View File

@@ -25,8 +25,8 @@
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnableLambda"
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableLambda"
]
},
{

View File

@@ -21,7 +21,7 @@
"from langchain.prompts import (\n",
" ChatPromptTemplate,\n",
")\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_experimental.utilities import PythonREPL"
]
},

View File

@@ -22,9 +22,9 @@
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnableLambda, RunnablePassthrough\n",
"from langchain.utils.math import cosine_similarity\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"\n",
"physics_template = \"\"\"You are a very smart physics professor. \\\n",
"You are great at answering questions about physics in a concise and easy to understand manner. \\\n",

View File

@@ -22,7 +22,7 @@
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain.schema.runnable import RunnableLambda, RunnablePassthrough\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"\n",
"model = ChatOpenAI()\n",
"prompt = ChatPromptTemplate.from_messages(\n",

View File

@@ -69,7 +69,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"prompt1 = ChatPromptTemplate.from_template(\n",
" \"generate a {attribute} color. Return the name of the color and nothing else:\"\n",

View File

@@ -191,7 +191,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"chain = prompt | model | StrOutputParser()"
]
@@ -327,7 +327,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.runnable import RunnableParallel, RunnablePassthrough\n",
"from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
"\n",
"map_ = RunnableParallel(foo=RunnablePassthrough())\n",
"chain = (\n",

View File

@@ -41,9 +41,9 @@
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnableLambda, RunnablePassthrough\n",
"from langchain.vectorstores import FAISS"
"from langchain.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough"
]
},
{
@@ -171,9 +171,8 @@
"outputs": [],
"source": [
"from langchain.schema import format_document\n",
"from langchain.schema.messages import get_buffer_string\n",
"from langchain.schema.runnable import RunnableParallel\n",
"from langchain_core.messages import AIMessage, HumanMessage"
"from langchain_core.messages import AIMessage, HumanMessage, get_buffer_string\n",
"from langchain_core.runnables import RunnableParallel"
]
},
{

View File

@@ -94,8 +94,8 @@
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"model = ChatOpenAI()\n",
"\n",

View File

@@ -29,8 +29,8 @@
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.tools import DuckDuckGoSearchRun"
"from langchain.tools import DuckDuckGoSearchRun\n",
"from langchain_core.output_parsers import StrOutputParser"
]
},
{

View File

@@ -49,7 +49,7 @@
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"tell me a short joke about {topic}\")\n",
"model = ChatOpenAI()\n",
@@ -326,9 +326,9 @@
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnableParallel, RunnablePassthrough\n",
"from langchain.vectorstores import DocArrayInMemorySearch\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
"\n",
"vectorstore = DocArrayInMemorySearch.from_texts(\n",
" [\"harrison worked at kensho\", \"bears like to eat honey\"],\n",

View File

@@ -22,7 +22,7 @@
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough"
"from langchain_core.runnables import RunnablePassthrough"
]
},
{

View File

@@ -43,7 +43,7 @@
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.schema.runnable import ConfigurableField\n",
"from langchain_core.runnables import ConfigurableField\n",
"\n",
"model = ChatOpenAI(temperature=0).configurable_fields(\n",
" temperature=ConfigurableField(\n",
@@ -265,7 +265,7 @@
"source": [
"from langchain.chat_models import ChatAnthropic, ChatOpenAI\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.schema.runnable import ConfigurableField"
"from langchain_core.runnables import ConfigurableField"
]
},
{

View File

@@ -216,7 +216,7 @@
"source": [
"# First let's create a chain with a ChatModel\n",
"# We add in a string output parser here so the outputs between the two are the same type\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"chat_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",

View File

@@ -34,7 +34,7 @@
"\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.runnable import RunnableLambda\n",
"from langchain_core.runnables import RunnableLambda\n",
"\n",
"\n",
"def length_function(text):\n",
@@ -103,8 +103,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnableConfig"
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableConfig"
]
},
{

View File

@@ -34,7 +34,7 @@
"\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts.chat import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\n",
" \"Write a comma-separated list of 5 animals similar to: {animal}\"\n",

View File

@@ -47,9 +47,9 @@
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"vectorstore = FAISS.from_texts(\n",
" [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",
@@ -131,9 +131,9 @@
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"vectorstore = FAISS.from_texts(\n",
" [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",
@@ -194,7 +194,7 @@
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.runnable import RunnableParallel\n",
"from langchain_core.runnables import RunnableParallel\n",
"\n",
"model = ChatOpenAI()\n",
"joke_chain = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\") | model\n",

View File

@@ -132,8 +132,8 @@
"from langchain.chat_models import ChatAnthropic\n",
"from langchain.memory.chat_message_histories import RedisChatMessageHistory\n",
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain.schema.chat_history import BaseChatMessageHistory\n",
"from langchain.schema.runnable.history import RunnableWithMessageHistory"
"from langchain_core.chat_history import BaseChatMessageHistory\n",
"from langchain_core.runnables.history import RunnableWithMessageHistory"
]
},
{
@@ -292,8 +292,8 @@
}
],
"source": [
"from langchain.schema.messages import HumanMessage\n",
"from langchain.schema.runnable import RunnableParallel\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_core.runnables import RunnableParallel\n",
"\n",
"chain = RunnableParallel({\"output_message\": ChatAnthropic(model=\"claude-2\")})\n",
"chain_with_history = RunnableWithMessageHistory(\n",

View File

@@ -46,7 +46,7 @@
}
],
"source": [
"from langchain.schema.runnable import RunnableParallel, RunnablePassthrough\n",
"from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
"\n",
"runnable = RunnableParallel(\n",
" passed=RunnablePassthrough(),\n",
@@ -100,9 +100,9 @@
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"vectorstore = FAISS.from_texts(\n",
" [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",

View File

@@ -53,7 +53,7 @@
"source": [
"from langchain.chat_models import ChatAnthropic\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser"
"from langchain_core.output_parsers import StrOutputParser"
]
},
{
@@ -164,7 +164,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.runnable import RunnableBranch\n",
"from langchain_core.runnables import RunnableBranch\n",
"\n",
"branch = RunnableBranch(\n",
" (lambda x: \"anthropic\" in x[\"topic\"].lower(), anthropic_chain),\n",
@@ -279,7 +279,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.runnable import RunnableLambda\n",
"from langchain_core.runnables import RunnableLambda\n",
"\n",
"full_chain = {\"topic\": chain, \"question\": lambda x: x[\"question\"]} | RunnableLambda(\n",
" route\n",

View File

@@ -660,9 +660,9 @@
],
"source": [
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"template = \"\"\"Answer the question based only on the following context:\n",
"{context}\n",
@@ -920,7 +920,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.runnable import RunnableParallel\n",
"from langchain_core.runnables import RunnableParallel\n",
"\n",
"chain1 = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\") | model\n",
"chain2 = (\n",

View File

@@ -44,7 +44,7 @@
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"Tell me a short joke about {topic}\")\n",

View File

@@ -181,7 +181,7 @@
"source": [
"# First let's create a chain with a ChatModel\n",
"# We add in a string output parser here so the outputs between the two are the same type\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"chat_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",

View File

@@ -666,8 +666,8 @@
"\n",
"from langchain.chat_models.openai import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import (\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import (\n",
" RunnableLambda,\n",
" RunnableParallel,\n",
" RunnablePassthrough,\n",

View File

@@ -73,7 +73,7 @@ CustomTool(
**YES**
```python
from langchain.tools.base import Tool
from langchain_core.tools import Tool
from pydantic.v1 import BaseModel, Field # <-- Uses v1 namespace
class CalculatorInput(BaseModel):
@@ -90,7 +90,7 @@ Tool.from_function( # <-- tool uses v1 namespace
**NO**
```python
from langchain.tools.base import Tool
from langchain_core.tools import Tool
from pydantic import BaseModel, Field # <-- Uses v2 namespace
class CalculatorInput(BaseModel):

View File

@@ -71,7 +71,7 @@
"import os\n",
"\n",
"from langchain.chat_models import QianfanChatEndpoint\n",
"from langchain.chat_models.base import HumanMessage\n",
"from langchain_core.language_models.chat_models import HumanMessage\n",
"\n",
"os.environ[\"QIANFAN_AK\"] = \"your_ak\"\n",
"os.environ[\"QIANFAN_SK\"] = \"your_sk\"\n",

View File

@@ -159,7 +159,7 @@
"from langchain.chat_models import ChatFireworks\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"llm = ChatFireworks(\n",
" model=\"accounts/fireworks/models/llama-v2-13b-chat\",\n",

View File

@@ -41,7 +41,7 @@
"import os\n",
"\n",
"from langchain.chat_models import PaiEasChatEndpoint\n",
"from langchain.chat_models.base import HumanMessage\n",
"from langchain_core.language_models.chat_models import HumanMessage\n",
"\n",
"os.environ[\"EAS_SERVICE_URL\"] = \"Your_EAS_Service_URL\"\n",
"os.environ[\"EAS_SERVICE_TOKEN\"] = \"Your_EAS_Service_Token\"\n",

View File

@@ -516,7 +516,7 @@
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",

View File

@@ -360,7 +360,7 @@
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",

View File

@@ -72,7 +72,7 @@
"source": [
"from enum import Enum\n",
"\n",
"from langchain.pydantic_v1 import BaseModel, Field\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class Operation(Enum):\n",
@@ -135,8 +135,8 @@
"source": [
"from pprint import pprint\n",
"\n",
"from langchain.pydantic_v1 import BaseModel\n",
"from langchain.utils.openai_functions import convert_pydantic_to_openai_function\n",
"from langchain_core.pydantic_v1 import BaseModel\n",
"\n",
"openai_function_def = convert_pydantic_to_openai_function(Calculator)\n",
"pprint(openai_function_def)"

View File

@@ -472,7 +472,7 @@
"from typing import Dict, List\n",
"\n",
"from langchain.document_loaders import DocugamiLoader\n",
"from langchain.schema.document import Document\n",
"from langchain_core.documents import Document\n",
"\n",
"loader = DocugamiLoader(docset_id=\"zo954yqy53wp\")\n",
"loader.include_xml_tags = (\n",

View File

@@ -74,7 +74,7 @@
"import asyncio\n",
"\n",
"from langchain.document_transformers.nuclia_text_transform import NucliaTextTransformer\n",
"from langchain.schema.document import Document\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",
"async def process():\n",

View File

@@ -80,7 +80,7 @@
],
"source": [
"from langchain.chat_models import ChatDatabricks\n",
"from langchain.schema.messages import HumanMessage\n",
"from langchain_core.messages import HumanMessage\n",
"from mlflow.deployments import get_deploy_client\n",
"\n",
"client = get_deploy_client(\"databricks\")\n",

View File

@@ -174,8 +174,8 @@
"outputs": [],
"source": [
"import langchain.utilities.opaqueprompts as op\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"prompt = (PromptTemplate.from_template(prompt_template),)\n",
"llm = OpenAI()\n",

View File

@@ -40,7 +40,7 @@
"source": [
"from langchain.llms import VolcEngineMaasLLM\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser"
"from langchain_core.output_parsers import StrOutputParser"
]
},
{

View File

@@ -31,7 +31,7 @@ Databricks External Models
```python
from langchain.chat_models import ChatDatabricks
from langchain.schema.messages import HumanMessage
from langchain_core.messages import HumanMessage
from mlflow.deployments import get_deploy_client

View File

@@ -21,7 +21,7 @@
"source": [
"from langchain.chat_models import ChatCohere\n",
"from langchain.retrievers import CohereRagRetriever\n",
"from langchain.schema.document import Document"
"from langchain_core.documents import Document"
]
},
{

File diff suppressed because one or more lines are too long

View File

@@ -65,9 +65,9 @@
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema import Document\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter"
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough"
]
},
{

View File

@@ -563,8 +563,8 @@
}
],
"source": [
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"rag_chain = (\n",
" {\"context\": retriever, \"question\": RunnablePassthrough()}\n",

View File

@@ -193,7 +193,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.agent import AgentFinish\n",
"from langchain_core.agents import AgentFinish\n",
"\n",
"\n",
"def execute_agent(agent, tools, input):\n",

View File

@@ -23,9 +23,9 @@
"outputs": [],
"source": [
"from langchain.agents import AgentType, initialize_agent\n",
"from langchain.agents.tools import Tool\n",
"from langchain.chains import LLMMathChain\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain_core.tools import Tool\n",
"from pydantic.v1 import BaseModel, Field"
]
},

View File

@@ -166,7 +166,7 @@
"source": [
"import json\n",
"\n",
"from langchain.schema.agent import AgentActionMessageLog, AgentFinish"
"from langchain_core.agents import AgentActionMessageLog, AgentFinish"
]
},
{

View File

@@ -357,7 +357,7 @@
}
],
"source": [
"from langchain.schema.agent import AgentFinish\n",
"from langchain_core.agents import AgentFinish\n",
"\n",
"user_input = \"how many letters in the word educa?\"\n",
"intermediate_steps = []\n",
@@ -519,7 +519,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.messages import AIMessage, HumanMessage\n",
"from langchain_core.messages import AIMessage, HumanMessage\n",
"\n",
"chat_history = []"
]

View File

@@ -938,8 +938,8 @@
"from langchain.agents import AgentType, initialize_agent\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.tools import Tool\n",
"from langchain.tools.base import ToolException\n",
"from langchain.utilities import SerpAPIWrapper\n",
"from langchain_core.tools import ToolException\n",
"\n",
"\n",
"def _handle_error(error: ToolException) -> str:\n",

View File

@@ -35,8 +35,8 @@
"from langchain.chat_models import ChatAnthropic\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.schema import StrOutputParser\n",
"from langchain.schema.prompt_template import format_document\n",
"from langchain.schema.runnable import RunnableParallel, RunnablePassthrough"
"from langchain_core.prompts import format_document\n",
"from langchain_core.runnables import RunnableParallel, RunnablePassthrough"
]
},
{

View File

@@ -32,9 +32,9 @@
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.output_parsers.openai_functions import PydanticOutputFunctionsParser\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.pydantic_v1 import BaseModel, Field\n",
"from langchain.schema.prompt_template import format_document\n",
"from langchain.utils.openai_functions import convert_pydantic_to_openai_function"
"from langchain.utils.openai_functions import convert_pydantic_to_openai_function\n",
"from langchain_core.prompts import format_document\n",
"from langchain_core.pydantic_v1 import BaseModel, Field"
]
},
{

View File

@@ -51,7 +51,7 @@
"from langchain.chat_models import ChatAnthropic\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.schema import StrOutputParser\n",
"from langchain.schema.prompt_template import format_document"
"from langchain_core.prompts import format_document"
]
},
{

View File

@@ -43,7 +43,7 @@
"from langchain.chat_models import ChatAnthropic\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.schema import StrOutputParser\n",
"from langchain.schema.prompt_template import format_document"
"from langchain_core.prompts import format_document"
]
},
{

View File

@@ -58,8 +58,8 @@
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnableBranch"
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableBranch"
]
},
{
@@ -89,8 +89,8 @@
"from typing import Literal\n",
"\n",
"from langchain.output_parsers.openai_functions import PydanticAttrOutputFunctionsParser\n",
"from langchain.pydantic_v1 import BaseModel\n",
"from langchain.utils.openai_functions import convert_pydantic_to_openai_function\n",
"from langchain_core.pydantic_v1 import BaseModel\n",
"\n",
"\n",
"class TopicClassifier(BaseModel):\n",
@@ -119,8 +119,8 @@
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"final_chain = (\n",
" RunnablePassthrough.assign(topic=itemgetter(\"input\") | classifier_chain)\n",

View File

@@ -107,7 +107,7 @@
}
],
"source": [
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"synopsis_chain = synopsis_prompt | llm | StrOutputParser()\n",
"review_chain = review_prompt | llm | StrOutputParser()\n",

View File

@@ -29,7 +29,7 @@
")\n",
"from langchain.chains.base import Chain\n",
"from langchain.prompts.base import BasePromptTemplate\n",
"from langchain.schema.language_model import BaseLanguageModel\n",
"from langchain_core.language_models import BaseLanguageModel\n",
"from pydantic import Extra\n",
"\n",
"\n",

View File

@@ -56,7 +56,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.pydantic_v1 import BaseModel, Field\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class Person(BaseModel):\n",

View File

@@ -118,7 +118,7 @@
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"template = \"\"\"Answer the question based only on the following context:\n",
"\n",

View File

@@ -232,8 +232,8 @@
"\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.document import Document\n",
"from langchain.schema.output_parser import StrOutputParser"
"from langchain_core.documents import Document\n",
"from langchain_core.output_parsers import StrOutputParser"
]
},
{

View File

@@ -104,7 +104,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.messages import HumanMessage, SystemMessage\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"\n",
"messages = [\n",
" SystemMessage(content=\"You're a helpful assistant\"),\n",

View File

@@ -30,7 +30,7 @@
"from typing import Any, List, Mapping, Optional\n",
"\n",
"from langchain.callbacks.manager import CallbackManagerForLLMRun\n",
"from langchain.llms.base import LLM"
"from langchain_core.language_models.llms import LLM"
]
},
{

View File

@@ -53,7 +53,7 @@
"from langchain.llms import OpenAI\n",
"from langchain.output_parsers import PydanticOutputParser\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.pydantic_v1 import BaseModel, Field, validator\n",
"from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
"\n",
"model = OpenAI(model_name=\"text-davinci-003\", temperature=0.0)\n",
"\n",

View File

@@ -9,7 +9,7 @@ For this example, we'll use the above Pydantic output parser. Here's what happen
```python
from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import PydanticOutputParser
from langchain.pydantic_v1 import BaseModel, Field
from langchain_core.pydantic_v1 import BaseModel, Field
from typing import List
```

View File

@@ -25,7 +25,7 @@
"from langchain.llms import OpenAI\n",
"from langchain.output_parsers import PydanticOutputParser\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.pydantic_v1 import BaseModel, Field, validator"
"from langchain_core.pydantic_v1 import BaseModel, Field, validator"
]
},
{

View File

@@ -203,7 +203,7 @@
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import HumanMessagePromptTemplate\n",
"from langchain.schema.messages import SystemMessage\n",
"from langchain_core.messages import SystemMessage\n",
"\n",
"chat_template = ChatPromptTemplate.from_messages(\n",
" [\n",

View File

@@ -38,7 +38,7 @@ chat_prompt = ChatPromptTemplate.from_messages([MessagesPlaceholder(variable_nam
```python
from langchain.schema.messages import HumanMessage, AIMessage
from langchain_core.messages import HumanMessage, AIMessage
human_message = HumanMessage(content="What is the best way to learn programming?")
ai_message = AIMessage(content="""\

View File

@@ -66,7 +66,7 @@
"\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n",
"from langchain.pydantic_v1 import BaseModel\n",
"from langchain_core.pydantic_v1 import BaseModel\n",
"from langchain_experimental.tabular_synthetic_data.openai import (\n",
" OPENAI_TEMPLATE,\n",
" create_openai_data_generator,\n",

View File

@@ -382,7 +382,7 @@
"from typing import Optional\n",
"\n",
"from langchain.chains import create_extraction_chain_pydantic\n",
"from langchain.pydantic_v1 import BaseModel\n",
"from langchain_core.pydantic_v1 import BaseModel\n",
"\n",
"\n",
"# Pydantic data class\n",

View File

@@ -250,7 +250,7 @@
{
"data": {
"text/plain": [
"'To initialize a ReAct agent, you need to follow these steps:\\n\\n1. Initialize a language model `llm` of type `BaseLanguageModel`.\\n\\n2. Initialize a document store `docstore` of type `Docstore`.\\n\\n3. Create a `DocstoreExplorer` with the initialized `docstore`. The `DocstoreExplorer` is used to search for and look up terms in the document store.\\n\\n4. Create an array of `Tool` objects. The `Tool` objects represent the actions that the agent can perform. In the case of `ReActDocstoreAgent`, the tools must be \"Search\" and \"Lookup\" with their corresponding functions from the `DocstoreExplorer`.\\n\\n5. Initialize the `ReActDocstoreAgent` using the `from_llm_and_tools` method with the `llm` (language model) and `tools` as parameters.\\n\\n6. Initialize the `ReActChain` (which is the `AgentExecutor`) using the `ReActDocstoreAgent` and `tools` as parameters.\\n\\nHere is an example of how to do this:\\n\\n```python\\nfrom langchain.chains import ReActChain, OpenAI\\nfrom langchain.docstore.base import Docstore\\nfrom langchain.docstore.document import Document\\nfrom langchain.tools.base import BaseTool\\n\\n# Initialize the LLM and a docstore\\nllm = OpenAI()\\ndocstore = Docstore()\\n\\ndocstore_explorer = DocstoreExplorer(docstore)\\ntools = [\\n Tool(\\n name=\"Search\",\\n func=docstore_explorer.search,\\n description=\"Search for a term in the docstore.\",\\n ),\\n Tool(\\n name=\"Lookup\",\\n func=docstore_explorer.lookup,\\n description=\"Lookup a term in the docstore.\",\\n ),\\n]\\nagent = ReActDocstoreAgent.from_llm_and_tools(llm, tools)\\nreact = ReActChain(agent=agent, tools=tools)\\n```\\n\\nKeep in mind that this is a simplified example and you might need to adapt it to your specific needs.'"
"'To initialize a ReAct agent, you need to follow these steps:\\n\\n1. Initialize a language model `llm` of type `BaseLanguageModel`.\\n\\n2. Initialize a document store `docstore` of type `Docstore`.\\n\\n3. Create a `DocstoreExplorer` with the initialized `docstore`. The `DocstoreExplorer` is used to search for and look up terms in the document store.\\n\\n4. Create an array of `Tool` objects. The `Tool` objects represent the actions that the agent can perform. In the case of `ReActDocstoreAgent`, the tools must be \"Search\" and \"Lookup\" with their corresponding functions from the `DocstoreExplorer`.\\n\\n5. Initialize the `ReActDocstoreAgent` using the `from_llm_and_tools` method with the `llm` (language model) and `tools` as parameters.\\n\\n6. Initialize the `ReActChain` (which is the `AgentExecutor`) using the `ReActDocstoreAgent` and `tools` as parameters.\\n\\nHere is an example of how to do this:\\n\\n```python\\nfrom langchain.chains import ReActChain, OpenAI\\nfrom langchain.docstore.base import Docstore\\nfrom langchain.docstore.document import Document\\nfrom langchain_core.tools import BaseTool\\n\\n# Initialize the LLM and a docstore\\nllm = OpenAI()\\ndocstore = Docstore()\\n\\ndocstore_explorer = DocstoreExplorer(docstore)\\ntools = [\\n Tool(\\n name=\"Search\",\\n func=docstore_explorer.search,\\n description=\"Search for a term in the docstore.\",\\n ),\\n Tool(\\n name=\"Lookup\",\\n func=docstore_explorer.lookup,\\n description=\"Lookup a term in the docstore.\",\\n ),\\n]\\nagent = ReActDocstoreAgent.from_llm_and_tools(llm, tools)\\nreact = ReActChain(agent=agent, tools=tools)\\n```\\n\\nKeep in mind that this is a simplified example and you might need to adapt it to your specific needs.'"
]
},
"execution_count": 43,

View File

@@ -429,7 +429,7 @@
"source": [
"from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent\n",
"from langchain.prompts import MessagesPlaceholder\n",
"from langchain.schema.messages import SystemMessage"
"from langchain_core.messages import SystemMessage"
]
},
{

View File

@@ -162,9 +162,9 @@
"from langchain.document_loaders import WebBaseLoader\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.schema import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain.vectorstores import Chroma"
"from langchain.vectorstores import Chroma\n",
"from langchain_core.runnables import RunnablePassthrough"
]
},
{
@@ -687,7 +687,7 @@
"outputs": [],
"source": [
"from langchain.schema import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"\n",
"def format_docs(docs):\n",
@@ -855,7 +855,7 @@
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.schema.runnable import RunnableParallel\n",
"from langchain_core.runnables import RunnableParallel\n",
"\n",
"rag_chain_from_docs = (\n",
" {\n",
@@ -943,7 +943,7 @@
}
],
"source": [
"from langchain.schema.messages import AIMessage, HumanMessage\n",
"from langchain_core.messages import AIMessage, HumanMessage\n",
"\n",
"condense_q_chain.invoke(\n",
" {\n",

View File

@@ -323,7 +323,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.pydantic_v1 import BaseModel, Field"
"from langchain_core.pydantic_v1 import BaseModel, Field"
]
},
{

View File

@@ -2,8 +2,8 @@ import os
from pathlib import Path
from langchain import chat_models, llms
from langchain.chat_models.base import BaseChatModel, SimpleChatModel
from langchain.llms.base import LLM, BaseLLM
from langchain_core.language_models.chat_models import BaseChatModel, SimpleChatModel
from langchain_core.language_models.llms import LLM, BaseLLM
INTEGRATIONS_DIR = Path(os.path.abspath(__file__)).parents[1] / "docs" / "integrations"
LLM_IGNORE = ("FakeListLLM", "OpenAIChat", "PromptLayerOpenAIChat")

View File

@@ -1,8 +1,8 @@
from langchain.chat_models import ChatAnthropic
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import ConfigurableField
from langchain_core.output_parsers import StrOutputParser
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import ConfigurableField
from .prompts import answer_prompt
from .retriever_agent import executor

View File

@@ -1,6 +1,6 @@
import re
from langchain.schema.agent import AgentAction, AgentFinish
from langchain_core.agents import AgentAction, AgentFinish
from .agent_scratchpad import _format_docs

View File

@@ -1,8 +1,8 @@
from langchain.agents import AgentExecutor
from langchain.chat_models import ChatAnthropic
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableParallel, RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from .agent_scratchpad import format_agent_scratchpad
from .output_parser import parse_output

View File

@@ -7,8 +7,8 @@ from typing import Any, Dict, Sequence
from langchain.chains.openai_functions import convert_to_openai_function
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel, Field, ValidationError, conint
from langchain.schema.runnable import (
from langchain_core.pydantic_v1 import BaseModel, Field, ValidationError, conint
from langchain_core.runnables import (
Runnable,
RunnableBranch,
RunnableLambda,

View File

@@ -4,9 +4,9 @@ import cassio
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.vectorstores import Cassandra
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from .populate_vector_store import populate

View File

@@ -6,7 +6,7 @@ from langchain.cache import CassandraCache
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import BaseMessage
from langchain.schema.runnable import RunnableLambda
from langchain_core.runnables import RunnableLambda
use_cassandra = int(os.environ.get("USE_CASSANDRA_CLUSTER", "0"))
if use_cassandra:

View File

@@ -1,9 +1,9 @@
from langchain import hub
from langchain.chat_models import ChatAnthropic
from langchain.pydantic_v1 import BaseModel
from langchain.schema import StrOutputParser
from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
from langchain.utilities import WikipediaAPIWrapper
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
class Question(BaseModel):

View File

@@ -15,7 +15,7 @@ from langchain.schema import (
StrOutputParser,
get_buffer_string,
)
from langchain.schema.runnable import Runnable
from langchain_core.runnables import Runnable
from langsmith.evaluation import EvaluationResult, RunEvaluator
from langsmith.schemas import Example
from pydantic import BaseModel, Field

View File

@@ -5,9 +5,9 @@ from langchain.agents import AgentExecutor, OpenAIFunctionsAgent
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.retriever import create_retriever_tool
from langchain.vectorstores import FAISS
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_experimental.tools import PythonAstREPLTool
MAIN_DIR = Path(__file__).parents[1]

View File

@@ -1,7 +1,7 @@
from elasticsearch import Elasticsearch
from langchain.chat_models import ChatOpenAI
from langchain.output_parsers.json import SimpleJsonOutputParser
from langchain.pydantic_v1 import BaseModel
from langchain_core.pydantic_v1 import BaseModel
from .elastic_index_info import get_indices_infos
from .prompts import DSL_PROMPT

View File

@@ -2,8 +2,8 @@ from typing import List, Optional
from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain.utils.openai_functions import convert_pydantic_to_openai_function
from langchain_core.pydantic_v1 import BaseModel
from langchain_experimental.llms.anthropic_functions import AnthropicFunctions
template = """A article will be passed to you. Extract from it all papers that are mentioned by this article.

View File

@@ -3,8 +3,8 @@ from typing import List, Optional
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain.utils.openai_functions import convert_pydantic_to_openai_function
from langchain_core.pydantic_v1 import BaseModel
template = """A article will be passed to you. Extract from it all papers that are mentioned by this article.

View File

@@ -4,8 +4,8 @@ import weaviate
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.retrievers.weaviate_hybrid_search import WeaviateHybridSearchRetriever
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableParallel, RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
# Check env vars
if os.environ.get("WEAVIATE_API_KEY", None) is None:

View File

@@ -1,10 +1,10 @@
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableParallel
from langchain.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel
from hyde.prompts import hyde_prompt

View File

@@ -3,11 +3,11 @@ import os
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain.schema.document import Document
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableParallel, RunnablePassthrough
from langchain.vectorstores import MongoDBAtlasVectorSearch
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from pymongo import MongoClient
MONGO_URI = os.environ["MONGO_URI"]

Some files were not shown because too many files have changed in this diff.