Compare commits


1 commit

Author: Bagatur
SHA1: ea6721a6eb
Message: cli[patch]: bump pkg tmpl langchain version
Date: 2024-01-06 12:26:37 -05:00

931 changed files with 10,345 additions and 22,740 deletions


@@ -1,9 +1,6 @@
blank_issues_enabled: true
version: 2.1
contact_links:
- name: 🤔 Question or Problem
about: Ask a question or ask about a problem in GitHub Discussions.
url: https://github.com/langchain-ai/langchain/discussions
- name: Discord
url: https://discord.gg/6adMQxSpJS
about: General community discussions


@@ -26,7 +26,7 @@ inputs:
runs:
using: composite
steps:
- uses: actions/setup-python@v4
- uses: actions/setup-python@v5
name: Setup python ${{ inputs.python-version }}
with:
python-version: ${{ inputs.python-version }}


@@ -149,7 +149,7 @@
],
"source": [
"# Prompt\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain.prompts import ChatPromptTemplate\n",
"\n",
"# Update the template based on the type of SQL Database like MySQL, Microsoft SQL Server and so on\n",
"template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n",
@@ -278,7 +278,7 @@
"source": [
"# Prompt\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"\n",
"template = \"\"\"Given an input question, convert it to a SQL query. No pre-amble. Based on the table schema below, write a SQL query that would answer the user's question:\n",
"{schema}\n",


@@ -198,9 +198,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"\n",
"# Generate summaries of text elements\n",
@@ -355,9 +355,9 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"\n",
"def create_multi_vector_retriever(\n",


@@ -235,9 +235,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI"
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser"
]
},
{
@@ -320,9 +320,9 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"# The vectorstore to use to index the child chunks\n",
"vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n",


@@ -211,9 +211,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI"
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser"
]
},
{
@@ -375,9 +375,9 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"# The vectorstore to use to index the child chunks\n",
"vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n",


@@ -209,9 +209,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOllama\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate"
"from langchain_core.output_parsers import StrOutputParser"
]
},
{


@@ -132,8 +132,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"baseline = Chroma.from_texts(\n",
" texts=all_splits_pypdf_texts,\n",
@@ -160,9 +160,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"# Prompt\n",
"prompt_text = \"\"\"You are an assistant tasked with summarizing tables and text for retrieval. \\\n",


@@ -29,8 +29,9 @@
"source": [
"from langchain.chains import RetrievalQA\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
"\n",
"llm = OpenAI(temperature=0)"
]
@@ -160,7 +161,7 @@
"source": [
"# Import things that are needed generically\n",
"from langchain.agents import AgentType, Tool, initialize_agent\n",
"from langchain_openai import OpenAI"
"from langchain_community.llms import OpenAI"
]
},
{


@@ -29,7 +29,7 @@
"outputs": [],
"source": [
"from langchain.chains import AnalyzeDocumentChain\n",
"from langchain_openai import ChatOpenAI\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)"
]


@@ -62,8 +62,8 @@
"outputs": [],
"source": [
"from langchain.docstore import InMemoryDocstore\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_openai import OpenAIEmbeddings"
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS"
]
},
{
@@ -100,8 +100,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_experimental.autonomous_agents import AutoGPT\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_experimental.autonomous_agents import AutoGPT"
]
},
{


@@ -41,8 +41,8 @@
"import pandas as pd\n",
"from langchain.docstore.document import Document\n",
"from langchain_community.agent_toolkits.pandas.base import create_pandas_dataframe_agent\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_experimental.autonomous_agents import AutoGPT\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"# Needed synce jupyter runs an async eventloop\n",
"nest_asyncio.apply()"
@@ -311,8 +311,8 @@
"# Memory\n",
"import faiss\n",
"from langchain.docstore import InMemoryDocstore\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"embeddings_model = OpenAIEmbeddings()\n",
"embedding_size = 1536\n",


@@ -31,8 +31,9 @@
"source": [
"from typing import Optional\n",
"\n",
"from langchain_experimental.autonomous_agents import BabyAGI\n",
"from langchain_openai import OpenAI, OpenAIEmbeddings"
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_experimental.autonomous_agents import BabyAGI"
]
},
{


@@ -29,8 +29,9 @@
"\n",
"from langchain.chains import LLMChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_experimental.autonomous_agents import BabyAGI\n",
"from langchain_openai import OpenAI, OpenAIEmbeddings"
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_experimental.autonomous_agents import BabyAGI"
]
},
{
@@ -107,8 +108,8 @@
"source": [
"from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n",
"from langchain.chains import LLMChain\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities import SerpAPIWrapper\n",
"from langchain_openai import OpenAI\n",
"\n",
"todo_prompt = PromptTemplate.from_template(\n",
" \"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}\"\n",


@@ -46,7 +46,7 @@
" HumanMessage,\n",
" SystemMessage,\n",
")\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI"
]
},
{


@@ -47,9 +47,9 @@
"outputs": [],
"source": [
"from IPython.display import SVG\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_experimental.cpal.base import CPALChain\n",
"from langchain_experimental.pal_chain import PALChain\n",
"from langchain_openai import OpenAI\n",
"\n",
"llm = OpenAI(temperature=0, max_tokens=512)\n",
"cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)\n",


@@ -657,7 +657,7 @@
}
],
"source": [
"from langchain_openai import OpenAIEmbeddings\n",
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
"\n",
"embeddings = OpenAIEmbeddings()\n",
"embeddings"
@@ -834,7 +834,7 @@
"outputs": [],
"source": [
"from langchain.chains import ConversationalRetrievalChain\n",
"from langchain_openai import ChatOpenAI\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(\n",
" model_name=\"gpt-3.5-turbo-0613\"\n",


@@ -44,8 +44,8 @@
"from langchain.prompts import StringPromptTemplate\n",
"from langchain.schema import AgentAction, AgentFinish\n",
"from langchain_community.agent_toolkits import NLAToolkit\n",
"from langchain_community.tools.plugin import AIPlugin\n",
"from langchain_openai import OpenAI"
"from langchain_community.llms import OpenAI\n",
"from langchain_community.tools.plugin import AIPlugin"
]
},
{
@@ -115,8 +115,8 @@
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_openai import OpenAIEmbeddings"
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS"
]
},
{


@@ -69,8 +69,8 @@
"from langchain.prompts import StringPromptTemplate\n",
"from langchain.schema import AgentAction, AgentFinish\n",
"from langchain_community.agent_toolkits import NLAToolkit\n",
"from langchain_community.tools.plugin import AIPlugin\n",
"from langchain_openai import OpenAI"
"from langchain_community.llms import OpenAI\n",
"from langchain_community.tools.plugin import AIPlugin"
]
},
{
@@ -139,8 +139,8 @@
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_openai import OpenAIEmbeddings"
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS"
]
},
{


@@ -41,8 +41,8 @@
"from langchain.chains import LLMChain\n",
"from langchain.prompts import StringPromptTemplate\n",
"from langchain.schema import AgentAction, AgentFinish\n",
"from langchain_community.utilities import SerpAPIWrapper\n",
"from langchain_openai import OpenAI"
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities import SerpAPIWrapper"
]
},
{
@@ -104,8 +104,8 @@
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_openai import OpenAIEmbeddings"
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS"
]
},
{


@@ -93,7 +93,7 @@
"outputs": [],
"source": [
"# Creating a OpenAI Chat LLM wrapper\n",
"from langchain_openai import ChatOpenAI\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(temperature=0, model_name=\"gpt-4\")"
]


@@ -56,8 +56,9 @@
" CharacterTextSplitter,\n",
" RecursiveCharacterTextSplitter,\n",
")\n",
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.vectorstores import DeepLake\n",
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
"activeloop_token = getpass.getpass(\"Activeloop Token:\")\n",


@@ -475,8 +475,8 @@
" HumanMessagePromptTemplate,\n",
" SystemMessagePromptTemplate,\n",
")\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser"
]
},
{
@@ -547,9 +547,9 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores.chroma import Chroma\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"\n",
"def build_retriever(text_elements, tables, table_summaries):\n",


@@ -39,7 +39,7 @@
"source": [
"from elasticsearch import Elasticsearch\n",
"from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI"
]
},
{


@@ -22,8 +22,8 @@
"from typing import List, Optional\n",
"\n",
"from langchain.chains.openai_tools import create_extraction_chain_pydantic\n",
"from langchain_core.pydantic_v1 import BaseModel\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.pydantic_v1 import BaseModel"
]
},
{
@@ -153,7 +153,7 @@
"from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n",
"from langchain_core.runnables import Runnable\n",
"from langchain_core.pydantic_v1 import BaseModel\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_core.messages import SystemMessage\n",
"from langchain_core.language_models import BaseLanguageModel\n",
"\n",


@@ -74,8 +74,9 @@
" CallbackManagerForRetrieverRun,\n",
")\n",
"from langchain.schema import BaseRetriever, Document\n",
"from langchain_community.utilities import GoogleSerperAPIWrapper\n",
"from langchain_openai import ChatOpenAI, OpenAI"
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities import GoogleSerperAPIWrapper"
]
},
{


@@ -49,8 +49,9 @@
"\n",
"from langchain.docstore import InMemoryDocstore\n",
"from langchain.retrievers import TimeWeightedVectorStoreRetriever\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"from termcolor import colored"
]
},


@@ -75,8 +75,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.llms import OpenAI\n",
"from langchain_experimental.autonomous_agents import HuggingGPT\n",
"from langchain_openai import OpenAI\n",
"\n",
"# %env OPENAI_API_BASE=http://localhost:8000/v1"
]


@@ -159,7 +159,7 @@
"outputs": [],
"source": [
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
"from langchain_openai import OpenAI"
"from langchain_community.llms import OpenAI"
]
},
{


@@ -22,7 +22,8 @@
"source": [
"from langchain.chains import HypotheticalDocumentEmbedder, LLMChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI, OpenAIEmbeddings"
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.llms import OpenAI"
]
},
{


@@ -49,7 +49,7 @@
"source": [
"# pick and configure the LLM of your choice\n",
"\n",
"from langchain_openai import OpenAI\n",
"from langchain_community.llms import OpenAI\n",
"\n",
"llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")"
]


@@ -43,8 +43,8 @@
}
],
"source": [
"from langchain_community.llms import OpenAI\n",
"from langchain_experimental.llm_bash.base import LLMBashChain\n",
"from langchain_openai import OpenAI\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"\n",


@@ -42,7 +42,7 @@
],
"source": [
"from langchain.chains import LLMCheckerChain\n",
"from langchain_openai import OpenAI\n",
"from langchain_community.llms import OpenAI\n",
"\n",
"llm = OpenAI(temperature=0.7)\n",
"\n",


@@ -46,7 +46,7 @@
],
"source": [
"from langchain.chains import LLMMathChain\n",
"from langchain_openai import OpenAI\n",
"from langchain_community.llms import OpenAI\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"llm_math = LLMMathChain.from_llm(llm, verbose=True)\n",


@@ -331,7 +331,7 @@
],
"source": [
"from langchain.chains import LLMSummarizationCheckerChain\n",
"from langchain_openai import OpenAI\n",
"from langchain_community.llms import OpenAI\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=2)\n",
@@ -822,7 +822,7 @@
],
"source": [
"from langchain.chains import LLMSummarizationCheckerChain\n",
"from langchain_openai import OpenAI\n",
"from langchain_community.llms import OpenAI\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=3)\n",
@@ -1096,7 +1096,7 @@
],
"source": [
"from langchain.chains import LLMSummarizationCheckerChain\n",
"from langchain_openai import OpenAI\n",
"from langchain_community.llms import OpenAI\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"checker_chain = LLMSummarizationCheckerChain.from_llm(llm, max_checks=3, verbose=True)\n",


@@ -14,8 +14,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.llms import OpenAI\n",
"from langchain_experimental.llm_symbolic_math.base import LLMSymbolicMathChain\n",
"from langchain_openai import OpenAI\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"llm_symbolic_math = LLMSymbolicMathChain.from_llm(llm)"


@@ -59,7 +59,7 @@
"from langchain.chains import LLMChain\n",
"from langchain.memory import ConversationBufferWindowMemory\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI"
"from langchain_community.llms import OpenAI"
]
},
{


@@ -91,8 +91,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.messages import HumanMessage, SystemMessage"
]
},
{


@@ -315,10 +315,10 @@
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"\n",
"def prompt_func(data_dict):\n",


@@ -44,7 +44,7 @@
"source": [
"from langchain.agents import AgentType, initialize_agent\n",
"from langchain.tools import SteamshipImageGenerationTool\n",
"from langchain_openai import OpenAI"
"from langchain_community.llms import OpenAI"
]
},
{


@@ -32,7 +32,7 @@
" HumanMessage,\n",
" SystemMessage,\n",
")\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI"
]
},
{


@@ -41,7 +41,7 @@
" HumanMessage,\n",
" SystemMessage,\n",
")\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI"
]
},
{


@@ -33,7 +33,7 @@
" HumanMessage,\n",
" SystemMessage,\n",
")\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI"
]
},
{


@@ -32,9 +32,9 @@
"\n",
"from langchain.chains import LLMChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities import SQLDatabase\n",
"from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n",
"from langchain_openai import OpenAI\n",
"from sqlalchemy import MetaData, create_engine\n",
"\n",
"MYSCALE_HOST = \"msc-4a9e710a.us-east-1.aws.staging.myscale.cloud\"\n",
@@ -75,10 +75,10 @@
"outputs": [],
"source": [
"from langchain.callbacks import StdOutCallbackHandler\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities.sql_database import SQLDatabase\n",
"from langchain_experimental.sql.prompt import MYSCALE_PROMPT\n",
"from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n",
"from langchain_openai import OpenAI\n",
"\n",
"chain = VectorSQLDatabaseChain(\n",
" llm_chain=LLMChain(\n",
@@ -117,6 +117,7 @@
"outputs": [],
"source": [
"from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_experimental.retrievers.vector_sql_database import (\n",
" VectorSQLDatabaseChainRetriever,\n",
")\n",
@@ -125,7 +126,6 @@
" VectorSQLDatabaseChain,\n",
" VectorSQLRetrieveAllOutputParser,\n",
")\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"output_parser_retrieve_all = VectorSQLRetrieveAllOutputParser.from_embeddings(\n",
" output_parser.model\n",


@@ -22,8 +22,8 @@
"from langchain.chains import RetrievalQA\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain_community.document_loaders import TextLoader\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings"
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma"
]
},
{
@@ -53,7 +53,7 @@
"from langchain.chains import create_qa_with_sources_chain\n",
"from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI"
]
},
{


@@ -28,8 +28,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.messages import HumanMessage, SystemMessage"
]
},
{
@@ -414,7 +414,7 @@
"BREAKING CHANGES:\n",
"- To use Azure embeddings with OpenAI V1, you'll need to use the new `AzureOpenAIEmbeddings` instead of the existing `OpenAIEmbeddings`. `OpenAIEmbeddings` continue to work when using Azure with `openai<1`.\n",
"```python\n",
"from langchain_openai import AzureOpenAIEmbeddings\n",
"from langchain_community.embeddings import AzureOpenAIEmbeddings\n",
"```\n",
"\n",
"\n",
@@ -456,8 +456,8 @@
"from typing import Literal\n",
"\n",
"from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",


@@ -52,7 +52,7 @@
" HumanMessage,\n",
" SystemMessage,\n",
")\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI"
]
},
{


@@ -30,14 +30,15 @@
"outputs": [],
"source": [
"from langchain.chains import LLMMathChain\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n",
"from langchain_core.tools import Tool\n",
"from langchain_experimental.plan_and_execute import (\n",
" PlanAndExecute,\n",
" load_agent_executor,\n",
" load_chat_planner,\n",
")\n",
"from langchain_openai import ChatOpenAI, OpenAI"
")"
]
},
{


@@ -82,7 +82,7 @@
"source": [
"from langchain.chains import ConversationalRetrievalChain\n",
"from langchain.retrievers import KayAiRetriever\n",
"from langchain_openai import ChatOpenAI\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
"retriever = KayAiRetriever.create(\n",


@@ -17,8 +17,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_experimental.pal_chain import PALChain\n",
"from langchain_openai import OpenAI"
"from langchain_community.llms import OpenAI\n",
"from langchain_experimental.pal_chain import PALChain"
]
},
{


@@ -27,7 +27,7 @@
],
"source": [
"from langchain.chains import create_citation_fuzzy_match_chain\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI"
]
},
{


@@ -30,8 +30,8 @@
"outputs": [],
"source": [
"import pinecone\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Pinecone\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"pinecone.init(api_key=\"...\", environment=\"...\")"
]
@@ -86,8 +86,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser"
]
},
{


@@ -43,7 +43,7 @@
"outputs": [],
"source": [
"from langchain.sql_database import SQLDatabase\n",
"from langchain_openai import ChatOpenAI\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"\n",
"CONNECTION_STRING = \"postgresql+psycopg2://postgres:test@localhost:5432/vectordb\" # Replace with your own\n",
"db = SQLDatabase.from_uri(CONNECTION_STRING)"
@@ -88,7 +88,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import OpenAIEmbeddings\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"\n",
"embeddings_model = OpenAIEmbeddings()"
]
@@ -219,7 +219,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain.prompts import ChatPromptTemplate\n",
"\n",
"template = \"\"\"You are a Postgres expert. Given an input question, first create a syntactically correct Postgres query to run, then look at the results of the query and return the answer to the input question.\n",
"Unless the user specifies in the question a specific number of examples to obtain, query for at most 5 results using the LIMIT clause as per Postgres. You can order the results to return the most informative data in the database.\n",
@@ -267,9 +267,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"db = SQLDatabase.from_uri(\n",
" CONNECTION_STRING\n",


@@ -31,11 +31,11 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI"
"from langchain_core.runnables import RunnablePassthrough"
]
},
{


@@ -53,9 +53,10 @@
"from langchain.prompts.base import StringPromptTemplate\n",
"from langchain.schema import AgentAction, AgentFinish\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain_community.llms import BaseLLM\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
"from langchain_community.llms import BaseLLM, OpenAI\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings\n",
"from pydantic import BaseModel, Field"
]
},


@@ -18,9 +18,9 @@
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompt_values import PromptValue\n",
"from langchain_openai import ChatOpenAI"
"from langchain_core.prompt_values import PromptValue"
]
},
{


@@ -255,7 +255,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import ChatOpenAI\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-4\")\n",
"res = model.predict(\n",
@@ -1083,8 +1083,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import ElasticsearchStore\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"embeddings = OpenAIEmbeddings()"
]


@@ -26,8 +26,8 @@
"from langchain.chains import LLMChain\n",
"from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_community.utilities import GoogleSearchAPIWrapper\n",
"from langchain_openai import OpenAI"
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities import GoogleSearchAPIWrapper"
]
},
{


@@ -52,8 +52,8 @@
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate\n",
"from langchain_experimental.smart_llm import SmartLLMChain\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_experimental.smart_llm import SmartLLMChain"
]
},
{


@@ -9,7 +9,7 @@ To set it up, follow the instructions on https://database.guide/2-sample-databas
```python
from langchain_openai import OpenAI
from langchain_community.llms import OpenAI
from langchain_community.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
```
@@ -200,7 +200,7 @@ result["intermediate_steps"]
How to add memory to a SQLDatabaseChain:
```python
from langchain_openai import OpenAI
from langchain_community.llms import OpenAI
from langchain_community.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
```
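For orientation, a minimal sketch of how these imports are typically wired together, assuming the community-import side of this diff and an illustrative SQLite connection string:

```python
from langchain_community.llms import OpenAI
from langchain_community.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain

# Illustrative URI; substitute your own database connection string.
db = SQLDatabase.from_uri("sqlite:///Chinook.db")
llm = OpenAI(temperature=0)

# Build the chain and ask a natural-language question against the schema.
db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)
db_chain.run("How many employees are there?")
```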


@@ -23,10 +23,10 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langchain_openai import ChatOpenAI"
"from langchain_core.runnables import RunnableLambda"
]
},
{


@@ -24,7 +24,7 @@
}
],
"source": [
"from langchain_openai import OpenAI\n",
"from langchain_community.llms import OpenAI\n",
"\n",
"llm = OpenAI(temperature=1, max_tokens=512, model=\"gpt-3.5-turbo-instruct\")"
]


@@ -37,8 +37,8 @@
"import getpass\n",
"import os\n",
"\n",
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import DeepLake\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
"activeloop_token = getpass.getpass(\"Activeloop Token:\")\n",
@@ -3809,7 +3809,7 @@
"outputs": [],
"source": [
"from langchain.chains import ConversationalRetrievalChain\n",
"from langchain_openai import ChatOpenAI\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model_name=\"gpt-3.5-turbo-0613\") # switch to 'gpt-4'\n",
"qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"


@@ -30,7 +30,7 @@
" HumanMessage,\n",
" SystemMessage,\n",
")\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI"
]
},
{


@@ -28,7 +28,7 @@
" HumanMessage,\n",
" SystemMessage,\n",
")\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI"
]
},
{


@@ -599,7 +599,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import ChatOpenAI\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)"
]


@@ -1,27 +0,0 @@
# langchain-core
## 0.1.7 (Jan 5, 2024)
#### Deleted
No deletions.
#### Deprecated
- `BaseChatModel` methods `__call__`, `call_as_llm`, `predict`, `predict_messages`. Will be removed in 0.2.0. Use `BaseChatModel.invoke` instead.
- `BaseChatModel` methods `apredict`, `apredict_messages`. Will be removed in 0.2.0. Use `BaseChatModel.ainvoke` instead.
- `BaseLLM` methods `__call__`, `predict`, `predict_messages`. Will be removed in 0.2.0. Use `BaseLLM.invoke` instead.
- `BaseLLM` methods `apredict`, `apredict_messages`. Will be removed in 0.2.0. Use `BaseLLM.ainvoke` instead.
#### Fixed
- Restrict recursive URL scraping: [#15559](https://github.com/langchain-ai/langchain/pull/15559)
#### Added
No additions.
#### Beta
- Marked `langchain_core.load.load` and `langchain_core.load.loads` as beta.
- Marked `langchain_core.beta.runnables.context.ContextGet` and `langchain_core.beta.runnables.context.ContextSet` as beta.
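To make the deprecations above concrete, here is a minimal migration sketch, assuming langchain-core 0.1.x with the langchain-openai package installed (the model and prompt are illustrative):

```python
from langchain_openai import ChatOpenAI

model = ChatOpenAI()

# Deprecated in 0.1.x, removed in 0.2.0:
#   model("hello")              # BaseChatModel.__call__
#   model.predict("hello")      # returns a plain string

# Preferred: the Runnable interface.
result = model.invoke("hello")  # returns an AIMessage
print(result.content)
# Async counterpart: await model.ainvoke("hello")
```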


@@ -1,36 +0,0 @@
# langchain
## 0.1.0 (Jan 5, 2024)
#### Deleted
No deletions.
#### Deprecated
Deprecated classes and methods will be removed in 0.2.0
| Deprecated | Alternative | Reason |
|---------------------------------|-----------------------------------|------------------------------------------------|
| ChatVectorDBChain | ConversationalRetrievalChain | More general to all retrievers |
| create_ernie_fn_chain | create_ernie_fn_runnable | Use LCEL under the hood |
| created_structured_output_chain | create_structured_output_runnable | Use LCEL under the hood |
| NatBotChain | | Not used |
| create_openai_fn_chain | create_openai_fn_runnable | Use LCEL under the hood |
| create_structured_output_chain | create_structured_output_runnable | Use LCEL under the hood |
| load_query_constructor_chain | load_query_constructor_runnable | Use LCEL under the hood |
| VectorDBQA | RetrievalQA | More general to all retrievers |
| Sequential Chain | LCEL | Obviated by LCEL |
| SimpleSequentialChain | LCEL | Obviated by LCEL |
| TransformChain | LCEL/RunnableLambda | Obviated by LCEL |
| create_tagging_chain | create_structured_output_runnable | Use LCEL under the hood |
| ChatAgent | create_react_agent | Use LCEL builder over a class |
| ConversationalAgent | create_react_agent | Use LCEL builder over a class |
| ConversationalChatAgent | create_json_chat_agent | Use LCEL builder over a class |
| initialize_agent | Individual create agent methods | Individual create agent methods are more clear |
| ZeroShotAgent | create_react_agent | Use LCEL builder over a class |
| OpenAIFunctionsAgent | create_openai_functions_agent | Use LCEL builder over a class |
| OpenAIMultiFunctionsAgent | create_openai_tools_agent | Use LCEL builder over a class |
| SelfAskWithSearchAgent | create_self_ask_with_search | Use LCEL builder over a class |
| StructuredChatAgent | create_structured_chat_agent | Use LCEL builder over a class |
| XMLAgent | create_xml_agent | Use LCEL builder over a class |
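As one concrete instance of the table above, a hedged sketch of moving from `initialize_agent` to `create_react_agent`, assuming langchain 0.1.x plus the langchainhub package (the echo tool and prompt choice are illustrative):

```python
from langchain import hub
from langchain.agents import AgentExecutor, Tool, create_react_agent
from langchain_openai import OpenAI

llm = OpenAI(temperature=0)
tools = [Tool(name="echo", func=lambda q: q, description="Echoes the input back.")]

# Before (deprecated):
#   from langchain.agents import AgentType, initialize_agent
#   agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)

# After: an LCEL-based builder plus an explicit AgentExecutor.
prompt = hub.pull("hwchase17/react")  # a standard ReAct prompt
agent = create_react_agent(llm, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
executor.invoke({"input": "Echo the word hello."})
```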

docs/docs/community.md (new file, 53 lines)

@@ -0,0 +1,53 @@
# Community navigator
Hi! Thanks for being here. We're lucky to have a community of so many passionate developers building with LangChain; we have so much to teach and learn from each other. Community members contribute code, host meetups, write blog posts, amplify each other's work, become each other's customers and collaborators, and so much more.
Whether you're new to LangChain, looking to go deeper, or just want to get more exposure to the world of building with LLMs, this page can point you in the right direction.
- **🦜 Contribute to LangChain**
- **🌍 Meetups, Events, and Hackathons**
- **📣 Help Us Amplify Your Work**
- **💬 Stay in the loop**
# 🦜 Contribute to LangChain
LangChain is the product of over 5,000 contributions by 1,500+ contributors, and there is **still** so much to do together. Here are some ways to get involved:
- **[Open a pull request](https://github.com/langchain-ai/langchain/issues):** We'd appreciate all forms of contributions: new features, infrastructure improvements, better documentation, bug fixes, etc. If you have an improvement or an idea, we'd love to work on it with you.
- **[Read our contributor guidelines](./contributing/):** We ask contributors to follow a ["fork and pull request"](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow, run a few local checks for formatting, linting, and testing before submitting, and follow certain documentation and testing conventions.
- **First-time contributor?** [Try one of these PRs with the “good first issue” tag](https://github.com/langchain-ai/langchain/contribute).
- **Become an expert:** Our experts help the community by answering product questions in Discord. If that's a role you'd like to play, we'd be so grateful! (And we have some special experts-only goodies/perks we can tell you more about.) Send us an email to introduce yourself at hello@langchain.dev and we'll take it from there!
- **Integrate with LangChain:** If your product integrates with LangChain, or aspires to, we want to help make sure the experience is as smooth as possible for you and end users. Send us an email at hello@langchain.dev and tell us what you're working on.
- **Become an Integration Maintainer:** Partner with our team to ensure your integration stays up-to-date and talk directly with users (and answer their inquiries) in our Discord. Introduce yourself at hello@langchain.dev if you'd like to explore this role.
# 🌍 Meetups, Events, and Hackathons
One of our favorite things about working in AI is how much enthusiasm there is for building together. We want to help make that as easy and impactful for you as possible!
- **Find a meetup, hackathon, or webinar:** You can find the one for you on our [global events calendar](https://mirror-feeling-d80.notion.site/0bc81da76a184297b86ca8fc782ee9a3?v=0d80342540df465396546976a50cfb3f).
- **Submit an event to our calendar:** Email us at events@langchain.dev with a link to your event page! We can also help you spread the word with our local communities.
- **Host a meetup:** If you want to bring a group of builders together, we want to help! We can publicize your event on our event calendar/Twitter, share it with our local communities in Discord, send swag, or potentially hook you up with a sponsor. Email us at events@langchain.dev to tell us about your event!
- **Become a meetup sponsor:** We often hear from groups of builders that want to get together, but are blocked or limited on some dimension (space to host, budget for snacks, prizes to distribute, etc.). If you'd like to help, send us an email at events@langchain.dev and we can share more about how it works!
- **Speak at an event:** Meetup hosts are always looking for great speakers, presenters, and panelists. If you'd like to do that at an event, send us an email to hello@langchain.dev with more information about yourself, what you want to talk about, and what city you're based in, and we'll try to match you with an upcoming event!
- **Tell us about your LLM community:** If you host or participate in a community that would welcome support from LangChain and/or our team, send us an email at hello@langchain.dev and let us know how we can help.
# 📣 Help Us Amplify Your Work
If you're working on something you're proud of, and think the LangChain community would benefit from knowing about it, we want to help you show it off.
- **Post about your work and mention us:** We love hanging out on Twitter to see what people in the space are talking about and working on. If you tag [@langchainai](https://twitter.com/LangChainAI), we'll almost certainly see it and can show you some love.
- **Publish something on our blog:** If you're writing about your experience building with LangChain, we'd love to post (or cross-post) it on our blog! Email hello@langchain.dev with a draft of your post, or even an idea for something you want to write about.
- **Get your product onto our [integrations hub](https://integrations.langchain.com/):** Many developers take advantage of our seamless integrations with other products, and come to our integrations hub to find out who those are. If you want to get your product up there, tell us about it (and how it works with LangChain) at hello@langchain.dev.
# 💬 Stay in the loop
Here's where our team hangs out, talks shop, spotlights cool work, and shares what we're up to. We'd love to see you there too.
- **[Twitter](https://twitter.com/LangChainAI):** We post about what we're working on and what cool things we're seeing in the space. If you tag @langchainai in your post, we'll almost certainly see it, and can show you some love!
- **[Discord](https://discord.gg/6adMQxSpJS):** Connect with over 30,000 developers who are building with LangChain.
- **[GitHub](https://github.com/langchain-ai/langchain):** Open pull requests, contribute to a discussion, and/or contribute code.
- **[Subscribe to our bi-weekly Release Notes](https://6w1pwbss0py.typeform.com/to/KjZB1auB):** A twice-monthly email roundup of the coolest things going on in our orbit.


@@ -40,8 +40,3 @@ smooth for future contributors.
In a similar vein, we do enforce certain linting, formatting, and documentation standards in the codebase.
If you are finding these difficult (or even just annoying) to work with, feel free to contact a maintainer for help -
we do not want these to get in the way of getting good code into the codebase.
# 🌟 Recognition
If your contribution has made its way into a release, we will want to give you credit on Twitter (only if you want though)!
If you have a Twitter account you would like us to mention, please let us know in the PR or through another means.


@@ -53,9 +53,9 @@ And we would write tests in:
- Integration tests: `libs/community/tests/integration_tests/chat_models/test_parrot_link.py`
And add documentation to:
- `docs/docs/integrations/chat/parrot_link.ipynb`
- `docs/docs/
## Partner Packages
Partner packages are in `libs/partners/*` and are installed by users with `pip install langchain-{partner}`, and exported members can be imported with code like
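For concreteness, a hypothetical sketch of that import pattern, reusing the `parrot_link` partner name from the testing example above (the class name is illustrative):

```python
# Assumes a hypothetical partner package installed with: pip install langchain-parrot-link
from langchain_parrot_link import ChatParrotLink
```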


@@ -0,0 +1,56 @@
---
sidebar_label: Package Versioning
sidebar_position: 4
---
# 📕 Package Versioning
As of now, LangChain has an ad hoc release process: releases are cut with high frequency by
a maintainer and published to [PyPI](https://pypi.org/).
The different packages are versioned slightly differently.
## `langchain-core`
`langchain-core` is currently on version `0.1.x`.
As `langchain-core` contains the base abstractions and runtime for the whole LangChain ecosystem, we will communicate any breaking changes with advance notice and version bumps. The exception for this is anything in `langchain_core.beta`. The reason for `langchain_core.beta` is that given the rate of change of the field, being able to move quickly is still a priority, and this module is our attempt to do so.
Minor version increases will occur for:
- Breaking changes for any public interfaces NOT in `langchain_core.beta`
Patch version increases will occur for:
- Bug fixes
- New features
- Any changes to private interfaces
- Any changes to `langchain_core.beta`
## `langchain`
`langchain` is currently on version `0.0.x`.
All changes will be accompanied by a patch version increase. Any changes to public interfaces are nearly always done in a backwards compatible way and will be communicated ahead of time when they are not backwards compatible.
We are targeting January 2024 for a release of `langchain` v0.1, at which point `langchain` will adopt the same versioning policy as `langchain-core`.
## `langchain-community`
`langchain-community` is currently on version `0.0.x`.
All changes will be accompanied by a patch version increase.
## `langchain-experimental`
`langchain-experimental` is currently on version `0.0.x`.
All changes will be accompanied by a patch version increase.
## Partner Packages
Partner packages are versioned independently.
# 🌟 Recognition
If your contribution has made its way into a release, we will want to give you credit on Twitter (only if you want though)!
If you have a Twitter account you would like us to mention, please let us know in the PR or through another means.


@@ -10,16 +10,6 @@
"Example of how to use LCEL to write Python code."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0653c7c7",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-core langchain-experimental langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -27,12 +17,12 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import (\n",
"from langchain.prompts import (\n",
" ChatPromptTemplate,\n",
")\n",
"from langchain_experimental.utilities import PythonREPL\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_experimental.utilities import PythonREPL"
]
},
{


@@ -12,16 +12,6 @@
"One especially useful technique is to use embeddings to route a query to the most relevant prompt. Here's a very simple example."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b793a0aa",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-core langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -29,11 +19,12 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate\n",
"from langchain.utils.math import cosine_similarity\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"\n",
"physics_template = \"\"\"You are a very smart physics professor. \\\n",
"You are great at answering questions about physics in a concise and easy to understand manner. \\\n",


@@ -10,16 +10,6 @@
"This shows how to add memory to an arbitrary chain. Right now, you can use the memory classes but need to hook it up manually"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "18753dee",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -30,9 +20,9 @@
"from operator import itemgetter\n",
"\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI()\n",
"prompt = ChatPromptTemplate.from_messages(\n",


@@ -10,16 +10,6 @@
"This shows how to add in moderation (or other safeguards) around your LLM application."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6acf3505",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 20,
@@ -28,8 +18,8 @@
"outputs": [],
"source": [
"from langchain.chains import OpenAIModerationChain\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import OpenAI"
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.llms import OpenAI"
]
},
{


@@ -19,14 +19,6 @@
"Runnables can easily be used to string together multiple Chains"
]
},
{
"cell_type": "raw",
"id": "0f316b5c",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 4,
@@ -47,9 +39,9 @@
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"\n",
"prompt1 = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n",
"prompt2 = ChatPromptTemplate.from_template(\n",


@@ -35,14 +35,6 @@
"Note, you can mix and match PromptTemplate/ChatPromptTemplates and LLMs/ChatModels as you like here."
]
},
{
"cell_type": "raw",
"id": "ef79a54b",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -50,8 +42,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"tell me a joke about {foo}\")\n",
"model = ChatOpenAI()\n",


@@ -12,16 +12,6 @@
"With LCEL, it's easy to add custom functionality for managing the size of prompts within your chain or agent. Let's look at simple agent example that can search Wikipedia for information."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1846587d",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai wikipedia"
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -29,17 +19,19 @@
"metadata": {},
"outputs": [],
"source": [
"# !pip install langchain wikipedia\n",
"\n",
"from operator import itemgetter\n",
"\n",
"from langchain.agents import AgentExecutor, load_tools\n",
"from langchain.agents.format_scratchpad import format_to_openai_function_messages\n",
"from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n",
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain.prompts.chat import ChatPromptValue\n",
"from langchain.tools import WikipediaQueryRun\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.tools.convert_to_openai import format_tool_to_openai_function\n",
"from langchain_community.utilities import WikipediaAPIWrapper\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.utilities import WikipediaAPIWrapper"
]
},
{


@@ -26,7 +26,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai faiss-cpu tiktoken"
"!pip install langchain openai faiss-cpu tiktoken"
]
},
{
@@ -38,11 +38,12 @@
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings"
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough"
]
},
{


@@ -19,14 +19,6 @@
"We can replicate our SQLDatabaseChain with Runnables."
]
},
{
"cell_type": "raw",
"id": "b3121aa8",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -34,7 +26,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain.prompts import ChatPromptTemplate\n",
"\n",
"template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n",
"{schema}\n",
@@ -101,9 +93,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI()\n",
"\n",


@@ -17,7 +17,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai duckduckgo-search"
"!pip install duckduckgo-search"
]
},
{
@@ -27,10 +27,10 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.tools import DuckDuckGoSearchRun\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser"
]
},
{


@@ -30,38 +30,30 @@
"The most basic and common use case is chaining a prompt template and a model together. To see how this works, let's create a chain that takes a topic and generates a joke:"
]
},
{
"cell_type": "raw",
"id": "278b0027",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain-core langchain-community langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 7,
"id": "466b65b3",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"Why don't ice creams ever get invited to parties?\\n\\nBecause they always drip when things heat up!\""
"\"Why did the ice cream go to therapy?\\n\\nBecause it had too many toppings and couldn't find its cone-fidence!\""
]
},
"execution_count": 1,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"tell me a short joke about {topic}\")\n",
"model = ChatOpenAI(model=\"gpt-4\")\n",
"model = ChatOpenAI()\n",
"output_parser = StrOutputParser()\n",
"\n",
"chain = prompt | model | output_parser\n",
@@ -97,7 +89,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 8,
"id": "b8656990",
"metadata": {},
"outputs": [
@@ -107,7 +99,7 @@
"ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])"
]
},
"execution_count": 2,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
@@ -119,7 +111,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 9,
"id": "e6034488",
"metadata": {},
"outputs": [
@@ -129,7 +121,7 @@
"[HumanMessage(content='tell me a short joke about ice cream')]"
]
},
"execution_count": 3,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
@@ -140,7 +132,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 10,
"id": "60565463",
"metadata": {},
"outputs": [
@@ -150,7 +142,7 @@
"'Human: tell me a short joke about ice cream'"
]
},
"execution_count": 4,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@@ -171,17 +163,17 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 11,
"id": "33cf5f72",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"Why don't ice creams ever get invited to parties?\\n\\nBecause they always bring a melt down!\")"
"AIMessage(content=\"Why did the ice cream go to therapy? \\n\\nBecause it had too many toppings and couldn't find its cone-fidence!\")"
]
},
"execution_count": 5,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
@@ -201,23 +193,23 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 12,
"id": "8feb05da",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'\\n\\nRobot: Why did the ice cream truck break down? Because it had a meltdown!'"
"'\\n\\nRobot: Why did the ice cream go to therapy? Because it had a rocky road.'"
]
},
"execution_count": 6,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_openai.llms import OpenAI\n",
"from langchain_community.llms import OpenAI\n",
"\n",
"llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n",
"llm.invoke(prompt_value)"
@@ -332,12 +324,12 @@
"# Requires:\n",
"# pip install langchain docarray tiktoken\n",
"\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import DocArrayInMemorySearch\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
"from langchain_openai.chat_models import ChatOpenAI\n",
"from langchain_openai.embeddings import OpenAIEmbeddings\n",
"\n",
"vectorstore = DocArrayInMemorySearch.from_texts(\n",
" [\"harrison worked at kensho\", \"bears like to eat honey\"],\n",
@@ -494,7 +486,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.9.1"
}
},
"nbformat": 4,

View File

@@ -12,16 +12,6 @@
"Suppose we have a simple prompt + model sequence:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c5dad8b5",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -29,10 +19,10 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.runnables import RunnablePassthrough"
]
},
{
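Whatever the surrounding page, these imports sketch a familiar pattern: a prompt + model sequence where `RunnablePassthrough` forwards the caller's input unchanged into the prompt's input map. A minimal sketch, assuming an OpenAI key:

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template("Tell me a fact about {topic}")

# RunnablePassthrough hands the incoming value through untouched, so the
# chain can be invoked with a bare string instead of a dict.
chain = {"topic": RunnablePassthrough()} | prompt | ChatOpenAI() | StrOutputParser()

chain.invoke("honeybees")
```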

View File

@@ -34,16 +34,6 @@
"With LLMs we can configure things like temperature"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "40ed76a2",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 35,
@@ -52,8 +42,8 @@
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.runnables import ConfigurableField\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(temperature=0).configurable_fields(\n",
" temperature=ConfigurableField(\n",
@@ -274,9 +264,8 @@
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate\n",
"from langchain_community.chat_models import ChatAnthropic\n",
"from langchain_core.runnables import ConfigurableField\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatAnthropic, ChatOpenAI\n",
"from langchain_core.runnables import ConfigurableField"
]
},
{
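A minimal sketch tying these hunks together: `configurable_fields` exposes the temperature knob, and `with_config` overrides it per call. An OpenAI key is assumed; the `ChatAnthropic` import hints the page goes on to `configurable_alternatives`, not shown here:

```python
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI

model = ChatOpenAI(temperature=0).configurable_fields(
    temperature=ConfigurableField(
        id="llm_temperature",
        name="LLM Temperature",
        description="The temperature of the LLM",
    )
)

model.invoke("pick a random number")  # runs at temperature 0
model.with_config(configurable={"llm_temperature": 0.9}).invoke("pick a random number")
```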

View File

@@ -16,16 +16,6 @@
"Let's take a look at this in action!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "23b2b564",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 16,
@@ -33,10 +23,10 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import chain\n",
"from langchain_openai import ChatOpenAI"
"from langchain_core.runnables import chain"
]
},
{
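For reference, a minimal sketch of the decorator these imports point at — `@chain` wraps an arbitrary function as a Runnable so it composes like any other chain step. Prompts are illustrative and an OpenAI key is assumed:

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import chain
from langchain_openai import ChatOpenAI

prompt1 = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
prompt2 = ChatPromptTemplate.from_template("What is the subject of this joke: {joke}")


@chain
def custom_chain(text: str) -> str:
    # Arbitrary Python between two model calls -- the decorator makes the
    # whole function invocable as a single Runnable.
    joke = (prompt1 | ChatOpenAI() | StrOutputParser()).invoke({"topic": text})
    return (prompt2 | ChatOpenAI() | StrOutputParser()).invoke({"joke": joke})


custom_chain.invoke("bears")
```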

View File

@@ -24,16 +24,6 @@
"IMPORTANT: By default, a lot of the LLM wrappers catch errors and retry. You will most likely want to turn those off when working with fallbacks. Otherwise the first wrapper will keep on retrying and not failing."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ebb61b1f",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -41,8 +31,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatAnthropic\n",
"from langchain_openai import ChatOpenAI"
"from langchain_community.chat_models import ChatAnthropic, ChatOpenAI"
]
},
{
@@ -152,7 +141,7 @@
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
@@ -252,7 +241,7 @@
"source": [
"# Now lets create a chain with the normal OpenAI model\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI\n",
"from langchain_community.llms import OpenAI\n",
"\n",
"prompt_template = \"\"\"Instructions: You should always include a compliment in your response.\n",
"\n",
@@ -302,7 +291,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.10.12"
}
},
"nbformat": 4,

View File

@@ -24,14 +24,6 @@
"Note that all inputs to these functions need to be a SINGLE argument. If you have a function that accepts multiple arguments, you should write a wrapper that accepts a single input and unpacks it into multiple argument."
]
},
{
"cell_type": "raw",
"id": "9a5fe916",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -41,9 +33,9 @@
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"\n",
"def length_function(text):\n",

View File

@@ -24,15 +24,6 @@
"## Sync version"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -42,8 +33,8 @@
"from typing import Iterator, List\n",
"\n",
"from langchain.prompts.chat import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\n",
" \"Write a comma-separated list of 5 animals similar to: {animal}\"\n",

View File

@@ -15,11 +15,11 @@
{
"cell_type": "code",
"execution_count": null,
"id": "d816e954",
"id": "8bc5d235",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai faiss-cpu tiktoken"
"!pip install langchain openai faiss-cpu tiktoken"
]
},
{
@@ -29,11 +29,14 @@
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings"
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough"
]
},
{
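The distinctive import here is `itemgetter`; a minimal sketch of what it is for in this context — pulling individual keys out of the chain's input dict when assembling the prompt variables. Assumes `faiss-cpu` is installed and an OpenAI key is set:

```python
from operator import itemgetter

from langchain_community.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

vectorstore = FAISS.from_texts(
    ["harrison worked at kensho"], embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()

prompt = ChatPromptTemplate.from_template(
    "Answer in {language}, using only this context:\n{context}\n\nQuestion: {question}"
)

chain = (
    {
        # Each itemgetter selects one key from the invoke() dict below.
        "context": itemgetter("question") | retriever,
        "question": itemgetter("question"),
        "language": itemgetter("language"),
    }
    | prompt
    | ChatOpenAI()
    | StrOutputParser()
)

chain.invoke({"question": "where did harrison work", "language": "italian"})
```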

View File

@@ -1,7 +1,7 @@
{
"cells": [
{
"cell_type": "raw",
"cell_type": "markdown",
"id": "e2596041-9b76-4e74-836f-e6235086bbf0",
"metadata": {},
"source": [
@@ -26,16 +26,6 @@
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2627ffd7",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 3,
@@ -54,11 +44,12 @@
}
],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"\n",
"vectorstore = FAISS.from_texts(\n",
" [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",
@@ -137,11 +128,12 @@
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"\n",
"vectorstore = FAISS.from_texts(\n",
" [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",
@@ -200,9 +192,9 @@
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.runnables import RunnableParallel\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI()\n",
"joke_chain = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\") | model\n",

View File

@@ -41,7 +41,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain redis anthropic"
"!pip install -U langchain redis anthropic"
]
},
{
@@ -131,10 +131,10 @@
"source": [
"from typing import Optional\n",
"\n",
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_community.chat_message_histories import RedisChatMessageHistory\n",
"from langchain_community.chat_models import ChatAnthropic\n",
"from langchain_core.chat_history import BaseChatMessageHistory\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.runnables.history import RunnableWithMessageHistory"
]
},
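A minimal sketch of how these imports fit together; the prompt shape, the `claude-2` model name, and the local Redis URL are assumptions for illustration:

```python
from langchain_community.chat_message_histories import RedisChatMessageHistory
from langchain_community.chat_models import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You're an assistant who's good at {ability}"),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{question}"),
    ]
)
chain = prompt | ChatAnthropic(model="claude-2")

# Each session_id maps to its own Redis-backed message history.
chain_with_history = RunnableWithMessageHistory(
    chain,
    lambda session_id: RedisChatMessageHistory(session_id, url="redis://localhost:6379/0"),
    input_messages_key="question",
    history_messages_key="history",
)

chain_with_history.invoke(
    {"ability": "math", "question": "What does cosine mean?"},
    config={"configurable": {"session_id": "foo"}},
)
```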

View File

@@ -28,16 +28,6 @@
"See the example below:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e169b952",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 11,
@@ -107,11 +97,12 @@
}
],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"\n",
"vectorstore = FAISS.from_texts(\n",
" [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",

View File

@@ -50,14 +50,6 @@
"Let's take a look at these methods. To do so, we'll create a super simple PromptTemplate + ChatModel chain."
]
},
{
"cell_type": "raw",
"id": "57768739",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain-core langchain-community langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -65,8 +57,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"\n",
"model = ChatOpenAI()\n",
"prompt = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\")\n",
@@ -667,10 +659,10 @@
}
],
"source": [
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"template = \"\"\"Answer the question based only on the following context:\n",
"{context}\n",

View File

@@ -35,14 +35,6 @@
"To better understand the value of LCEL, it's helpful to see it in action and think about how we might recreate similar functionality without it. In this walkthrough we'll do just that with our [basic example](/docs/expression_language/get_started#basic_example) from the get started section. We'll take our simple prompt + model chain, which under the hood already defines a lot of functionality, and see what it would take to recreate all of it."
]
},
{
"cell_type": "raw",
"id": "b99b47ec",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain-core langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -50,8 +42,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import ChatOpenAI\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"\n",
@@ -397,7 +389,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import OpenAI\n",
"from langchain_community.llms import OpenAI\n",
"\n",
"llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n",
"llm_chain = (\n",
@@ -1010,12 +1002,11 @@
"source": [
"import os\n",
"\n",
"from langchain_community.chat_models import ChatAnthropic\n",
"from langchain_openai import ChatOpenAI\n",
"from langchain_openai import OpenAI\n",
"from langchain_community.chat_models import ChatAnthropic, ChatOpenAI\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough, ConfigurableField\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"os.environ[\"LANGCHAIN_API_KEY\"] = \"...\"\n",
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",

View File

@@ -70,10 +70,10 @@ For this getting started guide, we will provide two options: using OpenAI (a pop
<Tabs>
<TabItem value="openai" label="OpenAI" default>
First we'll need to import the LangChain x OpenAI integration package.
First we'll need to install their Python package:
```shell
pip install langchain-openai
pip install openai
```
Accessing the API requires an API key, which you can get by creating an account and heading [here](https://platform.openai.com/account/api-keys). Once we have a key we'll want to set it as an environment variable by running:
@@ -85,7 +85,7 @@ export OPENAI_API_KEY="..."
We can then initialize the model:
```python
from langchain_openai import ChatOpenAI
from langchain_community.chat_models import ChatOpenAI
llm = ChatOpenAI()
```
@@ -93,7 +93,7 @@ llm = ChatOpenAI()
If you'd prefer not to set an environment variable, you can pass the key in directly via the `openai_api_key` named parameter when initializing the OpenAI LLM class:
```python
from langchain_openai import ChatOpenAI
from langchain_community.chat_models import ChatOpenAI
llm = ChatOpenAI(openai_api_key="...")
```
@@ -128,7 +128,7 @@ We can also guide its response with a prompt template.
Prompt templates are used to convert raw user input into a better input for the LLM.
```python
from langchain_core.prompts import ChatPromptTemplate
from langchain.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages([
("system", "You are world class technical documentation writer."),
("user", "{input}")
@@ -199,10 +199,10 @@ For embedding models, we once again provide examples for accessing via OpenAI or
<Tabs>
<TabItem value="openai" label="OpenAI" default>
Make sure you have the `langchain_openai` package installed and the appropriate environment variables set (these are the same as needed for the LLM).
Make sure you have the openai package installed and the appropriate environment variables set (these are the same as needed for the LLM).
```python
from langchain_openai import OpenAIEmbeddings
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
```
@@ -221,24 +221,25 @@ embeddings = OllamaEmbeddings()
</Tabs>
Now, we can use this embedding model to ingest documents into a vectorstore.
We will use a simple local vectorstore, [FAISS](/docs/integrations/vectorstores/faiss), for simplicity's sake.
We will use a simple local vectorstore, [DocArray InMemorySearch](/docs/integrations/vectorstores/docarray_in_memory), for simplicity's sake.
First we need to install the required packages for that:
```shell
pip install faiss-cpu
pip install docarray
pip install tiktoken
```
Then we can build our index:
```python
from langchain_community.vectorstores import FAISS
from langchain_community.vectorstores import DocArrayInMemorySearch
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter()
documents = text_splitter.split_documents(docs)
vector = FAISS.from_documents(documents, embeddings)
vector = DocArrayInMemorySearch.from_documents(documents, embeddings)
```
Now that we have this data indexed in a vectorstore, we will create a retrieval chain.
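For context, a minimal sketch of the retrieval chain this section goes on to build, using the 0.1-era helpers and the `llm` and `vector` objects defined earlier on the page (the prompt wording is illustrative):

```python
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_template(
    """Answer the following question based only on the provided context:

<context>
{context}
</context>

Question: {input}"""
)

# Stuff retrieved documents into {context}, then wire the retriever in front.
document_chain = create_stuff_documents_chain(llm, prompt)
retrieval_chain = create_retrieval_chain(vector.as_retriever(), document_chain)

retrieval_chain.invoke({"input": "how can langsmith help with testing?"})
```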
@@ -415,7 +416,7 @@ pip install langchainhub
Now we can use it to get a predefined prompt
```python
from langchain_openai import ChatOpenAI
from langchain_community.chat_models import ChatOpenAI
from langchain import hub
from langchain.agents import create_openai_functions_agent
from langchain.agents import AgentExecutor
@@ -478,15 +479,15 @@ To create a server for our application we'll make a `serve.py` file. This will c
from typing import List
from fastapi import FastAPI
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain_community.chat_models import ChatOpenAI
from langchain_community.document_loaders import WebBaseLoader
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import DocArrayInMemorySearch
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.tools.retriever import create_retriever_tool
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_openai import ChatOpenAI
from langchain_community.chat_models import ChatOpenAI
from langchain import hub
from langchain.agents import create_openai_functions_agent
from langchain.agents import AgentExecutor
@@ -500,7 +501,7 @@ docs = loader.load()
text_splitter = RecursiveCharacterTextSplitter()
documents = text_splitter.split_documents(docs)
embeddings = OpenAIEmbeddings()
vector = FAISS.from_documents(documents, embeddings)
vector = DocArrayInMemorySearch.from_documents(documents, embeddings)
retriever = vector.as_retriever()
# 2. Create Tools

View File

@@ -25,7 +25,7 @@ Let's suppose we have a simple agent, and want to visualize the actions it takes
```python
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_openai import ChatOpenAI
from langchain_community.chat_models import ChatOpenAI
llm = ChatOpenAI(model_name="gpt-4", temperature=0)
tools = load_tools(["ddg-search", "llm-math"], llm=llm)

View File

@@ -104,7 +104,7 @@
},
"outputs": [],
"source": [
"%pip install --upgrade --quiet anthropic\n",
"# %pip install anthropic\n",
"# %env ANTHROPIC_API_KEY=YOUR_API_KEY"
]
},

View File

@@ -23,15 +23,6 @@
"In this example, you will use gpt-4 to select which output is preferred."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@@ -108,8 +99,8 @@
"outputs": [],
"source": [
"from langchain.agents import AgentType, Tool, initialize_agent\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.utilities import SerpAPIWrapper\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"# Initialize the language model\n",
"# You can add your own OpenAI API key by adding openai_api_key=\"<your_api_key>\"\n",

View File

@@ -318,7 +318,7 @@
},
"outputs": [],
"source": [
"%pip install --upgrade --quiet anthropic\n",
"# %pip install ChatAnthropic\n",
"# %env ANTHROPIC_API_KEY=<API_KEY>"
]
},
@@ -464,4 +464,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

Some files were not shown because too many files have changed in this diff.