docs: langchain-openai (#15513)
Updates docs and cookbooks to import ChatOpenAI, OpenAI, and OpenAIEmbeddings from `langchain_openai`. There are likely more.

Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
This commit is contained in:
parent be612f408e
commit b1fa726377
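The hunks below all apply the same migration: provider classes move from `langchain_community` (or the legacy `langchain.*` modules) into the `langchain_openai` partner package, and prompt classes move into `langchain_core.prompts`. A minimal sketch of the before/after, assuming `langchain-openai` is installed (`pip install langchain-openai`) and `OPENAI_API_KEY` is set; the model names are illustrative only:

```python
# Before: provider classes imported from the legacy/community packages
# from langchain_community.chat_models import ChatOpenAI
# from langchain_community.llms import OpenAI
# from langchain_community.embeddings import OpenAIEmbeddings

# After: the same classes come from the dedicated partner package
from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings

chat = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)  # chat-model wrapper
llm = OpenAI(temperature=0)                              # completion-style LLM
embeddings = OpenAIEmbeddings()                          # embedding model
```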
@@ -149,7 +149,7 @@
],
"source": [
"# Prompt\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"# Update the template based on the type of SQL Database like MySQL, Microsoft SQL Server and so on\n",
"template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n",
@@ -278,7 +278,7 @@
"source": [
"# Prompt\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"\n",
"template = \"\"\"Given an input question, convert it to a SQL query. No pre-amble. Based on the table schema below, write a SQL query that would answer the user's question:\n",
"{schema}\n",
@@ -198,9 +198,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"\n",
"# Generate summaries of text elements\n",
@@ -355,9 +355,9 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"\n",
"def create_multi_vector_retriever(\n",
@@ -235,9 +235,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser"
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI"
]
},
{
@@ -320,9 +320,9 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"# The vectorstore to use to index the child chunks\n",
"vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n",
@@ -211,9 +211,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser"
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI"
]
},
{
@@ -375,9 +375,9 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"# The vectorstore to use to index the child chunks\n",
"vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n",
@@ -209,9 +209,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOllama\n",
"from langchain_core.output_parsers import StrOutputParser"
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate"
]
},
{
@@ -132,8 +132,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"baseline = Chroma.from_texts(\n",
" texts=all_splits_pypdf_texts,\n",
@@ -160,9 +160,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"# Prompt\n",
"prompt_text = \"\"\"You are an assistant tasked with summarizing tables and text for retrieval. \\\n",
@@ -29,9 +29,8 @@
"source": [
"from langchain.chains import RetrievalQA\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
"\n",
"llm = OpenAI(temperature=0)"
]
@@ -161,7 +160,7 @@
"source": [
"# Import things that are needed generically\n",
"from langchain.agents import AgentType, Tool, initialize_agent\n",
"from langchain_community.llms import OpenAI"
"from langchain_openai import OpenAI"
]
},
{
@@ -29,7 +29,7 @@
"outputs": [],
"source": [
"from langchain.chains import AnalyzeDocumentChain\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)"
]
@@ -62,8 +62,8 @@
"outputs": [],
"source": [
"from langchain.docstore import InMemoryDocstore\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS"
"from langchain_community.vectorstores import FAISS\n",
"from langchain_openai import OpenAIEmbeddings"
]
},
{
@@ -100,8 +100,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_experimental.autonomous_agents import AutoGPT"
"from langchain_experimental.autonomous_agents import AutoGPT\n",
"from langchain_openai import ChatOpenAI"
]
},
{
@@ -41,8 +41,8 @@
"import pandas as pd\n",
"from langchain.docstore.document import Document\n",
"from langchain_community.agent_toolkits.pandas.base import create_pandas_dataframe_agent\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_experimental.autonomous_agents import AutoGPT\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"# Needed synce jupyter runs an async eventloop\n",
"nest_asyncio.apply()"
@@ -311,8 +311,8 @@
"# Memory\n",
"import faiss\n",
"from langchain.docstore import InMemoryDocstore\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"embeddings_model = OpenAIEmbeddings()\n",
"embedding_size = 1536\n",
@@ -31,9 +31,8 @@
"source": [
"from typing import Optional\n",
"\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_experimental.autonomous_agents import BabyAGI"
"from langchain_experimental.autonomous_agents import BabyAGI\n",
"from langchain_openai import OpenAI, OpenAIEmbeddings"
]
},
{
@@ -29,9 +29,8 @@
"\n",
"from langchain.chains import LLMChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_experimental.autonomous_agents import BabyAGI"
"from langchain_experimental.autonomous_agents import BabyAGI\n",
"from langchain_openai import OpenAI, OpenAIEmbeddings"
]
},
{
@@ -108,8 +107,8 @@
"source": [
"from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n",
"from langchain.chains import LLMChain\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.utilities import SerpAPIWrapper\n",
"from langchain_openai import OpenAI\n",
"\n",
"todo_prompt = PromptTemplate.from_template(\n",
" \"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}\"\n",
@@ -46,7 +46,7 @@
" HumanMessage,\n",
" SystemMessage,\n",
")\n",
"from langchain_community.chat_models import ChatOpenAI"
"from langchain_openai import ChatOpenAI"
]
},
{
@@ -47,9 +47,9 @@
"outputs": [],
"source": [
"from IPython.display import SVG\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_experimental.cpal.base import CPALChain\n",
"from langchain_experimental.pal_chain import PALChain\n",
"from langchain_openai import OpenAI\n",
"\n",
"llm = OpenAI(temperature=0, max_tokens=512)\n",
"cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)\n",
@@ -657,7 +657,7 @@
}
],
"source": [
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"embeddings = OpenAIEmbeddings()\n",
"embeddings"
@@ -834,7 +834,7 @@
"outputs": [],
"source": [
"from langchain.chains import ConversationalRetrievalChain\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(\n",
" model_name=\"gpt-3.5-turbo-0613\"\n",
@@ -44,8 +44,8 @@
"from langchain.prompts import StringPromptTemplate\n",
"from langchain.schema import AgentAction, AgentFinish\n",
"from langchain_community.agent_toolkits import NLAToolkit\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.tools.plugin import AIPlugin"
"from langchain_community.tools.plugin import AIPlugin\n",
"from langchain_openai import OpenAI"
]
},
{
@@ -115,8 +115,8 @@
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS"
"from langchain_community.vectorstores import FAISS\n",
"from langchain_openai import OpenAIEmbeddings"
]
},
{
@ -69,8 +69,8 @@
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"from langchain_community.agent_toolkits import NLAToolkit\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_community.tools.plugin import AIPlugin"
|
||||
"from langchain_community.tools.plugin import AIPlugin\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -139,8 +139,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_community.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores import FAISS"
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -41,8 +41,8 @@
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.prompts import StringPromptTemplate\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_community.utilities import SerpAPIWrapper"
|
||||
"from langchain_community.utilities import SerpAPIWrapper\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -104,8 +104,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.schema import Document\n",
|
||||
"from langchain_community.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores import FAISS"
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -93,7 +93,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Creating a OpenAI Chat LLM wrapper\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(temperature=0, model_name=\"gpt-4\")"
|
||||
]
|
||||
|
@ -56,9 +56,8 @@
|
||||
" CharacterTextSplitter,\n",
|
||||
" RecursiveCharacterTextSplitter,\n",
|
||||
")\n",
|
||||
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_community.vectorstores import DeepLake\n",
|
||||
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
|
||||
"activeloop_token = getpass.getpass(\"Activeloop Token:\")\n",
|
||||
|
@ -475,8 +475,8 @@
|
||||
" HumanMessagePromptTemplate,\n",
|
||||
" SystemMessagePromptTemplate,\n",
|
||||
")\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser"
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -547,9 +547,9 @@
|
||||
"\n",
|
||||
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
|
||||
"from langchain.storage import InMemoryStore\n",
|
||||
"from langchain_community.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores.chroma import Chroma\n",
|
||||
"from langchain_core.documents import Document\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def build_retriever(text_elements, tables, table_summaries):\n",
|
||||
|
@ -39,7 +39,7 @@
|
||||
"source": [
|
||||
"from elasticsearch import Elasticsearch\n",
|
||||
"from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI"
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -22,8 +22,8 @@
|
||||
"from typing import List, Optional\n",
|
||||
"\n",
|
||||
"from langchain.chains.openai_tools import create_extraction_chain_pydantic\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel"
|
||||
"from langchain_core.pydantic_v1 import BaseModel\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -153,7 +153,7 @@
|
||||
"from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n",
|
||||
"from langchain_core.runnables import Runnable\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel\n",
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.messages import SystemMessage\n",
|
||||
"from langchain_core.language_models import BaseLanguageModel\n",
|
||||
"\n",
|
||||
|
@ -74,9 +74,8 @@
|
||||
" CallbackManagerForRetrieverRun,\n",
|
||||
")\n",
|
||||
"from langchain.schema import BaseRetriever, Document\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_community.utilities import GoogleSerperAPIWrapper"
|
||||
"from langchain_community.utilities import GoogleSerperAPIWrapper\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -49,9 +49,8 @@
|
||||
"\n",
|
||||
"from langchain.docstore import InMemoryDocstore\n",
|
||||
"from langchain.retrievers import TimeWeightedVectorStoreRetriever\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_community.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
|
||||
"from termcolor import colored"
|
||||
]
|
||||
},
|
||||
|
@ -75,8 +75,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_experimental.autonomous_agents import HuggingGPT\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"# %env OPENAI_API_BASE=http://localhost:8000/v1"
|
||||
]
|
||||
|
@ -159,7 +159,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
|
||||
"from langchain_community.llms import OpenAI"
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -22,8 +22,7 @@
|
||||
"source": [
|
||||
"from langchain.chains import HypotheticalDocumentEmbedder, LLMChain\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_community.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_community.llms import OpenAI"
|
||||
"from langchain_openai import OpenAI, OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -49,7 +49,7 @@
|
||||
"source": [
|
||||
"# pick and configure the LLM of your choice\n",
|
||||
"\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")"
|
||||
]
|
||||
|
@ -43,8 +43,8 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_experimental.llm_bash.base import LLMBashChain\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"\n",
|
||||
|
@ -42,7 +42,7 @@
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import LLMCheckerChain\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0.7)\n",
|
||||
"\n",
|
||||
|
@ -46,7 +46,7 @@
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import LLMMathChain\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"llm_math = LLMMathChain.from_llm(llm, verbose=True)\n",
|
||||
|
@ -331,7 +331,7 @@
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import LLMSummarizationCheckerChain\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=2)\n",
|
||||
@ -822,7 +822,7 @@
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import LLMSummarizationCheckerChain\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=3)\n",
|
||||
@ -1096,7 +1096,7 @@
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import LLMSummarizationCheckerChain\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"checker_chain = LLMSummarizationCheckerChain.from_llm(llm, max_checks=3, verbose=True)\n",
|
||||
|
@ -14,8 +14,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_experimental.llm_symbolic_math.base import LLMSymbolicMathChain\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"llm_symbolic_math = LLMSymbolicMathChain.from_llm(llm)"
|
||||
|
@ -59,7 +59,7 @@
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.memory import ConversationBufferWindowMemory\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_community.llms import OpenAI"
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -91,8 +91,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage"
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -315,10 +315,10 @@
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def prompt_func(data_dict):\n",
|
||||
|
@ -44,7 +44,7 @@
|
||||
"source": [
|
||||
"from langchain.agents import AgentType, initialize_agent\n",
|
||||
"from langchain.tools import SteamshipImageGenerationTool\n",
|
||||
"from langchain_community.llms import OpenAI"
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -32,7 +32,7 @@
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
")\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI"
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -41,7 +41,7 @@
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
")\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI"
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -33,7 +33,7 @@
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
")\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI"
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -32,9 +32,9 @@
|
||||
"\n",
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_community.utilities import SQLDatabase\n",
|
||||
"from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"from sqlalchemy import MetaData, create_engine\n",
|
||||
"\n",
|
||||
"MYSCALE_HOST = \"msc-4a9e710a.us-east-1.aws.staging.myscale.cloud\"\n",
|
||||
@ -75,10 +75,10 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.callbacks import StdOutCallbackHandler\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_community.utilities.sql_database import SQLDatabase\n",
|
||||
"from langchain_experimental.sql.prompt import MYSCALE_PROMPT\n",
|
||||
"from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"chain = VectorSQLDatabaseChain(\n",
|
||||
" llm_chain=LLMChain(\n",
|
||||
@ -117,7 +117,6 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_experimental.retrievers.vector_sql_database import (\n",
|
||||
" VectorSQLDatabaseChainRetriever,\n",
|
||||
")\n",
|
||||
@ -126,6 +125,7 @@
|
||||
" VectorSQLDatabaseChain,\n",
|
||||
" VectorSQLRetrieveAllOutputParser,\n",
|
||||
")\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"output_parser_retrieve_all = VectorSQLRetrieveAllOutputParser.from_embeddings(\n",
|
||||
" output_parser.model\n",
|
||||
|
@ -22,8 +22,8 @@
|
||||
"from langchain.chains import RetrievalQA\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain_community.document_loaders import TextLoader\n",
|
||||
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores import Chroma"
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -53,7 +53,7 @@
|
||||
"from langchain.chains import create_qa_with_sources_chain\n",
|
||||
"from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI"
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -28,8 +28,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage"
|
||||
"from langchain_core.messages import HumanMessage, SystemMessage\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -414,7 +414,7 @@
|
||||
"BREAKING CHANGES:\n",
|
||||
"- To use Azure embeddings with OpenAI V1, you'll need to use the new `AzureOpenAIEmbeddings` instead of the existing `OpenAIEmbeddings`. `OpenAIEmbeddings` continue to work when using Azure with `openai<1`.\n",
|
||||
"```python\n",
|
||||
"from langchain_community.embeddings import AzureOpenAIEmbeddings\n",
|
||||
"from langchain_openai import AzureOpenAIEmbeddings\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"\n",
|
||||
@ -456,8 +456,8 @@
|
||||
"from typing import Literal\n",
|
||||
"\n",
|
||||
"from langchain.output_parsers.openai_tools import PydanticToolsParser\n",
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
|
||||
"\n",
|
||||
"\n",
|
||||
|
@ -52,7 +52,7 @@
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
")\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI"
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -30,15 +30,14 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import LLMMathChain\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n",
|
||||
"from langchain_core.tools import Tool\n",
|
||||
"from langchain_experimental.plan_and_execute import (\n",
|
||||
" PlanAndExecute,\n",
|
||||
" load_agent_executor,\n",
|
||||
" load_chat_planner,\n",
|
||||
")"
|
||||
")\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -82,7 +82,7 @@
|
||||
"source": [
|
||||
"from langchain.chains import ConversationalRetrievalChain\n",
|
||||
"from langchain.retrievers import KayAiRetriever\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
|
||||
"retriever = KayAiRetriever.create(\n",
|
||||
|
@ -17,8 +17,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_experimental.pal_chain import PALChain"
|
||||
"from langchain_experimental.pal_chain import PALChain\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -27,7 +27,7 @@
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import create_citation_fuzzy_match_chain\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI"
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -30,8 +30,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import pinecone\n",
|
||||
"from langchain_community.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores import Pinecone\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"pinecone.init(api_key=\"...\", environment=\"...\")"
|
||||
]
|
||||
@ -86,8 +86,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser"
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -43,7 +43,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.sql_database import SQLDatabase\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"CONNECTION_STRING = \"postgresql+psycopg2://postgres:test@localhost:5432/vectordb\" # Replace with your own\n",
|
||||
"db = SQLDatabase.from_uri(CONNECTION_STRING)"
|
||||
@ -88,7 +88,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings_model = OpenAIEmbeddings()"
|
||||
]
|
||||
@ -219,7 +219,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"template = \"\"\"You are a Postgres expert. Given an input question, first create a syntactically correct Postgres query to run, then look at the results of the query and return the answer to the input question.\n",
|
||||
"Unless the user specifies in the question a specific number of examples to obtain, query for at most 5 results using the LIMIT clause as per Postgres. You can order the results to return the most informative data in the database.\n",
|
||||
@ -267,9 +267,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"db = SQLDatabase.from_uri(\n",
|
||||
" CONNECTION_STRING\n",
|
||||
|
@ -31,11 +31,11 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough"
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -53,10 +53,9 @@
|
||||
"from langchain.prompts.base import StringPromptTemplate\n",
|
||||
"from langchain.schema import AgentAction, AgentFinish\n",
|
||||
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain_community.llms import BaseLLM, OpenAI\n",
|
||||
"from langchain_community.llms import BaseLLM\n",
|
||||
"from langchain_community.vectorstores import Chroma\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings\n",
|
||||
"from pydantic import BaseModel, Field"
|
||||
]
|
||||
},
|
||||
|
@ -18,9 +18,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompt_values import PromptValue"
|
||||
"from langchain_core.prompt_values import PromptValue\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -255,7 +255,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model=\"gpt-4\")\n",
|
||||
"res = model.predict(\n",
|
||||
@ -1083,8 +1083,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores import ElasticsearchStore\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"embeddings = OpenAIEmbeddings()"
|
||||
]
|
||||
|
@ -26,8 +26,8 @@
|
||||
"from langchain.chains import LLMChain\n",
|
||||
"from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_community.utilities import GoogleSearchAPIWrapper"
|
||||
"from langchain_community.utilities import GoogleSearchAPIWrapper\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -52,8 +52,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_experimental.smart_llm import SmartLLMChain"
|
||||
"from langchain_experimental.smart_llm import SmartLLMChain\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -9,7 +9,7 @@ To set it up, follow the instructions on https://database.guide/2-sample-databas
|
||||
|
||||
|
||||
```python
|
||||
from langchain_community.llms import OpenAI
|
||||
from langchain_openai import OpenAI
|
||||
from langchain_community.utilities import SQLDatabase
|
||||
from langchain_experimental.sql import SQLDatabaseChain
|
||||
```
|
||||
@ -200,7 +200,7 @@ result["intermediate_steps"]
|
||||
How to add memory to a SQLDatabaseChain:
|
||||
|
||||
```python
|
||||
from langchain_community.llms import OpenAI
|
||||
from langchain_openai import OpenAI
|
||||
from langchain_community.utilities import SQLDatabase
|
||||
from langchain_experimental.sql import SQLDatabaseChain
|
||||
```
|
||||
|
@ -23,10 +23,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnableLambda"
|
||||
"from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -24,7 +24,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=1, max_tokens=512, model=\"gpt-3.5-turbo-instruct\")"
|
||||
]
|
||||
|
@ -37,8 +37,8 @@
|
||||
"import getpass\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores import DeepLake\n",
|
||||
"from langchain_openai import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
|
||||
"activeloop_token = getpass.getpass(\"Activeloop Token:\")\n",
|
||||
@ -3809,7 +3809,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import ConversationalRetrievalChain\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(model_name=\"gpt-3.5-turbo-0613\") # switch to 'gpt-4'\n",
|
||||
"qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"
|
||||
|
@ -30,7 +30,7 @@
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
")\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI"
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -28,7 +28,7 @@
|
||||
" HumanMessage,\n",
|
||||
" SystemMessage,\n",
|
||||
")\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI"
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -599,7 +599,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)"
|
||||
]
|
||||
|
@ -20,9 +20,9 @@
|
||||
"from langchain.prompts import (\n",
|
||||
" ChatPromptTemplate,\n",
|
||||
")\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_experimental.utilities import PythonREPL"
|
||||
"from langchain_experimental.utilities import PythonREPL\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -21,10 +21,9 @@
|
||||
"source": [
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain.utils.math import cosine_similarity\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_community.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"physics_template = \"\"\"You are a very smart physics professor. \\\n",
|
||||
"You are great at answering questions about physics in a concise and easy to understand manner. \\\n",
|
||||
|
@ -20,9 +20,9 @@
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain.memory import ConversationBufferMemory\n",
|
||||
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
|
@ -18,8 +18,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import OpenAIModerationChain\n",
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_community.llms import OpenAI"
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -39,9 +39,9 @@
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain.schema import StrOutputParser\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"prompt1 = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n",
|
||||
"prompt2 = ChatPromptTemplate.from_template(\n",
|
||||
|
@ -42,8 +42,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\"tell me a joke about {foo}\")\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
|
@ -26,12 +26,12 @@
|
||||
"from langchain.agents import AgentExecutor, load_tools\n",
|
||||
"from langchain.agents.format_scratchpad import format_to_openai_function_messages\n",
|
||||
"from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n",
|
||||
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain.prompts.chat import ChatPromptValue\n",
|
||||
"from langchain.tools import WikipediaQueryRun\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_community.tools.convert_to_openai import format_tool_to_openai_function\n",
|
||||
"from langchain_community.utilities import WikipediaAPIWrapper"
|
||||
"from langchain_community.utilities import WikipediaAPIWrapper\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -38,12 +38,11 @@
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_community.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores import FAISS\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough"
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -26,7 +26,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n",
|
||||
"{schema}\n",
|
||||
@ -93,9 +93,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
"\n",
|
||||
|
@ -27,10 +27,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain.tools import DuckDuckGoSearchRun\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser"
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -32,28 +32,28 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"execution_count": 1,
|
||||
"id": "466b65b3",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Why did the ice cream go to therapy?\\n\\nBecause it had too many toppings and couldn't find its cone-fidence!\""
|
||||
"\"Why don't ice creams ever get invited to parties?\\n\\nBecause they always drip when things heat up!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\"tell me a short joke about {topic}\")\n",
|
||||
"model = ChatOpenAI()\n",
|
||||
"model = ChatOpenAI(model=\"gpt-4\")\n",
|
||||
"output_parser = StrOutputParser()\n",
|
||||
"\n",
|
||||
"chain = prompt | model | output_parser\n",
|
||||
@ -89,7 +89,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"execution_count": 2,
|
||||
"id": "b8656990",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@ -99,7 +99,7 @@
|
||||
"ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@ -111,7 +111,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 3,
|
||||
"id": "e6034488",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@ -121,7 +121,7 @@
|
||||
"[HumanMessage(content='tell me a short joke about ice cream')]"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@ -132,7 +132,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 4,
|
||||
"id": "60565463",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
@ -142,7 +142,7 @@
|
||||
"'Human: tell me a short joke about ice cream'"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@ -163,17 +163,17 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 5,
|
||||
"id": "33cf5f72",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"AIMessage(content=\"Why did the ice cream go to therapy? \\n\\nBecause it had too many toppings and couldn't find its cone-fidence!\")"
|
||||
"AIMessage(content=\"Why don't ice creams ever get invited to parties?\\n\\nBecause they always bring a melt down!\")"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
@ -193,23 +193,23 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 6,
|
||||
"id": "8feb05da",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\nRobot: Why did the ice cream go to therapy? Because it had a rocky road.'"
|
||||
"'\\n\\nRobot: Why did the ice cream truck break down? Because it had a meltdown!'"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_openai.llms import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n",
|
||||
"llm.invoke(prompt_value)"
|
||||
@ -324,12 +324,12 @@
|
||||
"# Requires:\n",
|
||||
"# pip install langchain docarray tiktoken\n",
|
||||
"\n",
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_community.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_community.vectorstores import DocArrayInMemorySearch\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
|
||||
"from langchain_openai.chat_models import ChatOpenAI\n",
|
||||
"from langchain_openai.embeddings import OpenAIEmbeddings\n",
|
||||
"\n",
|
||||
"vectorstore = DocArrayInMemorySearch.from_texts(\n",
|
||||
" [\"harrison worked at kensho\", \"bears like to eat honey\"],\n",
|
||||
@ -486,7 +486,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
"version": "3.10.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
@ -19,10 +19,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain.schema import StrOutputParser\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough"
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -42,8 +42,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.runnables import ConfigurableField\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"model = ChatOpenAI(temperature=0).configurable_fields(\n",
|
||||
" temperature=ConfigurableField(\n",
|
||||
@ -264,8 +264,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_community.chat_models import ChatAnthropic, ChatOpenAI\n",
|
||||
"from langchain_core.runnables import ConfigurableField"
|
||||
"from langchain_community.chat_models import ChatAnthropic\n",
|
||||
"from langchain_core.runnables import ConfigurableField\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -23,10 +23,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import chain"
|
||||
"from langchain_core.runnables import chain\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -31,7 +31,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain_community.chat_models import ChatAnthropic, ChatOpenAI"
|
||||
"from langchain_community.chat_models import ChatAnthropic\n",
|
||||
"from langchain_openai import ChatOpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -141,7 +142,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||
" [\n",
|
||||
@ -241,7 +242,7 @@
|
||||
"source": [
|
||||
"# Now lets create a chain with the normal OpenAI model\n",
|
||||
"from langchain.prompts import PromptTemplate\n",
|
||||
"from langchain_community.llms import OpenAI\n",
|
||||
"from langchain_openai import OpenAI\n",
|
||||
"\n",
|
||||
"prompt_template = \"\"\"Instructions: You should always include a compliment in your response.\n",
|
||||
"\n",
|
||||
@ -291,7 +292,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.12"
|
||||
"version": "3.11.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
@ -33,9 +33,9 @@
|
||||
"source": [
|
||||
"from operator import itemgetter\n",
|
||||
"\n",
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.prompts import ChatPromptTemplate\n",
|
||||
"from langchain_core.runnables import RunnableLambda\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def length_function(text):\n",
|
||||
|
@ -33,8 +33,8 @@
|
||||
"from typing import Iterator, List\n",
|
||||
"\n",
|
||||
"from langchain.prompts.chat import ChatPromptTemplate\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_openai import ChatOpenAI\n",
|
||||
"\n",
|
||||
"prompt = ChatPromptTemplate.from_template(\n",
|
||||
" \"Write a comma-separated list of 5 animals similar to: {animal}\"\n",
|
||||
|
@ -33,10 +33,9 @@
|
||||
"\n",
|
||||
"from langchain.prompts import ChatPromptTemplate\n",
|
||||
"from langchain.vectorstores import FAISS\n",
|
||||
"from langchain_community.chat_models import ChatOpenAI\n",
|
||||
"from langchain_community.embeddings import OpenAIEmbeddings\n",
|
||||
"from langchain_core.output_parsers import StrOutputParser\n",
|
||||
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough"
|
||||
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
|
||||
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -44,12 +44,11 @@
}
],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"\n",
"vectorstore = FAISS.from_texts(\n",
" [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",
@ -128,12 +127,11 @@
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"\n",
"vectorstore = FAISS.from_texts(\n",
" [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",
@ -192,9 +190,9 @@
}
],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnableParallel\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI()\n",
"joke_chain = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\") | model\n",
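Aside (not part of this diff): to show where the `RunnableParallel` import lands, a small sketch of the joke/poem map from the hunk above; the poem prompt wording is assumed from context.

```python
# Sketch; the poem prompt wording is an assumption.
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel
from langchain_openai import ChatOpenAI

model = ChatOpenAI()
joke_chain = ChatPromptTemplate.from_template("tell me a joke about {topic}") | model
poem_chain = (
    ChatPromptTemplate.from_template("write a 2-line poem about {topic}") | model
)

# Run both branches concurrently on the same input.
map_chain = RunnableParallel(joke=joke_chain, poem=poem_chain)
map_chain.invoke({"topic": "bear"})
```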
@ -131,10 +131,10 @@
"source": [
"from typing import Optional\n",
"\n",
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_community.chat_message_histories import RedisChatMessageHistory\n",
"from langchain_community.chat_models import ChatAnthropic\n",
"from langchain_core.chat_history import BaseChatMessageHistory\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_core.runnables.history import RunnableWithMessageHistory"
]
},
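Aside (not part of this diff): a hedged sketch of how the message-history imports above are wired together. The Redis URL, session id, prompt text, and model name are placeholders, and it assumes a reachable Redis instance plus Anthropic credentials.

```python
# Sketch only: Redis URL, session id, prompt text, and model name are placeholders.
from langchain_community.chat_message_histories import RedisChatMessageHistory
from langchain_community.chat_models import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You're an assistant who's good at {ability}"),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{question}"),
    ]
)
chain = prompt | ChatAnthropic(model="claude-2")

chain_with_history = RunnableWithMessageHistory(
    chain,
    # Each session id maps to its own Redis-backed message history.
    lambda session_id: RedisChatMessageHistory(
        session_id, url="redis://localhost:6379/0"
    ),
    input_messages_key="question",
    history_messages_key="history",
)

chain_with_history.invoke(
    {"ability": "math", "question": "What does cosine mean?"},
    config={"configurable": {"session_id": "example-session"}},
)
```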
@ -97,12 +97,11 @@
}
],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"\n",
"vectorstore = FAISS.from_texts(\n",
" [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",
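Aside (not part of this diff): several hunks above touch the same toy retrieval chain, so here is a hedged end-to-end sketch with the new import locations; it assumes `faiss-cpu` is installed and `OPENAI_API_KEY` is set.

```python
# Sketch assuming faiss-cpu and OPENAI_API_KEY are available.
from langchain_community.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

vectorstore = FAISS.from_texts(
    ["harrison worked at kensho"], embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()

template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | ChatOpenAI()
    | StrOutputParser()
)
chain.invoke("where did harrison work?")
```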
@ -57,8 +57,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI()\n",
"prompt = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\")\n",
@ -659,10 +659,10 @@
}
],
"source": [
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"template = \"\"\"Answer the question based only on the following context:\n",
"{context}\n",
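Aside (not part of this diff): for reference, the basic pipeline that the joke prompt above feeds into, sketched with the updated imports; the topic value is arbitrary.

```python
# Minimal sketch; the topic is arbitrary.
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
model = ChatOpenAI()

# Prompt formatting -> chat model -> plain-string output
chain = prompt | model | StrOutputParser()
chain.invoke({"topic": "ice cream"})
```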
@ -42,8 +42,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"\n",
"\n",
@ -389,7 +389,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.llms import OpenAI\n",
"from langchain_openai import OpenAI\n",
"\n",
"llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n",
"llm_chain = (\n",
@ -1002,8 +1002,9 @@
"source": [
"import os\n",
"\n",
"from langchain_community.chat_models import ChatAnthropic, ChatOpenAI\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.chat_models import ChatAnthropic\n",
"from langchain_openai import ChatOpenAI\n",
"from langchain_openai import OpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
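Aside (not part of this diff): the hunks above also switch the completion-style `OpenAI` import; a hedged sketch of that variant, with the prompt wording assumed.

```python
# Sketch; prompt wording is an assumption.
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI

llm = OpenAI(model="gpt-3.5-turbo-instruct")
prompt = PromptTemplate.from_template("Tell me a short joke about {topic}")

llm_chain = prompt | llm | StrOutputParser()
llm_chain.invoke({"topic": "bears"})
```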
@ -70,10 +70,10 @@ For this getting started guide, we will provide two options: using OpenAI (a pop
<Tabs>
<TabItem value="openai" label="OpenAI" default>

First we'll need to install their Python package:
First we'll need to install the LangChain <> OpenAI integration package:

```shell
pip install openai
pip install langchain_openai
```

Accessing the API requires an API key, which you can get by creating an account and heading [here](https://platform.openai.com/account/api-keys). Once we have a key we'll want to set it as an environment variable by running:
@ -85,7 +85,7 @@ export OPENAI_API_KEY="..."
We can then initialize the model:

```python
from langchain_community.chat_models import ChatOpenAI
from langchain_openai import ChatOpenAI

llm = ChatOpenAI()
```
@ -93,7 +93,7 @@ llm = ChatOpenAI()
If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initializing the OpenAI LLM class:

```python
from langchain_community.chat_models import ChatOpenAI
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(openai_api_key="...")
```
@ -128,7 +128,7 @@ We can also guide its response with a prompt template.
Prompt templates are used to convert raw user input to a better input to the LLM.

```python
from langchain.prompts import ChatPromptTemplate
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages([
("system", "You are world class technical documentation writer."),
("user", "{input}")
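# (Hedged continuation, not shown in this hunk: the guide later combines the
# prompt with the model, roughly `chain = prompt | llm` followed by
# `chain.invoke({"input": "..."})`.)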
@ -199,10 +199,10 @@ For embedding models, we once again provide examples for accessing via OpenAI or
<Tabs>
<TabItem value="openai" label="OpenAI" default>

Make sure you have the openai package installed and the appropriate environment variables set (these are the same as needed for the LLM).
Make sure you have the `langchain_openai` package installed and the appropriate environment variables set (these are the same as needed for the LLM).

```python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()
```
@ -416,7 +416,7 @@ pip install langchainhub
Now we can use it to get a predefined prompt

```python
from langchain_community.chat_models import ChatOpenAI
from langchain_openai import ChatOpenAI
from langchain import hub
from langchain.agents import create_openai_functions_agent
from langchain.agents import AgentExecutor
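# (Hedged sketch of the lines that follow in the guide; the hub prompt id is an
# assumption.)
# prompt = hub.pull("hwchase17/openai-functions-agent")
# agent = create_openai_functions_agent(llm, tools, prompt)
# agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)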
@ -479,15 +479,15 @@ To create a server for our application we'll make a `serve.py` file. This will c
from typing import List

from fastapi import FastAPI
from langchain.prompts import ChatPromptTemplate
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import DocArrayInMemorySearch
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.tools.retriever import create_retriever_tool
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.chat_models import ChatOpenAI
from langchain_openai import ChatOpenAI
from langchain import hub
from langchain.agents import create_openai_functions_agent
from langchain.agents import AgentExecutor
@ -25,7 +25,7 @@ Let's suppose we have a simple agent, and want to visualize the actions it takes

```python
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_community.chat_models import ChatOpenAI
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model_name="gpt-4", temperature=0)
tools = load_tools(["ddg-search", "llm-math"], llm=llm)
@ -99,8 +99,8 @@
"outputs": [],
"source": [
"from langchain.agents import AgentType, Tool, initialize_agent\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.utilities import SerpAPIWrapper\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"# Initialize the language model\n",
"# You can add your own OpenAI API key by adding openai_api_key=\"<your_api_key>\"\n",
@ -25,7 +25,7 @@
"outputs": [],
"source": [
"from langchain.evaluation import load_evaluator\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"evaluator = load_evaluator(\"labeled_score_string\", llm=ChatOpenAI(model=\"gpt-4\"))"
]
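Aside (not part of this diff): a hedged usage sketch for the evaluator configured above; it assumes `OPENAI_API_KEY` is set, and the prediction, reference, and input strings are invented.

```python
# Sketch; assumes OPENAI_API_KEY is set and the strings below are invented examples.
from langchain.evaluation import load_evaluator
from langchain_openai import ChatOpenAI

evaluator = load_evaluator("labeled_score_string", llm=ChatOpenAI(model="gpt-4"))

eval_result = evaluator.evaluate_strings(
    prediction="You can find them in the dresser's third drawer.",
    reference="The socks are in the third drawer of the dresser.",
    input="Where are my socks?",
)
print(eval_result["score"])
```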
@ -26,7 +26,7 @@
"from langchain.chains import LLMChain\n",
"from langchain.evaluation import AgentTrajectoryEvaluator\n",
"from langchain.schema import AgentAction\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"\n",
"class StepNecessityEvaluator(AgentTrajectoryEvaluator):\n",
@ -76,7 +76,7 @@
"\n",
"from langchain.agents import AgentType, initialize_agent\n",
"from langchain.tools import tool\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_openai import ChatOpenAI\n",
"from pydantic import HttpUrl\n",
"\n",
"\n",
@ -33,7 +33,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatAnthropic, ChatOpenAI"
"from langchain_community.chat_models import ChatAnthropic\n",
"from langchain_openai import ChatOpenAI"
]
},
{
@ -143,7 +144,7 @@
}
],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
@ -206,7 +207,7 @@
"source": [
"# Now lets create a chain with the normal OpenAI model\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_openai import OpenAI\n",
"\n",
"prompt_template = \"\"\"Instructions: You should always include a compliment in your response.\n",
"\n",
@ -21,7 +21,8 @@
"source": [
"from langchain.model_laboratory import ModelLaboratory\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms import Cohere, HuggingFaceHub, OpenAI"
"from langchain_community.llms import Cohere, HuggingFaceHub\n",
"from langchain_openai import OpenAI"
]
},
{
@ -130,7 +130,7 @@
],
"source": [
"from langchain.prompts.prompt import PromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"anonymizer = PresidioAnonymizer()\n",
"\n",
@ -638,8 +638,8 @@
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"# 2. Load the data: In our case data's already loaded\n",
"# 3. Anonymize the data before indexing\n",
@ -664,14 +664,14 @@
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.chat_models.openai import ChatOpenAI\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import (\n",
" RunnableLambda,\n",
" RunnableParallel,\n",
" RunnablePassthrough,\n",
")\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"# 6. Create anonymizer chain\n",
"template = \"\"\"Answer the question based only on the following context:\n",
@ -208,7 +208,7 @@
],
"source": [
"from langchain.prompts.prompt import PromptTemplate\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"anonymizer = PresidioReversibleAnonymizer()\n",
"\n",
@ -12,7 +12,7 @@ content that may violate guidelines, be offensive, or deviate from the desired c

```python
# Imports
from langchain_community.llms import OpenAI
from langchain_openai import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains.llm import LLMChain
from langchain.chains.constitutional_ai.base import ConstitutionalChain
@ -207,7 +207,7 @@
],
"source": [
"from langchain.agents import AgentType, initialize_agent\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_openai import OpenAI\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"agent = initialize_agent(\n",
Some files were not shown because too many files have changed in this diff.