diff --git a/cookbook/Multi_modal_RAG.ipynb b/cookbook/Multi_modal_RAG.ipynb index 60e2c4d1f66..e6796f39d84 100644 --- a/cookbook/Multi_modal_RAG.ipynb +++ b/cookbook/Multi_modal_RAG.ipynb @@ -221,13 +221,14 @@ "metadata": {}, "outputs": [], "source": [ + "import base64\n", "import io\n", "import os\n", - "import base64\n", + "\n", "import numpy as np\n", - "from PIL import Image\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema.messages import HumanMessage, SystemMessage\n", + "from PIL import Image\n", "\n", "\n", "def encode_image(image_path):\n", @@ -294,7 +295,7 @@ } ], "source": [ - "from IPython.display import display, HTML\n", + "from IPython.display import HTML, display\n", "\n", "\n", "def plt_img_base64(img_base64):\n", @@ -347,11 +348,12 @@ "outputs": [], "source": [ "import uuid\n", - "from langchain.vectorstores import Chroma\n", - "from langchain.storage import InMemoryStore\n", - "from langchain.schema.document import Document\n", + "\n", "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", + "from langchain.schema.document import Document\n", + "from langchain.storage import InMemoryStore\n", + "from langchain.vectorstores import Chroma\n", "\n", "# The vectorstore to use to index the child chunks\n", "vectorstore = Chroma(\n", @@ -524,7 +526,8 @@ "outputs": [], "source": [ "from operator import itemgetter\n", - "from langchain.schema.runnable import RunnablePassthrough, RunnableLambda\n", + "\n", + "from langchain.schema.runnable import RunnableLambda, RunnablePassthrough\n", "\n", "\n", "def prompt_func(dict):\n", diff --git a/cookbook/Semi_Structured_RAG.ipynb b/cookbook/Semi_Structured_RAG.ipynb index 46b1935ac8c..c1d8823f976 100644 --- a/cookbook/Semi_Structured_RAG.ipynb +++ b/cookbook/Semi_Structured_RAG.ipynb @@ -102,8 +102,9 @@ "metadata": {}, "outputs": [], "source": [ - "from pydantic import BaseModel\n", "from typing import Any\n", + "\n", + "from pydantic import BaseModel\n", "from unstructured.partition.pdf import partition_pdf\n", "\n", "# Get elements\n", @@ -316,11 +317,12 @@ "outputs": [], "source": [ "import uuid\n", - "from langchain.vectorstores import Chroma\n", - "from langchain.storage import InMemoryStore\n", - "from langchain.schema.document import Document\n", + "\n", "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", + "from langchain.schema.document import Document\n", + "from langchain.storage import InMemoryStore\n", + "from langchain.vectorstores import Chroma\n", "\n", "# The vectorstore to use to index the child chunks\n", "vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n", diff --git a/cookbook/Semi_structured_and_multi_modal_RAG.ipynb b/cookbook/Semi_structured_and_multi_modal_RAG.ipynb index 4a1e3838952..459258ce3cc 100644 --- a/cookbook/Semi_structured_and_multi_modal_RAG.ipynb +++ b/cookbook/Semi_structured_and_multi_modal_RAG.ipynb @@ -92,8 +92,9 @@ "metadata": {}, "outputs": [], "source": [ - "from pydantic import BaseModel\n", "from typing import Any\n", + "\n", + "from pydantic import BaseModel\n", "from unstructured.partition.pdf import partition_pdf\n", "\n", "# Get elements\n", @@ -336,8 +337,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import glob\n", + "import os\n", "\n", "# Get all .txt file summaries\n", "file_paths = glob.glob(os.path.expanduser(os.path.join(path, \"*.txt\")))\n", @@ 
-371,11 +372,12 @@ "outputs": [], "source": [ "import uuid\n", - "from langchain.vectorstores import Chroma\n", - "from langchain.storage import InMemoryStore\n", - "from langchain.schema.document import Document\n", + "\n", "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", + "from langchain.schema.document import Document\n", + "from langchain.storage import InMemoryStore\n", + "from langchain.vectorstores import Chroma\n", "\n", "# The vectorstore to use to index the child chunks\n", "vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n", diff --git a/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb b/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb index 5d01fa2578c..27a53120e36 100644 --- a/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb +++ b/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb @@ -82,8 +82,9 @@ "metadata": {}, "outputs": [], "source": [ - "from pydantic import BaseModel\n", "from typing import Any\n", + "\n", + "from pydantic import BaseModel\n", "from unstructured.partition.pdf import partition_pdf\n", "\n", "# Path to save images\n", @@ -320,8 +321,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import glob\n", + "import os\n", "\n", "# Get all .txt files in the directory\n", "file_paths = glob.glob(os.path.expanduser(os.path.join(path, \"*.txt\")))\n", @@ -374,11 +375,12 @@ ], "source": [ "import uuid\n", - "from langchain.vectorstores import Chroma\n", - "from langchain.storage import InMemoryStore\n", - "from langchain.schema.document import Document\n", + "\n", "from langchain.embeddings import GPT4AllEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", + "from langchain.schema.document import Document\n", + "from langchain.storage import InMemoryStore\n", + "from langchain.vectorstores import Chroma\n", "\n", "# The vectorstore to use to index the child chunks\n", "vectorstore = Chroma(\n", diff --git a/cookbook/advanced_rag_eval.ipynb b/cookbook/advanced_rag_eval.ipynb index 640c06c9f2e..7d0494469df 100644 --- a/cookbook/advanced_rag_eval.ipynb +++ b/cookbook/advanced_rag_eval.ipynb @@ -132,8 +132,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import Chroma\n", "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.vectorstores import Chroma\n", "\n", "baseline = Chroma.from_texts(\n", " texts=all_splits_pypdf_texts,\n", @@ -197,12 +197,13 @@ "outputs": [], "source": [ "# Image summary chain\n", - "import os\n", "import base64\n", "import io\n", + "import os\n", "from io import BytesIO\n", - "from PIL import Image\n", + "\n", "from langchain.schema.messages import HumanMessage\n", + "from PIL import Image\n", "\n", "\n", "def encode_image(image_path):\n", @@ -270,9 +271,10 @@ "source": [ "import uuid\n", "from base64 import b64decode\n", - "from langchain.storage import InMemoryStore\n", - "from langchain.schema.document import Document\n", + "\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", + "from langchain.schema.document import Document\n", + "from langchain.storage import InMemoryStore\n", "\n", "\n", "def create_multi_vector_retriever(\n", @@ -363,7 +365,7 @@ } ], "source": [ - "from IPython.display import display, HTML\n", + "from IPython.display import HTML, display\n", "\n", "\n", "def plt_img_base64(img_base64):\n", @@ -472,6 +474,7 @@ "outputs": [], "source": [ "from operator import 
itemgetter\n", + "\n", "from langchain.schema.runnable import RunnablePassthrough\n", "\n", "# Prompt\n", @@ -516,6 +519,7 @@ "outputs": [], "source": [ "import re\n", + "\n", "from langchain.schema import Document\n", "from langchain.schema.runnable import RunnableLambda\n", "\n", diff --git a/cookbook/autogpt/autogpt.ipynb b/cookbook/autogpt/autogpt.ipynb index afe994cdfca..4410ac16c2c 100644 --- a/cookbook/autogpt/autogpt.ipynb +++ b/cookbook/autogpt/autogpt.ipynb @@ -27,10 +27,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.utilities import SerpAPIWrapper\n", "from langchain.agents import Tool\n", - "from langchain.tools.file_management.write import WriteFileTool\n", "from langchain.tools.file_management.read import ReadFileTool\n", + "from langchain.tools.file_management.write import WriteFileTool\n", + "from langchain.utilities import SerpAPIWrapper\n", "\n", "search = SerpAPIWrapper()\n", "tools = [\n", @@ -61,9 +61,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import FAISS\n", "from langchain.docstore import InMemoryDocstore\n", - "from langchain.embeddings import OpenAIEmbeddings" + "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.vectorstores import FAISS" ] }, { @@ -100,8 +100,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_experimental.autonomous_agents import AutoGPT\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain.chat_models import ChatOpenAI\n", + "from langchain_experimental.autonomous_agents import AutoGPT" ] }, { diff --git a/cookbook/autogpt/marathon_times.ipynb b/cookbook/autogpt/marathon_times.ipynb index 04d1219b4fc..42f06075920 100644 --- a/cookbook/autogpt/marathon_times.ipynb +++ b/cookbook/autogpt/marathon_times.ipynb @@ -34,16 +34,15 @@ "outputs": [], "source": [ "# General\n", - "import os\n", - "import pandas as pd\n", - "from langchain_experimental.autonomous_agents import AutoGPT\n", - "from langchain.chat_models import ChatOpenAI\n", - "\n", - "from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent\n", - "from langchain.docstore.document import Document\n", "import asyncio\n", - "import nest_asyncio\n", + "import os\n", "\n", + "import nest_asyncio\n", + "import pandas as pd\n", + "from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.docstore.document import Document\n", + "from langchain_experimental.autonomous_agents import AutoGPT\n", "\n", "# Needed since Jupyter runs an async event loop\n", "nest_asyncio.apply()" @@ -92,6 +91,7 @@ "import os\n", "from contextlib import contextmanager\n", "from typing import Optional\n", + "\n", "from langchain.agents import tool\n", "from langchain.tools.file_management.read import ReadFileTool\n", "from langchain.tools.file_management.write import WriteFileTool\n", @@ -223,14 +223,13 @@ }, "outputs": [], "source": [ - "from langchain.tools import BaseTool, DuckDuckGoSearchRun\n", - "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "\n", - "from pydantic import Field\n", "from langchain.chains.qa_with_sources.loading import (\n", - " load_qa_with_sources_chain,\n", " BaseCombineDocumentsChain,\n", + " load_qa_with_sources_chain,\n", ")\n", + "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "from langchain.tools import BaseTool, DuckDuckGoSearchRun\n", + "from pydantic import Field\n", "\n", "\n", "def 
_get_text_splitter():\n", @@ -311,9 +310,9 @@ "source": [ "# Memory\n", "import faiss\n", - "from langchain.vectorstores import FAISS\n", "from langchain.docstore import InMemoryDocstore\n", "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.vectorstores import FAISS\n", "\n", "embeddings_model = OpenAIEmbeddings()\n", "embedding_size = 1536\n", diff --git a/cookbook/baby_agi.ipynb b/cookbook/baby_agi.ipynb index 84309be2baa..a11c998e44c 100644 --- a/cookbook/baby_agi.ipynb +++ b/cookbook/baby_agi.ipynb @@ -31,8 +31,8 @@ "source": [ "from typing import Optional\n", "\n", - "from langchain.llms import OpenAI\n", "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.llms import OpenAI\n", "from langchain_experimental.autonomous_agents import BabyAGI" ] }, @@ -53,8 +53,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import FAISS\n", - "from langchain.docstore import InMemoryDocstore" + "from langchain.docstore import InMemoryDocstore\n", + "from langchain.vectorstores import FAISS" ] }, { diff --git a/cookbook/baby_agi_with_agent.ipynb b/cookbook/baby_agi_with_agent.ipynb index 8636cb9b906..27fc1ec55a4 100644 --- a/cookbook/baby_agi_with_agent.ipynb +++ b/cookbook/baby_agi_with_agent.ipynb @@ -28,9 +28,9 @@ "from typing import Optional\n", "\n", "from langchain.chains import LLMChain\n", + "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain_experimental.autonomous_agents import BabyAGI" ] }, @@ -62,8 +62,8 @@ "source": [ "%pip install faiss-cpu > /dev/null\n", "%pip install google-search-results > /dev/null\n", - "from langchain.vectorstores import FAISS\n", - "from langchain.docstore import InMemoryDocstore" + "from langchain.docstore import InMemoryDocstore\n", + "from langchain.vectorstores import FAISS" ] }, { @@ -106,10 +106,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import ZeroShotAgent, Tool, AgentExecutor\n", + "from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n", + "from langchain.chains import LLMChain\n", "from langchain.llms import OpenAI\n", "from langchain.utilities import SerpAPIWrapper\n", - "from langchain.chains import LLMChain\n", "\n", "todo_prompt = PromptTemplate.from_template(\n", " \"You are a planner who is an expert at coming up with a todo list for a given objective. 
Come up with a todo list for this objective: {objective}\"\n", diff --git a/cookbook/camel_role_playing.ipynb b/cookbook/camel_role_playing.ipynb index d0684d65250..158b231c449 100644 --- a/cookbook/camel_role_playing.ipynb +++ b/cookbook/camel_role_playing.ipynb @@ -35,16 +35,17 @@ "outputs": [], "source": [ "from typing import List\n", + "\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts.chat import (\n", - " SystemMessagePromptTemplate,\n", " HumanMessagePromptTemplate,\n", + " SystemMessagePromptTemplate,\n", ")\n", "from langchain.schema import (\n", " AIMessage,\n", + " BaseMessage,\n", " HumanMessage,\n", " SystemMessage,\n", - " BaseMessage,\n", ")" ] }, diff --git a/cookbook/causal_program_aided_language_model.ipynb b/cookbook/causal_program_aided_language_model.ipynb index 882d17aa8db..2c2c1f3d83a 100644 --- a/cookbook/causal_program_aided_language_model.ipynb +++ b/cookbook/causal_program_aided_language_model.ipynb @@ -47,10 +47,9 @@ "outputs": [], "source": [ "from IPython.display import SVG\n", - "\n", + "from langchain.llms import OpenAI\n", "from langchain_experimental.cpal.base import CPALChain\n", "from langchain_experimental.pal_chain import PALChain\n", - "from langchain.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0, max_tokens=512)\n", "cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)\n", diff --git a/cookbook/code-analysis-deeplake.ipynb b/cookbook/code-analysis-deeplake.ipynb index 913a286bd98..8e1d6d438a0 100644 --- a/cookbook/code-analysis-deeplake.ipynb +++ b/cookbook/code-analysis-deeplake.ipynb @@ -717,7 +717,6 @@ "source": [ "from langchain.vectorstores import DeepLake\n", "\n", - "\n", "username = \"\"\n", "\n", "\n", @@ -834,8 +833,8 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains import ConversationalRetrievalChain\n", + "from langchain.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(\n", " model_name=\"gpt-3.5-turbo-0613\"\n", diff --git a/cookbook/custom_agent_with_plugin_retrieval.ipynb b/cookbook/custom_agent_with_plugin_retrieval.ipynb index 5b04e21bf3a..9b081065fd9 100644 --- a/cookbook/custom_agent_with_plugin_retrieval.ipynb +++ b/cookbook/custom_agent_with_plugin_retrieval.ipynb @@ -32,19 +32,20 @@ "metadata": {}, "outputs": [], "source": [ + "import re\n", + "from typing import Union\n", + "\n", "from langchain.agents import (\n", " AgentExecutor,\n", - " LLMSingleActionAgent,\n", " AgentOutputParser,\n", + " LLMSingleActionAgent,\n", ")\n", - "from langchain.prompts import StringPromptTemplate\n", - "from langchain.llms import OpenAI\n", - "from langchain.chains import LLMChain\n", - "from typing import Union\n", - "from langchain.schema import AgentAction, AgentFinish\n", "from langchain.agents.agent_toolkits import NLAToolkit\n", - "from langchain.tools.plugin import AIPlugin\n", - "import re" + "from langchain.chains import LLMChain\n", + "from langchain.llms import OpenAI\n", + "from langchain.prompts import StringPromptTemplate\n", + "from langchain.schema import AgentAction, AgentFinish\n", + "from langchain.tools.plugin import AIPlugin" ] }, { @@ -113,9 +114,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import FAISS\n", "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.schema import Document" + "from langchain.schema import Document\n", + "from langchain.vectorstores import FAISS" ] }, { diff --git 
a/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb b/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb index 6bc5effcbeb..2937337d0da 100644 --- a/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb +++ b/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb @@ -56,20 +56,21 @@ "metadata": {}, "outputs": [], "source": [ + "import re\n", + "from typing import Union\n", + "\n", + "import plugnplai\n", "from langchain.agents import (\n", " AgentExecutor,\n", - " LLMSingleActionAgent,\n", " AgentOutputParser,\n", + " LLMSingleActionAgent,\n", ")\n", - "from langchain.prompts import StringPromptTemplate\n", - "from langchain.llms import OpenAI\n", - "from langchain.chains import LLMChain\n", - "from typing import Union\n", - "from langchain.schema import AgentAction, AgentFinish\n", "from langchain.agents.agent_toolkits import NLAToolkit\n", - "from langchain.tools.plugin import AIPlugin\n", - "import re\n", - "import plugnplai" + "from langchain.chains import LLMChain\n", + "from langchain.llms import OpenAI\n", + "from langchain.prompts import StringPromptTemplate\n", + "from langchain.schema import AgentAction, AgentFinish\n", + "from langchain.tools.plugin import AIPlugin" ] }, { @@ -137,9 +138,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import FAISS\n", "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.schema import Document" + "from langchain.schema import Document\n", + "from langchain.vectorstores import FAISS" ] }, { diff --git a/cookbook/deeplake_semantic_search_over_chat.ipynb b/cookbook/deeplake_semantic_search_over_chat.ipynb index 89f771f9e7e..aa6eb8c7cc2 100644 --- a/cookbook/deeplake_semantic_search_over_chat.ipynb +++ b/cookbook/deeplake_semantic_search_over_chat.ipynb @@ -48,16 +48,17 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", + "\n", + "from langchain.chains import RetrievalQA\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.llms import OpenAI\n", "from langchain.text_splitter import (\n", - " RecursiveCharacterTextSplitter,\n", " CharacterTextSplitter,\n", + " RecursiveCharacterTextSplitter,\n", ")\n", "from langchain.vectorstores import DeepLake\n", - "from langchain.chains import RetrievalQA\n", - "from langchain.llms import OpenAI\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "activeloop_token = getpass.getpass(\"Activeloop Token:\")\n", diff --git a/cookbook/elasticsearch_db_qa.ipynb b/cookbook/elasticsearch_db_qa.ipynb index cee48b36937..02a8faa77a4 100644 --- a/cookbook/elasticsearch_db_qa.ipynb +++ b/cookbook/elasticsearch_db_qa.ipynb @@ -38,9 +38,8 @@ "outputs": [], "source": [ "from elasticsearch import Elasticsearch\n", - "\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain" + "from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain\n", + "from langchain.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/extraction_openai_tools.ipynb b/cookbook/extraction_openai_tools.ipynb index a4fd2cc7d45..b9e78b96c1f 100644 --- a/cookbook/extraction_openai_tools.ipynb +++ b/cookbook/extraction_openai_tools.ipynb @@ -19,10 +19,11 @@ "metadata": {}, "outputs": [], "source": [ + "from typing import List, Optional\n", + "\n", + "from langchain.chains.openai_tools import create_extraction_chain_pydantic\n", "from 
langchain.chat_models import ChatOpenAI\n", - "from langchain.pydantic_v1 import BaseModel\n", - "from typing import Optional, List\n", - "from langchain.chains.openai_tools import create_extraction_chain_pydantic" + "from langchain.pydantic_v1 import BaseModel" ] }, { diff --git a/cookbook/fake_llm.ipynb b/cookbook/fake_llm.ipynb index 61e5fc3b4ac..016f3e9fcce 100644 --- a/cookbook/fake_llm.ipynb +++ b/cookbook/fake_llm.ipynb @@ -30,9 +30,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import load_tools\n", - "from langchain.agents import initialize_agent\n", - "from langchain.agents import AgentType" + "from langchain.agents import AgentType, initialize_agent, load_tools" ] }, { diff --git a/cookbook/forward_looking_retrieval_augmented_generation.ipynb b/cookbook/forward_looking_retrieval_augmented_generation.ipynb index 347fb5e1fec..ff17aac14a8 100644 --- a/cookbook/forward_looking_retrieval_augmented_generation.ipynb +++ b/cookbook/forward_looking_retrieval_augmented_generation.ipynb @@ -67,16 +67,16 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.schema import BaseRetriever\n", + "from typing import Any, List\n", + "\n", "from langchain.callbacks.manager import (\n", " AsyncCallbackManagerForRetrieverRun,\n", " CallbackManagerForRetrieverRun,\n", ")\n", - "from langchain.utilities import GoogleSerperAPIWrapper\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.llms import OpenAI\n", - "from langchain.schema import Document\n", - "from typing import Any, List" + "from langchain.schema import BaseRetriever, Document\n", + "from langchain.utilities import GoogleSerperAPIWrapper" ] }, { diff --git a/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb b/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb index 406a546f45a..8313966cb41 100644 --- a/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb +++ b/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb @@ -46,14 +46,13 @@ "source": [ "from datetime import datetime, timedelta\n", "from typing import List\n", - "from termcolor import colored\n", - "\n", "\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.docstore import InMemoryDocstore\n", "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers import TimeWeightedVectorStoreRetriever\n", - "from langchain.vectorstores import FAISS" + "from langchain.vectorstores import FAISS\n", + "from termcolor import colored" ] }, { @@ -153,6 +152,7 @@ "outputs": [], "source": [ "import math\n", + "\n", "import faiss\n", "\n", "\n", diff --git a/cookbook/gymnasium_agent_simulation.ipynb b/cookbook/gymnasium_agent_simulation.ipynb index 9f8236b8a32..9990afd4d8f 100644 --- a/cookbook/gymnasium_agent_simulation.ipynb +++ b/cookbook/gymnasium_agent_simulation.ipynb @@ -28,12 +28,11 @@ "outputs": [], "source": [ "import tenacity\n", - "\n", + "from langchain.output_parsers import RegexParser\n", "from langchain.schema import (\n", " HumanMessage,\n", " SystemMessage,\n", - ")\n", - "from langchain.output_parsers import RegexParser" + ")" ] }, { diff --git a/cookbook/human_input_chat_model.ipynb b/cookbook/human_input_chat_model.ipynb index 677d45af8cb..35a2f5969d0 100644 --- a/cookbook/human_input_chat_model.ipynb +++ b/cookbook/human_input_chat_model.ipynb @@ -55,9 +55,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import load_tools\n", - "from langchain.agents import initialize_agent\n", - 
"from langchain.agents import AgentType" + "from langchain.agents import AgentType, initialize_agent, load_tools" ] }, { diff --git a/cookbook/human_input_llm.ipynb b/cookbook/human_input_llm.ipynb index c197250a715..c06da208d38 100644 --- a/cookbook/human_input_llm.ipynb +++ b/cookbook/human_input_llm.ipynb @@ -28,9 +28,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import load_tools\n", - "from langchain.agents import initialize_agent\n", - "from langchain.agents import AgentType" + "from langchain.agents import AgentType, initialize_agent, load_tools" ] }, { diff --git a/cookbook/hypothetical_document_embeddings.ipynb b/cookbook/hypothetical_document_embeddings.ipynb index c640e61637d..d815aa9c443 100644 --- a/cookbook/hypothetical_document_embeddings.ipynb +++ b/cookbook/hypothetical_document_embeddings.ipynb @@ -20,9 +20,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", + "from langchain.chains import HypotheticalDocumentEmbedder, LLMChain\n", "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.chains import LLMChain, HypotheticalDocumentEmbedder\n", + "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate" ] }, diff --git a/cookbook/learned_prompt_optimization.ipynb b/cookbook/learned_prompt_optimization.ipynb index 1b0a5e35b3c..412c5b12ed1 100644 --- a/cookbook/learned_prompt_optimization.ipynb +++ b/cookbook/learned_prompt_optimization.ipynb @@ -790,8 +790,8 @@ } ], "source": [ - "from langchain.prompts.prompt import PromptTemplate\n", "from langchain.globals import set_debug\n", + "from langchain.prompts.prompt import PromptTemplate\n", "\n", "set_debug(True)\n", "\n", diff --git a/cookbook/llm_bash.ipynb b/cookbook/llm_bash.ipynb index 4316c3f0840..9f97d6c3a9a 100644 --- a/cookbook/llm_bash.ipynb +++ b/cookbook/llm_bash.ipynb @@ -43,8 +43,8 @@ } ], "source": [ - "from langchain_experimental.llm_bash.base import LLMBashChain\n", "from langchain.llms import OpenAI\n", + "from langchain_experimental.llm_bash.base import LLMBashChain\n", "\n", "llm = OpenAI(temperature=0)\n", "\n", @@ -69,8 +69,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts.prompt import PromptTemplate\n", "from langchain.chains.llm_bash.prompt import BashOutputParser\n", + "from langchain.prompts.prompt import PromptTemplate\n", "\n", "_PROMPT_TEMPLATE = \"\"\"If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put \"#!/bin/bash\" in your answer. 
Make sure to reason step by step, using this format:\n", "Question: \"copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'\"\n", @@ -185,7 +185,6 @@ "source": [ "from langchain_experimental.llm_bash.bash import BashProcess\n", "\n", - "\n", "persistent_process = BashProcess(persistent=True)\n", "bash_chain = LLMBashChain.from_llm(llm, bash_process=persistent_process, verbose=True)\n", "\n", diff --git a/cookbook/llm_math.ipynb b/cookbook/llm_math.ipynb index edc56654bbc..0e2079b9558 100644 --- a/cookbook/llm_math.ipynb +++ b/cookbook/llm_math.ipynb @@ -45,7 +45,8 @@ } ], "source": [ - "from langchain.llms import OpenAI\nfrom langchain.chains import LLMMathChain\n", + "from langchain.chains import LLMMathChain\n", + "from langchain.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "llm_math = LLMMathChain.from_llm(llm, verbose=True)\n", diff --git a/cookbook/meta_prompt.ipynb b/cookbook/meta_prompt.ipynb index 45da5f957ea..2339907a269 100644 --- a/cookbook/meta_prompt.ipynb +++ b/cookbook/meta_prompt.ipynb @@ -56,10 +56,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.memory import ConversationBufferWindowMemory" + "from langchain.llms import OpenAI\n", + "from langchain.memory import ConversationBufferWindowMemory\n", + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/cookbook/multi_modal_QA.ipynb b/cookbook/multi_modal_QA.ipynb index 135cdad8f14..c886212cde9 100644 --- a/cookbook/multi_modal_QA.ipynb +++ b/cookbook/multi_modal_QA.ipynb @@ -40,12 +40,13 @@ } ], "source": [ - "import os\n", - "import io\n", "import base64\n", + "import io\n", + "import os\n", + "\n", "import numpy as np\n", + "from IPython.display import HTML, display\n", "from PIL import Image\n", - "from IPython.display import display, HTML\n", "\n", "\n", "def encode_image(image_path):\n", diff --git a/cookbook/multi_modal_RAG_chroma.ipynb b/cookbook/multi_modal_RAG_chroma.ipynb index 372d2ad5b59..48b5d390250 100644 --- a/cookbook/multi_modal_RAG_chroma.ipynb +++ b/cookbook/multi_modal_RAG_chroma.ipynb @@ -184,11 +184,12 @@ "source": [ "import os\n", "import uuid\n", + "\n", "import chromadb\n", "import numpy as np\n", - "from PIL import Image as _PILImage\n", "from langchain.vectorstores import Chroma\n", "from langchain_experimental.open_clip import OpenCLIPEmbeddings\n", + "from PIL import Image as _PILImage\n", "\n", "# Create chroma\n", "vectorstore = Chroma(\n", @@ -233,10 +234,11 @@ "metadata": {}, "outputs": [], "source": [ - "import io\n", - "import numpy as np\n", "import base64\n", + "import io\n", "from io import BytesIO\n", + "\n", + "import numpy as np\n", "from PIL import Image\n", "\n", "\n", @@ -312,10 +314,11 @@ "outputs": [], "source": [ "from operator import itemgetter\n", + "\n", "from langchain.chat_models import ChatOpenAI\n", - "from langchain.schema.output_parser import StrOutputParser\n", - "from langchain.schema.runnable import RunnablePassthrough, RunnableLambda\n", "from langchain.schema.messages import HumanMessage, SystemMessage\n", + "from langchain.schema.output_parser import StrOutputParser\n", + "from langchain.schema.runnable import RunnableLambda, RunnablePassthrough\n", "\n", "\n", "def prompt_func(data_dict):\n", @@ -421,7 +424,7 @@ } ], "source": [ - "from IPython.display import display, HTML\n", + "from IPython.display import HTML, 
display\n", "\n", "\n", "def plt_img_base64(img_base64):\n", diff --git a/cookbook/multi_modal_output_agent.ipynb b/cookbook/multi_modal_output_agent.ipynb index 632a41350c7..22c8f8750c1 100644 --- a/cookbook/multi_modal_output_agent.ipynb +++ b/cookbook/multi_modal_output_agent.ipynb @@ -29,9 +29,10 @@ "metadata": {}, "outputs": [], "source": [ - "from steamship import Block, Steamship\n", "import re\n", - "from IPython.display import Image" + "\n", + "from IPython.display import Image\n", + "from steamship import Block, Steamship" ] }, { @@ -41,9 +42,8 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.llms import OpenAI\n", - "from langchain.agents import initialize_agent\n", - "from langchain.agents import AgentType\n", "from langchain.tools import SteamshipImageGenerationTool" ] }, diff --git a/cookbook/multi_player_dnd.ipynb b/cookbook/multi_player_dnd.ipynb index 7921cde9826..3a9c3e4b012 100644 --- a/cookbook/multi_player_dnd.ipynb +++ b/cookbook/multi_player_dnd.ipynb @@ -26,7 +26,8 @@ "metadata": {}, "outputs": [], "source": [ - "from typing import List, Callable\n", + "from typing import Callable, List\n", + "\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import (\n", " HumanMessage,\n", diff --git a/cookbook/multiagent_authoritarian.ipynb b/cookbook/multiagent_authoritarian.ipynb index 790cc1cfd57..18b2fcb7815 100644 --- a/cookbook/multiagent_authoritarian.ipynb +++ b/cookbook/multiagent_authoritarian.ipynb @@ -27,17 +27,17 @@ "metadata": {}, "outputs": [], "source": [ - "from collections import OrderedDict\n", "import functools\n", "import random\n", - "import tenacity\n", - "from typing import List, Callable\n", + "from collections import OrderedDict\n", + "from typing import Callable, List\n", "\n", + "import tenacity\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.output_parsers import RegexParser\n", "from langchain.prompts import (\n", " PromptTemplate,\n", ")\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.output_parsers import RegexParser\n", "from langchain.schema import (\n", " HumanMessage,\n", " SystemMessage,\n", diff --git a/cookbook/multiagent_bidding.ipynb b/cookbook/multiagent_bidding.ipynb index 49a2ab5c262..7ee0d7321ee 100644 --- a/cookbook/multiagent_bidding.ipynb +++ b/cookbook/multiagent_bidding.ipynb @@ -24,11 +24,12 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", + "from typing import Callable, List\n", + "\n", "import tenacity\n", - "from typing import List, Callable\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.output_parsers import RegexParser\n", + "from langchain.prompts import PromptTemplate\n", "from langchain.schema import (\n", " HumanMessage,\n", " SystemMessage,\n", diff --git a/cookbook/myscale_vector_sql.ipynb b/cookbook/myscale_vector_sql.ipynb index 6d958afae46..af50a5a154a 100644 --- a/cookbook/myscale_vector_sql.ipynb +++ b/cookbook/myscale_vector_sql.ipynb @@ -27,17 +27,15 @@ "metadata": {}, "outputs": [], "source": [ - "from os import environ\n", "import getpass\n", - "from langchain.llms import OpenAI\n", - "from langchain.utilities import SQLDatabase\n", + "from os import environ\n", + "\n", "from langchain.chains import LLMChain\n", - "from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n", - "from sqlalchemy import create_engine, MetaData\n", + "from langchain.llms import OpenAI\n", 
"from langchain.prompts import PromptTemplate\n", - "\n", - "\n", - "from sqlalchemy import create_engine\n", + "from langchain.utilities import SQLDatabase\n", + "from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n", + "from sqlalchemy import MetaData, create_engine\n", "\n", "MYSCALE_HOST = \"msc-4a9e710a.us-east-1.aws.staging.myscale.cloud\"\n", "MYSCALE_PORT = 443\n", @@ -76,9 +74,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", "from langchain.callbacks import StdOutCallbackHandler\n", - "\n", + "from langchain.llms import OpenAI\n", "from langchain.utilities.sql_database import SQLDatabase\n", "from langchain_experimental.sql.prompt import MYSCALE_PROMPT\n", "from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n", @@ -119,15 +116,16 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain\n", - "\n", - "from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n", + "from langchain.chat_models import ChatOpenAI\n", "from langchain_experimental.retrievers.vector_sql_database import (\n", " VectorSQLDatabaseChainRetriever,\n", ")\n", "from langchain_experimental.sql.prompt import MYSCALE_PROMPT\n", - "from langchain_experimental.sql.vector_sql import VectorSQLRetrieveAllOutputParser\n", + "from langchain_experimental.sql.vector_sql import (\n", + " VectorSQLDatabaseChain,\n", + " VectorSQLRetrieveAllOutputParser,\n", + ")\n", "\n", "output_parser_retrieve_all = VectorSQLRetrieveAllOutputParser.from_embeddings(\n", " output_parser.model\n", diff --git a/cookbook/openai_functions_retrieval_qa.ipynb b/cookbook/openai_functions_retrieval_qa.ipynb index c64c3427f29..f5bce419330 100644 --- a/cookbook/openai_functions_retrieval_qa.ipynb +++ b/cookbook/openai_functions_retrieval_qa.ipynb @@ -50,10 +50,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain.chains import create_qa_with_sources_chain\n", "from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import create_qa_with_sources_chain" + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.prompts import PromptTemplate" ] }, { @@ -230,9 +230,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chains import ConversationalRetrievalChain\n", + "from langchain.chains import ConversationalRetrievalChain, LLMChain\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain.chains import LLMChain\n", "\n", "memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n", "_template = \"\"\"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\\\n", @@ -357,12 +356,10 @@ "source": [ "from typing import List\n", "\n", - "from pydantic import BaseModel, Field\n", - "\n", "from langchain.chains.openai_functions import create_qa_with_structure_chain\n", - "\n", "from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate\n", - "from langchain.schema import SystemMessage, HumanMessage" + "from langchain.schema import HumanMessage, SystemMessage\n", + "from pydantic import BaseModel, Field" ] }, { diff --git a/cookbook/openai_v1_cookbook.ipynb b/cookbook/openai_v1_cookbook.ipynb index 
3434d8b07a6..15c3b655737 100644 --- a/cookbook/openai_v1_cookbook.ipynb +++ b/cookbook/openai_v1_cookbook.ipynb @@ -167,7 +167,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.tools import E2BDataAnalysisTool, DuckDuckGoSearchRun\n", + "from langchain.tools import DuckDuckGoSearchRun, E2BDataAnalysisTool\n", "\n", "tools = [E2BDataAnalysisTool(api_key=\"...\"), DuckDuckGoSearchRun()]" ] @@ -456,9 +456,9 @@ "from typing import Literal\n", "\n", "from langchain.output_parsers.openai_tools import PydanticToolsParser\n", - "from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.pydantic_v1 import BaseModel, Field\n", + "from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n", "\n", "\n", "class GetCurrentWeather(BaseModel):\n", diff --git a/cookbook/petting_zoo.ipynb b/cookbook/petting_zoo.ipynb index 5091fe1980f..bfb8c1a6e90 100644 --- a/cookbook/petting_zoo.ipynb +++ b/cookbook/petting_zoo.ipynb @@ -45,14 +45,14 @@ "source": [ "import collections\n", "import inspect\n", - "import tenacity\n", "\n", + "import tenacity\n", "from langchain.chat_models import ChatOpenAI\n", + "from langchain.output_parsers import RegexParser\n", "from langchain.schema import (\n", " HumanMessage,\n", " SystemMessage,\n", - ")\n", - "from langchain.output_parsers import RegexParser" + ")" ] }, { diff --git a/cookbook/program_aided_language_model.ipynb b/cookbook/program_aided_language_model.ipynb index 3b8ab0dfeff..dba6c5eef59 100644 --- a/cookbook/program_aided_language_model.ipynb +++ b/cookbook/program_aided_language_model.ipynb @@ -17,8 +17,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_experimental.pal_chain import PALChain\n", - "from langchain.llms import OpenAI" + "from langchain.llms import OpenAI\n", + "from langchain_experimental.pal_chain import PALChain" ] }, { diff --git a/cookbook/qianfan_baidu_elasticesearch_RAG.ipynb b/cookbook/qianfan_baidu_elasticesearch_RAG.ipynb index 59af499212a..2efea3d1dd2 100644 --- a/cookbook/qianfan_baidu_elasticesearch_RAG.ipynb +++ b/cookbook/qianfan_baidu_elasticesearch_RAG.ipynb @@ -54,13 +54,13 @@ "metadata": {}, "outputs": [], "source": [ - "from baidubce.bce_client_configuration import BceClientConfiguration\n", "from baidubce.auth.bce_credentials import BceCredentials\n", + "from baidubce.bce_client_configuration import BceClientConfiguration\n", "from langchain.document_loaders.baiducloud_bos_directory import BaiduBOSDirectoryLoader\n", - "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n", - "from langchain.vectorstores import BESVectorStore\n", - "from langchain.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint" + "from langchain.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint\n", + "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "from langchain.vectorstores import BESVectorStore" ] }, { diff --git a/cookbook/rag_fusion.ipynb b/cookbook/rag_fusion.ipynb index c47b95add6c..aae35fec331 100644 --- a/cookbook/rag_fusion.ipynb +++ b/cookbook/rag_fusion.ipynb @@ -30,8 +30,8 @@ "outputs": [], "source": [ "import pinecone\n", - "from langchain.vectorstores import Pinecone\n", "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.vectorstores import Pinecone\n", "\n", "pinecone.init(api_key=\"...\", environment=\"...\")" ] diff --git a/cookbook/retrieval_in_sql.ipynb 
b/cookbook/retrieval_in_sql.ipynb index bc720fa9226..adf43b4cb05 100644 --- a/cookbook/retrieval_in_sql.ipynb +++ b/cookbook/retrieval_in_sql.ipynb @@ -28,8 +28,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = os.environ.get(\"OPENAI_API_KEY\") or getpass.getpass(\n", " \"OpenAI API Key:\"\n", @@ -42,8 +42,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.sql_database import SQLDatabase\n", "from langchain.chat_models import ChatOpenAI\n", + "from langchain.sql_database import SQLDatabase\n", "\n", "CONNECTION_STRING = \"postgresql+psycopg2://postgres:test@localhost:5432/vectordb\" # Replace with your own\n", "db = SQLDatabase.from_uri(CONNECTION_STRING)" @@ -323,6 +323,7 @@ "outputs": [], "source": [ "import re\n", + "\n", "from langchain.schema.runnable import RunnableLambda\n", "\n", "\n", diff --git a/cookbook/rewrite.ipynb b/cookbook/rewrite.ipynb index 3099ef28269..5eabc2c26a5 100644 --- a/cookbook/rewrite.ipynb +++ b/cookbook/rewrite.ipynb @@ -31,8 +31,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", "from langchain.chat_models import ChatOpenAI\n", + "from langchain.prompts import ChatPromptTemplate\n", "from langchain.schema.output_parser import StrOutputParser\n", "from langchain.schema.runnable import RunnablePassthrough\n", "from langchain.utilities import DuckDuckGoSearchAPIWrapper" diff --git a/cookbook/sales_agent_with_context.ipynb b/cookbook/sales_agent_with_context.ipynb index e4a0d1ceed3..f402ecb0b92 100644 --- a/cookbook/sales_agent_with_context.ipynb +++ b/cookbook/sales_agent_with_context.ipynb @@ -42,22 +42,22 @@ "OPENAI_API_KEY = \"sk-xx\"\n", "os.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY\n", "\n", - "from typing import Dict, List, Any, Union, Callable\n", - "from pydantic import BaseModel, Field\n", - "from langchain.chains import LLMChain\nfrom langchain.prompts import PromptTemplate\n", - "from langchain.llms import BaseLLM\n", - "from langchain.chains.base import Chain\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.agents import Tool, LLMSingleActionAgent, AgentExecutor\n", - "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.chains import RetrievalQA\n", - "from langchain.vectorstores import Chroma\n", - "from langchain.llms import OpenAI\n", - "from langchain.prompts.base import StringPromptTemplate\n", + "from typing import Any, Callable, Dict, List, Union\n", + "\n", + "from langchain.agents import AgentExecutor, LLMSingleActionAgent, Tool\n", "from langchain.agents.agent import AgentOutputParser\n", "from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS\n", - "from langchain.schema import AgentAction, AgentFinish" + "from langchain.chains import LLMChain, RetrievalQA\n", + "from langchain.chains.base import Chain\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.llms import BaseLLM, OpenAI\n", + "from langchain.prompts import PromptTemplate\n", + "from langchain.prompts.base import StringPromptTemplate\n", + "from langchain.schema import AgentAction, AgentFinish\n", + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.vectorstores import Chroma\n", + "from pydantic import BaseModel, Field" ] }, { diff --git 
a/cookbook/selecting_llms_based_on_context_length.ipynb b/cookbook/selecting_llms_based_on_context_length.ipynb index 7a58a6518f1..8556854acdd 100644 --- a/cookbook/selecting_llms_based_on_context_length.ipynb +++ b/cookbook/selecting_llms_based_on_context_length.ipynb @@ -17,10 +17,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", - "from langchain.schema.prompt import PromptValue\n", "from langchain.chat_models import ChatOpenAI\n", - "from langchain.schema.output_parser import StrOutputParser" + "from langchain.prompts import PromptTemplate\n", + "from langchain.schema.output_parser import StrOutputParser\n", + "from langchain.schema.prompt import PromptValue" ] }, { diff --git a/cookbook/smart_llm.ipynb b/cookbook/smart_llm.ipynb index 1ee00768965..b7146daaae5 100644 --- a/cookbook/smart_llm.ipynb +++ b/cookbook/smart_llm.ipynb @@ -51,8 +51,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", "from langchain.chat_models import ChatOpenAI\n", + "from langchain.prompts import PromptTemplate\n", "from langchain_experimental.smart_llm import SmartLLMChain" ] }, diff --git a/cookbook/stepback-qa.ipynb b/cookbook/stepback-qa.ipynb index 336527cf207..f94922ef041 100644 --- a/cookbook/stepback-qa.ipynb +++ b/cookbook/stepback-qa.ipynb @@ -131,7 +131,6 @@ "source": [ "from langchain.utilities import DuckDuckGoSearchAPIWrapper\n", "\n", - "\n", "search = DuckDuckGoSearchAPIWrapper(max_results=4)\n", "\n", "\n", diff --git a/cookbook/tree_of_thought.ipynb b/cookbook/tree_of_thought.ipynb index fe1cba0a91b..b27402a1ab2 100644 --- a/cookbook/tree_of_thought.ipynb +++ b/cookbook/tree_of_thought.ipynb @@ -84,10 +84,11 @@ "metadata": {}, "outputs": [], "source": [ + "import re\n", "from typing import Tuple\n", + "\n", "from langchain_experimental.tot.checker import ToTChecker\n", "from langchain_experimental.tot.thought import ThoughtValidity\n", - "import re\n", "\n", "\n", "class MyChecker(ToTChecker):\n", diff --git a/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb b/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb index 1f120fa3087..0e1998a93cd 100644 --- a/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb +++ b/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb @@ -34,8 +34,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.vectorstores import DeepLake\n", @@ -109,6 +109,7 @@ "outputs": [], "source": [ "import os\n", + "\n", "from langchain.document_loaders import TextLoader\n", "\n", "root_dir = \"./the-algorithm\"\n", @@ -3807,8 +3808,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains import ConversationalRetrievalChain\n", + "from langchain.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo-0613\") # switch to 'gpt-4'\n", "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)" diff --git a/cookbook/two_agent_debate_tools.ipynb b/cookbook/two_agent_debate_tools.ipynb index ae7f1c11116..808053733ed 100644 --- a/cookbook/two_agent_debate_tools.ipynb +++ b/cookbook/two_agent_debate_tools.ipynb @@ -22,7 +22,8 @@ "metadata": {}, "outputs": [], "source": [ - "from typing import List, Callable\n", + "from typing import Callable, List\n", + "\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.memory import 
ConversationBufferMemory\n", "from langchain.schema import (\n", @@ -45,9 +46,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import initialize_agent\n", - "from langchain.agents import AgentType\n", - "from langchain.agents import load_tools" + "from langchain.agents import AgentType, initialize_agent, load_tools" ] }, { diff --git a/cookbook/two_player_dnd.ipynb b/cookbook/two_player_dnd.ipynb index ab44b519859..627a683e1f8 100644 --- a/cookbook/two_player_dnd.ipynb +++ b/cookbook/two_player_dnd.ipynb @@ -22,7 +22,8 @@ "metadata": {}, "outputs": [], "source": [ - "from typing import List, Callable\n", + "from typing import Callable, List\n", + "\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import (\n", " HumanMessage,\n", diff --git a/cookbook/wikibase_agent.ipynb b/cookbook/wikibase_agent.ipynb index c2f3b196104..e5f2110aa79 100644 --- a/cookbook/wikibase_agent.ipynb +++ b/cookbook/wikibase_agent.ipynb @@ -192,10 +192,10 @@ " return current\n", "\n", "\n", - "import requests\n", - "\n", "from typing import Optional\n", "\n", + "import requests\n", + "\n", "\n", "def vocab_lookup(\n", " search: str,\n", @@ -319,9 +319,10 @@ "metadata": {}, "outputs": [], "source": [ - "import requests\n", - "from typing import List, Dict, Any\n", "import json\n", + "from typing import Any, Dict, List\n", + "\n", + "import requests\n", "\n", "\n", "def run_sparql(\n", @@ -389,17 +390,18 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import (\n", - " Tool,\n", - " AgentExecutor,\n", - " LLMSingleActionAgent,\n", - " AgentOutputParser,\n", - ")\n", - "from langchain.prompts import StringPromptTemplate\n", - "from langchain.chains import LLMChain\n", + "import re\n", "from typing import List, Union\n", - "from langchain.schema import AgentAction, AgentFinish\n", - "import re" + "\n", + "from langchain.agents import (\n", + " AgentExecutor,\n", + " AgentOutputParser,\n", + " LLMSingleActionAgent,\n", + " Tool,\n", + ")\n", + "from langchain.chains import LLMChain\n", + "from langchain.prompts import StringPromptTemplate\n", + "from langchain.schema import AgentAction, AgentFinish" ] }, { diff --git a/docs/docs/expression_language/cookbook/agent.ipynb b/docs/docs/expression_language/cookbook/agent.ipynb index 2163dd5bf28..452c4762f76 100644 --- a/docs/docs/expression_language/cookbook/agent.ipynb +++ b/docs/docs/expression_language/cookbook/agent.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import XMLAgent, tool, AgentExecutor\n", + "from langchain.agents import AgentExecutor, XMLAgent, tool\n", "from langchain.chat_models import ChatAnthropic" ] }, diff --git a/docs/docs/expression_language/cookbook/embedding_router.ipynb b/docs/docs/expression_language/cookbook/embedding_router.ipynb index c43f82399f0..98d33ff4f31 100644 --- a/docs/docs/expression_language/cookbook/embedding_router.ipynb +++ b/docs/docs/expression_language/cookbook/embedding_router.ipynb @@ -26,7 +26,6 @@ "from langchain.schema.runnable import RunnableLambda, RunnablePassthrough\n", "from langchain.utils.math import cosine_similarity\n", "\n", - "\n", "physics_template = \"\"\"You are a very smart physics professor. \\\n", "You are great at answering questions about physics in a concise and easy to understand manner. 
\\\n", "When you don't know the answer to a question you admit that you don't know.\n", diff --git a/docs/docs/expression_language/cookbook/memory.ipynb b/docs/docs/expression_language/cookbook/memory.ipynb index c6ab96f0f1f..726fec5be11 100644 --- a/docs/docs/expression_language/cookbook/memory.ipynb +++ b/docs/docs/expression_language/cookbook/memory.ipynb @@ -18,10 +18,11 @@ "outputs": [], "source": [ "from operator import itemgetter\n", + "\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain.schema.runnable import RunnablePassthrough, RunnableLambda\n", "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain.schema.runnable import RunnableLambda, RunnablePassthrough\n", "\n", "model = ChatOpenAI()\n", "prompt = ChatPromptTemplate.from_messages(\n", diff --git a/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb b/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb index eb284ad5c22..657e7ddac76 100644 --- a/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb +++ b/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb @@ -42,8 +42,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", "from langchain.chat_models import ChatOpenAI\n", + "from langchain.prompts import ChatPromptTemplate\n", "\n", "prompt = ChatPromptTemplate.from_template(\"tell me a joke about {foo}\")\n", "model = ChatOpenAI()\n", diff --git a/docs/docs/expression_language/cookbook/retrieval.ipynb b/docs/docs/expression_language/cookbook/retrieval.ipynb index 0fc7e066382..2689ffcd963 100644 --- a/docs/docs/expression_language/cookbook/retrieval.ipynb +++ b/docs/docs/expression_language/cookbook/retrieval.ipynb @@ -38,11 +38,11 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.prompts import ChatPromptTemplate\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.prompts import ChatPromptTemplate\n", "from langchain.schema.output_parser import StrOutputParser\n", - "from langchain.schema.runnable import RunnablePassthrough, RunnableLambda\n", + "from langchain.schema.runnable import RunnableLambda, RunnablePassthrough\n", "from langchain.vectorstores import FAISS" ] }, @@ -170,8 +170,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.schema.runnable import RunnableMap\n", - "from langchain.schema import format_document" + "from langchain.schema import format_document\n", + "from langchain.schema.runnable import RunnableMap" ] }, { @@ -231,7 +231,7 @@ "metadata": {}, "outputs": [], "source": [ - "from typing import Tuple, List\n", + "from typing import List, Tuple\n", "\n", "\n", "def _format_chat_history(chat_history: List[Tuple]) -> str:\n", @@ -335,6 +335,7 @@ "outputs": [], "source": [ "from operator import itemgetter\n", + "\n", "from langchain.memory import ConversationBufferMemory" ] }, diff --git a/docs/docs/expression_language/how_to/configure.ipynb b/docs/docs/expression_language/how_to/configure.ipynb index 4afe3b05290..c4e024b8c65 100644 --- a/docs/docs/expression_language/how_to/configure.ipynb +++ b/docs/docs/expression_language/how_to/configure.ipynb @@ -262,9 +262,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI, ChatAnthropic\n", - "from langchain.schema.runnable import ConfigurableField\n", - "from langchain.prompts import PromptTemplate" + 
"from langchain.chat_models import ChatAnthropic, ChatOpenAI\n", + "from langchain.prompts import PromptTemplate\n", + "from langchain.schema.runnable import ConfigurableField" ] }, { diff --git a/docs/docs/expression_language/how_to/fallbacks.ipynb b/docs/docs/expression_language/how_to/fallbacks.ipynb index 00f1b761de3..f788c0746dc 100644 --- a/docs/docs/expression_language/how_to/fallbacks.ipynb +++ b/docs/docs/expression_language/how_to/fallbacks.ipynb @@ -31,7 +31,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI, ChatAnthropic" + "from langchain.chat_models import ChatAnthropic, ChatOpenAI" ] }, { @@ -50,6 +50,7 @@ "outputs": [], "source": [ "from unittest.mock import patch\n", + "\n", "from openai.error import RateLimitError" ] }, diff --git a/docs/docs/expression_language/how_to/functions.ipynb b/docs/docs/expression_language/how_to/functions.ipynb index fea416cf30c..87a890e3fe8 100644 --- a/docs/docs/expression_language/how_to/functions.ipynb +++ b/docs/docs/expression_language/how_to/functions.ipynb @@ -19,11 +19,12 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.schema.runnable import RunnableLambda\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain.chat_models import ChatOpenAI\n", "from operator import itemgetter\n", "\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain.schema.runnable import RunnableLambda\n", + "\n", "\n", "def length_function(text):\n", " return len(text)\n", @@ -91,8 +92,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.schema.runnable import RunnableConfig\n", - "from langchain.schema.output_parser import StrOutputParser" + "from langchain.schema.output_parser import StrOutputParser\n", + "from langchain.schema.runnable import RunnableConfig" ] }, { diff --git a/docs/docs/expression_language/how_to/generators.ipynb b/docs/docs/expression_language/how_to/generators.ipynb index bf70f3a94e2..bf7319d2735 100644 --- a/docs/docs/expression_language/how_to/generators.ipynb +++ b/docs/docs/expression_language/how_to/generators.ipynb @@ -29,7 +29,6 @@ "from langchain.prompts.chat import ChatPromptTemplate\n", "from langchain.schema.output_parser import StrOutputParser\n", "\n", - "\n", "prompt = ChatPromptTemplate.from_template(\n", " \"Write a comma-separated list of 5 animals similar to: {animal}\"\n", ")\n", diff --git a/docs/docs/expression_language/how_to/map.ipynb b/docs/docs/expression_language/how_to/map.ipynb index 8c5aaa1cd65..d513b8464c5 100644 --- a/docs/docs/expression_language/how_to/map.ipynb +++ b/docs/docs/expression_language/how_to/map.ipynb @@ -33,7 +33,6 @@ "from langchain.prompts import ChatPromptTemplate\n", "from langchain.schema.runnable import RunnableParallel\n", "\n", - "\n", "model = ChatOpenAI()\n", "joke_chain = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\") | model\n", "poem_chain = (\n", diff --git a/docs/docs/expression_language/how_to/routing.ipynb b/docs/docs/expression_language/how_to/routing.ipynb index 63c321a3dcd..61f8598359b 100644 --- a/docs/docs/expression_language/how_to/routing.ipynb +++ b/docs/docs/expression_language/how_to/routing.ipynb @@ -40,8 +40,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", "from langchain.chat_models import ChatAnthropic\n", + "from langchain.prompts import PromptTemplate\n", "from langchain.schema.output_parser import StrOutputParser" ] }, 
diff --git a/docs/docs/expression_language/interface.ipynb b/docs/docs/expression_language/interface.ipynb index 8ffc08a45b8..2e1253fbe62 100644 --- a/docs/docs/expression_language/interface.ipynb +++ b/docs/docs/expression_language/interface.ipynb @@ -57,8 +57,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", "from langchain.chat_models import ChatOpenAI\n", + "from langchain.prompts import ChatPromptTemplate\n", "\n", "model = ChatOpenAI()\n", "prompt = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\")\n", diff --git a/docs/docs/guides/evaluation/comparison/custom.ipynb b/docs/docs/guides/evaluation/comparison/custom.ipynb index 60fb2a4dd97..c4ed70bcfb5 100644 --- a/docs/docs/guides/evaluation/comparison/custom.ipynb +++ b/docs/docs/guides/evaluation/comparison/custom.ipynb @@ -34,7 +34,8 @@ }, "outputs": [], "source": [ - "from typing import Optional, Any\n", + "from typing import Any, Optional\n", + "\n", "from langchain.evaluation import PairwiseStringEvaluator\n", "\n", "\n", @@ -116,10 +117,11 @@ }, "outputs": [], "source": [ - "from typing import Optional, Any\n", - "from langchain.evaluation import PairwiseStringEvaluator\n", - "from langchain.chat_models import ChatAnthropic\n", + "from typing import Any, Optional\n", + "\n", "from langchain.chains import LLMChain\n", + "from langchain.chat_models import ChatAnthropic\n", + "from langchain.evaluation import PairwiseStringEvaluator\n", "\n", "\n", "class CustomPreferenceEvaluator(PairwiseStringEvaluator):\n", diff --git a/docs/docs/guides/evaluation/examples/comparisons.ipynb b/docs/docs/guides/evaluation/examples/comparisons.ipynb index 27ac1eeb986..09d44b828c1 100644 --- a/docs/docs/guides/evaluation/examples/comparisons.ipynb +++ b/docs/docs/guides/evaluation/examples/comparisons.ipynb @@ -1,448 +1,447 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Comparing Chain Outputs\n", - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/examples/comparisons.ipynb)\n", - "\n", - "Suppose you have two different prompts (or LLMs). How do you know which will generate \"better\" results?\n", - "\n", - "One automated way to predict the preferred configuration is to use a `PairwiseStringEvaluator` like the `PairwiseStringEvalChain`[[1]](#cite_note-1). This chain prompts an LLM to select which output is preferred, given a specific input.\n", - "\n", - "For this evaluation, we will need 3 things:\n", - "1. An evaluator\n", - "2. A dataset of inputs\n", - "3. 2 (or more) LLMs, Chains, or Agents to compare\n", - "\n", - "Then we will aggregate the results to determine the preferred model.\n", - "\n", - "### Step 1. Create the Evaluator\n", - "\n", - "In this example, you will use gpt-4 to select which output is preferred." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "from langchain.evaluation import load_evaluator\n", - "\n", - "eval_chain = load_evaluator(\"pairwise_string\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Step 2. Select Dataset\n", - "\n", - "If you already have real usage data for your LLM, you can use a representative sample. More examples\n", - "provide more reliable results. We will use some example queries someone might have about how to use langchain here." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Found cached dataset parquet (/Users/wfh/.cache/huggingface/datasets/LangChainDatasets___parquet/LangChainDatasets--langchain-howto-queries-bbb748bbee7e77aa/0.0.0/14a00e99c0d15a23649d0db8944380ac81082d4b021f398733dd84f3a6c569a7)\n" - ] - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "a2358d37246640ce95e0f9940194590a", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/1 [00:00\"\n", - "llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\")\n", - "\n", - "# Initialize the SerpAPIWrapper for search functionality\n", - "# Replace in openai_api_key=\"\" with your actual SerpAPI key.\n", - "search = SerpAPIWrapper()\n", - "\n", - "# Define a list of tools offered by the agent\n", - "tools = [\n", - " Tool(\n", - " name=\"Search\",\n", - " func=search.run,\n", - " coroutine=search.arun,\n", - " description=\"Useful when you need to answer questions about current events. You should ask targeted questions.\",\n", - " ),\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "functions_agent = initialize_agent(\n", - " tools, llm, agent=AgentType.OPENAI_MULTI_FUNCTIONS, verbose=False\n", - ")\n", - "conversations_agent = initialize_agent(\n", - " tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=False\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Step 4. Generate Responses\n", - "\n", - "We will generate outputs for each of the models before evaluating them." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "87277cb39a1a4726bb7cc533a24e2ea4", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/20 [00:00= concurrency_level:\n", - " batch_results = await asyncio.gather(*batch, return_exceptions=True)\n", - " results.extend(list(zip(*[iter(batch_results)] * 2)))\n", - " batch = []\n", - "if batch:\n", - " batch_results = await asyncio.gather(*batch, return_exceptions=True)\n", - " results.extend(list(zip(*[iter(batch_results)] * 2)))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step 5. Evaluate Pairs\n", - "\n", - "Now it's time to evaluate the results. For each agent response, run the evaluation chain to select which output is preferred (or return a tie).\n", - "\n", - "Randomly select the input order to reduce the likelihood that one model will be preferred just because it is presented first." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import random\n", - "\n", - "\n", - "def predict_preferences(dataset, results) -> list:\n", - " preferences = []\n", - "\n", - " for example, (res_a, res_b) in zip(dataset, results):\n", - " input_ = example[\"inputs\"]\n", - " # Flip a coin to reduce persistent position bias\n", - " if random.random() < 0.5:\n", - " pred_a, pred_b = res_a, res_b\n", - " a, b = \"a\", \"b\"\n", - " else:\n", - " pred_a, pred_b = res_b, res_a\n", - " a, b = \"b\", \"a\"\n", - " eval_res = eval_chain.evaluate_string_pairs(\n", - " prediction=pred_a[\"output\"] if isinstance(pred_a, dict) else str(pred_a),\n", - " prediction_b=pred_b[\"output\"] if isinstance(pred_b, dict) else str(pred_b),\n", - " input=input_,\n", - " )\n", - " if eval_res[\"value\"] == \"A\":\n", - " preferences.append(a)\n", - " elif eval_res[\"value\"] == \"B\":\n", - " preferences.append(b)\n", - " else:\n", - " preferences.append(None) # No preference\n", - " return preferences" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "preferences = predict_preferences(dataset, results)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "tags": [] - }, - "source": [ - "**Print out the ratio of preferences.**" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "OpenAI Functions Agent: 95.00%\n", - "None: 5.00%\n" - ] - } - ], - "source": [ - "from collections import Counter\n", - "\n", - "name_map = {\n", - " \"a\": \"OpenAI Functions Agent\",\n", - " \"b\": \"Structured Chat Agent\",\n", - "}\n", - "counts = Counter(preferences)\n", - "pref_ratios = {k: v / len(preferences) for k, v in counts.items()}\n", - "for k, v in pref_ratios.items():\n", - " print(f\"{name_map.get(k)}: {v:.2%}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Estimate Confidence Intervals\n", - "\n", - "The results seem pretty clear, but if you want to have a better sense of how confident we are, that model \"A\" (the OpenAI Functions Agent) is the preferred model, we can calculate confidence intervals. \n", - "\n", - "Below, use the Wilson score to estimate the confidence interval." 
- ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "from math import sqrt\n", - "\n", - "\n", - "def wilson_score_interval(\n", - " preferences: list, which: str = \"a\", z: float = 1.96\n", - ") -> tuple:\n", - " \"\"\"Estimate the confidence interval using the Wilson score.\n", - "\n", - " See: https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Wilson_score_interval\n", - " for more details, including when to use it and when it should not be used.\n", - " \"\"\"\n", - " total_preferences = preferences.count(\"a\") + preferences.count(\"b\")\n", - " n_s = preferences.count(which)\n", - "\n", - " if total_preferences == 0:\n", - " return (0, 0)\n", - "\n", - " p_hat = n_s / total_preferences\n", - "\n", - " denominator = 1 + (z**2) / total_preferences\n", - " adjustment = (z / denominator) * sqrt(\n", - " p_hat * (1 - p_hat) / total_preferences\n", - " + (z**2) / (4 * total_preferences * total_preferences)\n", - " )\n", - " center = (p_hat + (z**2) / (2 * total_preferences)) / denominator\n", - " lower_bound = min(max(center - adjustment, 0.0), 1.0)\n", - " upper_bound = min(max(center + adjustment, 0.0), 1.0)\n", - "\n", - " return (lower_bound, upper_bound)" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The \"OpenAI Functions Agent\" would be preferred between 83.18% and 100.00% percent of the time (with 95% confidence).\n", - "The \"Structured Chat Agent\" would be preferred between 0.00% and 16.82% percent of the time (with 95% confidence).\n" - ] - } - ], - "source": [ - "for which_, name in name_map.items():\n", - " low, high = wilson_score_interval(preferences, which=which_)\n", - " print(\n", - " f'The \"{name}\" would be preferred between {low:.2%} and {high:.2%} percent of the time (with 95% confidence).'\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Print out the p-value.**" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The p-value is 0.00000. If the null hypothesis is true (i.e., if the selected eval chain actually has no preference between the models),\n", - "then there is a 0.00038% chance of observing the OpenAI Functions Agent be preferred at least 19\n", - "times out of 19 trials.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/var/folders/gf/6rnp_mbx5914kx7qmmh7xzmw0000gn/T/ipykernel_15978/384907688.py:6: DeprecationWarning: 'binom_test' is deprecated in favour of 'binomtest' from version 1.7.0 and will be removed in Scipy 1.12.0.\n", - " p_value = stats.binom_test(successes, n, p=0.5, alternative=\"two-sided\")\n" - ] - } - ], - "source": [ - "from scipy import stats\n", - "\n", - "preferred_model = max(pref_ratios, key=pref_ratios.get)\n", - "successes = preferences.count(preferred_model)\n", - "n = len(preferences) - preferences.count(None)\n", - "p_value = stats.binom_test(successes, n, p=0.5, alternative=\"two-sided\")\n", - "print(\n", - " f\"\"\"The p-value is {p_value:.5f}. 
If the null hypothesis is true (i.e., if the selected eval chain actually has no preference between the models),\n", - "then there is a {p_value:.5%} chance of observing the {name_map.get(preferred_model)} be preferred at least {successes}\n", - "times out of {n} trials.\"\"\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "_1. Note: Automated evals are still an open research topic and are best used alongside other evaluation approaches. \n", - "LLM preferences exhibit biases, including banal ones like the order of outputs.\n", - "In choosing preferences, \"ground truth\" may not be taken into account, which may lead to scores that aren't grounded in utility._" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.2" - } + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Comparing Chain Outputs\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/examples/comparisons.ipynb)\n", + "\n", + "Suppose you have two different prompts (or LLMs). How do you know which will generate \"better\" results?\n", + "\n", + "One automated way to predict the preferred configuration is to use a `PairwiseStringEvaluator` like the `PairwiseStringEvalChain`[[1]](#cite_note-1). This chain prompts an LLM to select which output is preferred, given a specific input.\n", + "\n", + "For this evaluation, we will need 3 things:\n", + "1. An evaluator\n", + "2. A dataset of inputs\n", + "3. 2 (or more) LLMs, Chains, or Agents to compare\n", + "\n", + "Then we will aggregate the results to determine the preferred model.\n", + "\n", + "### Step 1. Create the Evaluator\n", + "\n", + "In this example, you will use gpt-4 to select which output is preferred." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from langchain.evaluation import load_evaluator\n", + "\n", + "eval_chain = load_evaluator(\"pairwise_string\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 2. Select Dataset\n", + "\n", + "If you already have real usage data for your LLM, you can use a representative sample. More examples\n", + "provide more reliable results. We will use some example queries someone might have about how to use langchain here." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Found cached dataset parquet (/Users/wfh/.cache/huggingface/datasets/LangChainDatasets___parquet/LangChainDatasets--langchain-howto-queries-bbb748bbee7e77aa/0.0.0/14a00e99c0d15a23649d0db8944380ac81082d4b021f398733dd84f3a6c569a7)\n" + ] }, - "nbformat": 4, - "nbformat_minor": 4 + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "a2358d37246640ce95e0f9940194590a", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0/1 [00:00\"\n", + "llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\")\n", + "\n", + "# Initialize the SerpAPIWrapper for search functionality\n", + "# Replace in openai_api_key=\"\" with your actual SerpAPI key.\n", + "search = SerpAPIWrapper()\n", + "\n", + "# Define a list of tools offered by the agent\n", + "tools = [\n", + " Tool(\n", + " name=\"Search\",\n", + " func=search.run,\n", + " coroutine=search.arun,\n", + " description=\"Useful when you need to answer questions about current events. You should ask targeted questions.\",\n", + " ),\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "functions_agent = initialize_agent(\n", + " tools, llm, agent=AgentType.OPENAI_MULTI_FUNCTIONS, verbose=False\n", + ")\n", + "conversations_agent = initialize_agent(\n", + " tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=False\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Step 4. Generate Responses\n", + "\n", + "We will generate outputs for each of the models before evaluating them." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "87277cb39a1a4726bb7cc533a24e2ea4", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0/20 [00:00= concurrency_level:\n", + " batch_results = await asyncio.gather(*batch, return_exceptions=True)\n", + " results.extend(list(zip(*[iter(batch_results)] * 2)))\n", + " batch = []\n", + "if batch:\n", + " batch_results = await asyncio.gather(*batch, return_exceptions=True)\n", + " results.extend(list(zip(*[iter(batch_results)] * 2)))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 5. Evaluate Pairs\n", + "\n", + "Now it's time to evaluate the results. For each agent response, run the evaluation chain to select which output is preferred (or return a tie).\n", + "\n", + "Randomly select the input order to reduce the likelihood that one model will be preferred just because it is presented first." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import random\n", + "\n", + "\n", + "def predict_preferences(dataset, results) -> list:\n", + "    preferences = []\n", + "\n", + "    for example, (res_a, res_b) in zip(dataset, results):\n", + "        input_ = example[\"inputs\"]\n", + "        # Flip a coin to reduce persistent position bias\n", + "        if random.random() < 0.5:\n", + "            pred_a, pred_b = res_a, res_b\n", + "            a, b = \"a\", \"b\"\n", + "        else:\n", + "            pred_a, pred_b = res_b, res_a\n", + "            a, b = \"b\", \"a\"\n", + "        eval_res = eval_chain.evaluate_string_pairs(\n", + "            prediction=pred_a[\"output\"] if isinstance(pred_a, dict) else str(pred_a),\n", + "            prediction_b=pred_b[\"output\"] if isinstance(pred_b, dict) else str(pred_b),\n", + "            input=input_,\n", + "        )\n", + "        if eval_res[\"value\"] == \"A\":\n", + "            preferences.append(a)\n", + "        elif eval_res[\"value\"] == \"B\":\n", + "            preferences.append(b)\n", + "        else:\n", + "            preferences.append(None)  # No preference\n", + "    return preferences" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "preferences = predict_preferences(dataset, results)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "**Print out the ratio of preferences.**" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI Functions Agent: 95.00%\n", + "None: 5.00%\n" + ] + } + ], + "source": [ + "from collections import Counter\n", + "\n", + "name_map = {\n", + "    \"a\": \"OpenAI Functions Agent\",\n", + "    \"b\": \"Structured Chat Agent\",\n", + "}\n", + "counts = Counter(preferences)\n", + "pref_ratios = {k: v / len(preferences) for k, v in counts.items()}\n", + "for k, v in pref_ratios.items():\n", + "    print(f\"{name_map.get(k)}: {v:.2%}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Estimate Confidence Intervals\n", + "\n", + "The results seem pretty clear, but if you want a better sense of how confident we can be that model \"A\" (the OpenAI Functions Agent) is the preferred model, we can calculate confidence intervals. \n", + "\n", + "Below, we use the Wilson score to estimate the confidence interval."
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from math import sqrt\n", + "\n", + "\n", + "def wilson_score_interval(\n", + " preferences: list, which: str = \"a\", z: float = 1.96\n", + ") -> tuple:\n", + " \"\"\"Estimate the confidence interval using the Wilson score.\n", + "\n", + " See: https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval#Wilson_score_interval\n", + " for more details, including when to use it and when it should not be used.\n", + " \"\"\"\n", + " total_preferences = preferences.count(\"a\") + preferences.count(\"b\")\n", + " n_s = preferences.count(which)\n", + "\n", + " if total_preferences == 0:\n", + " return (0, 0)\n", + "\n", + " p_hat = n_s / total_preferences\n", + "\n", + " denominator = 1 + (z**2) / total_preferences\n", + " adjustment = (z / denominator) * sqrt(\n", + " p_hat * (1 - p_hat) / total_preferences\n", + " + (z**2) / (4 * total_preferences * total_preferences)\n", + " )\n", + " center = (p_hat + (z**2) / (2 * total_preferences)) / denominator\n", + " lower_bound = min(max(center - adjustment, 0.0), 1.0)\n", + " upper_bound = min(max(center + adjustment, 0.0), 1.0)\n", + "\n", + " return (lower_bound, upper_bound)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The \"OpenAI Functions Agent\" would be preferred between 83.18% and 100.00% percent of the time (with 95% confidence).\n", + "The \"Structured Chat Agent\" would be preferred between 0.00% and 16.82% percent of the time (with 95% confidence).\n" + ] + } + ], + "source": [ + "for which_, name in name_map.items():\n", + " low, high = wilson_score_interval(preferences, which=which_)\n", + " print(\n", + " f'The \"{name}\" would be preferred between {low:.2%} and {high:.2%} percent of the time (with 95% confidence).'\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Print out the p-value.**" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The p-value is 0.00000. If the null hypothesis is true (i.e., if the selected eval chain actually has no preference between the models),\n", + "then there is a 0.00038% chance of observing the OpenAI Functions Agent be preferred at least 19\n", + "times out of 19 trials.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/var/folders/gf/6rnp_mbx5914kx7qmmh7xzmw0000gn/T/ipykernel_15978/384907688.py:6: DeprecationWarning: 'binom_test' is deprecated in favour of 'binomtest' from version 1.7.0 and will be removed in Scipy 1.12.0.\n", + " p_value = stats.binom_test(successes, n, p=0.5, alternative=\"two-sided\")\n" + ] + } + ], + "source": [ + "from scipy import stats\n", + "\n", + "preferred_model = max(pref_ratios, key=pref_ratios.get)\n", + "successes = preferences.count(preferred_model)\n", + "n = len(preferences) - preferences.count(None)\n", + "p_value = stats.binom_test(successes, n, p=0.5, alternative=\"two-sided\")\n", + "print(\n", + " f\"\"\"The p-value is {p_value:.5f}. 
If the null hypothesis is true (i.e., if the selected eval chain actually has no preference between the models),\n", + "then there is a {p_value:.5%} chance of observing the {name_map.get(preferred_model)} be preferred at least {successes}\n", + "times out of {n} trials.\"\"\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "_1. Note: Automated evals are still an open research topic and are best used alongside other evaluation approaches. \n", + "LLM preferences exhibit biases, including banal ones like the order of outputs.\n", + "In choosing preferences, \"ground truth\" may not be taken into account, which may lead to scores that aren't grounded in utility._" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.2" + } + }, + "nbformat": 4, + "nbformat_minor": 4 } diff --git a/docs/docs/guides/evaluation/string/custom.ipynb b/docs/docs/guides/evaluation/string/custom.ipynb index 72b103bf962..544ff98df06 100644 --- a/docs/docs/guides/evaluation/string/custom.ipynb +++ b/docs/docs/guides/evaluation/string/custom.ipynb @@ -1,209 +1,209 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "4460f924-1738-4dc5-999f-c26383aba0a4", - "metadata": {}, - "source": [ - "# Custom String Evaluator\n", - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/string/custom.ipynb)\n", - "\n", - "You can make your own custom string evaluators by inheriting from the `StringEvaluator` class and implementing the `_evaluate_strings` (and `_aevaluate_strings` for async support) methods.\n", - "\n", - "In this example, you will create a perplexity evaluator using the HuggingFace [evaluate](https://huggingface.co/docs/evaluate/index) library.\n", - "[Perplexity](https://en.wikipedia.org/wiki/Perplexity) is a measure of how well the generated text would be predicted by the model used to compute the metric." 
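Before leaving the comparisons notebook: its final cell triggers the SciPy deprecation warning shown above, since `binom_test` is slated for removal in SciPy 1.12.0. A minimal sketch of the drop-in replacement, assuming SciPy >= 1.7.0 and reusing the notebook's `successes` and `n`:

```python
from scipy import stats

# binomtest supersedes the deprecated binom_test; it returns a result
# object with a .pvalue attribute instead of a bare float.
result = stats.binomtest(successes, n, p=0.5, alternative="two-sided")
p_value = result.pvalue
```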
- ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "90ec5942-4b14-47b1-baff-9dd2a9f17a4e", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# %pip install evaluate > /dev/null" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "54fdba68-0ae7-4102-a45b-dabab86c97ac", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "from typing import Any, Optional\n", - "\n", - "from langchain.evaluation import StringEvaluator\n", - "from evaluate import load\n", - "\n", - "\n", - "class PerplexityEvaluator(StringEvaluator):\n", - " \"\"\"Evaluate the perplexity of a predicted string.\"\"\"\n", - "\n", - " def __init__(self, model_id: str = \"gpt2\"):\n", - " self.model_id = model_id\n", - " self.metric_fn = load(\n", - " \"perplexity\", module_type=\"metric\", model_id=self.model_id, pad_token=0\n", - " )\n", - "\n", - " def _evaluate_strings(\n", - " self,\n", - " *,\n", - " prediction: str,\n", - " reference: Optional[str] = None,\n", - " input: Optional[str] = None,\n", - " **kwargs: Any,\n", - " ) -> dict:\n", - " results = self.metric_fn.compute(\n", - " predictions=[prediction], model_id=self.model_id\n", - " )\n", - " ppl = results[\"perplexities\"][0]\n", - " return {\"score\": ppl}" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "52767568-8075-4f77-93c9-80e1a7e5cba3", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "evaluator = PerplexityEvaluator()" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "697ee0c0-d1ae-4a55-a542-a0f8e602c28a", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Using pad_token, but it is not set yet.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. 
Disabling parallelism to avoid deadlocks...\n", - "To disable this warning, you can either:\n", - "\t- Avoid using `tokenizers` before the fork if possible\n", - "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n" - ] - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "467109d44654486e8b415288a319fc2c", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/1 [00:00 /dev/null" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "54fdba68-0ae7-4102-a45b-dabab86c97ac", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from typing import Any, Optional\n", + "\n", + "from evaluate import load\n", + "from langchain.evaluation import StringEvaluator\n", + "\n", + "\n", + "class PerplexityEvaluator(StringEvaluator):\n", + " \"\"\"Evaluate the perplexity of a predicted string.\"\"\"\n", + "\n", + " def __init__(self, model_id: str = \"gpt2\"):\n", + " self.model_id = model_id\n", + " self.metric_fn = load(\n", + " \"perplexity\", module_type=\"metric\", model_id=self.model_id, pad_token=0\n", + " )\n", + "\n", + " def _evaluate_strings(\n", + " self,\n", + " *,\n", + " prediction: str,\n", + " reference: Optional[str] = None,\n", + " input: Optional[str] = None,\n", + " **kwargs: Any,\n", + " ) -> dict:\n", + " results = self.metric_fn.compute(\n", + " predictions=[prediction], model_id=self.model_id\n", + " )\n", + " ppl = results[\"perplexities\"][0]\n", + " return {\"score\": ppl}" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "52767568-8075-4f77-93c9-80e1a7e5cba3", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "evaluator = PerplexityEvaluator()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "697ee0c0-d1ae-4a55-a542-a0f8e602c28a", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using pad_token, but it is not set yet.\n" + ] }, - "nbformat": 4, - "nbformat_minor": 5 + { + "name": "stdout", + "output_type": "stream", + "text": [ + "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n", + "To disable this warning, you can either:\n", + "\t- Avoid using `tokenizers` before the fork if possible\n", + "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "467109d44654486e8b415288a319fc2c", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0/1 [00:00 None:\n", - " llm = ChatOpenAI(model=\"gpt-4\", temperature=0.0)\n", - " template = \"\"\"Are any of the following steps unnecessary in answering {input}? 
Provide the verdict on a new line as a single \"Y\" for yes or \"N\" for no.\n", - "\n", - " DATA\n", - " ------\n", - " Steps: {trajectory}\n", - " ------\n", - "\n", - " Verdict:\"\"\"\n", - " self.chain = LLMChain.from_string(llm, template)\n", - "\n", - " def _evaluate_agent_trajectory(\n", - " self,\n", - " *,\n", - " prediction: str,\n", - " input: str,\n", - " agent_trajectory: Sequence[Tuple[AgentAction, str]],\n", - " reference: Optional[str] = None,\n", - " **kwargs: Any,\n", - " ) -> dict:\n", - " vals = [\n", - " f\"{i}: Action=[{action.tool}] returned observation = [{observation}]\"\n", - " for i, (action, observation) in enumerate(agent_trajectory)\n", - " ]\n", - " trajectory = \"\\n\".join(vals)\n", - " response = self.chain.run(dict(trajectory=trajectory, input=input), **kwargs)\n", - " decision = response.split(\"\\n\")[-1].strip()\n", - " score = 1 if decision == \"Y\" else 0\n", - " return {\"score\": score, \"value\": decision, \"reasoning\": response}" - ] - }, - { - "cell_type": "markdown", - "id": "297dea4b-fb28-4292-b6e0-1c769cfb9cbd", - "metadata": {}, - "source": [ - "The example above will return a score of 1 if the language model predicts that any of the actions were unnecessary, and it returns a score of 0 if all of them were predicted to be necessary. It returns the string 'decision' as the 'value', and includes the rest of the generated text as 'reasoning' to let you audit the decision.\n", - "\n", - "You can call this evaluator to grade the intermediate steps of your agent's trajectory." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "a3fbcc1d-249f-4e00-8841-b6872c73c486", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "{'score': 1, 'value': 'Y', 'reasoning': 'Y'}" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "evaluator = StepNecessityEvaluator()\n", - "\n", - "evaluator.evaluate_agent_trajectory(\n", - " prediction=\"The answer is pi\",\n", - " input=\"What is today?\",\n", - " agent_trajectory=[\n", - " (\n", - " AgentAction(tool=\"ask\", tool_input=\"What is today?\", log=\"\"),\n", - " \"tomorrow's yesterday\",\n", - " ),\n", - " (\n", - " AgentAction(tool=\"check_tv\", tool_input=\"Watch tv for half hour\", log=\"\"),\n", - " \"bzzz\",\n", - " ),\n", - " ],\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "77353528-723e-4075-939e-aebdb17c1e4f", - "metadata": {}, - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 + "cells": [ + { + "cell_type": "markdown", + "id": "db9d627f-b234-4f7f-ab96-639fae474122", + "metadata": {}, + "source": [ + "# Custom Trajectory Evaluator\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/guides/evaluation/trajectory/custom.ipynb)\n", + "\n", + "You can make your own custom trajectory evaluators by inheriting from the 
[AgentTrajectoryEvaluator](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.schema.AgentTrajectoryEvaluator.html#langchain.evaluation.schema.AgentTrajectoryEvaluator) class and overriding the `_evaluate_agent_trajectory` (and `_aevaluate_agent_trajectory` for async support) methods.\n", + "\n", + "\n", + "In this example, you will make a simple trajectory evaluator that uses an LLM to determine if any actions were unnecessary." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "ca84ab0c-e7e2-4c03-bd74-9cc4e6338eec", + "metadata": {}, + "outputs": [], + "source": [ + "from typing import Any, Optional, Sequence, Tuple\n", + "\n", + "from langchain.chains import LLMChain\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.evaluation import AgentTrajectoryEvaluator\n", + "from langchain.schema import AgentAction\n", + "\n", + "\n", + "class StepNecessityEvaluator(AgentTrajectoryEvaluator):\n", + "    \"\"\"Judge whether any of the steps in an agent's trajectory were unnecessary.\"\"\"\n", + "\n", + "    def __init__(self) -> None:\n", + "        llm = ChatOpenAI(model=\"gpt-4\", temperature=0.0)\n", + "        template = \"\"\"Are any of the following steps unnecessary in answering {input}? Provide the verdict on a new line as a single \"Y\" for yes or \"N\" for no.\n", + "\n", + "        DATA\n", + "        ------\n", + "        Steps: {trajectory}\n", + "        ------\n", + "\n", + "        Verdict:\"\"\"\n", + "        self.chain = LLMChain.from_string(llm, template)\n", + "\n", + "    def _evaluate_agent_trajectory(\n", + "        self,\n", + "        *,\n", + "        prediction: str,\n", + "        input: str,\n", + "        agent_trajectory: Sequence[Tuple[AgentAction, str]],\n", + "        reference: Optional[str] = None,\n", + "        **kwargs: Any,\n", + "    ) -> dict:\n", + "        vals = [\n", + "            f\"{i}: Action=[{action.tool}] returned observation = [{observation}]\"\n", + "            for i, (action, observation) in enumerate(agent_trajectory)\n", + "        ]\n", + "        trajectory = \"\\n\".join(vals)\n", + "        response = self.chain.run(dict(trajectory=trajectory, input=input), **kwargs)\n", + "        decision = response.split(\"\\n\")[-1].strip()\n", + "        score = 1 if decision == \"Y\" else 0\n", + "        return {\"score\": score, \"value\": decision, \"reasoning\": response}" + ] + }, + { + "cell_type": "markdown", + "id": "297dea4b-fb28-4292-b6e0-1c769cfb9cbd", + "metadata": {}, + "source": [ + "The example above returns a score of 1 if the language model predicts that any of the actions were unnecessary, and a score of 0 if all of them were predicted to be necessary. It returns the parsed 'Y' or 'N' decision as the 'value', and includes the rest of the generated text as 'reasoning' to let you audit the decision.\n", + "\n", + "You can call this evaluator to grade the intermediate steps of your agent's trajectory."
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "a3fbcc1d-249f-4e00-8841-b6872c73c486", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'score': 1, 'value': 'Y', 'reasoning': 'Y'}" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "evaluator = StepNecessityEvaluator()\n", + "\n", + "evaluator.evaluate_agent_trajectory(\n", + " prediction=\"The answer is pi\",\n", + " input=\"What is today?\",\n", + " agent_trajectory=[\n", + " (\n", + " AgentAction(tool=\"ask\", tool_input=\"What is today?\", log=\"\"),\n", + " \"tomorrow's yesterday\",\n", + " ),\n", + " (\n", + " AgentAction(tool=\"check_tv\", tool_input=\"Watch tv for half hour\", log=\"\"),\n", + " \"bzzz\",\n", + " ),\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "77353528-723e-4075-939e-aebdb17c1e4f", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 } \ No newline at end of file diff --git a/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb b/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb index 7519f8dfec7..c40bf8dd872 100644 --- a/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb +++ b/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb @@ -72,13 +72,12 @@ "outputs": [], "source": [ "import subprocess\n", + "from urllib.parse import urlparse\n", "\n", + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.tools import tool\n", - "from langchain.agents import AgentType, initialize_agent\n", - "\n", "from pydantic import HttpUrl\n", - "from urllib.parse import urlparse\n", "\n", "\n", "@tool\n", diff --git a/docs/docs/guides/fallbacks.ipynb b/docs/docs/guides/fallbacks.ipynb index 1e4f69271d0..d9366bbeddf 100644 --- a/docs/docs/guides/fallbacks.ipynb +++ b/docs/docs/guides/fallbacks.ipynb @@ -33,7 +33,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI, ChatAnthropic" + "from langchain.chat_models import ChatAnthropic, ChatOpenAI" ] }, { @@ -52,6 +52,7 @@ "outputs": [], "source": [ "from unittest.mock import patch\n", + "\n", "from openai.error import RateLimitError" ] }, diff --git a/docs/docs/guides/local_llms.ipynb b/docs/docs/guides/local_llms.ipynb index dc26204223e..60e271463d5 100644 --- a/docs/docs/guides/local_llms.ipynb +++ b/docs/docs/guides/local_llms.ipynb @@ -482,9 +482,9 @@ } ], "source": [ - "from langchain.prompts import PromptTemplate\n", "from langchain.chains import LLMChain\n", "from langchain.chains.prompt_selector import ConditionalPromptSelector\n", + "from langchain.prompts import PromptTemplate\n", "\n", "DEFAULT_LLAMA_SEARCH_PROMPT = PromptTemplate(\n", " input_variables=[\"question\"],\n", diff --git a/docs/docs/guides/model_laboratory.ipynb b/docs/docs/guides/model_laboratory.ipynb index 3eb8c46ddfb..c3c650feaa5 100644 --- a/docs/docs/guides/model_laboratory.ipynb +++ b/docs/docs/guides/model_laboratory.ipynb @@ -19,9 +19,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms 
import OpenAI, Cohere, HuggingFaceHub\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.model_laboratory import ModelLaboratory" + "from langchain.llms import Cohere, HuggingFaceHub, OpenAI\n", + "from langchain.model_laboratory import ModelLaboratory\n", + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb index b03f5d3ba46..3021ad2c586 100644 --- a/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb +++ b/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb @@ -127,8 +127,8 @@ } ], "source": [ - "from langchain.prompts.prompt import PromptTemplate\n", "from langchain.chat_models import ChatOpenAI\n", + "from langchain.prompts.prompt import PromptTemplate\n", "\n", "anonymizer = PresidioAnonymizer()\n", "\n", @@ -286,7 +286,6 @@ "# Define the regex pattern in a Presidio `Pattern` object:\n", "from presidio_analyzer import Pattern, PatternRecognizer\n", "\n", - "\n", "polish_phone_numbers_pattern = Pattern(\n", " name=\"polish_phone_numbers_pattern\",\n", " regex=\"(? None:\n", " file_id = url.split(\"/\")[-2]\n", @@ -106,8 +107,8 @@ "outputs": [], "source": [ "from langchain.chat_loaders.facebook_messenger import (\n", - " SingleFileFacebookMessengerChatLoader,\n", " FolderFacebookMessengerChatLoader,\n", + " SingleFileFacebookMessengerChatLoader,\n", ")" ] }, @@ -201,8 +202,8 @@ "outputs": [], "source": [ "from langchain.chat_loaders.utils import (\n", - " merge_chat_runs,\n", " map_ai_messages,\n", + " merge_chat_runs,\n", ")" ] }, @@ -387,8 +388,8 @@ ], "source": [ "import json\n", - "from io import BytesIO\n", "import time\n", + "from io import BytesIO\n", "\n", "import openai\n", "\n", diff --git a/docs/docs/integrations/chat_loaders/gmail.ipynb b/docs/docs/integrations/chat_loaders/gmail.ipynb index ef089f0260c..914eea401ea 100644 --- a/docs/docs/integrations/chat_loaders/gmail.ipynb +++ b/docs/docs/integrations/chat_loaders/gmail.ipynb @@ -36,6 +36,7 @@ "outputs": [], "source": [ "import os.path\n", + "\n", "from google.auth.transport.requests import Request\n", "from google.oauth2.credentials import Credentials\n", "from google_auth_oauthlib.flow import InstalledAppFlow\n", diff --git a/docs/docs/integrations/chat_loaders/imessage.ipynb b/docs/docs/integrations/chat_loaders/imessage.ipynb index a547b22d371..b07bdb77aa4 100644 --- a/docs/docs/integrations/chat_loaders/imessage.ipynb +++ b/docs/docs/integrations/chat_loaders/imessage.ipynb @@ -115,6 +115,7 @@ "outputs": [], "source": [ "from typing import List\n", + "\n", "from langchain.chat_loaders.base import ChatSession\n", "from langchain.chat_loaders.utils import (\n", " map_ai_messages,\n", @@ -231,8 +232,8 @@ ], "source": [ "import json\n", - "from io import BytesIO\n", "import time\n", + "from io import BytesIO\n", "\n", "import openai\n", "\n", diff --git a/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb b/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb index 8f63828f77b..43c974dbd12 100644 --- a/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb +++ b/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb @@ -179,11 +179,12 @@ } ], "source": [ - "import openai\n", - "import time\n", "import json\n", + "import time\n", "from io import BytesIO\n", "\n", + "import openai\n", + "\n", "my_file = BytesIO()\n", "for dialog in training_data:\n", " my_file.write((json.dumps({\"messages\": dialog}) + 
\"\\n\").encode(\"utf-8\"))\n", diff --git a/docs/docs/integrations/chat_loaders/langsmith_llm_runs.ipynb b/docs/docs/integrations/chat_loaders/langsmith_llm_runs.ipynb index 997f6c9a96c..4fd4f8d2cfe 100644 --- a/docs/docs/integrations/chat_loaders/langsmith_llm_runs.ipynb +++ b/docs/docs/integrations/chat_loaders/langsmith_llm_runs.ipynb @@ -70,9 +70,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.pydantic_v1 import BaseModel, Field\n", "from enum import Enum\n", "\n", + "from langchain.pydantic_v1 import BaseModel, Field\n", + "\n", "\n", "class Operation(Enum):\n", " add = \"+\"\n", @@ -132,10 +133,11 @@ } ], "source": [ - "from langchain.utils.openai_functions import convert_pydantic_to_openai_function\n", - "from langchain.pydantic_v1 import BaseModel\n", "from pprint import pprint\n", "\n", + "from langchain.pydantic_v1 import BaseModel\n", + "from langchain.utils.openai_functions import convert_pydantic_to_openai_function\n", + "\n", "openai_function_def = convert_pydantic_to_openai_function(Calculator)\n", "pprint(openai_function_def)" ] @@ -147,9 +149,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.output_parsers.openai_functions import PydanticOutputFunctionsParser\n", + "from langchain.prompts import ChatPromptTemplate\n", "\n", "prompt = ChatPromptTemplate.from_messages(\n", " [\n", @@ -320,11 +322,12 @@ } ], "source": [ - "import openai\n", - "import time\n", "import json\n", + "import time\n", "from io import BytesIO\n", "\n", + "import openai\n", + "\n", "my_file = BytesIO()\n", "for dialog in training_data:\n", " my_file.write((json.dumps({\"messages\": dialog}) + \"\\n\").encode(\"utf-8\"))\n", diff --git a/docs/docs/integrations/chat_loaders/slack.ipynb b/docs/docs/integrations/chat_loaders/slack.ipynb index 39035aea3a0..044eb4a3854 100644 --- a/docs/docs/integrations/chat_loaders/slack.ipynb +++ b/docs/docs/integrations/chat_loaders/slack.ipynb @@ -86,6 +86,7 @@ "outputs": [], "source": [ "from typing import List\n", + "\n", "from langchain.chat_loaders.base import ChatSession\n", "from langchain.chat_loaders.utils import (\n", " map_ai_messages,\n", diff --git a/docs/docs/integrations/chat_loaders/telegram.ipynb b/docs/docs/integrations/chat_loaders/telegram.ipynb index 960c42447f5..cc0269d51fc 100644 --- a/docs/docs/integrations/chat_loaders/telegram.ipynb +++ b/docs/docs/integrations/chat_loaders/telegram.ipynb @@ -135,6 +135,7 @@ "outputs": [], "source": [ "from typing import List\n", + "\n", "from langchain.chat_loaders.base import ChatSession\n", "from langchain.chat_loaders.utils import (\n", " map_ai_messages,\n", diff --git a/docs/docs/integrations/chat_loaders/twitter.ipynb b/docs/docs/integrations/chat_loaders/twitter.ipynb index e8fb47552a2..af80f142c20 100644 --- a/docs/docs/integrations/chat_loaders/twitter.ipynb +++ b/docs/docs/integrations/chat_loaders/twitter.ipynb @@ -20,8 +20,9 @@ "outputs": [], "source": [ "import json\n", - "from langchain.schema import AIMessage\n", - "from langchain.adapters.openai import convert_message_to_dict" + "\n", + "from langchain.adapters.openai import convert_message_to_dict\n", + "from langchain.schema import AIMessage" ] }, { diff --git a/docs/docs/integrations/chat_loaders/wechat.ipynb b/docs/docs/integrations/chat_loaders/wechat.ipynb index ed4e9582833..4d901386f52 100644 --- a/docs/docs/integrations/chat_loaders/wechat.ipynb +++ b/docs/docs/integrations/chat_loaders/wechat.ipynb 
@@ -78,8 +78,8 @@ "import re\n", "from typing import Iterator, List\n", "\n", - "from langchain.schema import HumanMessage, BaseMessage\n", "from langchain.chat_loaders import base as chat_loaders\n", + "from langchain.schema import BaseMessage, HumanMessage\n", "\n", "logger = logging.getLogger()\n", "\n", @@ -208,6 +208,7 @@ "outputs": [], "source": [ "from typing import List\n", + "\n", "from langchain.chat_loaders.base import ChatSession\n", "from langchain.chat_loaders.utils import (\n", " map_ai_messages,\n", diff --git a/docs/docs/integrations/chat_loaders/whatsapp.ipynb b/docs/docs/integrations/chat_loaders/whatsapp.ipynb index 2086bf4dddd..0cda70c1698 100644 --- a/docs/docs/integrations/chat_loaders/whatsapp.ipynb +++ b/docs/docs/integrations/chat_loaders/whatsapp.ipynb @@ -125,6 +125,7 @@ ], "source": [ "from typing import List\n", + "\n", "from langchain.chat_loaders.base import ChatSession\n", "from langchain.chat_loaders.utils import (\n", " map_ai_messages,\n", diff --git a/docs/docs/integrations/document_loaders/diffbot.ipynb b/docs/docs/integrations/document_loaders/diffbot.ipynb index dad475d01d1..6b174074362 100644 --- a/docs/docs/integrations/document_loaders/diffbot.ipynb +++ b/docs/docs/integrations/document_loaders/diffbot.ipynb @@ -44,6 +44,7 @@ "outputs": [], "source": [ "import os\n", + "\n", "from langchain.document_loaders import DiffbotLoader\n", "\n", "loader = DiffbotLoader(urls=urls, api_token=os.environ.get(\"DIFFBOT_API_TOKEN\"))" diff --git a/docs/docs/integrations/document_loaders/discord.ipynb b/docs/docs/integrations/document_loaders/discord.ipynb index d7d1d8cb7e5..260531db5a1 100644 --- a/docs/docs/integrations/document_loaders/discord.ipynb +++ b/docs/docs/integrations/document_loaders/discord.ipynb @@ -23,8 +23,9 @@ "metadata": {}, "outputs": [], "source": [ - "import pandas as pd\n", - "import os" + "import os\n", + "\n", + "import pandas as pd" ] }, { diff --git a/docs/docs/integrations/document_loaders/docugami.ipynb b/docs/docs/integrations/document_loaders/docugami.ipynb index 3213d5d112f..89063fbf29e 100644 --- a/docs/docs/integrations/document_loaders/docugami.ipynb +++ b/docs/docs/integrations/document_loaders/docugami.ipynb @@ -55,6 +55,7 @@ "outputs": [], "source": [ "import os\n", + "\n", "from langchain.document_loaders import DocugamiLoader" ] }, @@ -148,10 +149,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import Chroma\n", + "from langchain.chains import RetrievalQA\n", "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.llms import OpenAI\n", - "from langchain.chains import RetrievalQA\n", + "from langchain.vectorstores import Chroma\n", "\n", "# For this example, we already have a processed docset for a set of lease documents\n", "loader = DocugamiLoader(docset_id=\"wh2kned25uqm\")\n", diff --git a/docs/docs/integrations/document_loaders/embaas.ipynb b/docs/docs/integrations/document_loaders/embaas.ipynb index 0c8c19d71ac..26129822674 100644 --- a/docs/docs/integrations/document_loaders/embaas.ipynb +++ b/docs/docs/integrations/document_loaders/embaas.ipynb @@ -2,6 +2,9 @@ "cells": [ { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "# Embaas\n", "[embaas](https://embaas.io) is a fully managed NLP API service that offers features like embedding generation, document text extraction, document to embeddings and more. 
You can choose a [variety of pre-trained models](https://embaas.io/docs/models/embeddings).\n", @@ -11,62 +14,66 @@ "\n", "### Document Text Extraction API\n", "The document text extraction API allows you to extract the text from a given document. The API supports a variety of document formats, including PDF, mp3, mp4 and more. For a full list of supported formats, check out the API docs (link below)." - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# Set API key\n", "embaas_api_key = \"YOUR_API_KEY\"\n", "# or set environment variable\n", "os.environ[\"EMBAAS_API_KEY\"] = \"YOUR_API_KEY\"" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "#### Using a blob (bytes)" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ - "from langchain.document_loaders.embaas import EmbaasBlobLoader\n", - "from langchain.document_loaders.blob_loaders import Blob" - ], - "metadata": { - "collapsed": false - } + "from langchain.document_loaders.blob_loaders import Blob\n", + "from langchain.document_loaders.embaas import EmbaasBlobLoader" + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "blob_loader = EmbaasBlobLoader()\n", "blob = Blob.from_path(\"example.pdf\")\n", "documents = blob_loader.load(blob)" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-12T22:19:48.380467Z", + "start_time": "2023-06-12T22:19:48.366886Z" + }, + "collapsed": false + }, "outputs": [], "source": [ "# You can also directly create embeddings with your preferred embeddings model\n", @@ -75,72 +82,65 @@ "documents = blob_loader.load(blob)\n", "\n", "print(documents[0][\"metadata\"][\"embedding\"])" - ], - "metadata": { - "collapsed": false, - "ExecuteTime": { - "start_time": "2023-06-12T22:19:48.366886Z", - "end_time": "2023-06-12T22:19:48.380467Z" - } - } + ] }, { "cell_type": "markdown", - "source": [ - "#### Using a file" - ], "metadata": { "collapsed": false - } + }, + "source": [ + "#### Using a file" + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "from langchain.document_loaders.embaas import EmbaasLoader" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "file_loader = EmbaasLoader(file_path=\"example.pdf\")\n", "documents = file_loader.load()" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": 15, + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-12T22:24:31.894665Z", + "start_time": "2023-06-12T22:24:31.880857Z" + }, + "collapsed": false + }, "outputs": [], "source": [ "# Disable automatic text splitting\n", "file_loader = EmbaasLoader(file_path=\"example.mp3\", params={\"should_chunk\": False})\n", "documents = file_loader.load()" - ], - "metadata": { - "collapsed": false, - "ExecuteTime": { - "start_time": "2023-06-12T22:24:31.880857Z", - "end_time": "2023-06-12T22:24:31.894665Z" - } - } + ] }, { "cell_type": "markdown", - "source": [ - "For more detailed information about the 
embaas document text extraction API, please refer to [the official embaas API documentation](https://embaas.io/api-reference)." - ], "metadata": { "collapsed": false - } + }, + "source": [ + "For more detailed information about the embaas document text extraction API, please refer to [the official embaas API documentation](https://embaas.io/api-reference)." + ] } ], "metadata": { diff --git a/docs/docs/integrations/document_loaders/etherscan.ipynb b/docs/docs/integrations/document_loaders/etherscan.ipynb index 9c165ccb9c7..45e9fe3c052 100644 --- a/docs/docs/integrations/document_loaders/etherscan.ipynb +++ b/docs/docs/integrations/document_loaders/etherscan.ipynb @@ -72,8 +72,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import EtherscanLoader\n", - "import os" + "import os\n", + "\n", + "from langchain.document_loaders import EtherscanLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/figma.ipynb b/docs/docs/integrations/document_loaders/figma.ipynb index bf521ae4d01..e7739a37cb5 100644 --- a/docs/docs/integrations/document_loaders/figma.ipynb +++ b/docs/docs/integrations/document_loaders/figma.ipynb @@ -23,15 +23,13 @@ "source": [ "import os\n", "\n", - "\n", - "from langchain.document_loaders.figma import FigmaFileLoader\n", - "\n", "from langchain.chat_models import ChatOpenAI\n", + "from langchain.document_loaders.figma import FigmaFileLoader\n", "from langchain.indexes import VectorstoreIndexCreator\n", "from langchain.prompts.chat import (\n", " ChatPromptTemplate,\n", - " SystemMessagePromptTemplate,\n", " HumanMessagePromptTemplate,\n", + " SystemMessagePromptTemplate,\n", ")" ] }, diff --git a/docs/docs/integrations/document_loaders/geopandas.ipynb b/docs/docs/integrations/document_loaders/geopandas.ipynb index 997f5de9982..af147ebcb8a 100644 --- a/docs/docs/integrations/document_loaders/geopandas.ipynb +++ b/docs/docs/integrations/document_loaders/geopandas.ipynb @@ -36,8 +36,9 @@ "outputs": [], "source": [ "import ast\n", - "import pandas as pd\n", + "\n", "import geopandas as gpd\n", + "import pandas as pd\n", "from langchain.document_loaders import OpenCityDataLoader" ] }, diff --git a/docs/docs/integrations/document_loaders/google_drive.ipynb b/docs/docs/integrations/document_loaders/google_drive.ipynb index c132e38c445..2e3067e5843 100644 --- a/docs/docs/integrations/document_loaders/google_drive.ipynb +++ b/docs/docs/integrations/document_loaders/google_drive.ipynb @@ -118,8 +118,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import GoogleDriveLoader\n", - "from langchain.document_loaders import UnstructuredFileIOLoader" + "from langchain.document_loaders import GoogleDriveLoader, UnstructuredFileIOLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/grobid.ipynb b/docs/docs/integrations/document_loaders/grobid.ipynb index b584316ba9f..31b9378d7c9 100644 --- a/docs/docs/integrations/document_loaders/grobid.ipynb +++ b/docs/docs/integrations/document_loaders/grobid.ipynb @@ -36,8 +36,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders.parsers import GrobidParser\n", - "from langchain.document_loaders.generic import GenericLoader" + "from langchain.document_loaders.generic import GenericLoader\n", + "from langchain.document_loaders.parsers import GrobidParser" ] }, { diff --git a/docs/docs/integrations/document_loaders/hugging_face_dataset.ipynb b/docs/docs/integrations/document_loaders/hugging_face_dataset.ipynb index 
c66096e5357..4decf5a3fb4 100644 --- a/docs/docs/integrations/document_loaders/hugging_face_dataset.ipynb +++ b/docs/docs/integrations/document_loaders/hugging_face_dataset.ipynb @@ -99,8 +99,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.indexes import VectorstoreIndexCreator\n", - "from langchain.document_loaders.hugging_face_dataset import HuggingFaceDatasetLoader" + "from langchain.document_loaders.hugging_face_dataset import HuggingFaceDatasetLoader\n", + "from langchain.indexes import VectorstoreIndexCreator" ] }, { diff --git a/docs/docs/integrations/document_loaders/image_captions.ipynb b/docs/docs/integrations/document_loaders/image_captions.ipynb index b9d113c768a..fa11c91afa7 100644 --- a/docs/docs/integrations/document_loaders/image_captions.ipynb +++ b/docs/docs/integrations/document_loaders/image_captions.ipynb @@ -97,8 +97,8 @@ }, "outputs": [], "source": [ - "from PIL import Image\n", "import requests\n", + "from PIL import Image\n", "\n", "Image.open(requests.get(list_image_urls[0], stream=True).raw).convert(\"RGB\")" ] diff --git a/docs/docs/integrations/document_loaders/larksuite.ipynb b/docs/docs/integrations/document_loaders/larksuite.ipynb index 03042a91402..2877050aa52 100644 --- a/docs/docs/integrations/document_loaders/larksuite.ipynb +++ b/docs/docs/integrations/document_loaders/larksuite.ipynb @@ -29,6 +29,7 @@ "outputs": [], "source": [ "from getpass import getpass\n", + "\n", "from langchain.document_loaders.larksuite import LarkSuiteDocLoader\n", "\n", "DOMAIN = input(\"larksuite domain\")\n", diff --git a/docs/docs/integrations/document_loaders/pdf-amazonTextractPDFLoader.ipynb b/docs/docs/integrations/document_loaders/pdf-amazonTextractPDFLoader.ipynb index 5e05701d0d8..87c6cf2c394 100644 --- a/docs/docs/integrations/document_loaders/pdf-amazonTextractPDFLoader.ipynb +++ b/docs/docs/integrations/document_loaders/pdf-amazonTextractPDFLoader.ipynb @@ -402,8 +402,8 @@ } ], "source": [ - "from langchain.llms import OpenAI\n", "from langchain.chains.question_answering import load_qa_chain\n", + "from langchain.llms import OpenAI\n", "\n", "chain = load_qa_chain(llm=OpenAI(), chain_type=\"map_reduce\")\n", "query = [\"Who are the authors?\"]\n", diff --git a/docs/docs/integrations/document_loaders/psychic.ipynb b/docs/docs/integrations/document_loaders/psychic.ipynb index 720d90dc125..7778d1049d0 100644 --- a/docs/docs/integrations/document_loaders/psychic.ipynb +++ b/docs/docs/integrations/document_loaders/psychic.ipynb @@ -77,11 +77,11 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains import RetrievalQAWithSourcesChain\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.vectorstores import Chroma\n", - "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.llms import OpenAI\n", - "from langchain.chains import RetrievalQAWithSourcesChain" + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.vectorstores import Chroma" ] }, { diff --git a/docs/docs/integrations/document_loaders/rockset.ipynb b/docs/docs/integrations/document_loaders/rockset.ipynb index 3bdad1cdd68..a9256f13791 100644 --- a/docs/docs/integrations/document_loaders/rockset.ipynb +++ b/docs/docs/integrations/document_loaders/rockset.ipynb @@ -51,7 +51,7 @@ "outputs": [], "source": [ "from langchain.document_loaders import RocksetLoader\n", - "from rockset import RocksetClient, Regions, models\n", + "from rockset import Regions, RocksetClient, models\n", "\n", "loader = 
RocksetLoader(\n", " RocksetClient(Regions.usw2a1, \"\"),\n", @@ -144,7 +144,7 @@ "outputs": [], "source": [ "from langchain.document_loaders import RocksetLoader\n", - "from rockset import RocksetClient, Regions, models\n", + "from rockset import Regions, RocksetClient, models\n", "\n", "loader = RocksetLoader(\n", " RocksetClient(Regions.usw2a1, \"\"),\n", diff --git a/docs/docs/integrations/document_loaders/snowflake.ipynb b/docs/docs/integrations/document_loaders/snowflake.ipynb index 77517341871..c84086eb81e 100644 --- a/docs/docs/integrations/document_loaders/snowflake.ipynb +++ b/docs/docs/integrations/document_loaders/snowflake.ipynb @@ -55,8 +55,8 @@ "metadata": {}, "outputs": [], "source": [ - "from snowflakeLoader import SnowflakeLoader\n", "import settings as s\n", + "from snowflakeLoader import SnowflakeLoader\n", "\n", "QUERY = \"select text, survey_id as source from CLOUD_DATA_SOLUTIONS.HAPPY_OR_NOT.OPEN_FEEDBACK limit 10\"\n", "snowflake_loader = SnowflakeLoader(\n", diff --git a/docs/docs/integrations/document_loaders/source_code.ipynb b/docs/docs/integrations/document_loaders/source_code.ipynb index 19281a59b5c..6a4908f169a 100644 --- a/docs/docs/integrations/document_loaders/source_code.ipynb +++ b/docs/docs/integrations/document_loaders/source_code.ipynb @@ -33,9 +33,10 @@ "\n", "warnings.filterwarnings(\"ignore\")\n", "from pprint import pprint\n", - "from langchain.text_splitter import Language\n", + "\n", "from langchain.document_loaders.generic import GenericLoader\n", - "from langchain.document_loaders.parsers import LanguageParser" + "from langchain.document_loaders.parsers import LanguageParser\n", + "from langchain.text_splitter import Language" ] }, { @@ -295,8 +296,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import (\n", - " RecursiveCharacterTextSplitter,\n", " Language,\n", + " RecursiveCharacterTextSplitter,\n", ")" ] }, diff --git a/docs/docs/integrations/document_loaders/telegram.ipynb b/docs/docs/integrations/document_loaders/telegram.ipynb index c69519a7419..5317a6e5233 100644 --- a/docs/docs/integrations/document_loaders/telegram.ipynb +++ b/docs/docs/integrations/document_loaders/telegram.ipynb @@ -19,7 +19,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import TelegramChatFileLoader, TelegramChatApiLoader" + "from langchain.document_loaders import TelegramChatApiLoader, TelegramChatFileLoader" ] }, { diff --git a/docs/docs/integrations/document_loaders/tensorflow_datasets.ipynb b/docs/docs/integrations/document_loaders/tensorflow_datasets.ipynb index 5f178c6ceb5..9cf6d393456 100644 --- a/docs/docs/integrations/document_loaders/tensorflow_datasets.ipynb +++ b/docs/docs/integrations/document_loaders/tensorflow_datasets.ipynb @@ -195,8 +195,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.schema import Document\n", "from langchain.document_loaders import TensorflowDatasetLoader\n", + "from langchain.schema import Document\n", "\n", "loader = TensorflowDatasetLoader(\n", " dataset_name=\"mlqa/en\",\n", diff --git a/docs/docs/integrations/document_loaders/youtube_audio.ipynb b/docs/docs/integrations/document_loaders/youtube_audio.ipynb index 47ade5ca524..ae1f89009fd 100644 --- a/docs/docs/integrations/document_loaders/youtube_audio.ipynb +++ b/docs/docs/integrations/document_loaders/youtube_audio.ipynb @@ -24,12 +24,12 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader\n", "from 
langchain.document_loaders.generic import GenericLoader\n", "from langchain.document_loaders.parsers import (\n", " OpenAIWhisperParser,\n", " OpenAIWhisperParserLocal,\n", - ")\n", - "from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader" + ")" ] }, { @@ -166,10 +166,10 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQA\n", - "from langchain.vectorstores import FAISS\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.text_splitter import RecursiveCharacterTextSplitter" + "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "from langchain.vectorstores import FAISS" ] }, { diff --git a/docs/docs/integrations/document_loaders/youtube_transcript.ipynb b/docs/docs/integrations/document_loaders/youtube_transcript.ipynb index 8b6f6ee96a5..0e106f0110f 100644 --- a/docs/docs/integrations/document_loaders/youtube_transcript.ipynb +++ b/docs/docs/integrations/document_loaders/youtube_transcript.ipynb @@ -147,11 +147,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import GoogleApiClient, GoogleApiYoutubeLoader\n", - "\n", "# Init the GoogleApiClient\n", "from pathlib import Path\n", "\n", + "from langchain.document_loaders import GoogleApiClient, GoogleApiYoutubeLoader\n", "\n", "google_api_client = GoogleApiClient(credentials_path=Path(\"your_path_creds.json\"))\n", "\n", diff --git a/docs/docs/integrations/document_transformers/doctran_extract_properties.ipynb b/docs/docs/integrations/document_transformers/doctran_extract_properties.ipynb index 17df5db4b65..597516e0657 100644 --- a/docs/docs/integrations/document_transformers/doctran_extract_properties.ipynb +++ b/docs/docs/integrations/document_transformers/doctran_extract_properties.ipynb @@ -30,8 +30,9 @@ "outputs": [], "source": [ "import json\n", - "from langchain.schema import Document\n", - "from langchain.document_transformers import DoctranPropertyExtractor" + "\n", + "from langchain.document_transformers import DoctranPropertyExtractor\n", + "from langchain.schema import Document" ] }, { diff --git a/docs/docs/integrations/document_transformers/doctran_interrogate_document.ipynb b/docs/docs/integrations/document_transformers/doctran_interrogate_document.ipynb index 02b64d09db9..699b689d405 100644 --- a/docs/docs/integrations/document_transformers/doctran_interrogate_document.ipynb +++ b/docs/docs/integrations/document_transformers/doctran_interrogate_document.ipynb @@ -29,8 +29,9 @@ "outputs": [], "source": [ "import json\n", - "from langchain.schema import Document\n", - "from langchain.document_transformers import DoctranQATransformer" + "\n", + "from langchain.document_transformers import DoctranQATransformer\n", + "from langchain.schema import Document" ] }, { diff --git a/docs/docs/integrations/document_transformers/doctran_translate_document.ipynb b/docs/docs/integrations/document_transformers/doctran_translate_document.ipynb index a3add9e9774..803f77e388d 100644 --- a/docs/docs/integrations/document_transformers/doctran_translate_document.ipynb +++ b/docs/docs/integrations/document_transformers/doctran_translate_document.ipynb @@ -28,8 +28,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.schema import Document\n", - "from langchain.document_transformers import DoctranTextTranslator" + "from langchain.document_transformers import DoctranTextTranslator\n", + "from langchain.schema import Document" ] }, { diff --git 
a/docs/docs/integrations/document_transformers/google_translate.ipynb b/docs/docs/integrations/document_transformers/google_translate.ipynb index 9f70d03efba..cc2b69e5e74 100644 --- a/docs/docs/integrations/document_transformers/google_translate.ipynb +++ b/docs/docs/integrations/document_transformers/google_translate.ipynb @@ -31,8 +31,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.schema import Document\n", - "from langchain.document_transformers import GoogleTranslateTransformer" + "from langchain.document_transformers import GoogleTranslateTransformer\n", + "from langchain.schema import Document" ] }, { diff --git a/docs/docs/integrations/document_transformers/openai_metadata_tagger.ipynb b/docs/docs/integrations/document_transformers/openai_metadata_tagger.ipynb index 595028ab6cc..b304fbb9cb0 100644 --- a/docs/docs/integrations/document_transformers/openai_metadata_tagger.ipynb +++ b/docs/docs/integrations/document_transformers/openai_metadata_tagger.ipynb @@ -21,9 +21,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.schema import Document\n", "from langchain.chat_models import ChatOpenAI\n", - "from langchain.document_transformers.openai_functions import create_metadata_tagger" + "from langchain.document_transformers.openai_functions import create_metadata_tagger\n", + "from langchain.schema import Document" ] }, { diff --git a/docs/docs/integrations/llms/ai21.ipynb b/docs/docs/integrations/llms/ai21.ipynb index 8cbbccfaa90..b54df8529aa 100644 --- a/docs/docs/integrations/llms/ai21.ipynb +++ b/docs/docs/integrations/llms/ai21.ipynb @@ -58,8 +58,9 @@ }, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import AI21\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/aleph_alpha.ipynb b/docs/docs/integrations/llms/aleph_alpha.ipynb index ac3c36dc814..4e4c23bd354 100644 --- a/docs/docs/integrations/llms/aleph_alpha.ipynb +++ b/docs/docs/integrations/llms/aleph_alpha.ipynb @@ -58,8 +58,9 @@ }, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import AlephAlpha\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/amazon_api_gateway.ipynb b/docs/docs/integrations/llms/amazon_api_gateway.ipynb index 4eae26fa179..e9d801ed9d1 100644 --- a/docs/docs/integrations/llms/amazon_api_gateway.ipynb +++ b/docs/docs/integrations/llms/amazon_api_gateway.ipynb @@ -120,10 +120,7 @@ } ], "source": [ - "from langchain.agents import load_tools\n", - "from langchain.agents import initialize_agent\n", - "from langchain.agents import AgentType\n", - "\n", + "from langchain.agents import AgentType, initialize_agent, load_tools\n", "\n", "parameters = {\n", " \"max_new_tokens\": 50,\n", diff --git a/docs/docs/integrations/llms/anyscale.ipynb b/docs/docs/integrations/llms/anyscale.ipynb index 5f58e67ea82..a610cb548fc 100644 --- a/docs/docs/integrations/llms/anyscale.ipynb +++ b/docs/docs/integrations/llms/anyscale.ipynb @@ -36,9 +36,9 @@ }, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import Anyscale\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git 
a/docs/docs/integrations/llms/azure_ml.ipynb b/docs/docs/integrations/llms/azure_ml.ipynb index 32750700322..f6e5c56f64e 100644 --- a/docs/docs/integrations/llms/azure_ml.ipynb +++ b/docs/docs/integrations/llms/azure_ml.ipynb @@ -72,11 +72,11 @@ } ], "source": [ + "import json\n", + "import os\n", "from typing import Dict\n", "\n", "from langchain.llms.azureml_endpoint import AzureMLOnlineEndpoint, ContentFormatterBase\n", - "import os\n", - "import json\n", "\n", "\n", "class CustomFormatter(ContentFormatterBase):\n", @@ -154,9 +154,9 @@ } ], "source": [ - "from langchain.prompts import PromptTemplate\n", - "from langchain.llms.azureml_endpoint import DollyContentFormatter\n", "from langchain.chains import LLMChain\n", + "from langchain.llms.azureml_endpoint import DollyContentFormatter\n", + "from langchain.prompts import PromptTemplate\n", "\n", "formatter_template = \"Write a {word_count} word essay about {topic}.\"\n", "\n", diff --git a/docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb b/docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb index f6854c4d7b8..5b3e823fb6b 100644 --- a/docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb +++ b/docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb @@ -72,9 +72,10 @@ ], "source": [ "\"\"\"For basic init and call\"\"\"\n", - "from langchain.llms import QianfanLLMEndpoint\n", "import os\n", "\n", + "from langchain.llms import QianfanLLMEndpoint\n", + "\n", "os.environ[\"QIANFAN_AK\"] = \"your_ak\"\n", "os.environ[\"QIANFAN_SK\"] = \"your_sk\"\n", "\n", diff --git a/docs/docs/integrations/llms/banana.ipynb b/docs/docs/integrations/llms/banana.ipynb index 130b30d625e..ab95acdd2e0 100644 --- a/docs/docs/integrations/llms/banana.ipynb +++ b/docs/docs/integrations/llms/banana.ipynb @@ -51,9 +51,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import Banana\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/baseten.ipynb b/docs/docs/integrations/llms/baseten.ipynb index 7786578a64f..ccded8c1c76 100644 --- a/docs/docs/integrations/llms/baseten.ipynb +++ b/docs/docs/integrations/llms/baseten.ipynb @@ -106,8 +106,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chains import SimpleSequentialChain\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.chains import LLMChain, SimpleSequentialChain\n", + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/bedrock.ipynb b/docs/docs/integrations/llms/bedrock.ipynb index 6ced4daab42..5a721c187e3 100644 --- a/docs/docs/integrations/llms/bedrock.ipynb +++ b/docs/docs/integrations/llms/bedrock.ipynb @@ -75,9 +75,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import Bedrock\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", - "\n", + "from langchain.llms import Bedrock\n", "\n", "llm = Bedrock(\n", " credentials_profile_name=\"bedrock-admin\",\n", diff --git a/docs/docs/integrations/llms/bittensor.ipynb b/docs/docs/integrations/llms/bittensor.ipynb index 2d2603eb1dd..509768676f9 100644 --- a/docs/docs/integrations/llms/bittensor.ipynb +++ b/docs/docs/integrations/llms/bittensor.ipynb @@ -30,11 +30,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import NIBittensorLLM\n", "import json\n", 
"from pprint import pprint\n", "\n", "from langchain.globals import set_debug\n", + "from langchain.llms import NIBittensorLLM\n", "\n", "set_debug(True)\n", "\n", @@ -80,11 +80,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import NIBittensorLLM\n", - "\n", "from langchain.globals import set_debug\n", + "from langchain.llms import NIBittensorLLM\n", + "from langchain.prompts import PromptTemplate\n", "\n", "set_debug(True)\n", "\n", @@ -120,13 +119,13 @@ "outputs": [], "source": [ "from langchain.agents import (\n", - " ZeroShotAgent,\n", " AgentExecutor,\n", + " ZeroShotAgent,\n", ")\n", - "from langchain.memory import ConversationBufferMemory\n", "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", "from langchain.llms import NIBittensorLLM\n", + "from langchain.memory import ConversationBufferMemory\n", + "from langchain.prompts import PromptTemplate\n", "\n", "memory = ConversationBufferMemory(memory_key=\"chat_history\")\n", "\n", diff --git a/docs/docs/integrations/llms/cerebriumai.ipynb b/docs/docs/integrations/llms/cerebriumai.ipynb index f9ea729c093..051a82cdcc1 100644 --- a/docs/docs/integrations/llms/cerebriumai.ipynb +++ b/docs/docs/integrations/llms/cerebriumai.ipynb @@ -43,8 +43,10 @@ "outputs": [], "source": [ "import os\n", + "\n", + "from langchain.chains import LLMChain\n", "from langchain.llms import CerebriumAI\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/chatglm.ipynb b/docs/docs/integrations/llms/chatglm.ipynb index d128fa0d6cf..82867b0f091 100644 --- a/docs/docs/integrations/llms/chatglm.ipynb +++ b/docs/docs/integrations/llms/chatglm.ipynb @@ -21,9 +21,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import ChatGLM\n", "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain\n", "\n", "# import os" ] diff --git a/docs/docs/integrations/llms/clarifai.ipynb b/docs/docs/integrations/llms/clarifai.ipynb index 7d58f35e166..02569a56017 100644 --- a/docs/docs/integrations/llms/clarifai.ipynb +++ b/docs/docs/integrations/llms/clarifai.ipynb @@ -81,8 +81,9 @@ "outputs": [], "source": [ "# Import the required modules\n", + "from langchain.chains import LLMChain\n", "from langchain.llms import Clarifai\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/cohere.ipynb b/docs/docs/integrations/llms/cohere.ipynb index 9795875ae6e..7f0786d33e2 100644 --- a/docs/docs/integrations/llms/cohere.ipynb +++ b/docs/docs/integrations/llms/cohere.ipynb @@ -58,8 +58,9 @@ }, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import Cohere\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/ctransformers.ipynb b/docs/docs/integrations/llms/ctransformers.ipynb index 0539ca6c510..424be57c9dc 100644 --- a/docs/docs/integrations/llms/ctransformers.ipynb +++ b/docs/docs/integrations/llms/ctransformers.ipynb @@ -102,7 +102,8 @@ "metadata": {}, "outputs": [], "source": [ - "from 
langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n", + "from langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate\n", "\n", "template = \"\"\"Question: {question}\n", "\n", diff --git a/docs/docs/integrations/llms/ctranslate2.ipynb b/docs/docs/integrations/llms/ctranslate2.ipynb index 62dc7194d93..ebf20e4bd74 100644 --- a/docs/docs/integrations/llms/ctranslate2.ipynb +++ b/docs/docs/integrations/llms/ctranslate2.ipynb @@ -195,7 +195,8 @@ } ], "source": [ - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n", + "from langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate\n", "\n", "template = \"\"\"{question}\n", "\n", diff --git a/docs/docs/integrations/llms/edenai.ipynb b/docs/docs/integrations/llms/edenai.ipynb index 46f18191739..41f235aad7b 100644 --- a/docs/docs/integrations/llms/edenai.ipynb +++ b/docs/docs/integrations/llms/edenai.ipynb @@ -97,8 +97,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", "from langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate\n", "\n", "llm = EdenAI(\n", " feature=\"text\",\n", @@ -131,6 +131,7 @@ "source": [ "import base64\n", "from io import BytesIO\n", + "\n", "from PIL import Image\n", "\n", "\n", @@ -188,8 +189,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import EdenAI\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", + "from langchain.llms import EdenAI\n", "\n", "llm = EdenAI(\n", " callbacks=[StreamingStdOutCallbackHandler()],\n", @@ -218,9 +219,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chains import SimpleSequentialChain\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain" + "from langchain.chains import LLMChain, SimpleSequentialChain\n", + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/fireworks.ipynb b/docs/docs/integrations/llms/fireworks.ipynb index e0e955f0ed1..5144e7a52f6 100644 --- a/docs/docs/integrations/llms/fireworks.ipynb +++ b/docs/docs/integrations/llms/fireworks.ipynb @@ -19,9 +19,10 @@ "metadata": {}, "outputs": [], "source": [ + "import os\n", + "\n", "from langchain.llms.fireworks import Fireworks\n", - "from langchain.prompts import PromptTemplate\n", - "import os" + "from langchain.prompts import PromptTemplate" ] }, { @@ -43,8 +44,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "if \"FIREWORKS_API_KEY\" not in os.environ:\n", " os.environ[\"FIREWORKS_API_KEY\"] = getpass.getpass(\"Fireworks API Key:\")\n", @@ -204,8 +205,8 @@ } ], "source": [ - "from langchain.prompts import PromptTemplate\n", "from langchain.llms.fireworks import Fireworks\n", + "from langchain.prompts import PromptTemplate\n", "\n", "llm = Fireworks(\n", " model=\"accounts/fireworks/models/llama-v2-13b\",\n", diff --git a/docs/docs/integrations/llms/forefrontai.ipynb b/docs/docs/integrations/llms/forefrontai.ipynb index 7dac40fd7c3..ea3b9fb32e6 100644 --- a/docs/docs/integrations/llms/forefrontai.ipynb +++ b/docs/docs/integrations/llms/forefrontai.ipynb @@ -26,9 +26,10 @@ "outputs": [], "source": [ "import os\n", + "\n", + "from langchain.chains import LLMChain\n", "from langchain.llms import ForefrontAI\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain" + "from 
langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/gigachat.ipynb b/docs/docs/integrations/llms/gigachat.ipynb index dad7dca95c6..617cb46d579 100644 --- a/docs/docs/integrations/llms/gigachat.ipynb +++ b/docs/docs/integrations/llms/gigachat.ipynb @@ -2,14 +2,14 @@ "cells": [ { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "# GigaChat\n", "This notebook shows how to use LangChain with [GigaChat](https://developers.sber.ru/portal/products/gigachat).\n", "To use it, you need to install the ```gigachat``` Python package." - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", @@ -24,44 +24,47 @@ }, { "cell_type": "markdown", + "metadata": { + "collapsed": false + }, "source": [ "To get GigaChat credentials you need to [create an account](https://developers.sber.ru/studio/login) and [get access to the API](https://developers.sber.ru/docs/ru/gigachat/api/integration)\n", "## Example" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": 1, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "import os\n", "from getpass import getpass\n", "\n", "os.environ[\"GIGACHAT_CREDENTIALS\"] = getpass()" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": 2, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "from langchain.llms import GigaChat\n", "\n", "llm = GigaChat(verify_ssl_certs=False)" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": 3, + "metadata": { + "collapsed": false + }, "outputs": [ { "name": "stdout", @@ -72,8 +75,8 @@ } ], "source": [ - "from langchain.prompts import PromptTemplate\n", "from langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate\n", "\n", "template = \"What is the capital of {country}?\"\n", "\n", @@ -83,10 +86,7 @@ "\n", "generated = llm_chain.run(country=\"Russia\")\n", "print(generated)" - ], - "metadata": { - "collapsed": false - } + ] } ], "metadata": { diff --git a/docs/docs/integrations/llms/gooseai.ipynb b/docs/docs/integrations/llms/gooseai.ipynb index 52a9ddcad56..30b2221373a 100644 --- a/docs/docs/integrations/llms/gooseai.ipynb +++ b/docs/docs/integrations/llms/gooseai.ipynb @@ -42,9 +42,10 @@ "outputs": [], "source": [ "import os\n", + "\n", + "from langchain.chains import LLMChain\n", "from langchain.llms import GooseAI\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/gpt4all.ipynb b/docs/docs/integrations/llms/gpt4all.ipynb index 604f6049995..5b0b59f6ac9 100644 --- a/docs/docs/integrations/llms/gpt4all.ipynb +++ b/docs/docs/integrations/llms/gpt4all.ipynb @@ -47,9 +47,10 @@ }, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n", + "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", + "from langchain.chains import LLMChain\n", "from langchain.llms import GPT4All\n", - "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/gradient.ipynb b/docs/docs/integrations/llms/gradient.ipynb index 1085ed03461..4e769ea576a 100644 --- a/docs/docs/integrations/llms/gradient.ipynb +++ b/docs/docs/integrations/llms/gradient.ipynb @@ -24,9 +24,9 @@ 
"metadata": {}, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import GradientLLM\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { @@ -43,8 +43,8 @@ "metadata": {}, "outputs": [], "source": [ - "from getpass import getpass\n", "import os\n", + "from getpass import getpass\n", "\n", "if not os.environ.get(\"GRADIENT_ACCESS_TOKEN\", None):\n", " # Access token under https://auth.gradient.ai/select-workspace\n", diff --git a/docs/docs/integrations/llms/huggingface_hub.ipynb b/docs/docs/integrations/llms/huggingface_hub.ipynb index 8b0d33d9bb4..37ac5bd3b62 100644 --- a/docs/docs/integrations/llms/huggingface_hub.ipynb +++ b/docs/docs/integrations/llms/huggingface_hub.ipynb @@ -101,7 +101,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/huggingface_textgen_inference.ipynb b/docs/docs/integrations/llms/huggingface_textgen_inference.ipynb index 7e9261d25bf..395a5d8bad9 100644 --- a/docs/docs/integrations/llms/huggingface_textgen_inference.ipynb +++ b/docs/docs/integrations/llms/huggingface_textgen_inference.ipynb @@ -62,9 +62,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import HuggingFaceTextGenInference\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", - "\n", + "from langchain.llms import HuggingFaceTextGenInference\n", "\n", "llm = HuggingFaceTextGenInference(\n", " inference_server_url=\"http://localhost:8010/\",\n", diff --git a/docs/docs/integrations/llms/jsonformer_experimental.ipynb b/docs/docs/integrations/llms/jsonformer_experimental.ipynb index 0290108510f..8acf15618be 100644 --- a/docs/docs/integrations/llms/jsonformer_experimental.ipynb +++ b/docs/docs/integrations/llms/jsonformer_experimental.ipynb @@ -59,10 +59,11 @@ }, "outputs": [], "source": [ - "from langchain.tools import tool\n", - "import os\n", "import json\n", + "import os\n", + "\n", "import requests\n", + "from langchain.tools import tool\n", "\n", "HF_TOKEN = os.environ.get(\"HUGGINGFACE_API_KEY\")\n", "\n", @@ -151,8 +152,8 @@ } ], "source": [ - "from transformers import pipeline\n", "from langchain.llms import HuggingFacePipeline\n", + "from transformers import pipeline\n", "\n", "hf_model = pipeline(\n", " \"text-generation\", model=\"cerebras/Cerebras-GPT-590M\", max_new_tokens=200\n", diff --git a/docs/docs/integrations/llms/llamacpp.ipynb b/docs/docs/integrations/llms/llamacpp.ipynb index 95f3b87c74a..989284040d4 100644 --- a/docs/docs/integrations/llms/llamacpp.ipynb +++ b/docs/docs/integrations/llms/llamacpp.ipynb @@ -192,11 +192,11 @@ }, "outputs": [], "source": [ - "from langchain.llms import LlamaCpp\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain\n", "from langchain.callbacks.manager import CallbackManager\n", - "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler" + "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", + "from langchain.chains import LLMChain\n", + "from langchain.llms import LlamaCpp\n", + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/llm_caching.ipynb b/docs/docs/integrations/llms/llm_caching.ipynb 
index 000ea53a6f1..3e1907691f3 100644 --- a/docs/docs/integrations/llms/llm_caching.ipynb +++ b/docs/docs/integrations/llms/llm_caching.ipynb @@ -231,8 +231,8 @@ "metadata": {}, "outputs": [], "source": [ - "from upstash_redis import Redis\n", "from langchain.cache import UpstashRedisCache\n", + "from upstash_redis import Redis\n", "\n", "URL = \"\"\n", "TOKEN = \"\"\n", @@ -331,8 +331,8 @@ "source": [ "# We can do the same thing with a Redis cache\n", "# (make sure your local Redis instance is running first before running this example)\n", - "from redis import Redis\n", "from langchain.cache import RedisCache\n", + "from redis import Redis\n", "\n", "set_llm_cache(RedisCache(redis_=Redis()))" ] @@ -415,9 +415,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.cache import RedisSemanticCache\n", - "\n", + "from langchain.embeddings import OpenAIEmbeddings\n", "\n", "set_llm_cache(\n", " RedisSemanticCache(redis_url=\"redis://localhost:6379\", embedding=OpenAIEmbeddings())\n", @@ -509,11 +508,12 @@ "metadata": {}, "outputs": [], "source": [ + "import hashlib\n", + "\n", "from gptcache import Cache\n", "from gptcache.manager.factory import manager_factory\n", "from gptcache.processor.pre import get_prompt\n", "from langchain.cache import GPTCache\n", - "import hashlib\n", "\n", "\n", "def get_hashed_name(name):\n", @@ -608,10 +608,11 @@ "metadata": {}, "outputs": [], "source": [ + "import hashlib\n", + "\n", "from gptcache import Cache\n", "from gptcache.adapter.api import init_similar_cache\n", "from langchain.cache import GPTCache\n", - "import hashlib\n", "\n", "\n", "def get_hashed_name(name):\n", @@ -762,7 +763,6 @@ "\n", "from langchain.cache import MomentoCache\n", "\n", - "\n", "cache_name = \"langchain\"\n", "ttl = timedelta(days=1)\n", "set_llm_cache(MomentoCache.from_client_params(cache_name, ttl))" @@ -875,11 +875,10 @@ "source": [ "# You can define your own declarative SQLAlchemyCache child class to customize the schema used for caching. 
For example, to support high-speed fulltext prompt indexing with Postgres, use:\n", "\n", - "from sqlalchemy import Column, Integer, String, Computed, Index, Sequence\n", - "from sqlalchemy import create_engine\n", + "from langchain.cache import SQLAlchemyCache\n", + "from sqlalchemy import Column, Computed, Index, Integer, Sequence, String, create_engine\n", "from sqlalchemy.ext.declarative import declarative_base\n", "from sqlalchemy_utils import TSVectorType\n", - "from langchain.cache import SQLAlchemyCache\n", "\n", "Base = declarative_base()\n", "\n", @@ -961,8 +960,8 @@ "metadata": {}, "outputs": [], "source": [ - "from cassandra.cluster import Cluster\n", "from cassandra.auth import PlainTextAuthProvider\n", + "from cassandra.cluster import Cluster\n", "\n", "cluster = Cluster(\n", " cloud={\n", @@ -990,8 +989,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.globals import set_llm_cache\n", "from langchain.cache import CassandraCache\n", + "from langchain.globals import set_llm_cache\n", "\n", "set_llm_cache(CassandraCache(session=session, keyspace=keyspace))" ] diff --git a/docs/docs/integrations/llms/lmformatenforcer_experimental.ipynb b/docs/docs/integrations/llms/lmformatenforcer_experimental.ipynb index 4ee78d39c36..d648064f970 100644 --- a/docs/docs/integrations/llms/lmformatenforcer_experimental.ipynb +++ b/docs/docs/integrations/llms/lmformatenforcer_experimental.ipynb @@ -49,6 +49,7 @@ "outputs": [], "source": [ "import logging\n", + "\n", "from langchain_experimental.pydantic_v1 import BaseModel\n", "\n", "logging.basicConfig(level=logging.ERROR)\n", @@ -81,7 +82,7 @@ ], "source": [ "import torch\n", - "from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig\n", + "from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer\n", "\n", "model_id = \"meta-llama/Llama-2-7b-chat-hf\"\n", "\n", @@ -194,8 +195,8 @@ } ], "source": [ - "from transformers import pipeline\n", "from langchain.llms import HuggingFacePipeline\n", + "from transformers import pipeline\n", "\n", "hf_model = pipeline(\n", " \"text-generation\", model=model, tokenizer=tokenizer, max_new_tokens=200\n", diff --git a/docs/docs/integrations/llms/manifest.ipynb b/docs/docs/integrations/llms/manifest.ipynb index 56562479009..9ab68ad626d 100644 --- a/docs/docs/integrations/llms/manifest.ipynb +++ b/docs/docs/integrations/llms/manifest.ipynb @@ -41,8 +41,8 @@ }, "outputs": [], "source": [ - "from manifest import Manifest\n", - "from langchain.llms.manifest import ManifestWrapper" + "from langchain.llms.manifest import ManifestWrapper\n", + "from manifest import Manifest" ] }, { @@ -80,10 +80,9 @@ "outputs": [], "source": [ "# Map reduce example\n", + "from langchain.chains.mapreduce import MapReduceChain\n", "from langchain.prompts import PromptTemplate\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.chains.mapreduce import MapReduceChain\n", - "\n", "\n", "_prompt = \"\"\"Write a concise summary of the following:\n", "\n", diff --git a/docs/docs/integrations/llms/minimax.ipynb b/docs/docs/integrations/llms/minimax.ipynb index fc28da0e203..428cf8e54f0 100644 --- a/docs/docs/integrations/llms/minimax.ipynb +++ b/docs/docs/integrations/llms/minimax.ipynb @@ -74,6 +74,9 @@ { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "# get api_key and group_id: https://api.minimax.chat/user-center/basic-information\n", @@ -83,27 +86,27 @@ "\n", "os.environ[\"MINIMAX_API_KEY\"] = 
\"YOUR_API_KEY\"\n", "os.environ[\"MINIMAX_GROUP_ID\"] = \"YOUR_GROUP_ID\"" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import Minimax\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain" - ], - "metadata": { - "collapsed": false - } + "from langchain.prompts import PromptTemplate" + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "template = \"\"\"Question: {question}\n", @@ -111,45 +114,42 @@ "Answer: Let's think step by step.\"\"\"\n", "\n", "prompt = PromptTemplate(template=template, input_variables=[\"question\"])" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "llm = Minimax()" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "llm_chain = LLMChain(prompt=prompt, llm=llm)" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "question = \"What NBA team won the Championship in the year Jay Zhou was born?\"\n", "\n", "llm_chain.run(question)" - ], - "metadata": { - "collapsed": false - } + ] } ], "metadata": { diff --git a/docs/docs/integrations/llms/modal.ipynb b/docs/docs/integrations/llms/modal.ipynb index d5864c479ac..27f2534c191 100644 --- a/docs/docs/integrations/llms/modal.ipynb +++ b/docs/docs/integrations/llms/modal.ipynb @@ -107,9 +107,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import Modal\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/mosaicml.ipynb b/docs/docs/integrations/llms/mosaicml.ipynb index 141bfe71217..04011ada9a2 100644 --- a/docs/docs/integrations/llms/mosaicml.ipynb +++ b/docs/docs/integrations/llms/mosaicml.ipynb @@ -42,8 +42,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import MosaicML\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/nlpcloud.ipynb b/docs/docs/integrations/llms/nlpcloud.ipynb index d8d0e2daf3c..f5e42eaa68b 100644 --- a/docs/docs/integrations/llms/nlpcloud.ipynb +++ b/docs/docs/integrations/llms/nlpcloud.ipynb @@ -72,8 +72,9 @@ }, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import NLPCloud\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/octoai.ipynb b/docs/docs/integrations/llms/octoai.ipynb index c9bcff7abe3..bfed1da79fe 100644 --- a/docs/docs/integrations/llms/octoai.ipynb +++ b/docs/docs/integrations/llms/octoai.ipynb @@ -42,8 +42,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms.octoai_endpoint import 
OctoAIEndpoint\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/ollama.ipynb b/docs/docs/integrations/llms/ollama.ipynb index 30addf32dbf..aadc992f2a0 100644 --- a/docs/docs/integrations/llms/ollama.ipynb +++ b/docs/docs/integrations/llms/ollama.ipynb @@ -55,9 +55,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import Ollama\n", "from langchain.callbacks.manager import CallbackManager\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", + "from langchain.llms import Ollama\n", "\n", "llm = Ollama(\n", " model=\"llama2\", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])\n", @@ -173,9 +173,11 @@ ], "source": [ "# Embed and store\n", + "from langchain.embeddings import (\n", + " GPT4AllEmbeddings,\n", + " OllamaEmbeddings, # We can also try Ollama embeddings\n", + ")\n", "from langchain.vectorstores import Chroma\n", - "from langchain.embeddings import GPT4AllEmbeddings\n", - "from langchain.embeddings import OllamaEmbeddings # We can also try Ollama embeddings\n", "\n", "vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())" ] @@ -222,9 +224,9 @@ "outputs": [], "source": [ "# LLM\n", - "from langchain.llms import Ollama\n", "from langchain.callbacks.manager import CallbackManager\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", + "from langchain.llms import Ollama\n", "\n", "llm = Ollama(\n", " model=\"llama2\",\n", @@ -287,8 +289,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.schema import LLMResult\n", "from langchain.callbacks.base import BaseCallbackHandler\n", + "from langchain.schema import LLMResult\n", "\n", "\n", "class GenerationStatisticsCallback(BaseCallbackHandler):\n", @@ -364,9 +366,9 @@ "outputs": [], "source": [ "# LLM\n", - "from langchain.llms import Ollama\n", "from langchain.callbacks.manager import CallbackManager\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", + "from langchain.llms import Ollama\n", "\n", "llm = Ollama(\n", " model=\"mistral:7b-instruct\",\n", diff --git a/docs/docs/integrations/llms/opaqueprompts.ipynb b/docs/docs/integrations/llms/opaqueprompts.ipynb index 1e7c8352122..4df00fee77e 100644 --- a/docs/docs/integrations/llms/opaqueprompts.ipynb +++ b/docs/docs/integrations/llms/opaqueprompts.ipynb @@ -58,15 +58,12 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", "from langchain.callbacks.stdout import StdOutCallbackHandler\n", - "from langchain.llms import OpenAI\n", - "from langchain.memory import ConversationBufferWindowMemory\n", - "\n", - "from langchain.llms import OpaquePrompts\n", - "\n", + "from langchain.chains import LLMChain\n", "from langchain.globals import set_debug, set_verbose\n", + "from langchain.llms import OpaquePrompts, OpenAI\n", + "from langchain.memory import ConversationBufferWindowMemory\n", + "from langchain.prompts import PromptTemplate\n", "\n", "set_debug(True)\n", "set_verbose(True)\n", @@ -177,9 +174,8 @@ "outputs": [], "source": [ "import langchain.utilities.opaqueprompts as op\n", - "from langchain.schema.runnable import RunnablePassthrough\n", "from langchain.schema.output_parser import StrOutputParser\n", - "\n", + "from langchain.schema.runnable import RunnablePassthrough\n", 
"\n", "prompt = (PromptTemplate.from_template(prompt_template),)\n", "llm = OpenAI()\n", diff --git a/docs/docs/integrations/llms/openai.ipynb b/docs/docs/integrations/llms/openai.ipynb index 3bd1da7c82d..fcc4940ce64 100644 --- a/docs/docs/integrations/llms/openai.ipynb +++ b/docs/docs/integrations/llms/openai.ipynb @@ -66,8 +66,9 @@ }, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import OpenAI\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/openllm.ipynb b/docs/docs/integrations/llms/openllm.ipynb index 6d85ea29b0d..e5c019b2b66 100644 --- a/docs/docs/integrations/llms/openllm.ipynb +++ b/docs/docs/integrations/llms/openllm.ipynb @@ -114,7 +114,8 @@ } ], "source": [ - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n", + "from langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate\n", "\n", "template = \"What is a good name for a company that makes {product}?\"\n", "\n", diff --git a/docs/docs/integrations/llms/openlm.ipynb b/docs/docs/integrations/llms/openlm.ipynb index e22b8b31473..a992bd442d2 100644 --- a/docs/docs/integrations/llms/openlm.ipynb +++ b/docs/docs/integrations/llms/openlm.ipynb @@ -39,9 +39,8 @@ "metadata": {}, "outputs": [], "source": [ - "from getpass import getpass\n", "import os\n", - "\n", + "from getpass import getpass\n", "\n", "# Check if OPENAI_API_KEY environment variable is set\n", "if \"OPENAI_API_KEY\" not in os.environ:\n", @@ -69,9 +68,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import OpenLM\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/pai_eas_endpoint.ipynb b/docs/docs/integrations/llms/pai_eas_endpoint.ipynb index ed7f388b194..2f5baee551a 100644 --- a/docs/docs/integrations/llms/pai_eas_endpoint.ipynb +++ b/docs/docs/integrations/llms/pai_eas_endpoint.ipynb @@ -14,9 +14,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms.pai_eas_endpoint import PaiEasEndpoint\n", "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain\n", "\n", "template = \"\"\"Question: {question}\n", "\n", diff --git a/docs/docs/integrations/llms/petals.ipynb b/docs/docs/integrations/llms/petals.ipynb index 1128429e788..582a982d8fe 100644 --- a/docs/docs/integrations/llms/petals.ipynb +++ b/docs/docs/integrations/llms/petals.ipynb @@ -44,8 +44,10 @@ "outputs": [], "source": [ "import os\n", + "\n", + "from langchain.chains import LLMChain\n", "from langchain.llms import Petals\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/pipelineai.ipynb b/docs/docs/integrations/llms/pipelineai.ipynb index 7e89832fd6f..cb7225cc251 100644 --- a/docs/docs/integrations/llms/pipelineai.ipynb +++ b/docs/docs/integrations/llms/pipelineai.ipynb @@ -49,8 +49,10 @@ "outputs": [], "source": [ "import os\n", + "\n", + "from langchain.chains import LLMChain\n", "from langchain.llms import PipelineAI\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from 
langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/predictionguard.ipynb b/docs/docs/integrations/llms/predictionguard.ipynb index ac7518e9348..dfdba4af199 100644 --- a/docs/docs/integrations/llms/predictionguard.ipynb +++ b/docs/docs/integrations/llms/predictionguard.ipynb @@ -31,9 +31,9 @@ "source": [ "import os\n", "\n", + "from langchain.chains import LLMChain\n", "from langchain.llms import PredictionGuard\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/promptlayer_openai.ipynb b/docs/docs/integrations/llms/promptlayer_openai.ipynb index 6c2404bb5fc..eea93414457 100644 --- a/docs/docs/integrations/llms/promptlayer_openai.ipynb +++ b/docs/docs/integrations/llms/promptlayer_openai.ipynb @@ -59,8 +59,9 @@ "outputs": [], "source": [ "import os\n", - "from langchain.llms import PromptLayerOpenAI\n", - "import promptlayer" + "\n", + "import promptlayer\n", + "from langchain.llms import PromptLayerOpenAI" ] }, { diff --git a/docs/docs/integrations/llms/rellm_experimental.ipynb b/docs/docs/integrations/llms/rellm_experimental.ipynb index 8cc3e00465f..c783e434c16 100644 --- a/docs/docs/integrations/llms/rellm_experimental.ipynb +++ b/docs/docs/integrations/llms/rellm_experimental.ipynb @@ -92,8 +92,8 @@ } ], "source": [ - "from transformers import pipeline\n", "from langchain.llms import HuggingFacePipeline\n", + "from transformers import pipeline\n", "\n", "hf_model = pipeline(\n", " \"text-generation\", model=\"cerebras/Cerebras-GPT-590M\", max_new_tokens=200\n", diff --git a/docs/docs/integrations/llms/replicate.ipynb b/docs/docs/integrations/llms/replicate.ipynb index b4eca041303..9b9fc37b473 100644 --- a/docs/docs/integrations/llms/replicate.ipynb +++ b/docs/docs/integrations/llms/replicate.ipynb @@ -103,8 +103,9 @@ }, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import Replicate\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { @@ -273,10 +274,11 @@ "metadata": {}, "outputs": [], "source": [ - "from PIL import Image\n", - "import requests\n", "from io import BytesIO\n", "\n", + "import requests\n", + "from PIL import Image\n", + "\n", "response = requests.get(image_output)\n", "img = Image.open(BytesIO(response.content))\n", "\n", diff --git a/docs/docs/integrations/llms/runhouse.ipynb b/docs/docs/integrations/llms/runhouse.ipynb index 27b3c5c919c..c56912061cb 100644 --- a/docs/docs/integrations/llms/runhouse.ipynb +++ b/docs/docs/integrations/llms/runhouse.ipynb @@ -43,9 +43,10 @@ } ], "source": [ - "from langchain.llms import SelfHostedPipeline, SelfHostedHuggingFaceLLM\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n", - "import runhouse as rh" + "import runhouse as rh\n", + "from langchain.chains import LLMChain\n", + "from langchain.llms import SelfHostedHuggingFaceLLM, SelfHostedPipeline\n", + "from langchain.prompts import PromptTemplate" ] }, { @@ -213,7 +214,7 @@ " AutoModelForCausalLM,\n", " AutoTokenizer,\n", " pipeline,\n", - " ) # Need to be inside the fn in notebooks\n", + " )\n", "\n", " model_id = \"gpt2\"\n", " tokenizer = AutoTokenizer.from_pretrained(model_id)\n", diff --git a/docs/docs/integrations/llms/sagemaker.ipynb b/docs/docs/integrations/llms/sagemaker.ipynb index 43103e1a86c..f10e14d72ef 
100644 --- a/docs/docs/integrations/llms/sagemaker.ipynb +++ b/docs/docs/integrations/llms/sagemaker.ipynb @@ -99,14 +99,14 @@ }, "outputs": [], "source": [ + "import json\n", "from typing import Dict\n", "\n", - "from langchain.prompts import PromptTemplate\n", + "import boto3\n", + "from langchain.chains.question_answering import load_qa_chain\n", "from langchain.llms import SagemakerEndpoint\n", "from langchain.llms.sagemaker_endpoint import LLMContentHandler\n", - "from langchain.chains.question_answering import load_qa_chain\n", - "import json\n", - "import boto3\n", + "from langchain.prompts import PromptTemplate\n", "\n", "query = \"\"\"How long was Elizabeth hospitalized?\n", "\"\"\"\n", @@ -170,13 +170,13 @@ "metadata": {}, "outputs": [], "source": [ + "import json\n", "from typing import Dict\n", "\n", - "from langchain.prompts import PromptTemplate\n", + "from langchain.chains.question_answering import load_qa_chain\n", "from langchain.llms import SagemakerEndpoint\n", "from langchain.llms.sagemaker_endpoint import LLMContentHandler\n", - "from langchain.chains.question_answering import load_qa_chain\n", - "import json\n", + "from langchain.prompts import PromptTemplate\n", "\n", "query = \"\"\"How long was Elizabeth hospitalized?\n", "\"\"\"\n", diff --git a/docs/docs/integrations/llms/stochasticai.ipynb b/docs/docs/integrations/llms/stochasticai.ipynb index efd4db43b99..1b226861b26 100644 --- a/docs/docs/integrations/llms/stochasticai.ipynb +++ b/docs/docs/integrations/llms/stochasticai.ipynb @@ -79,8 +79,9 @@ }, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import StochasticAI\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/symblai_nebula.ipynb b/docs/docs/integrations/llms/symblai_nebula.ipynb index 23efe08fa14..3e3f97cd782 100644 --- a/docs/docs/integrations/llms/symblai_nebula.ipynb +++ b/docs/docs/integrations/llms/symblai_nebula.ipynb @@ -2,6 +2,10 @@ "cells": [ { "cell_type": "markdown", + "id": "bb8cd830db4a004e", + "metadata": { + "collapsed": false + }, "source": [ "# Nebula (Symbl.ai)\n", "[Nebula](https://symbl.ai/nebula/) is a large language model (LLM) built by [Symbl.ai](https://symbl.ai). It is trained to perform generative tasks on human conversations. Nebula excels at modeling the nuanced details of a conversation and performing tasks on the conversation.\n", @@ -9,52 +13,53 @@ "Nebula documentation: https://docs.symbl.ai/docs/nebula-llm\n", "\n", "This example goes over how to use LangChain to interact with the [Nebula platform](https://docs.symbl.ai/docs/nebula-llm)." - ], - "metadata": { - "collapsed": false - }, - "id": "bb8cd830db4a004e" + ] }, { "cell_type": "markdown", - "source": [ - "Make sure you have an API key with you. If you don't have one, please [request one](https://info.symbl.ai/Nebula_Private_Beta.html)." - ], + "id": "519570b6539aa18c", "metadata": { "collapsed": false }, - "id": "519570b6539aa18c" + "source": [ + "Make sure you have an API key with you. If you don't have one, please [request one](https://info.symbl.ai/Nebula_Private_Beta.html)." 
+ ] }, { "cell_type": "code", "execution_count": null, + "id": "9f47bef45880aece", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ "from langchain.llms.symblai_nebula import Nebula\n", "\n", "llm = Nebula(nebula_api_key=\"\")" - ], - "metadata": { - "collapsed": false - }, - "id": "9f47bef45880aece" + ] }, { "cell_type": "markdown", - "source": [ - "Use a conversation transcript and instruction to construct a prompt." - ], + "id": "88c6a516ef51c74b", "metadata": { "collapsed": false }, - "id": "88c6a516ef51c74b" + "source": [ + "Use a conversation transcript and instruction to construct a prompt." + ] }, { "cell_type": "code", "execution_count": null, + "id": "5977ccc2d4432624", + "metadata": { + "collapsed": false + }, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n", + "from langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate\n", "\n", "conversation = \"\"\"Sam: Good morning, team! Let's keep this standup concise. We'll go in the usual order: what you did yesterday, what you plan to do today, and any blockers. Alex, kick us off.\n", "Alex: Morning! Yesterday, I wrapped up the UI for the user dashboard. The new charts and widgets are now responsive. I also had a sync with the design team to ensure the final touchups are in line with the brand guidelines. Today, I'll start integrating the frontend with the new API endpoints Rhea was working on. The only blocker is waiting for some final API documentation, but I guess Rhea can update on that.\n", @@ -73,11 +78,7 @@ "llm_chain = LLMChain(prompt=prompt, llm=llm)\n", "\n", "llm_chain.run(instruction=instruction, conversation=conversation)" - ], - "metadata": { - "collapsed": false - }, - "id": "5977ccc2d4432624" + ] } ], "metadata": { diff --git a/docs/docs/integrations/llms/textgen.ipynb b/docs/docs/integrations/llms/textgen.ipynb index e27ac5f4686..3c56d91179f 100644 --- a/docs/docs/integrations/llms/textgen.ipynb +++ b/docs/docs/integrations/llms/textgen.ipynb @@ -41,11 +41,10 @@ }, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import TextGen\n", - "\n", "from langchain.globals import set_debug\n", + "from langchain.llms import TextGen\n", + "from langchain.prompts import PromptTemplate\n", "\n", "set_debug(True)\n", "\n", @@ -92,12 +91,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain\n", - "from langchain.llms import TextGen\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", - "\n", + "from langchain.chains import LLMChain\n", "from langchain.globals import set_debug\n", + "from langchain.llms import TextGen\n", + "from langchain.prompts import PromptTemplate\n", "\n", "set_debug(True)\n", "\n", diff --git a/docs/docs/integrations/llms/titan_takeoff.ipynb b/docs/docs/integrations/llms/titan_takeoff.ipynb index 435157981ce..533f7b8e5b7 100644 --- a/docs/docs/integrations/llms/titan_takeoff.ipynb +++ b/docs/docs/integrations/llms/titan_takeoff.ipynb @@ -114,8 +114,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", "from langchain.callbacks.manager import CallbackManager\n", + "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", "\n", "llm = TitanTakeoff(\n", " 
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]), streaming=True\n", @@ -139,8 +139,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", "from langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate\n", "\n", "llm = TitanTakeoff()\n", "\n", diff --git a/docs/docs/integrations/llms/titan_takeoff_pro.ipynb b/docs/docs/integrations/llms/titan_takeoff_pro.ipynb index 8f2096d9021..3e5a922c746 100644 --- a/docs/docs/integrations/llms/titan_takeoff_pro.ipynb +++ b/docs/docs/integrations/llms/titan_takeoff_pro.ipynb @@ -30,10 +30,10 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.callbacks.manager import CallbackManager\n", + "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", "from langchain.llms import TitanTakeoffPro\n", "from langchain.prompts import PromptTemplate\n", - "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", - "from langchain.callbacks.manager import CallbackManager\n", "\n", "# Example 1: Basic use\n", "llm = TitanTakeoffPro()\n", diff --git a/docs/docs/integrations/llms/tongyi.ipynb b/docs/docs/integrations/llms/tongyi.ipynb index 6fb8cb336c8..fbdfdad836f 100644 --- a/docs/docs/integrations/llms/tongyi.ipynb +++ b/docs/docs/integrations/llms/tongyi.ipynb @@ -75,8 +75,9 @@ }, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import Tongyi\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/vllm.ipynb b/docs/docs/integrations/llms/vllm.ipynb index c828d378afd..889d207fe45 100644 --- a/docs/docs/integrations/llms/vllm.ipynb +++ b/docs/docs/integrations/llms/vllm.ipynb @@ -129,7 +129,8 @@ } ], "source": [ - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n", + "from langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate\n", "\n", "template = \"\"\"Question: {question}\n", "\n", @@ -208,7 +209,6 @@ "source": [ "from langchain.llms import VLLMOpenAI\n", "\n", - "\n", "llm = VLLMOpenAI(\n", " openai_api_key=\"EMPTY\",\n", " openai_api_base=\"http://localhost:8000/v1\",\n", diff --git a/docs/docs/integrations/llms/writer.ipynb b/docs/docs/integrations/llms/writer.ipynb index dc7451048fb..a74fc0564a3 100644 --- a/docs/docs/integrations/llms/writer.ipynb +++ b/docs/docs/integrations/llms/writer.ipynb @@ -55,8 +55,9 @@ }, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import Writer\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/llms/xinference.ipynb b/docs/docs/integrations/llms/xinference.ipynb index bfa1cd300ce..5d5af24c716 100644 --- a/docs/docs/integrations/llms/xinference.ipynb +++ b/docs/docs/integrations/llms/xinference.ipynb @@ -121,7 +121,8 @@ } ], "source": [ - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n", + "from langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate\n", "\n", "template = \"Where can we visit in the capital of {country}?\"\n", "\n", diff --git a/docs/docs/integrations/memory/aws_dynamodb.ipynb b/docs/docs/integrations/memory/aws_dynamodb.ipynb index f1c502c8e01..942ea065fb0 100644 --- 
a/docs/docs/integrations/memory/aws_dynamodb.ipynb +++ b/docs/docs/integrations/memory/aws_dynamodb.ipynb @@ -247,11 +247,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import Tool\n", - "from langchain.memory import ConversationBufferMemory\n", + "from langchain.agents import AgentType, Tool, initialize_agent\n", "from langchain.chat_models import ChatOpenAI\n", - "from langchain.agents import initialize_agent\n", - "from langchain.agents import AgentType\n", + "from langchain.memory import ConversationBufferMemory\n", "from langchain_experimental.utilities import PythonREPL" ] }, diff --git a/docs/docs/integrations/memory/cassandra_chat_message_history.ipynb b/docs/docs/integrations/memory/cassandra_chat_message_history.ipynb index 698e0f618a4..f4c0532a304 100644 --- a/docs/docs/integrations/memory/cassandra_chat_message_history.ipynb +++ b/docs/docs/integrations/memory/cassandra_chat_message_history.ipynb @@ -81,8 +81,8 @@ "metadata": {}, "outputs": [], "source": [ - "from cassandra.cluster import Cluster\n", "from cassandra.auth import PlainTextAuthProvider\n", + "from cassandra.cluster import Cluster\n", "\n", "if database_mode == \"C\":\n", " if CASSANDRA_CONTACT_POINTS:\n", diff --git a/docs/docs/integrations/memory/elasticsearch_chat_message_history.ipynb b/docs/docs/integrations/memory/elasticsearch_chat_message_history.ipynb index 886ee24fd57..00e8cb18dc9 100644 --- a/docs/docs/integrations/memory/elasticsearch_chat_message_history.ipynb +++ b/docs/docs/integrations/memory/elasticsearch_chat_message_history.ipynb @@ -62,6 +62,7 @@ "outputs": [], "source": [ "import os\n", + "\n", "from langchain.memory import ElasticsearchChatMessageHistory\n", "\n", "es_url = os.environ.get(\"ES_URL\", \"http://localhost:9200\")\n", diff --git a/docs/docs/integrations/memory/motorhead_memory.ipynb b/docs/docs/integrations/memory/motorhead_memory.ipynb index beaecc2cbef..e30a30f36e8 100644 --- a/docs/docs/integrations/memory/motorhead_memory.ipynb +++ b/docs/docs/integrations/memory/motorhead_memory.ipynb @@ -35,7 +35,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\nfrom langchain.chains import LLMChain\nfrom langchain.prompts import PromptTemplate\n", + "from langchain.chains import LLMChain\n", + "from langchain.llms import OpenAI\n", + "from langchain.prompts import PromptTemplate\n", "\n", "template = \"\"\"You are a chatbot having a conversation with a human.\n", "\n", diff --git a/docs/docs/integrations/memory/rockset_chat_message_history.ipynb b/docs/docs/integrations/memory/rockset_chat_message_history.ipynb index d1b5673b0b8..41cfb1d25d6 100644 --- a/docs/docs/integrations/memory/rockset_chat_message_history.ipynb +++ b/docs/docs/integrations/memory/rockset_chat_message_history.ipynb @@ -53,7 +53,7 @@ "outputs": [], "source": [ "from langchain.memory.chat_message_histories import RocksetChatMessageHistory\n", - "from rockset import RocksetClient, Regions\n", + "from rockset import Regions, RocksetClient\n", "\n", "history = RocksetChatMessageHistory(\n", " session_id=\"MySession\",\n", diff --git a/docs/docs/integrations/memory/sql_chat_message_history.ipynb b/docs/docs/integrations/memory/sql_chat_message_history.ipynb index 5d1e40c0698..fe4663c1a17 100644 --- a/docs/docs/integrations/memory/sql_chat_message_history.ipynb +++ b/docs/docs/integrations/memory/sql_chat_message_history.ipynb @@ -128,12 +128,12 @@ "outputs": [], "source": [ "from datetime import datetime\n", - "from langchain.schema import BaseMessage, 
HumanMessage, AIMessage, SystemMessage\n", "from typing import Any\n", - "from sqlalchemy import Column, Integer, Text, DateTime\n", - "from sqlalchemy.orm import declarative_base\n", - "from langchain.memory.chat_message_histories.sql import BaseMessageConverter\n", "\n", + "from langchain.memory.chat_message_histories.sql import BaseMessageConverter\n", + "from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n", + "from sqlalchemy import Column, DateTime, Integer, Text\n", + "from sqlalchemy.orm import declarative_base\n", "\n", "Base = declarative_base()\n", "\n", diff --git a/docs/docs/integrations/memory/xata_chat_message_history.ipynb b/docs/docs/integrations/memory/xata_chat_message_history.ipynb index e95f7b40402..48fd22ce714 100644 --- a/docs/docs/integrations/memory/xata_chat_message_history.ipynb +++ b/docs/docs/integrations/memory/xata_chat_message_history.ipynb @@ -190,9 +190,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.memory import ConversationBufferMemory\n", "from uuid import uuid4\n", "\n", + "from langchain.memory import ConversationBufferMemory\n", + "\n", "chat_memory = XataChatMessageHistory(\n", " session_id=str(uuid4()), # needs to be unique per user session\n", " api_key=api_key,\n", @@ -217,7 +218,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import initialize_agent, AgentType\n", + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.agents.agent_toolkits import create_retriever_tool\n", "from langchain.chat_models import ChatOpenAI\n", "\n", diff --git a/docs/docs/integrations/memory/zep_memory.ipynb b/docs/docs/integrations/memory/zep_memory.ipynb index a47e37c9be0..1e90ee93a2a 100644 --- a/docs/docs/integrations/memory/zep_memory.ipynb +++ b/docs/docs/integrations/memory/zep_memory.ipynb @@ -47,14 +47,14 @@ }, "outputs": [], "source": [ - "from langchain.memory import ZepMemory\n", - "from langchain.retrievers import ZepRetriever\n", - "from langchain.llms import OpenAI\n", - "from langchain.schema import HumanMessage, AIMessage\n", - "from langchain.utilities import WikipediaAPIWrapper\n", - "from langchain.agents import initialize_agent, AgentType, Tool\n", "from uuid import uuid4\n", "\n", + "from langchain.agents import AgentType, Tool, initialize_agent\n", + "from langchain.llms import OpenAI\n", + "from langchain.memory import ZepMemory\n", + "from langchain.retrievers import ZepRetriever\n", + "from langchain.schema import AIMessage, HumanMessage\n", + "from langchain.utilities import WikipediaAPIWrapper\n", "\n", "# Set this to your Zep server URL\n", "ZEP_API_URL = \"http://localhost:8000\"\n", diff --git a/docs/docs/integrations/providers/aim_tracking.ipynb b/docs/docs/integrations/providers/aim_tracking.ipynb index 14f046b6560..fda118a40cc 100644 --- a/docs/docs/integrations/providers/aim_tracking.ipynb +++ b/docs/docs/integrations/providers/aim_tracking.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "613b5312", "metadata": {}, "source": [ "# Aim\n", @@ -19,28 +20,28 @@ "Aim is fully open source, [learn more](https://github.com/aimhubio/aim) about Aim on GitHub.\n", "\n", "Let's move forward and see how to enable and configure Aim callback." - ], - "id": "613b5312" + ] }, { "cell_type": "markdown", + "id": "3615f1e2", "metadata": {}, "source": [ "
<h3>Tracking LangChain Executions with Aim</h3>
" - ], - "id": "3615f1e2" + ] }, { "cell_type": "markdown", + "id": "5d271566", "metadata": {}, "source": [ "In this notebook we will explore three usage scenarios. To start off, we will install the necessary packages and import certain modules. Subsequently, we will configure two environment variables that can be established either within the Python script or through the terminal." - ], - "id": "5d271566" + ] }, { "cell_type": "code", "execution_count": null, + "id": "d16e00da", "metadata": { "id": "mf88kuCJhbVu" }, @@ -50,12 +51,12 @@ "!pip install langchain\n", "!pip install openai\n", "!pip install google-search-results" - ], - "id": "d16e00da" + ] }, { "cell_type": "code", "execution_count": null, + "id": "c970cda9", "metadata": { "id": "g4eTuajwfl6L" }, @@ -64,24 +65,24 @@ "import os\n", "from datetime import datetime\n", "\n", - "from langchain.llms import OpenAI\n", - "from langchain.callbacks import AimCallbackHandler, StdOutCallbackHandler" - ], - "id": "c970cda9" + "from langchain.callbacks import AimCallbackHandler, StdOutCallbackHandler\n", + "from langchain.llms import OpenAI" + ] }, { "cell_type": "markdown", + "id": "426ecf0d", "metadata": {}, "source": [ "Our examples use a GPT model as the LLM, and OpenAI offers an API for this purpose. You can obtain the key from the following link: https://platform.openai.com/account/api-keys .\n", "\n", "We will use the SerpApi to retrieve search results from Google. To acquire the SerpApi key, please go to https://serpapi.com/manage-api-key ." - ], - "id": "426ecf0d" + ] }, { "cell_type": "code", "execution_count": null, + "id": "b2b1cfc2", "metadata": { "id": "T1bSmKd6V2If" }, @@ -89,22 +90,22 @@ "source": [ "os.environ[\"OPENAI_API_KEY\"] = \"...\"\n", "os.environ[\"SERPAPI_API_KEY\"] = \"...\"" - ], - "id": "b2b1cfc2" + ] }, { "cell_type": "markdown", + "id": "53070869", "metadata": { "id": "QenUYuBZjIzc" }, "source": [ "The event methods of `AimCallbackHandler` accept the LangChain module or agent as input and log at least the prompts and generated results, as well as the serialized version of the LangChain module, to the designated Aim run." - ], - "id": "53070869" + ] }, { "cell_type": "code", "execution_count": null, + "id": "3a30e90d", "metadata": { "id": "KAz8weWuUeXF" }, @@ -118,30 +119,30 @@ "\n", "callbacks = [StdOutCallbackHandler(), aim_callback]\n", "llm = OpenAI(temperature=0, callbacks=callbacks)" - ], - "id": "3a30e90d" + ] }, { "cell_type": "markdown", + "id": "1f591582", "metadata": { "id": "b8WfByB4fl6N" }, "source": [ "The `flush_tracker` function is used to record LangChain assets on Aim. By default, the session is reset rather than being terminated outright." - ], - "id": "1f591582" + ] }, { "cell_type": "markdown", + "id": "8a425743", "metadata": {}, "source": [ "
<h3>Scenario 1</h3>
In the first scenario, we will use OpenAI LLM." - ], - "id": "8a425743" + ] }, { "cell_type": "code", "execution_count": null, + "id": "795cda48", "metadata": { "id": "o_VmneyIUyx8" }, @@ -153,33 +154,33 @@ " langchain_asset=llm,\n", " experiment_name=\"scenario 2: Chain with multiple SubChains on multiple generations\",\n", ")" - ], - "id": "795cda48" + ] }, { "cell_type": "markdown", + "id": "7374776f", "metadata": {}, "source": [ "
<h3>Scenario 2</h3>
Scenario two involves chaining with multiple SubChains across multiple generations." - ], - "id": "7374776f" + ] }, { "cell_type": "code", "execution_count": null, + "id": "f946249a", "metadata": { "id": "trxslyb1U28Y" }, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain" - ], - "id": "f946249a" + "from langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate" + ] }, { "cell_type": "code", "execution_count": null, + "id": "1012e817", "metadata": { "id": "uauQk10SUzF6" }, @@ -203,33 +204,32 @@ "aim_callback.flush_tracker(\n", " langchain_asset=synopsis_chain, experiment_name=\"scenario 3: Agent with Tools\"\n", ")" - ], - "id": "1012e817" + ] }, { "cell_type": "markdown", + "id": "f18e2d10", "metadata": {}, "source": [ "
<h3>Scenario 3</h3>
The third scenario involves an agent with tools." - ], - "id": "f18e2d10" + ] }, { "cell_type": "code", "execution_count": null, + "id": "9de08db4", "metadata": { "id": "_jN73xcPVEpI" }, "outputs": [], "source": [ - "from langchain.agents import initialize_agent, load_tools\n", - "from langchain.agents import AgentType" - ], - "id": "9de08db4" + "from langchain.agents import AgentType, initialize_agent, load_tools" + ] }, { "cell_type": "code", "execution_count": null, + "id": "0992df94", "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -278,8 +278,7 @@ " \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n", ")\n", "aim_callback.flush_tracker(langchain_asset=agent, reset=False, finish=True)" - ], - "id": "0992df94" + ] } ], "metadata": { diff --git a/docs/docs/integrations/providers/clearml_tracking.ipynb b/docs/docs/integrations/providers/clearml_tracking.ipynb index 12c50ff43aa..0b0050a851c 100644 --- a/docs/docs/integrations/providers/clearml_tracking.ipynb +++ b/docs/docs/integrations/providers/clearml_tracking.ipynb @@ -542,8 +542,7 @@ } ], "source": [ - "from langchain.agents import initialize_agent, load_tools\n", - "from langchain.agents import AgentType\n", + "from langchain.agents import AgentType, initialize_agent, load_tools\n", "\n", "# SCENARIO 2 - Agent with Tools\n", "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callbacks=callbacks)\n", diff --git a/docs/docs/integrations/providers/comet_tracking.ipynb b/docs/docs/integrations/providers/comet_tracking.ipynb index 6f6bcf02b7d..cd119f06f88 100644 --- a/docs/docs/integrations/providers/comet_tracking.ipynb +++ b/docs/docs/integrations/providers/comet_tracking.ipynb @@ -246,12 +246,11 @@ "metadata": {}, "outputs": [], "source": [ - "from rouge_score import rouge_scorer\n", - "\n", "from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n", "from langchain.chains import LLMChain\n", "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from rouge_score import rouge_scorer\n", "\n", "\n", "class Rouge:\n", diff --git a/docs/docs/integrations/providers/mlflow_tracking.ipynb b/docs/docs/integrations/providers/mlflow_tracking.ipynb index 0797315466b..4403ce33029 100644 --- a/docs/docs/integrations/providers/mlflow_tracking.ipynb +++ b/docs/docs/integrations/providers/mlflow_tracking.ipynb @@ -122,8 +122,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain" + "from langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate" ] }, { @@ -158,8 +158,7 @@ }, "outputs": [], "source": [ - "from langchain.agents import initialize_agent, load_tools\n", - "from langchain.agents import AgentType" + "from langchain.agents import AgentType, initialize_agent, load_tools" ] }, { diff --git a/docs/docs/integrations/providers/ray_serve.ipynb b/docs/docs/integrations/providers/ray_serve.ipynb index 0e56b3d37c1..db17b4acaa9 100644 --- a/docs/docs/integrations/providers/ray_serve.ipynb +++ b/docs/docs/integrations/providers/ray_serve.ipynb @@ -107,8 +107,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.llms import OpenAI\n", - "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/integrations/providers/rebuff.ipynb 
b/docs/docs/integrations/providers/rebuff.ipynb index 16f52a8a73e..1ee61311e3c 100644 --- a/docs/docs/integrations/providers/rebuff.ipynb +++ b/docs/docs/integrations/providers/rebuff.ipynb @@ -176,7 +176,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chains import TransformChain, SimpleSequentialChain\n", + "from langchain.chains import SimpleSequentialChain, TransformChain\n", "from langchain.sql_database import SQLDatabase\n", "from langchain_experimental.sql import SQLDatabaseChain" ] diff --git a/docs/docs/integrations/providers/vectara/vectara_chat.ipynb b/docs/docs/integrations/providers/vectara/vectara_chat.ipynb index cc33ad1fd50..214729abf4f 100644 --- a/docs/docs/integrations/providers/vectara/vectara_chat.ipynb +++ b/docs/docs/integrations/providers/vectara/vectara_chat.ipynb @@ -20,9 +20,10 @@ "outputs": [], "source": [ "import os\n", - "from langchain.vectorstores import Vectara\n", + "\n", + "from langchain.chains import ConversationalRetrievalChain\n", "from langchain.llms import OpenAI\n", - "from langchain.chains import ConversationalRetrievalChain" + "from langchain.vectorstores import Vectara" ] }, { @@ -430,8 +431,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.chains.question_answering import load_qa_chain\n", - "from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT" + "from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT\n", + "from langchain.chains.question_answering import load_qa_chain" ] }, { @@ -587,12 +588,12 @@ }, "outputs": [], "source": [ - "from langchain.chains.llm import LLMChain\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", "from langchain.chains.conversational_retrieval.prompts import (\n", " CONDENSE_QUESTION_PROMPT,\n", " QA_PROMPT,\n", ")\n", + "from langchain.chains.llm import LLMChain\n", "from langchain.chains.question_answering import load_qa_chain\n", "\n", "# Construct a ConversationalRetrievalChain with a streaming llm for combine docs\n", diff --git a/docs/docs/integrations/providers/vectara/vectara_text_generation.ipynb b/docs/docs/integrations/providers/vectara/vectara_text_generation.ipynb index 542d8bd9ae7..d93b85e0618 100644 --- a/docs/docs/integrations/providers/vectara/vectara_text_generation.ipynb +++ b/docs/docs/integrations/providers/vectara/vectara_text_generation.ipynb @@ -25,14 +25,15 @@ "outputs": [], "source": [ "import os\n", - "from langchain.llms import OpenAI\n", - "from langchain.docstore.document import Document\n", - "from langchain.vectorstores import Vectara\n", - "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.prompts import PromptTemplate\n", "import pathlib\n", "import subprocess\n", - "import tempfile" + "import tempfile\n", + "\n", + "from langchain.docstore.document import Document\n", + "from langchain.llms import OpenAI\n", + "from langchain.prompts import PromptTemplate\n", + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.vectorstores import Vectara" ] }, { diff --git a/docs/docs/integrations/providers/wandb_tracing.ipynb b/docs/docs/integrations/providers/wandb_tracing.ipynb index 153cac6f9f8..fb60e19a79f 100644 --- a/docs/docs/integrations/providers/wandb_tracing.ipynb +++ b/docs/docs/integrations/providers/wandb_tracing.ipynb @@ -37,10 +37,9 @@ "# here we are configuring the wandb project name\n", "os.environ[\"WANDB_PROJECT\"] = \"langchain-tracing\"\n", "\n", - "from 
langchain.agents import initialize_agent, load_tools\n", - "from langchain.agents import AgentType\n", - "from langchain.llms import OpenAI\n", - "from langchain.callbacks import wandb_tracing_enabled" + "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langchain.callbacks import wandb_tracing_enabled\n", + "from langchain.llms import OpenAI" ] }, { diff --git a/docs/docs/integrations/providers/wandb_tracking.ipynb b/docs/docs/integrations/providers/wandb_tracking.ipynb index 54cec8c2093..9f9a23bf4f5 100644 --- a/docs/docs/integrations/providers/wandb_tracking.ipynb +++ b/docs/docs/integrations/providers/wandb_tracking.ipynb @@ -3,6 +3,7 @@ { "attachments": {}, "cell_type": "markdown", + "id": "e43f4ea0", "metadata": {}, "source": [ "# Weights & Biases\n", @@ -18,12 +19,12 @@ "\n", "\n", "**Note**: _the `WandbCallbackHandler` is being deprecated in favour of the `WandbTracer`_ . In future please use the `WandbTracer` as it is more flexible and allows for more granular logging. To know more about the `WandbTracer` refer to the [agent_with_wandb_tracing.html](https://python.langchain.com/en/latest/integrations/agent_with_wandb_tracing.html) notebook or use the following [colab notebook](http://wandb.me/prompts-quickstart). To know more about Weights & Biases Prompts refer to the following [prompts documentation](https://docs.wandb.ai/guides/prompts)." - ], - "id": "e43f4ea0" + ] }, { "cell_type": "code", "execution_count": null, + "id": "fbe82fa5", "metadata": {}, "outputs": [], "source": [ @@ -32,12 +33,12 @@ "!pip install textstat\n", "!pip install spacy\n", "!python -m spacy download en_core_web_sm" - ], - "id": "fbe82fa5" + ] }, { "cell_type": "code", "execution_count": 1, + "id": "be90b9ec", "metadata": { "id": "T1bSmKd6V2If" }, @@ -48,26 +49,27 @@ "os.environ[\"WANDB_API_KEY\"] = \"\"\n", "# os.environ[\"OPENAI_API_KEY\"] = \"\"\n", "# os.environ[\"SERPAPI_API_KEY\"] = \"\"" - ], - "id": "be90b9ec" + ] }, { "cell_type": "code", "execution_count": 2, + "id": "46a9bd4d", "metadata": { "id": "8WAGnTWpUUnD" }, "outputs": [], "source": [ "from datetime import datetime\n", - "from langchain.callbacks import WandbCallbackHandler, StdOutCallbackHandler\n", + "\n", + "from langchain.callbacks import StdOutCallbackHandler, WandbCallbackHandler\n", "from langchain.llms import OpenAI" - ], - "id": "46a9bd4d" + ] }, { "attachments": {}, "cell_type": "markdown", + "id": "849569b7", "metadata": {}, "source": [ "```\n", @@ -85,12 +87,12 @@ " complexity_metrics (bool): Whether to log complexity metrics.\n", " stream_logs (bool): Whether to stream callback actions to W&B\n", "```" - ], - "id": "849569b7" + ] }, { "attachments": {}, "cell_type": "markdown", + "id": "718579f7", "metadata": { "id": "cxBFfZR8d9FC" }, @@ -102,21 +104,21 @@ "complexity_metrics: bool = False,\n", "stream_logs: bool = False,\n", "```\n" - ], - "id": "718579f7" + ] }, { "attachments": {}, "cell_type": "markdown", + "id": "e5f067a1", "metadata": {}, "source": [ "NOTE: For beta workflows we have made the default analysis based on textstat and the visualizations based on spacy" - ], - "id": "e5f067a1" + ] }, { "cell_type": "code", "execution_count": 3, + "id": "4ddf7dce", "metadata": { "id": "KAz8weWuUeXF" }, @@ -215,12 +217,12 @@ ")\n", "callbacks = [StdOutCallbackHandler(), wandb_callback]\n", "llm = OpenAI(temperature=0, callbacks=callbacks)" - ], - "id": "4ddf7dce" + ] }, { "attachments": {}, "cell_type": "markdown", + "id": "f684905f", "metadata": { "id": "Q-65jwrDeK6w" }, @@ -234,21 +236,21 @@ 
"finish: bool = False,\n", "```\n", "\n" - ], - "id": "f684905f" + ] }, { "attachments": {}, "cell_type": "markdown", + "id": "1c096610", "metadata": {}, "source": [ "The `flush_tracker` function is used to log LangChain sessions to Weights & Biases. It takes in the LangChain module or agent, and logs at minimum the prompts and generations alongside the serialized form of the LangChain module to the specified Weights & Biases project. By default we reset the session as opposed to concluding the session outright." - ], - "id": "1c096610" + ] }, { "cell_type": "code", "execution_count": 4, + "id": "d68750d5", "metadata": { "id": "o_VmneyIUyx8" }, @@ -368,25 +370,25 @@ "# SCENARIO 1 - LLM\n", "llm_result = llm.generate([\"Tell me a joke\", \"Tell me a poem\"] * 3)\n", "wandb_callback.flush_tracker(llm, name=\"simple_sequential\")" - ], - "id": "d68750d5" + ] }, { "cell_type": "code", "execution_count": 5, + "id": "839a528e", "metadata": { "id": "trxslyb1U28Y" }, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain" - ], - "id": "839a528e" + "from langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate" + ] }, { "cell_type": "code", "execution_count": 6, + "id": "44842d32", "metadata": { "id": "uauQk10SUzF6" }, @@ -519,25 +521,24 @@ "]\n", "synopsis_chain.apply(test_prompts)\n", "wandb_callback.flush_tracker(synopsis_chain, name=\"agent\")" - ], - "id": "44842d32" + ] }, { "cell_type": "code", "execution_count": 7, + "id": "0c609071", "metadata": { "id": "_jN73xcPVEpI" }, "outputs": [], "source": [ - "from langchain.agents import initialize_agent, load_tools\n", - "from langchain.agents import AgentType" - ], - "id": "0c609071" + "from langchain.agents import AgentType, initialize_agent, load_tools" + ] }, { "cell_type": "code", "execution_count": 8, + "id": "5e106cb8", "metadata": { "id": "Gpq4rk6VT9cu" }, @@ -614,16 +615,15 @@ " callbacks=callbacks,\n", ")\n", "wandb_callback.flush_tracker(agent, reset=False, finish=True)" - ], - "id": "5e106cb8" + ] }, { "cell_type": "code", "execution_count": null, + "id": "2701d0de", "metadata": {}, "outputs": [], - "source": [], - "id": "2701d0de" + "source": [] } ], "metadata": { diff --git a/docs/docs/integrations/retrievers/Activeloop DeepMemory+LangChain.ipynb b/docs/docs/integrations/retrievers/Activeloop DeepMemory+LangChain.ipynb index 1a91aeeb0f0..e4f0b1c3e70 100644 --- a/docs/docs/integrations/retrievers/Activeloop DeepMemory+LangChain.ipynb +++ b/docs/docs/integrations/retrievers/Activeloop DeepMemory+LangChain.ipynb @@ -68,15 +68,13 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", - "from langchain.vectorstores.deeplake import DeepLake\n", - "\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.chains import RetrievalQA\n", + "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.llms import OpenAIChat\n", - "\n", + "from langchain.vectorstores.deeplake import DeepLake\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"Enter your OpenAI API token: \")\n", "# # activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `\n", @@ -117,9 +115,10 @@ "metadata": {}, "outputs": [], "source": [ + "from urllib.parse import urljoin\n", + "\n", "import requests\n", "from bs4 import BeautifulSoup\n", - "from urllib.parse import urljoin\n", "\n", "\n", "def get_all_links(url):\n", @@ -195,7 +194,6 @@ "source": [ 
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "\n", - "\n", "chunk_size = 4096\n", "docs_new = []\n", "\n", @@ -280,7 +278,6 @@ "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate\n", "from langchain.schema import HumanMessage, SystemMessage\n", - "\n", "from pydantic import BaseModel, Field" ] }, @@ -336,8 +333,9 @@ "outputs": [], "source": [ "import random\n", - "from tqdm import tqdm\n", + "\n", "from langchain.embeddings import OpenAIEmbeddings\n", + "from tqdm import tqdm\n", "\n", "\n", "def generate_queries(docs: List[str], ids: List[str], n: int = 100):\n", @@ -507,10 +505,10 @@ "metadata": {}, "outputs": [], "source": [ + "from ragas.langchain import RagasEvaluatorChain\n", "from ragas.metrics import (\n", " context_recall,\n", - ")\n", - "from ragas.langchain import RagasEvaluatorChain" + ")" ] }, { diff --git a/docs/docs/integrations/retrievers/arxiv.ipynb b/docs/docs/integrations/retrievers/arxiv.ipynb index f644af3ec6a..95b65ba49d6 100644 --- a/docs/docs/integrations/retrievers/arxiv.ipynb +++ b/docs/docs/integrations/retrievers/arxiv.ipynb @@ -200,8 +200,8 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains import ConversationalRetrievalChain\n", + "from langchain.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\") # switch to 'gpt-4'\n", "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)" diff --git a/docs/docs/integrations/retrievers/chatgpt-plugin.ipynb b/docs/docs/integrations/retrievers/chatgpt-plugin.ipynb index 24ff62064dd..a271942c258 100644 --- a/docs/docs/integrations/retrievers/chatgpt-plugin.ipynb +++ b/docs/docs/integrations/retrievers/chatgpt-plugin.ipynb @@ -42,9 +42,10 @@ "# STEP 2: Convert\n", "\n", "# Convert Document to format expected by https://github.com/openai/chatgpt-retrieval-plugin\n", - "from typing import List\n", - "from langchain.docstore.document import Document\n", "import json\n", + "from typing import List\n", + "\n", + "from langchain.docstore.document import Document\n", "\n", "\n", "def write_json(path: str, documents: List[Document]) -> None:\n", @@ -97,8 +98,8 @@ } ], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] diff --git a/docs/docs/integrations/retrievers/cohere-reranker.ipynb b/docs/docs/integrations/retrievers/cohere-reranker.ipynb index 5a11bef51e0..8e973b0331a 100644 --- a/docs/docs/integrations/retrievers/cohere-reranker.ipynb +++ b/docs/docs/integrations/retrievers/cohere-reranker.ipynb @@ -47,8 +47,8 @@ "source": [ "# get a new token: https://dashboard.cohere.ai/\n", "\n", - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"COHERE_API_KEY\"] = getpass.getpass(\"Cohere API Key:\")" ] @@ -325,9 +325,9 @@ } ], "source": [ - "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.document_loaders import TextLoader\n", + "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain.vectorstores import FAISS\n", "\n", "documents = TextLoader(\"../../modules/state_of_the_union.txt\").load()\n", diff --git a/docs/docs/integrations/retrievers/docarray_retriever.ipynb b/docs/docs/integrations/retrievers/docarray_retriever.ipynb index 
421ff66a988..458915ffcd5 100644 --- a/docs/docs/integrations/retrievers/docarray_retriever.ipynb +++ b/docs/docs/integrations/retrievers/docarray_retriever.ipynb @@ -30,11 +30,12 @@ }, "outputs": [], "source": [ - "from langchain.retrievers import DocArrayRetriever\n", + "import random\n", + "\n", "from docarray import BaseDoc\n", "from docarray.typing import NdArray\n", "from langchain.embeddings import FakeEmbeddings\n", - "import random\n", + "from langchain.retrievers import DocArrayRetriever\n", "\n", "embeddings = FakeEmbeddings(size=32)" ] @@ -90,7 +91,6 @@ "source": [ "from docarray.index import InMemoryExactNNIndex\n", "\n", - "\n", "# initialize the index\n", "db = InMemoryExactNNIndex[MyDoc]()\n", "# index data\n", @@ -163,7 +163,6 @@ "source": [ "from docarray.index import HnswDocumentIndex\n", "\n", - "\n", "# initialize the index\n", "db = HnswDocumentIndex[MyDoc](work_dir=\"hnsw_index\")\n", "\n", @@ -260,7 +259,6 @@ "source": [ "from docarray.index import WeaviateDocumentIndex\n", "\n", - "\n", "# initialize the index\n", "dbconfig = WeaviateDocumentIndex.DBConfig(host=\"http://localhost:8080\")\n", "db = WeaviateDocumentIndex[WeaviateDoc](db_config=dbconfig)\n", @@ -335,7 +333,6 @@ "source": [ "from docarray.index import ElasticDocIndex\n", "\n", - "\n", "# initialize the index\n", "db = ElasticDocIndex[MyDoc](\n", " hosts=\"http://localhost:9200\", index_name=\"docarray_retriever\"\n", @@ -420,7 +417,6 @@ "from docarray.index import QdrantDocumentIndex\n", "from qdrant_client.http import models as rest\n", "\n", - "\n", "# initialize the index\n", "qdrant_config = QdrantDocumentIndex.DBConfig(path=\":memory:\")\n", "db = QdrantDocumentIndex[MyDoc](qdrant_config)\n", diff --git a/docs/docs/integrations/retrievers/fleet_context.ipynb b/docs/docs/integrations/retrievers/fleet_context.ipynb index 4a57d3f7953..a01e44318f1 100644 --- a/docs/docs/integrations/retrievers/fleet_context.ipynb +++ b/docs/docs/integrations/retrievers/fleet_context.ipynb @@ -33,7 +33,6 @@ "from typing import Any, Optional, Type\n", "\n", "import pandas as pd\n", - "\n", "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers import MultiVectorRetriever\n", "from langchain.schema import Document\n", diff --git a/docs/docs/integrations/retrievers/google_vertex_ai_search.ipynb b/docs/docs/integrations/retrievers/google_vertex_ai_search.ipynb index 143d5b9c817..a1b5cef9a9b 100644 --- a/docs/docs/integrations/retrievers/google_vertex_ai_search.ipynb +++ b/docs/docs/integrations/retrievers/google_vertex_ai_search.ipynb @@ -164,8 +164,8 @@ "outputs": [], "source": [ "from langchain.retrievers import (\n", - " GoogleVertexAISearchRetriever,\n", " GoogleVertexAIMultiTurnSearchRetriever,\n", + " GoogleVertexAISearchRetriever,\n", ")\n", "\n", "PROJECT_ID = \"\" # Set to your Project ID\n", diff --git a/docs/docs/integrations/retrievers/kay.ipynb b/docs/docs/integrations/retrievers/kay.ipynb index 3b048cf8290..b61f4b9b6e4 100644 --- a/docs/docs/integrations/retrievers/kay.ipynb +++ b/docs/docs/integrations/retrievers/kay.ipynb @@ -71,6 +71,7 @@ "outputs": [], "source": [ "import os\n", + "\n", "from langchain.retrievers import KayAiRetriever\n", "\n", "os.environ[\"KAY_API_KEY\"] = KAY_API_KEY\n", @@ -149,8 +150,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains import ConversationalRetrievalChain\n", + "from langchain.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n", "qa 
= ConversationalRetrievalChain.from_llm(model, retriever=retriever)" diff --git a/docs/docs/integrations/retrievers/knn.ipynb b/docs/docs/integrations/retrievers/knn.ipynb index ba4dc9152d0..fe3b2c3d1ab 100644 --- a/docs/docs/integrations/retrievers/knn.ipynb +++ b/docs/docs/integrations/retrievers/knn.ipynb @@ -21,8 +21,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.retrievers import KNNRetriever\n", - "from langchain.embeddings import OpenAIEmbeddings" + "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.retrievers import KNNRetriever" ] }, { diff --git a/docs/docs/integrations/retrievers/merger_retriever.ipynb b/docs/docs/integrations/retrievers/merger_retriever.ipynb index f0e7bdc6541..8f56f74f04f 100644 --- a/docs/docs/integrations/retrievers/merger_retriever.ipynb +++ b/docs/docs/integrations/retrievers/merger_retriever.ipynb @@ -21,17 +21,17 @@ "outputs": [], "source": [ "import os\n", + "\n", "import chromadb\n", + "from langchain.document_transformers import (\n", + " EmbeddingsClusteringFilter,\n", + " EmbeddingsRedundantFilter,\n", + ")\n", + "from langchain.embeddings import HuggingFaceEmbeddings, OpenAIEmbeddings\n", + "from langchain.retrievers import ContextualCompressionRetriever\n", + "from langchain.retrievers.document_compressors import DocumentCompressorPipeline\n", "from langchain.retrievers.merger_retriever import MergerRetriever\n", "from langchain.vectorstores import Chroma\n", - "from langchain.embeddings import HuggingFaceEmbeddings\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.document_transformers import (\n", - " EmbeddingsRedundantFilter,\n", - " EmbeddingsClusteringFilter,\n", - ")\n", - "from langchain.retrievers.document_compressors import DocumentCompressorPipeline\n", - "from langchain.retrievers import ContextualCompressionRetriever\n", "\n", "# Get 3 diff embeddings.\n", "all_mini = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n", diff --git a/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb b/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb index 4353f47364d..ce5362743a2 100644 --- a/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb +++ b/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb @@ -34,8 +34,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"PINECONE_API_KEY\"] = getpass.getpass(\"Pinecone API Key:\")" ] @@ -115,6 +115,7 @@ ], "source": [ "import os\n", + "\n", "import pinecone\n", "\n", "api_key = os.getenv(\"PINECONE_API_KEY\") or \"PINECONE_API_KEY\"\n", diff --git a/docs/docs/integrations/retrievers/re_phrase.ipynb b/docs/docs/integrations/retrievers/re_phrase.ipynb index 652ce6e88c7..07ad112fb31 100644 --- a/docs/docs/integrations/retrievers/re_phrase.ipynb +++ b/docs/docs/integrations/retrievers/re_phrase.ipynb @@ -26,13 +26,13 @@ "outputs": [], "source": [ "import logging\n", - "from langchain.document_loaders import WebBaseLoader\n", - "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import Chroma\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.chat_models import ChatOpenAI\n", "\n", - "from langchain.retrievers import RePhraseQueryRetriever" + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.document_loaders import WebBaseLoader\n", + "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.retrievers import 
RePhraseQueryRetriever\n", + "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "from langchain.vectorstores import Chroma" ] }, { diff --git a/docs/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query.ipynb index 0739b5b41cd..825cd0125ff 100644 --- a/docs/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query.ipynb @@ -67,8 +67,8 @@ }, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "os.environ[\"ACTIVELOOP_TOKEN\"] = getpass.getpass(\"Activeloop token:\")" @@ -83,8 +83,8 @@ }, "outputs": [], "source": [ - "from langchain.schema import Document\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.schema import Document\n", "from langchain.vectorstores import DeepLake\n", "\n", "embeddings = OpenAIEmbeddings()" @@ -192,9 +192,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/chroma_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/chroma_self_query.ipynb index 3429cb95eef..67890db8bdb 100644 --- a/docs/docs/integrations/retrievers/self_query/chroma_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/chroma_self_query.ipynb @@ -72,8 +72,8 @@ } ], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] @@ -87,8 +87,8 @@ }, "outputs": [], "source": [ - "from langchain.schema import Document\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.schema import Document\n", "from langchain.vectorstores import Chroma\n", "\n", "embeddings = OpenAIEmbeddings()" @@ -163,9 +163,9 @@ }, "outputs": [], "source": [ + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/dashvector.ipynb b/docs/docs/integrations/retrievers/self_query/dashvector.ipynb index 60d6c11c9a7..772c8ed0723 100644 --- a/docs/docs/integrations/retrievers/self_query/dashvector.ipynb +++ b/docs/docs/integrations/retrievers/self_query/dashvector.ipynb @@ -74,6 +74,7 @@ "outputs": [], "source": [ "import os\n", + "\n", "import dashvector\n", "\n", "client = dashvector.Client(api_key=os.environ[\"DASHVECTOR_API_KEY\"])" @@ -91,8 +92,8 @@ }, "outputs": [], "source": [ - "from langchain.schema import Document\n", "from langchain.embeddings import DashScopeEmbeddings\n", + "from langchain.schema import Document\n", "from langchain.vectorstores import DashVector\n", "\n", "embeddings = DashScopeEmbeddings()\n", @@ -184,9 +185,9 @@ }, "outputs": [], "source": [ + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.llms 
import Tongyi\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb index 4b806f3ce1b..c20fc557cae 100644 --- a/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb @@ -57,11 +57,12 @@ }, "outputs": [], "source": [ - "from langchain.schema import Document\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.vectorstores import ElasticsearchStore\n", - "import os\n", "import getpass\n", + "import os\n", + "\n", + "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.schema import Document\n", + "from langchain.vectorstores import ElasticsearchStore\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "\n", @@ -134,9 +135,9 @@ }, "outputs": [], "source": [ + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb index 2a779fc6a93..3640b55cc30 100644 --- a/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb @@ -67,8 +67,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.schema import Document\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.schema import Document\n", "from langchain.vectorstores import Milvus\n", "\n", "embeddings = OpenAIEmbeddings()" @@ -128,9 +128,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb index 70a327aecdf..b38e2901ab2 100644 --- a/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb @@ -59,8 +59,8 @@ }, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "os.environ[\"MYSCALE_HOST\"] = getpass.getpass(\"MyScale URL:\")\n", @@ -78,8 +78,8 @@ }, "outputs": [], "source": [ - "from langchain.schema import Document\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.schema import Document\n", "from langchain.vectorstores import MyScale\n", "\n", "embeddings = OpenAIEmbeddings()" @@ -160,9 +160,9 @@ }, "outputs": [], "source": [ + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.llms import 
OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/opensearch_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/opensearch_self_query.ipynb index 4ffc98b96e0..7e7fb8940cd 100644 --- a/docs/docs/integrations/retrievers/self_query/opensearch_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/opensearch_self_query.ipynb @@ -27,17 +27,17 @@ { "cell_type": "code", "execution_count": null, - "outputs": [], - "source": [ - "!pip install lark opensearch-py" - ], + "id": "6078a74d", "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } }, - "id": "6078a74d" + "outputs": [], + "source": [ + "!pip install lark opensearch-py" + ] }, { "cell_type": "code", @@ -56,11 +56,12 @@ } ], "source": [ - "from langchain.schema import Document\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.vectorstores import OpenSearchVectorSearch\n", - "import os\n", "import getpass\n", + "import os\n", + "\n", + "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.schema import Document\n", + "from langchain.vectorstores import OpenSearchVectorSearch\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "\n", @@ -133,9 +134,9 @@ }, "outputs": [], "source": [ + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/pinecone.ipynb b/docs/docs/integrations/retrievers/self_query/pinecone.ipynb index c3181fdd809..04214a77d37 100644 --- a/docs/docs/integrations/retrievers/self_query/pinecone.ipynb +++ b/docs/docs/integrations/retrievers/self_query/pinecone.ipynb @@ -65,7 +65,6 @@ "\n", "import pinecone\n", "\n", - "\n", "pinecone.init(\n", " api_key=os.environ[\"PINECONE_API_KEY\"], environment=os.environ[\"PINECONE_ENV\"]\n", ")" @@ -78,8 +77,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.schema import Document\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.schema import Document\n", "from langchain.vectorstores import Pinecone\n", "\n", "embeddings = OpenAIEmbeddings()\n", @@ -146,9 +145,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/qdrant_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/qdrant_self_query.ipynb index 8a91504cede..14c063b71f8 100644 --- a/docs/docs/integrations/retrievers/self_query/qdrant_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/qdrant_self_query.ipynb @@ -70,8 +70,8 @@ }, "outputs": [], "source": [ - "from langchain.schema import Document\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.schema import Document\n", "from langchain.vectorstores import 
Qdrant\n", "\n", "embeddings = OpenAIEmbeddings()" @@ -144,9 +144,9 @@ }, "outputs": [], "source": [ + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/redis_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/redis_self_query.ipynb index 984eb407399..41090e04023 100644 --- a/docs/docs/integrations/retrievers/self_query/redis_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/redis_self_query.ipynb @@ -52,8 +52,8 @@ }, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] @@ -67,8 +67,8 @@ }, "outputs": [], "source": [ - "from langchain.schema import Document\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.schema import Document\n", "from langchain.vectorstores import Redis\n", "\n", "embeddings = OpenAIEmbeddings()" @@ -193,9 +193,9 @@ }, "outputs": [], "source": [ + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/supabase_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/supabase_self_query.ipynb index b4208764557..f072394390c 100644 --- a/docs/docs/integrations/retrievers/self_query/supabase_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/supabase_self_query.ipynb @@ -160,8 +160,8 @@ }, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"SUPABASE_URL\"] = getpass.getpass(\"Supabase URL:\")\n", "os.environ[\"SUPABASE_SERVICE_KEY\"] = getpass.getpass(\"Supabase Service Key:\")\n", @@ -216,10 +216,11 @@ "outputs": [], "source": [ "import os\n", - "from supabase.client import Client, create_client\n", - "from langchain.schema import Document\n", + "\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.schema import Document\n", "from langchain.vectorstores import SupabaseVectorStore\n", + "from supabase.client import Client, create_client\n", "\n", "supabase_url = os.environ.get(\"SUPABASE_URL\")\n", "supabase_key = os.environ.get(\"SUPABASE_SERVICE_KEY\")\n", @@ -304,9 +305,9 @@ }, "outputs": [], "source": [ + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/timescalevector_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/timescalevector_self_query.ipynb index 56682190ce3..fe5cfc64572 100644 --- a/docs/docs/integrations/retrievers/self_query/timescalevector_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/timescalevector_self_query.ipynb @@ -91,7 +91,8 @@ "# Get openAI api key by reading local .env file\n", "# The .env file should 
contain a line starting with `OPENAI_API_KEY=sk-`\n", "import os\n", - "from dotenv import load_dotenv, find_dotenv\n", + "\n", + "from dotenv import find_dotenv, load_dotenv\n", "\n", "_ = load_dotenv(find_dotenv())\n", "\n", @@ -142,8 +143,8 @@ }, "outputs": [], "source": [ - "from langchain.schema import Document\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.schema import Document\n", "from langchain.vectorstores.timescalevector import TimescaleVector\n", "\n", "embeddings = OpenAIEmbeddings()" @@ -244,9 +245,9 @@ }, "outputs": [], "source": [ + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo\n", "\n", "# Give LLM info about the metadata fields\n", "metadata_field_info = [\n", diff --git a/docs/docs/integrations/retrievers/self_query/vectara_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/vectara_self_query.ipynb index ec2f703ab3e..70fa7c91551 100644 --- a/docs/docs/integrations/retrievers/self_query/vectara_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/vectara_self_query.ipynb @@ -75,13 +75,12 @@ }, "outputs": [], "source": [ + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.embeddings import FakeEmbeddings\n", - "from langchain.schema import Document\n", - "from langchain.vectorstores import Vectara\n", - "\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo" + "from langchain.schema import Document\n", + "from langchain.vectorstores import Vectara" ] }, { @@ -152,9 +151,9 @@ }, "outputs": [], "source": [ + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/weaviate_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/weaviate_self_query.ipynb index e5f6be8a7a0..b2c7a087aea 100644 --- a/docs/docs/integrations/retrievers/self_query/weaviate_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/weaviate_self_query.ipynb @@ -45,8 +45,8 @@ }, "outputs": [], "source": [ - "from langchain.schema import Document\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.schema import Document\n", "from langchain.vectorstores import Weaviate\n", "\n", "embeddings = OpenAIEmbeddings()" @@ -115,9 +115,9 @@ }, "outputs": [], "source": [ + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/singlestoredb.ipynb b/docs/docs/integrations/retrievers/singlestoredb.ipynb index ec69b040307..d510adae241 100644 --- a/docs/docs/integrations/retrievers/singlestoredb.ipynb +++ b/docs/docs/integrations/retrievers/singlestoredb.ipynb @@ -44,16 +44,16 @@ }, "outputs": [], "source": [ - "import os\n", 
"import getpass\n", + "import os\n", "\n", "# We want to use OpenAIEmbeddings so we have to get the OpenAI API Key.\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "\n", + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import SingleStoreDB\n", - "from langchain.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/retrievers/svm.ipynb b/docs/docs/integrations/retrievers/svm.ipynb index 93c6d2747d3..66ca2bd0012 100644 --- a/docs/docs/integrations/retrievers/svm.ipynb +++ b/docs/docs/integrations/retrievers/svm.ipynb @@ -65,8 +65,8 @@ } ], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] @@ -80,8 +80,8 @@ }, "outputs": [], "source": [ - "from langchain.retrievers import SVMRetriever\n", - "from langchain.embeddings import OpenAIEmbeddings" + "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.retrievers import SVMRetriever" ] }, { diff --git a/docs/docs/integrations/retrievers/tavily.ipynb b/docs/docs/integrations/retrievers/tavily.ipynb index 9c87e35ed73..34b1d139638 100644 --- a/docs/docs/integrations/retrievers/tavily.ipynb +++ b/docs/docs/integrations/retrievers/tavily.ipynb @@ -44,6 +44,7 @@ ], "source": [ "import os\n", + "\n", "from langchain.retrievers.tavily_search_api import TavilySearchAPIRetriever\n", "\n", "os.environ[\"TAVILY_API_KEY\"] = \"YOUR_API_KEY\"\n", diff --git a/docs/docs/integrations/retrievers/weaviate-hybrid.ipynb b/docs/docs/integrations/retrievers/weaviate-hybrid.ipynb index bdbf7a92156..8426b0faf6c 100644 --- a/docs/docs/integrations/retrievers/weaviate-hybrid.ipynb +++ b/docs/docs/integrations/retrievers/weaviate-hybrid.ipynb @@ -44,9 +44,10 @@ "metadata": {}, "outputs": [], "source": [ - "import weaviate\n", "import os\n", "\n", + "import weaviate\n", + "\n", "WEAVIATE_URL = os.getenv(\"WEAVIATE_URL\")\n", "auth_client_secret = (weaviate.AuthApiKey(api_key=os.getenv(\"WEAVIATE_API_KEY\")),)\n", "client = weaviate.Client(\n", diff --git a/docs/docs/integrations/retrievers/wikipedia.ipynb b/docs/docs/integrations/retrievers/wikipedia.ipynb index 13fff296255..75a9c9c0770 100644 --- a/docs/docs/integrations/retrievers/wikipedia.ipynb +++ b/docs/docs/integrations/retrievers/wikipedia.ipynb @@ -199,8 +199,8 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains import ConversationalRetrievalChain\n", + "from langchain.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\") # switch to 'gpt-4'\n", "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)" diff --git a/docs/docs/integrations/retrievers/you-retriever.ipynb b/docs/docs/integrations/retrievers/you-retriever.ipynb index 68648f316a5..8c2da49df01 100644 --- a/docs/docs/integrations/retrievers/you-retriever.ipynb +++ b/docs/docs/integrations/retrievers/you-retriever.ipynb @@ -18,9 +18,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.retrievers.you_retriever import YouRetriever\n", "from langchain.chains import RetrievalQA\n", "from langchain.llms import OpenAI\n", + "from langchain.retrievers.you_retriever import YouRetriever\n", "\n", "yr = YouRetriever()\n", "qa 
= RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"map_reduce\", retriever=yr)" diff --git a/docs/docs/integrations/retrievers/zep_memorystore.ipynb b/docs/docs/integrations/retrievers/zep_memorystore.ipynb index 8938a36240d..8aeca6c1add 100644 --- a/docs/docs/integrations/retrievers/zep_memorystore.ipynb +++ b/docs/docs/integrations/retrievers/zep_memorystore.ipynb @@ -63,7 +63,7 @@ "from uuid import uuid4\n", "\n", "from langchain.memory import ZepMemory\n", - "from langchain.schema import HumanMessage, AIMessage\n", + "from langchain.schema import AIMessage, HumanMessage\n", "\n", "# Set this to your Zep server URL\n", "ZEP_API_URL = \"http://localhost:8000\"" @@ -294,7 +294,7 @@ ], "source": [ "from langchain.retrievers import ZepRetriever\n", - "from langchain.retrievers.zep import SearchType, SearchScope\n", + "from langchain.retrievers.zep import SearchScope, SearchType\n", "\n", "zep_retriever = ZepRetriever(\n", " session_id=session_id, # Ensure that you provide the session_id when instantiating the Retriever\n", diff --git a/docs/docs/integrations/text_embedding/baidu_qianfan_endpoint.ipynb b/docs/docs/integrations/text_embedding/baidu_qianfan_endpoint.ipynb index 783ab807859..ea930cd4120 100644 --- a/docs/docs/integrations/text_embedding/baidu_qianfan_endpoint.ipynb +++ b/docs/docs/integrations/text_embedding/baidu_qianfan_endpoint.ipynb @@ -60,10 +60,10 @@ ], "source": [ "\"\"\"For basic init and call\"\"\"\n", - "from langchain.embeddings import QianfanEmbeddingsEndpoint\n", - "\n", "import os\n", "\n", + "from langchain.embeddings import QianfanEmbeddingsEndpoint\n", + "\n", "os.environ[\"QIANFAN_AK\"] = \"your_ak\"\n", "os.environ[\"QIANFAN_SK\"] = \"your_sk\"\n", "\n", diff --git a/docs/docs/integrations/text_embedding/gradient.ipynb b/docs/docs/integrations/text_embedding/gradient.ipynb index f6ecef779a0..63426b56dcd 100644 --- a/docs/docs/integrations/text_embedding/gradient.ipynb +++ b/docs/docs/integrations/text_embedding/gradient.ipynb @@ -41,8 +41,8 @@ "metadata": {}, "outputs": [], "source": [ - "from getpass import getpass\n", "import os\n", + "from getpass import getpass\n", "\n", "if not os.environ.get(\"GRADIENT_ACCESS_TOKEN\", None):\n", " # Access token under https://auth.gradient.ai/select-workspace\n", diff --git a/docs/docs/integrations/text_embedding/open_clip.ipynb b/docs/docs/integrations/text_embedding/open_clip.ipynb index c1e1ba9ed81..eb5a2e4910c 100644 --- a/docs/docs/integrations/text_embedding/open_clip.ipynb +++ b/docs/docs/integrations/text_embedding/open_clip.ipynb @@ -84,8 +84,8 @@ "outputs": [], "source": [ "import numpy as np\n", - "from PIL import Image\n", "from langchain_experimental.open_clip import OpenCLIPEmbeddings\n", + "from PIL import Image\n", "\n", "# Image URIs\n", "uri_dog = \"/Users/rlm/Desktop/test/dog.jpg\"\n", @@ -133,10 +133,11 @@ ], "source": [ "import os\n", - "import skimage\n", + "from collections import OrderedDict\n", + "\n", "import IPython.display\n", "import matplotlib.pyplot as plt\n", - "from collections import OrderedDict\n", + "import skimage\n", "\n", "%matplotlib inline\n", "%config InlineBackend.figure_format = 'retina'\n", diff --git a/docs/docs/integrations/text_embedding/sagemaker-endpoint.ipynb b/docs/docs/integrations/text_embedding/sagemaker-endpoint.ipynb index 08d22af40cb..17d746a4b7a 100644 --- a/docs/docs/integrations/text_embedding/sagemaker-endpoint.ipynb +++ b/docs/docs/integrations/text_embedding/sagemaker-endpoint.ipynb @@ -39,10 +39,11 @@ "metadata": {}, "outputs": [], "source": [ + 
"import json\n", "from typing import Dict, List\n", + "\n", "from langchain.embeddings import SagemakerEndpointEmbeddings\n", "from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler\n", - "import json\n", "\n", "\n", "class ContentHandler(EmbeddingsContentHandler):\n", diff --git a/docs/docs/integrations/text_embedding/self-hosted.ipynb b/docs/docs/integrations/text_embedding/self-hosted.ipynb index 47faa6bf2d7..55a99e442d4 100644 --- a/docs/docs/integrations/text_embedding/self-hosted.ipynb +++ b/docs/docs/integrations/text_embedding/self-hosted.ipynb @@ -18,12 +18,12 @@ }, "outputs": [], "source": [ + "import runhouse as rh\n", "from langchain.embeddings import (\n", " SelfHostedEmbeddings,\n", " SelfHostedHuggingFaceEmbeddings,\n", " SelfHostedHuggingFaceInstructEmbeddings,\n", - ")\n", - "import runhouse as rh" + ")" ] }, { @@ -115,7 +115,7 @@ " AutoModelForCausalLM,\n", " AutoTokenizer,\n", " pipeline,\n", - " ) # Must be inside the function in notebooks\n", + " )\n", "\n", " model_id = \"facebook/bart-base\"\n", " tokenizer = AutoTokenizer.from_pretrained(model_id)\n", diff --git a/docs/docs/integrations/toolkits/ainetwork.ipynb b/docs/docs/integrations/toolkits/ainetwork.ipynb index 8991cb7b3d7..f533c57d1bd 100644 --- a/docs/docs/integrations/toolkits/ainetwork.ipynb +++ b/docs/docs/integrations/toolkits/ainetwork.ipynb @@ -130,8 +130,8 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.chat_models import ChatOpenAI\n", - "from langchain.agents import initialize_agent, AgentType\n", "\n", "llm = ChatOpenAI(temperature=0)\n", "agent = initialize_agent(\n", diff --git a/docs/docs/integrations/toolkits/airbyte_structured_qa.ipynb b/docs/docs/integrations/toolkits/airbyte_structured_qa.ipynb index 0c8b48854a1..0e7d82404e8 100644 --- a/docs/docs/integrations/toolkits/airbyte_structured_qa.ipynb +++ b/docs/docs/integrations/toolkits/airbyte_structured_qa.ipynb @@ -26,11 +26,11 @@ "outputs": [], "source": [ "import os\n", - "import pandas as pd\n", "\n", - "from langchain.document_loaders.airbyte import AirbyteStripeLoader\n", - "from langchain.chat_models.openai import ChatOpenAI\n", + "import pandas as pd\n", "from langchain.agents import AgentType, create_pandas_dataframe_agent\n", + "from langchain.chat_models.openai import ChatOpenAI\n", + "from langchain.document_loaders.airbyte import AirbyteStripeLoader\n", "\n", "stream_name = \"customers\"\n", "config = {\n", diff --git a/docs/docs/integrations/toolkits/amadeus.ipynb b/docs/docs/integrations/toolkits/amadeus.ipynb index 91fc4bb9e18..7d0a118f7a7 100644 --- a/docs/docs/integrations/toolkits/amadeus.ipynb +++ b/docs/docs/integrations/toolkits/amadeus.ipynb @@ -81,8 +81,8 @@ }, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain.agents import initialize_agent, AgentType" + "from langchain.agents import AgentType, initialize_agent\n", + "from langchain.llms import OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/azure_cognitive_services.ipynb b/docs/docs/integrations/toolkits/azure_cognitive_services.ipynb index de9d16c6567..fb04f4985ff 100644 --- a/docs/docs/integrations/toolkits/azure_cognitive_services.ipynb +++ b/docs/docs/integrations/toolkits/azure_cognitive_services.ipynb @@ -105,8 +105,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain.agents import initialize_agent, AgentType" + "from langchain.agents import AgentType, 
initialize_agent\n", + "from langchain.llms import OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/clickup.ipynb b/docs/docs/integrations/toolkits/clickup.ipynb index c0eb7e026fa..05fe240368c 100644 --- a/docs/docs/integrations/toolkits/clickup.ipynb +++ b/docs/docs/integrations/toolkits/clickup.ipynb @@ -17,9 +17,8 @@ "%autoreload 2\n", "from datetime import datetime\n", "\n", - "from langchain.agents.agent_toolkits.clickup.toolkit import ClickupToolkit\n", - "\n", "from langchain.agents import AgentType, initialize_agent\n", + "from langchain.agents.agent_toolkits.clickup.toolkit import ClickupToolkit\n", "from langchain.llms import OpenAI\n", "from langchain.utilities.clickup import ClickupAPIWrapper" ] diff --git a/docs/docs/integrations/toolkits/csv.ipynb b/docs/docs/integrations/toolkits/csv.ipynb index ecf04cc1b43..effe3a73ae9 100644 --- a/docs/docs/integrations/toolkits/csv.ipynb +++ b/docs/docs/integrations/toolkits/csv.ipynb @@ -20,10 +20,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.agents.agent_types import AgentType\n", - "\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.llms import OpenAI\n", "from langchain_experimental.agents.agent_toolkits import create_csv_agent" ] }, diff --git a/docs/docs/integrations/toolkits/document_comparison_toolkit.ipynb b/docs/docs/integrations/toolkits/document_comparison_toolkit.ipynb index 7ff7083a115..a9edf0ecaed 100644 --- a/docs/docs/integrations/toolkits/document_comparison_toolkit.ipynb +++ b/docs/docs/integrations/toolkits/document_comparison_toolkit.ipynb @@ -19,15 +19,14 @@ "metadata": {}, "outputs": [], "source": [ - "from pydantic import BaseModel, Field\n", - "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.agents import Tool\n", + "from langchain.chains import RetrievalQA\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.document_loaders import PyPDFLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import FAISS\n", - "from langchain.document_loaders import PyPDFLoader\n", - "from langchain.chains import RetrievalQA" + "from pydantic import BaseModel, Field" ] }, { @@ -83,8 +82,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import initialize_agent\n", - "from langchain.agents import AgentType" + "from langchain.agents import AgentType, initialize_agent" ] }, { diff --git a/docs/docs/integrations/toolkits/github.ipynb b/docs/docs/integrations/toolkits/github.ipynb index aec05c95051..a14a5542cf2 100644 --- a/docs/docs/integrations/toolkits/github.ipynb +++ b/docs/docs/integrations/toolkits/github.ipynb @@ -104,8 +104,8 @@ "outputs": [], "source": [ "import os\n", - "from langchain.agents import AgentType\n", - "from langchain.agents import initialize_agent\n", + "\n", + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.agents.agent_toolkits.github.toolkit import GitHubToolkit\n", "from langchain.llms import OpenAI\n", "from langchain.utilities.github import GitHubAPIWrapper" @@ -241,9 +241,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.tools import DuckDuckGoSearchRun\n", "from langchain.agents import Tool\n", "from langchain.chat_models import ChatOpenAI\n", + "from langchain.tools import DuckDuckGoSearchRun\n", "\n", "tools = []\n", "unwanted_tools = 
[\"Get Issue\", \"Delete File\", \"Create File\", \"Create Pull Request\"]\n", diff --git a/docs/docs/integrations/toolkits/gitlab.ipynb b/docs/docs/integrations/toolkits/gitlab.ipynb index 3c9a955e38f..3edb840ef2a 100644 --- a/docs/docs/integrations/toolkits/gitlab.ipynb +++ b/docs/docs/integrations/toolkits/gitlab.ipynb @@ -97,8 +97,8 @@ "outputs": [], "source": [ "import os\n", - "from langchain.agents import AgentType\n", - "from langchain.agents import initialize_agent\n", + "\n", + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.agents.agent_toolkits.gitlab.toolkit import GitLabToolkit\n", "from langchain.llms import OpenAI\n", "from langchain.utilities.gitlab import GitLabAPIWrapper" diff --git a/docs/docs/integrations/toolkits/gmail.ipynb b/docs/docs/integrations/toolkits/gmail.ipynb index 9ae8fc2b8d4..98db1f41c6b 100644 --- a/docs/docs/integrations/toolkits/gmail.ipynb +++ b/docs/docs/integrations/toolkits/gmail.ipynb @@ -118,8 +118,8 @@ }, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain.agents import initialize_agent, AgentType" + "from langchain.agents import AgentType, initialize_agent\n", + "from langchain.llms import OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/google_drive.ipynb b/docs/docs/integrations/toolkits/google_drive.ipynb index 49875b4d42d..e89c5b99fea 100644 --- a/docs/docs/integrations/toolkits/google_drive.ipynb +++ b/docs/docs/integrations/toolkits/google_drive.ipynb @@ -98,8 +98,8 @@ }, "outputs": [], "source": [ - "from langchain_googledrive.utilities.google_drive import GoogleDriveAPIWrapper\n", "from langchain_googledrive.tools.google_drive.tool import GoogleDriveSearchTool\n", + "from langchain_googledrive.utilities.google_drive import GoogleDriveAPIWrapper\n", "\n", "# By default, search only in the filename.\n", "tool = GoogleDriveSearchTool(\n", @@ -170,8 +170,8 @@ }, "outputs": [], "source": [ + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.llms import OpenAI\n", - "from langchain.agents import initialize_agent, AgentType\n", "\n", "llm = OpenAI(temperature=0)\n", "agent = initialize_agent(\n", diff --git a/docs/docs/integrations/toolkits/jira.ipynb b/docs/docs/integrations/toolkits/jira.ipynb index 39480eeb588..ff977eab229 100644 --- a/docs/docs/integrations/toolkits/jira.ipynb +++ b/docs/docs/integrations/toolkits/jira.ipynb @@ -48,8 +48,8 @@ "outputs": [], "source": [ "import os\n", - "from langchain.agents import AgentType\n", - "from langchain.agents import initialize_agent\n", + "\n", + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.agents.agent_toolkits.jira.toolkit import JiraToolkit\n", "from langchain.llms import OpenAI\n", "from langchain.utilities.jira import JiraAPIWrapper" diff --git a/docs/docs/integrations/toolkits/json.ipynb b/docs/docs/integrations/toolkits/json.ipynb index dc841ed1b25..b826916c7e4 100644 --- a/docs/docs/integrations/toolkits/json.ipynb +++ b/docs/docs/integrations/toolkits/json.ipynb @@ -33,7 +33,6 @@ "outputs": [], "source": [ "import yaml\n", - "\n", "from langchain.agents import create_json_agent\n", "from langchain.agents.agent_toolkits import JsonToolkit\n", "from langchain.llms.openai import OpenAI\n", diff --git a/docs/docs/integrations/toolkits/multion.ipynb b/docs/docs/integrations/toolkits/multion.ipynb index 7bde3fdd190..0dc0a1ee9b0 100644 --- a/docs/docs/integrations/toolkits/multion.ipynb +++ b/docs/docs/integrations/toolkits/multion.ipynb @@ -28,7 +28,6 @@ 
"source": [ "from langchain.agents.agent_toolkits import MultionToolkit\n", "\n", - "\n", "toolkit = MultionToolkit()\n", "\n", "toolkit" @@ -80,8 +79,8 @@ }, "outputs": [], "source": [ + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.llms import OpenAI\n", - "from langchain.agents import initialize_agent, AgentType\n", "\n", "llm = OpenAI(temperature=0)\n", "from langchain.agents.agent_toolkits import MultionToolkit\n", diff --git a/docs/docs/integrations/toolkits/office365.ipynb b/docs/docs/integrations/toolkits/office365.ipynb index c22f2a047e8..c8178bb3245 100644 --- a/docs/docs/integrations/toolkits/office365.ipynb +++ b/docs/docs/integrations/toolkits/office365.ipynb @@ -93,8 +93,8 @@ }, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain.agents import initialize_agent, AgentType" + "from langchain.agents import AgentType, initialize_agent\n", + "from langchain.llms import OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/openapi.ipynb b/docs/docs/integrations/toolkits/openapi.ipynb index bebe8088def..fa7e5e98304 100644 --- a/docs/docs/integrations/toolkits/openapi.ipynb +++ b/docs/docs/integrations/toolkits/openapi.ipynb @@ -42,6 +42,7 @@ "outputs": [], "source": [ "import os\n", + "\n", "import yaml" ] }, @@ -262,8 +263,8 @@ } ], "source": [ - "from langchain.llms.openai import OpenAI\n", "from langchain.agents.agent_toolkits.openapi import planner\n", + "from langchain.llms.openai import OpenAI\n", "\n", "llm = OpenAI(model_name=\"gpt-4\", temperature=0.0)" ] diff --git a/docs/docs/integrations/toolkits/openapi_nla.ipynb b/docs/docs/integrations/toolkits/openapi_nla.ipynb index eeae9df5303..212299e14c9 100644 --- a/docs/docs/integrations/toolkits/openapi_nla.ipynb +++ b/docs/docs/integrations/toolkits/openapi_nla.ipynb @@ -25,10 +25,10 @@ }, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain.requests import Requests\n", "from langchain.agents import AgentType, initialize_agent\n", - "from langchain.agents.agent_toolkits import NLAToolkit" + "from langchain.agents.agent_toolkits import NLAToolkit\n", + "from langchain.llms import OpenAI\n", + "from langchain.requests import Requests" ] }, { diff --git a/docs/docs/integrations/toolkits/pandas.ipynb b/docs/docs/integrations/toolkits/pandas.ipynb index dba345893db..66af16ef8f1 100644 --- a/docs/docs/integrations/toolkits/pandas.ipynb +++ b/docs/docs/integrations/toolkits/pandas.ipynb @@ -19,9 +19,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent\n", + "from langchain.agents.agent_types import AgentType\n", "from langchain.chat_models import ChatOpenAI\n", - "from langchain.agents.agent_types import AgentType" + "from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent" ] }, { @@ -31,8 +31,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", "import pandas as pd\n", + "from langchain.llms import OpenAI\n", "\n", "df = pd.read_csv(\"titanic.csv\")" ] diff --git a/docs/docs/integrations/toolkits/playwright.ipynb b/docs/docs/integrations/toolkits/playwright.ipynb index 3091039e2aa..241fe8dca60 100644 --- a/docs/docs/integrations/toolkits/playwright.ipynb +++ b/docs/docs/integrations/toolkits/playwright.ipynb @@ -205,7 +205,7 @@ }, "outputs": [], "source": [ - "from langchain.agents import initialize_agent, AgentType\n", + "from langchain.agents import AgentType, 
initialize_agent\n", "from langchain.chat_models import ChatAnthropic\n", "\n", "llm = ChatAnthropic(temperature=0) # or any other LLM, e.g., ChatOpenAI(), OpenAI()\n", diff --git a/docs/docs/integrations/toolkits/powerbi.ipynb b/docs/docs/integrations/toolkits/powerbi.ipynb index 41380c80f51..c74aa2d4116 100644 --- a/docs/docs/integrations/toolkits/powerbi.ipynb +++ b/docs/docs/integrations/toolkits/powerbi.ipynb @@ -37,11 +37,10 @@ }, "outputs": [], "source": [ - "from langchain.agents.agent_toolkits import create_pbi_agent\n", - "from langchain.agents.agent_toolkits import PowerBIToolkit\n", - "from langchain.utilities.powerbi import PowerBIDataset\n", + "from azure.identity import DefaultAzureCredential\n", + "from langchain.agents.agent_toolkits import PowerBIToolkit, create_pbi_agent\n", "from langchain.chat_models import ChatOpenAI\n", - "from azure.identity import DefaultAzureCredential" + "from langchain.utilities.powerbi import PowerBIDataset" ] }, { diff --git a/docs/docs/integrations/toolkits/python.ipynb b/docs/docs/integrations/toolkits/python.ipynb index e6bea2a73d5..2569a278170 100644 --- a/docs/docs/integrations/toolkits/python.ipynb +++ b/docs/docs/integrations/toolkits/python.ipynb @@ -19,11 +19,11 @@ }, "outputs": [], "source": [ - "from langchain_experimental.agents.agent_toolkits import create_python_agent\n", - "from langchain_experimental.tools import PythonREPLTool\n", - "from langchain.llms.openai import OpenAI\n", "from langchain.agents.agent_types import AgentType\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.llms.openai import OpenAI\n", + "from langchain_experimental.agents.agent_toolkits import create_python_agent\n", + "from langchain_experimental.tools import PythonREPLTool" ] }, { diff --git a/docs/docs/integrations/toolkits/spark.ipynb b/docs/docs/integrations/toolkits/spark.ipynb index a79937680b3..341d4edee09 100644 --- a/docs/docs/integrations/toolkits/spark.ipynb +++ b/docs/docs/integrations/toolkits/spark.ipynb @@ -80,8 +80,8 @@ ], "source": [ "from langchain.llms import OpenAI\n", - "from pyspark.sql import SparkSession\n", "from langchain_experimental.agents.agent_toolkits import create_spark_dataframe_agent\n", + "from pyspark.sql import SparkSession\n", "\n", "spark = SparkSession.builder.getOrCreate()\n", "csv_file_path = \"titanic.csv\"\n", @@ -332,9 +332,10 @@ "metadata": {}, "outputs": [], "source": [ + "import os\n", + "\n", "from langchain.agents import create_spark_dataframe_agent\n", "from langchain.llms import OpenAI\n", - "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = \"...input your openai api key here...\"\n", "\n", diff --git a/docs/docs/integrations/toolkits/sql_database.ipynb b/docs/docs/integrations/toolkits/sql_database.ipynb index d67c66eda76..51e356e2c86 100644 --- a/docs/docs/integrations/toolkits/sql_database.ipynb +++ b/docs/docs/integrations/toolkits/sql_database.ipynb @@ -36,9 +36,9 @@ "source": [ "from langchain.agents import create_sql_agent\n", "from langchain.agents.agent_toolkits import SQLDatabaseToolkit\n", - "from langchain.sql_database import SQLDatabase\n", + "from langchain.agents.agent_types import AgentType\n", "from langchain.llms.openai import OpenAI\n", - "from langchain.agents.agent_types import AgentType" + "from langchain.sql_database import SQLDatabase" ] }, { diff --git a/docs/docs/integrations/toolkits/vectorstore.ipynb b/docs/docs/integrations/toolkits/vectorstore.ipynb index ea137a56c1b..c22bd96972b 100644 --- 
a/docs/docs/integrations/toolkits/vectorstore.ipynb +++ b/docs/docs/integrations/toolkits/vectorstore.ipynb @@ -28,9 +28,9 @@ "outputs": [], "source": [ "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.vectorstores import Chroma\n", - "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.llms import OpenAI\n", + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.vectorstores import Chroma\n", "\n", "llm = OpenAI(temperature=0)" ] @@ -110,9 +110,9 @@ "outputs": [], "source": [ "from langchain.agents.agent_toolkits import (\n", - " create_vectorstore_agent,\n", - " VectorStoreToolkit,\n", " VectorStoreInfo,\n", + " VectorStoreToolkit,\n", + " create_vectorstore_agent,\n", ")\n", "\n", "vectorstore_info = VectorStoreInfo(\n", @@ -229,9 +229,9 @@ "outputs": [], "source": [ "from langchain.agents.agent_toolkits import (\n", - " create_vectorstore_router_agent,\n", - " VectorStoreRouterToolkit,\n", " VectorStoreInfo,\n", + " VectorStoreRouterToolkit,\n", + " create_vectorstore_router_agent,\n", ")" ] }, diff --git a/docs/docs/integrations/toolkits/xorbits.ipynb b/docs/docs/integrations/toolkits/xorbits.ipynb index 83800e147c2..c5f8331f888 100644 --- a/docs/docs/integrations/toolkits/xorbits.ipynb +++ b/docs/docs/integrations/toolkits/xorbits.ipynb @@ -35,9 +35,8 @@ "outputs": [], "source": [ "import xorbits.pandas as pd\n", - "\n", - "from langchain_experimental.agents.agent_toolkits import create_xorbits_agent\n", - "from langchain.llms import OpenAI" + "from langchain.llms import OpenAI\n", + "from langchain_experimental.agents.agent_toolkits import create_xorbits_agent" ] }, { @@ -381,7 +380,6 @@ ], "source": [ "import xorbits.numpy as np\n", - "\n", "from langchain.agents import create_xorbits_agent\n", "from langchain.llms import OpenAI\n", "\n", diff --git a/docs/docs/integrations/tools/arxiv.ipynb b/docs/docs/integrations/tools/arxiv.ipynb index efe5e1152c4..2210cc2a740 100644 --- a/docs/docs/integrations/tools/arxiv.ipynb +++ b/docs/docs/integrations/tools/arxiv.ipynb @@ -36,8 +36,8 @@ }, "outputs": [], "source": [ + "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.chat_models import ChatOpenAI\n", - "from langchain.agents import load_tools, initialize_agent, AgentType\n", "\n", "llm = ChatOpenAI(temperature=0.0)\n", "tools = load_tools(\n", diff --git a/docs/docs/integrations/tools/awslambda.ipynb b/docs/docs/integrations/tools/awslambda.ipynb index 30a9fb968d3..f22a5f3c73e 100644 --- a/docs/docs/integrations/tools/awslambda.ipynb +++ b/docs/docs/integrations/tools/awslambda.ipynb @@ -61,8 +61,8 @@ }, "outputs": [], "source": [ + "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.llms import OpenAI\n", - "from langchain.agents import load_tools, initialize_agent, AgentType\n", "\n", "llm = OpenAI(temperature=0)\n", "\n", diff --git a/docs/docs/integrations/tools/bash.ipynb b/docs/docs/integrations/tools/bash.ipynb index 523b9d856e3..c5c51a5bc36 100644 --- a/docs/docs/integrations/tools/bash.ipynb +++ b/docs/docs/integrations/tools/bash.ipynb @@ -144,9 +144,8 @@ } ], "source": [ + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.chat_models import ChatOpenAI\n", - "from langchain.agents import initialize_agent\n", - "from langchain.agents import AgentType\n", "\n", "llm = ChatOpenAI(temperature=0)\n", "\n", diff --git a/docs/docs/integrations/tools/bearly.ipynb 
b/docs/docs/integrations/tools/bearly.ipynb index 02fc5795bd9..b99d7a3513e 100644 --- a/docs/docs/integrations/tools/bearly.ipynb +++ b/docs/docs/integrations/tools/bearly.ipynb @@ -27,9 +27,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.chat_models import ChatOpenAI\n", - "from langchain.tools import BearlyInterpreterTool\n", - "from langchain.agents import initialize_agent, AgentType" + "from langchain.tools import BearlyInterpreterTool" ] }, { diff --git a/docs/docs/integrations/tools/chatgpt_plugins.ipynb b/docs/docs/integrations/tools/chatgpt_plugins.ipynb index 3b81ca5b67d..4f57a6cc11b 100644 --- a/docs/docs/integrations/tools/chatgpt_plugins.ipynb +++ b/docs/docs/integrations/tools/chatgpt_plugins.ipynb @@ -21,9 +21,8 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.chat_models import ChatOpenAI\n", - "from langchain.agents import load_tools, initialize_agent\n", - "from langchain.agents import AgentType\n", "from langchain.tools import AIPluginTool" ] }, diff --git a/docs/docs/integrations/tools/dalle_image_generator.ipynb b/docs/docs/integrations/tools/dalle_image_generator.ipynb index 6a66aae71fb..c5c0db01baf 100644 --- a/docs/docs/integrations/tools/dalle_image_generator.ipynb +++ b/docs/docs/integrations/tools/dalle_image_generator.ipynb @@ -28,9 +28,10 @@ }, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", "import os\n", "\n", + "from langchain.llms import OpenAI\n", + "\n", "os.environ[\"OPENAI_API_KEY\"] = \"\"" ] }, @@ -48,10 +49,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.utilities.dalle_image_generator import DallEAPIWrapper\n", - "from langchain.prompts import PromptTemplate\n", "from langchain.chains import LLMChain\n", "from langchain.llms import OpenAI\n", + "from langchain.prompts import PromptTemplate\n", + "from langchain.utilities.dalle_image_generator import DallEAPIWrapper\n", "\n", "llm = OpenAI(temperature=0.9)\n", "prompt = PromptTemplate(\n", @@ -153,8 +154,7 @@ } ], "source": [ - "from langchain.agents import load_tools\n", - "from langchain.agents import initialize_agent\n", + "from langchain.agents import initialize_agent, load_tools\n", "\n", "tools = load_tools([\"dalle-image-generator\"])\n", "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)\n", diff --git a/docs/docs/integrations/tools/e2b_data_analysis.ipynb b/docs/docs/integrations/tools/e2b_data_analysis.ipynb index 07c8f29d0be..e92e70783e1 100644 --- a/docs/docs/integrations/tools/e2b_data_analysis.ipynb +++ b/docs/docs/integrations/tools/e2b_data_analysis.ipynb @@ -56,9 +56,10 @@ "outputs": [], "source": [ "import os\n", + "\n", + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.tools import E2BDataAnalysisTool\n", - "from langchain.agents import initialize_agent, AgentType\n", "\n", "os.environ[\"E2B_API_KEY\"] = \"\"\n", "os.environ[\"OPENAI_API_KEY\"] = \"\"" diff --git a/docs/docs/integrations/tools/edenai_tools.ipynb b/docs/docs/integrations/tools/edenai_tools.ipynb index ea8a6e6afb3..5d356ed43fe 100644 --- a/docs/docs/integrations/tools/edenai_tools.ipynb +++ b/docs/docs/integrations/tools/edenai_tools.ipynb @@ -49,13 +49,13 @@ "outputs": [], "source": [ "from langchain.tools.edenai import (\n", - " EdenAiSpeechToTextTool,\n", - " EdenAiTextToSpeechTool,\n", " 
EdenAiExplicitImageTool,\n", " EdenAiObjectDetectionTool,\n", " EdenAiParsingIDTool,\n", " EdenAiParsingInvoiceTool,\n", + " EdenAiSpeechToTextTool,\n", " EdenAiTextModerationTool,\n", + " EdenAiTextToSpeechTool,\n", ")" ] }, @@ -65,8 +65,8 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.llms import EdenAI\n", - "from langchain.agents import initialize_agent, AgentType\n", "\n", "llm = EdenAI(\n", " feature=\"text\", provider=\"openai\", params={\"temperature\": 0.2, \"max_tokens\": 250}\n", diff --git a/docs/docs/integrations/tools/eleven_labs_tts.ipynb b/docs/docs/integrations/tools/eleven_labs_tts.ipynb index 55a9d3fd808..8d9d55be8f3 100644 --- a/docs/docs/integrations/tools/eleven_labs_tts.ipynb +++ b/docs/docs/integrations/tools/eleven_labs_tts.ipynb @@ -126,8 +126,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain.agents import initialize_agent, AgentType, load_tools" + "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langchain.llms import OpenAI" ] }, { diff --git a/docs/docs/integrations/tools/filesystem.ipynb b/docs/docs/integrations/tools/filesystem.ipynb index f09de3114bf..7183e20794d 100644 --- a/docs/docs/integrations/tools/filesystem.ipynb +++ b/docs/docs/integrations/tools/filesystem.ipynb @@ -26,9 +26,10 @@ }, "outputs": [], "source": [ - "from langchain.agents.agent_toolkits import FileManagementToolkit\n", "from tempfile import TemporaryDirectory\n", "\n", + "from langchain.agents.agent_toolkits import FileManagementToolkit\n", + "\n", "# We'll make a temporary directory to avoid clutter\n", "working_directory = TemporaryDirectory()" ] diff --git a/docs/docs/integrations/tools/google_drive.ipynb b/docs/docs/integrations/tools/google_drive.ipynb index 3a820e548d6..2392dbaf746 100644 --- a/docs/docs/integrations/tools/google_drive.ipynb +++ b/docs/docs/integrations/tools/google_drive.ipynb @@ -98,8 +98,8 @@ }, "outputs": [], "source": [ - "from langchain.utilities.google_drive import GoogleDriveAPIWrapper\n", "from langchain.tools.google_drive.tool import GoogleDriveSearchTool\n", + "from langchain.utilities.google_drive import GoogleDriveAPIWrapper\n", "\n", "# By default, search only in the filename.\n", "tool = GoogleDriveSearchTool(\n", @@ -170,8 +170,8 @@ }, "outputs": [], "source": [ + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.llms import OpenAI\n", - "from langchain.agents import initialize_agent, AgentType\n", "\n", "llm = OpenAI(temperature=0)\n", "agent = initialize_agent(\n", diff --git a/docs/docs/integrations/tools/google_scholar.ipynb b/docs/docs/integrations/tools/google_scholar.ipynb index 5f132b35c63..539ee755fcb 100644 --- a/docs/docs/integrations/tools/google_scholar.ipynb +++ b/docs/docs/integrations/tools/google_scholar.ipynb @@ -37,9 +37,10 @@ "metadata": {}, "outputs": [], "source": [ + "import os\n", + "\n", "from langchain.tools.google_scholar import GoogleScholarQueryRun\n", - "from langchain.utilities.google_scholar import GoogleScholarAPIWrapper\n", - "import os" + "from langchain.utilities.google_scholar import GoogleScholarAPIWrapper" ] }, { diff --git a/docs/docs/integrations/tools/google_serper.ipynb b/docs/docs/integrations/tools/google_serper.ipynb index 5f519443be6..fd28cf90193 100644 --- a/docs/docs/integrations/tools/google_serper.ipynb +++ b/docs/docs/integrations/tools/google_serper.ipynb @@ -158,10 +158,9 @@ } ], "source": [ - "from 
langchain.utilities import GoogleSerperAPIWrapper\n", + "from langchain.agents import AgentType, Tool, initialize_agent\n", "from langchain.llms.openai import OpenAI\n", - "from langchain.agents import initialize_agent, Tool\n", - "from langchain.agents import AgentType\n", + "from langchain.utilities import GoogleSerperAPIWrapper\n", "\n", "llm = OpenAI(temperature=0)\n", "search = GoogleSerperAPIWrapper()\n", diff --git a/docs/docs/integrations/tools/gradio_tools.ipynb b/docs/docs/integrations/tools/gradio_tools.ipynb index 54b7f1c565d..8201086b17d 100644 --- a/docs/docs/integrations/tools/gradio_tools.ipynb +++ b/docs/docs/integrations/tools/gradio_tools.ipynb @@ -175,15 +175,14 @@ } ], "source": [ - "from langchain.agents import initialize_agent\n", - "from langchain.llms import OpenAI\n", "from gradio_tools.tools import (\n", - " StableDiffusionTool,\n", " ImageCaptioningTool,\n", " StableDiffusionPromptGeneratorTool,\n", + " StableDiffusionTool,\n", " TextToVideoTool,\n", ")\n", - "\n", + "from langchain.agents import initialize_agent\n", + "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferMemory\n", "\n", "llm = OpenAI(temperature=0)\n", diff --git a/docs/docs/integrations/tools/graphql.ipynb b/docs/docs/integrations/tools/graphql.ipynb index d45da8870b5..c2099fa54a2 100644 --- a/docs/docs/integrations/tools/graphql.ipynb +++ b/docs/docs/integrations/tools/graphql.ipynb @@ -43,8 +43,8 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.llms import OpenAI\n", - "from langchain.agents import load_tools, initialize_agent, AgentType\n", "\n", "llm = OpenAI(temperature=0)\n", "\n", diff --git a/docs/docs/integrations/tools/human_tools.ipynb b/docs/docs/integrations/tools/human_tools.ipynb index 6d6dbcf3a77..4c8bc99c0db 100644 --- a/docs/docs/integrations/tools/human_tools.ipynb +++ b/docs/docs/integrations/tools/human_tools.ipynb @@ -18,10 +18,9 @@ }, "outputs": [], "source": [ + "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.llms import OpenAI\n", - "from langchain.agents import load_tools, initialize_agent\n", - "from langchain.agents import AgentType\n", "\n", "llm = ChatOpenAI(temperature=0.0)\n", "math_llm = OpenAI(temperature=0.0)\n", diff --git a/docs/docs/integrations/tools/lemonai.ipynb b/docs/docs/integrations/tools/lemonai.ipynb index 79cf1d6e170..1a8a38c2f1c 100644 --- a/docs/docs/integrations/tools/lemonai.ipynb +++ b/docs/docs/integrations/tools/lemonai.ipynb @@ -125,8 +125,9 @@ "outputs": [], "source": [ "import os\n", - "from lemonai import execute_workflow\n", - "from langchain.llms import OpenAI" + "\n", + "from langchain.llms import OpenAI\n", + "from lemonai import execute_workflow" ] }, { diff --git a/docs/docs/integrations/tools/memorize.ipynb b/docs/docs/integrations/tools/memorize.ipynb index 5dbf66a8af8..49e5b2d3a9c 100644 --- a/docs/docs/integrations/tools/memorize.ipynb +++ b/docs/docs/integrations/tools/memorize.ipynb @@ -25,9 +25,10 @@ "outputs": [], "source": [ "import os\n", - "from langchain.llms import GradientLLM\n", - "from langchain.chains import LLMChain\n", + "\n", "from langchain.agents import AgentExecutor, AgentType, initialize_agent, load_tools\n", + "from langchain.chains import LLMChain\n", + "from langchain.llms import GradientLLM\n", "from langchain.memory import ConversationBufferMemory" ] }, @@ -47,7 +48,6 @@ "source": [ "from 
getpass import getpass\n", "\n", - "\n", "if not os.environ.get(\"GRADIENT_ACCESS_TOKEN\", None):\n", " # Access token under https://auth.gradient.ai/select-workspace\n", " os.environ[\"GRADIENT_ACCESS_TOKEN\"] = getpass(\"gradient.ai access token:\")\n", diff --git a/docs/docs/integrations/tools/metaphor_search.ipynb b/docs/docs/integrations/tools/metaphor_search.ipynb index 54e832f09df..1564f90d08c 100644 --- a/docs/docs/integrations/tools/metaphor_search.ipynb +++ b/docs/docs/integrations/tools/metaphor_search.ipynb @@ -66,8 +66,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import tool\n", - "from typing import List" + "from typing import List\n", + "\n", + "from langchain.agents import tool" ] }, { @@ -383,7 +384,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import initialize_agent, AgentType\n", + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.tools import MetaphorSearchResults\n", "\n", diff --git a/docs/docs/integrations/tools/openweathermap.ipynb b/docs/docs/integrations/tools/openweathermap.ipynb index c4932dbf38a..65dccc24c09 100644 --- a/docs/docs/integrations/tools/openweathermap.ipynb +++ b/docs/docs/integrations/tools/openweathermap.ipynb @@ -27,9 +27,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.utilities import OpenWeatherMapAPIWrapper\n", "import os\n", "\n", + "from langchain.utilities import OpenWeatherMapAPIWrapper\n", + "\n", "os.environ[\"OPENWEATHERMAP_API_KEY\"] = \"\"\n", "\n", "weather = OpenWeatherMapAPIWrapper()" @@ -80,10 +81,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain.agents import load_tools, initialize_agent, AgentType\n", "import os\n", "\n", + "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langchain.llms import OpenAI\n", + "\n", "os.environ[\"OPENAI_API_KEY\"] = \"\"\n", "os.environ[\"OPENWEATHERMAP_API_KEY\"] = \"\"\n", "\n", diff --git a/docs/docs/integrations/tools/sceneXplain.ipynb b/docs/docs/integrations/tools/sceneXplain.ipynb index 511e3416081..7b08f8f50a2 100644 --- a/docs/docs/integrations/tools/sceneXplain.ipynb +++ b/docs/docs/integrations/tools/sceneXplain.ipynb @@ -51,7 +51,6 @@ "source": [ "from langchain.tools import SceneXplainTool\n", "\n", - "\n", "tool = SceneXplainTool()" ] }, @@ -95,8 +94,8 @@ } ], "source": [ - "from langchain.llms import OpenAI\n", "from langchain.agents import initialize_agent\n", + "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferMemory\n", "\n", "llm = OpenAI(temperature=0)\n", diff --git a/docs/docs/integrations/tools/search_tools.ipynb b/docs/docs/integrations/tools/search_tools.ipynb index d8e25cd2294..d762385b416 100644 --- a/docs/docs/integrations/tools/search_tools.ipynb +++ b/docs/docs/integrations/tools/search_tools.ipynb @@ -21,9 +21,7 @@ }, "outputs": [], "source": [ - "from langchain.agents import load_tools\n", - "from langchain.agents import initialize_agent\n", - "from langchain.agents import AgentType\n", + "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.llms import OpenAI" ] }, diff --git a/docs/docs/integrations/tools/searchapi.ipynb b/docs/docs/integrations/tools/searchapi.ipynb index 5aac68eb666..978b7dd380a 100644 --- a/docs/docs/integrations/tools/searchapi.ipynb +++ b/docs/docs/integrations/tools/searchapi.ipynb @@ -124,10 +124,9 @@ } ], "source": [ - "from 
langchain.utilities import SearchApiAPIWrapper\n", + "from langchain.agents import AgentType, Tool, initialize_agent\n", "from langchain.llms.openai import OpenAI\n", - "from langchain.agents import initialize_agent, Tool\n", - "from langchain.agents import AgentType\n", + "from langchain.utilities import SearchApiAPIWrapper\n", "\n", "llm = OpenAI(temperature=0)\n", "search = SearchApiAPIWrapper()\n", diff --git a/docs/docs/integrations/tools/searx_search.ipynb b/docs/docs/integrations/tools/searx_search.ipynb index ccd87ce9374..70246cb8b43 100644 --- a/docs/docs/integrations/tools/searx_search.ipynb +++ b/docs/docs/integrations/tools/searx_search.ipynb @@ -22,6 +22,7 @@ "outputs": [], "source": [ "import pprint\n", + "\n", "from langchain.utilities import SearxSearchWrapper" ] }, diff --git a/docs/docs/integrations/tools/tavily_search.ipynb b/docs/docs/integrations/tools/tavily_search.ipynb index 1ffc1d254dd..196329c59d5 100644 --- a/docs/docs/integrations/tools/tavily_search.ipynb +++ b/docs/docs/integrations/tools/tavily_search.ipynb @@ -80,10 +80,11 @@ "source": [ "# libraries\n", "import os\n", - "from langchain.utilities.tavily_search import TavilySearchAPIWrapper\n", - "from langchain.agents import initialize_agent, AgentType\n", + "\n", + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.tools.tavily_search import TavilySearchResults\n", + "from langchain.utilities.tavily_search import TavilySearchAPIWrapper\n", "\n", "# set up API key\n", "os.environ[\"TAVILY_API_KEY\"] = \"...\"\n", diff --git a/docs/docs/integrations/tools/yahoo_finance_news.ipynb b/docs/docs/integrations/tools/yahoo_finance_news.ipynb index 4c0797d4969..b6c9c8d6596 100644 --- a/docs/docs/integrations/tools/yahoo_finance_news.ipynb +++ b/docs/docs/integrations/tools/yahoo_finance_news.ipynb @@ -52,11 +52,10 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.chat_models import ChatOpenAI\n", - "from langchain.agents import initialize_agent, AgentType\n", "from langchain.tools.yahoo_finance_news import YahooFinanceNewsTool\n", "\n", - "\n", "llm = ChatOpenAI(temperature=0.0)\n", "tools = [YahooFinanceNewsTool()]\n", "agent_chain = initialize_agent(\n", diff --git a/docs/docs/integrations/tools/zapier.ipynb b/docs/docs/integrations/tools/zapier.ipynb index 9327df83bbc..2bb7a4902ef 100644 --- a/docs/docs/integrations/tools/zapier.ipynb +++ b/docs/docs/integrations/tools/zapier.ipynb @@ -60,10 +60,9 @@ }, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain.agents import initialize_agent\n", + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.agents.agent_toolkits import ZapierToolkit\n", - "from langchain.agents import AgentType\n", + "from langchain.llms import OpenAI\n", "from langchain.utilities.zapier import ZapierNLAWrapper" ] }, @@ -161,8 +160,8 @@ }, "outputs": [], "source": [ + "from langchain.chains import LLMChain, SimpleSequentialChain, TransformChain\n", "from langchain.llms import OpenAI\n", - "from langchain.chains import LLMChain, TransformChain, SimpleSequentialChain\n", "from langchain.prompts import PromptTemplate\n", "from langchain.tools.zapier.tool import ZapierNLARunAction\n", "from langchain.utilities.zapier import ZapierNLAWrapper" diff --git a/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb b/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb index 
5a05d6dcd04..2d45c73aec7 100644 --- a/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb +++ b/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb @@ -62,8 +62,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "activeloop_token = getpass.getpass(\"activeloop token:\")\n", diff --git a/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb b/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb index 4b290682cb6..70a1c5be045 100644 --- a/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb +++ b/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb @@ -20,21 +20,21 @@ }, { "cell_type": "markdown", - "source": [ - "After the instance is up and running, follow these steps to split documents, get embeddings, connect to the alibaba cloud opensearch instance, index documents, and perform vector retrieval." - ], "metadata": { "collapsed": false - } + }, + "source": [ + "After the instance is up and running, follow these steps to split documents, get embeddings, connect to the alibaba cloud opensearch instance, index documents, and perform vector retrieval." + ] }, { "cell_type": "markdown", - "source": [ - "We need to install the following Python packages first." - ], "metadata": { "collapsed": false - } + }, + "source": [ + "We need to install the following Python packages first." + ] }, { "cell_type": "code", @@ -47,29 +47,29 @@ }, { "cell_type": "markdown", - "source": [ - "We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key." - ], "metadata": { "collapsed": false - } + }, + "source": [ + "We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key." 
+ ] }, { "cell_type": "code", "execution_count": null, - "outputs": [], - "source": [ - "import os\n", - "import getpass\n", - "\n", - "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" - ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } - } + }, + "outputs": [], + "source": [ + "import getpass\n", + "import os\n", + "\n", + "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" + ] }, { "cell_type": "code", diff --git a/docs/docs/integrations/vectorstores/annoy.ipynb b/docs/docs/integrations/vectorstores/annoy.ipynb index 11b7a47ab6f..f1915aead7d 100644 --- a/docs/docs/integrations/vectorstores/annoy.ipynb +++ b/docs/docs/integrations/vectorstores/annoy.ipynb @@ -495,6 +495,7 @@ "outputs": [], "source": [ "import uuid\n", + "\n", "from annoy import AnnoyIndex\n", "from langchain.docstore.document import Document\n", "from langchain.docstore.in_memory import InMemoryDocstore\n", diff --git a/docs/docs/integrations/vectorstores/astradb.ipynb b/docs/docs/integrations/vectorstores/astradb.ipynb index ea291af7f93..1777788e3a8 100644 --- a/docs/docs/integrations/vectorstores/astradb.ipynb +++ b/docs/docs/integrations/vectorstores/astradb.ipynb @@ -59,16 +59,15 @@ "\n", "from datasets import (\n", "    load_dataset,\n", - ") # if not present yet, run: pip install \"datasets==2.14.6\"\n", - "\n", - "from langchain.schema import Document\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.document_loaders import PyPDFLoader\n", - "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + ")  # if not present yet, run: pip install \"datasets==2.14.6\"\n", "from langchain.chat_models import ChatOpenAI\n", + "from langchain.document_loaders import PyPDFLoader\n", + "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.prompts import ChatPromptTemplate\n", + "from langchain.schema import Document\n", + "from langchain.schema.output_parser import StrOutputParser\n", "from langchain.schema.runnable import RunnablePassthrough\n", - "from langchain.schema.output_parser import StrOutputParser" + "from langchain.text_splitter import RecursiveCharacterTextSplitter" ] }, { diff --git a/docs/docs/integrations/vectorstores/async_faiss.ipynb b/docs/docs/integrations/vectorstores/async_faiss.ipynb index 969c3b31300..abec0d806a9 100644 --- a/docs/docs/integrations/vectorstores/async_faiss.ipynb +++ b/docs/docs/integrations/vectorstores/async_faiss.ipynb @@ -47,8 +47,8 @@ }, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "\n", @@ -65,10 +65,10 @@ }, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import FAISS\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import FAISS" ] }, { diff --git a/docs/docs/integrations/vectorstores/atlas.ipynb b/docs/docs/integrations/vectorstores/atlas.ipynb index a493402d392..a4c39a3c9ed 100644 --- a/docs/docs/integrations/vectorstores/atlas.ipynb +++ b/docs/docs/integrations/vectorstores/atlas.ipynb @@ -70,9 +70,10 @@ "outputs": [], "source": [ "import time\n", + "\n", + "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import SpacyTextSplitter\n", - "from langchain.vectorstores import 
AtlasDB\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import AtlasDB" ] }, { diff --git a/docs/docs/integrations/vectorstores/awadb.ipynb b/docs/docs/integrations/vectorstores/awadb.ipynb index 5a466d510e6..81dfd05c8f3 100644 --- a/docs/docs/integrations/vectorstores/awadb.ipynb +++ b/docs/docs/integrations/vectorstores/awadb.ipynb @@ -28,9 +28,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import AwaDB\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import AwaDB" ] }, { diff --git a/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb b/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb index 90afa9a8cef..16d247ea0d4 100644 --- a/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb +++ b/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb @@ -130,13 +130,13 @@ }, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores.azure_cosmos_db_vector_search import (\n", " AzureCosmosDBVectorSearch,\n", " CosmosDBSimilarityType,\n", ")\n", - "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.document_loaders import TextLoader\n", "\n", "SOURCE_FILE_NAME = \"../../modules/state_of_the_union.txt\"\n", "\n", diff --git a/docs/docs/integrations/vectorstores/azuresearch.ipynb b/docs/docs/integrations/vectorstores/azuresearch.ipynb index 8710beb7ad0..1ffb30e5c72 100644 --- a/docs/docs/integrations/vectorstores/azuresearch.ipynb +++ b/docs/docs/integrations/vectorstores/azuresearch.ipynb @@ -45,6 +45,7 @@ "outputs": [], "source": [ "import os\n", + "\n", "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.vectorstores.azuresearch import AzureSearch" ] @@ -301,11 +302,11 @@ "outputs": [], "source": [ "from azure.search.documents.indexes.models import (\n", + " ScoringProfile,\n", " SearchableField,\n", " SearchField,\n", " SearchFieldDataType,\n", " SimpleField,\n", - " ScoringProfile,\n", " TextWeights,\n", ")\n", "\n", @@ -449,14 +450,14 @@ "outputs": [], "source": [ "from azure.search.documents.indexes.models import (\n", + " FreshnessScoringFunction,\n", + " FreshnessScoringParameters,\n", + " ScoringProfile,\n", " SearchableField,\n", " SearchField,\n", " SearchFieldDataType,\n", " SimpleField,\n", - " ScoringProfile,\n", " TextWeights,\n", - " FreshnessScoringFunction,\n", - " FreshnessScoringParameters,\n", ")\n", "\n", "embeddings: OpenAIEmbeddings = OpenAIEmbeddings(deployment=model, chunk_size=1)\n", diff --git a/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb b/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb index 76f6898a3df..3bdda14ce15 100644 --- a/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb +++ b/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb @@ -56,8 +56,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"QIANFAN_AK\"] = getpass.getpass(\"Your Qianfan AK:\")\n", "os.environ[\"QIANFAN_SK\"] = getpass.getpass(\"Your Qianfan SK:\")" diff --git a/docs/docs/integrations/vectorstores/chroma.ipynb b/docs/docs/integrations/vectorstores/chroma.ipynb index 
72664764258..1d12a3da412 100644 --- a/docs/docs/integrations/vectorstores/chroma.ipynb +++ b/docs/docs/integrations/vectorstores/chroma.ipynb @@ -73,10 +73,10 @@ ], "source": [ "# import\n", + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import Chroma\n", - "from langchain.document_loaders import TextLoader\n", "\n", "# load the document and split it into chunks\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", @@ -256,8 +256,9 @@ ], "source": [ "# create the chroma client\n", - "import chromadb\n", "import uuid\n", + "\n", + "import chromadb\n", "from chromadb.config import Settings\n", "\n", "client = chromadb.HttpClient(settings=Settings(allow_reset=True))\n", @@ -357,6 +358,7 @@ "# get a token: https://platform.openai.com/account/api-keys\n", "\n", "from getpass import getpass\n", + "\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "\n", "OPENAI_API_KEY = getpass()" diff --git a/docs/docs/integrations/vectorstores/clarifai.ipynb b/docs/docs/integrations/vectorstores/clarifai.ipynb index 5b2756c5373..454872293ff 100644 --- a/docs/docs/integrations/vectorstores/clarifai.ipynb +++ b/docs/docs/integrations/vectorstores/clarifai.ipynb @@ -79,8 +79,8 @@ "outputs": [], "source": [ "# Import the required modules\n", - "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.document_loaders import TextLoader\n", + "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import Clarifai" ] }, diff --git a/docs/docs/integrations/vectorstores/clickhouse.ipynb b/docs/docs/integrations/vectorstores/clickhouse.ipynb index d4d6b99837c..6f18d02cb38 100644 --- a/docs/docs/integrations/vectorstores/clickhouse.ipynb +++ b/docs/docs/integrations/vectorstores/clickhouse.ipynb @@ -81,8 +81,8 @@ }, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "if not os.environ[\"OPENAI_API_KEY\"]:\n", " os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" @@ -303,8 +303,8 @@ } ], "source": [ - "from langchain.vectorstores import Clickhouse, ClickhouseSettings\n", "from langchain.document_loaders import TextLoader\n", + "from langchain.vectorstores import Clickhouse, ClickhouseSettings\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/dashvector.ipynb b/docs/docs/integrations/vectorstores/dashvector.ipynb index 2352cf6f275..b141d90a91b 100644 --- a/docs/docs/integrations/vectorstores/dashvector.ipynb +++ b/docs/docs/integrations/vectorstores/dashvector.ipynb @@ -57,19 +57,19 @@ "cell_type": "code", "execution_count": 1, "metadata": { - "pycharm": { - "name": "#%%\n", - "is_executing": true - }, "ExecuteTime": { "end_time": "2023-08-11T10:37:15.091585Z", "start_time": "2023-08-11T10:36:51.859753Z" + }, + "pycharm": { + "is_executing": true, + "name": "#%%\n" } }, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"DASHVECTOR_API_KEY\"] = getpass.getpass(\"DashVector API Key:\")\n", "os.environ[\"DASHSCOPE_API_KEY\"] = getpass.getpass(\"DashScope API Key:\")" @@ -90,13 +90,13 @@ "cell_type": "code", "execution_count": 1, "metadata": { - "pycharm": { - "name": "#%%\n", - "is_executing": true - }, "ExecuteTime": { "end_time": 
"2023-08-11T10:42:30.243460Z", "start_time": "2023-08-11T10:42:27.783785Z" + }, + "pycharm": { + "is_executing": true, + "name": "#%%\n" } }, "outputs": [], @@ -110,13 +110,13 @@ "cell_type": "code", "execution_count": 2, "metadata": { - "pycharm": { - "is_executing": true, - "name": "#%%\n" - }, "ExecuteTime": { "end_time": "2023-08-11T10:42:30.391580Z", "start_time": "2023-08-11T10:42:30.249021Z" + }, + "pycharm": { + "is_executing": true, + "name": "#%%\n" } }, "outputs": [], @@ -174,12 +174,12 @@ "cell_type": "code", "execution_count": 4, "metadata": { - "pycharm": { - "name": "#%%\n" - }, "ExecuteTime": { "end_time": "2023-08-11T10:42:51.641309Z", "start_time": "2023-08-11T10:42:51.132109Z" + }, + "pycharm": { + "name": "#%%\n" } }, "outputs": [ @@ -205,11 +205,11 @@ { "cell_type": "code", "execution_count": null, - "outputs": [], - "source": [], "metadata": { "collapsed": false - } + }, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/docs/docs/integrations/vectorstores/dingo.ipynb b/docs/docs/integrations/vectorstores/dingo.ipynb index d4a82d32400..bb1222063e2 100644 --- a/docs/docs/integrations/vectorstores/dingo.ipynb +++ b/docs/docs/integrations/vectorstores/dingo.ipynb @@ -53,8 +53,8 @@ } ], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] @@ -68,10 +68,10 @@ }, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Dingo\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import Dingo" ] }, { @@ -130,10 +130,10 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Dingo\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import Dingo" ] }, { diff --git a/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb b/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb index 99fc5bab9be..14a197b3a0a 100644 --- a/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb +++ b/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb @@ -73,10 +73,10 @@ }, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import DocArrayHnswSearch\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import DocArrayHnswSearch" ] }, { diff --git a/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb b/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb index ac12f01dcc2..e478b0353e8 100644 --- a/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb +++ b/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb @@ -70,10 +70,10 @@ }, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import DocArrayInMemorySearch\n", - "from langchain.document_loaders import TextLoader" + "from 
langchain.vectorstores import DocArrayInMemorySearch" ] }, { diff --git a/docs/docs/integrations/vectorstores/elasticsearch.ipynb b/docs/docs/integrations/vectorstores/elasticsearch.ipynb index 8af2decbb51..033f8143789 100644 --- a/docs/docs/integrations/vectorstores/elasticsearch.ipynb +++ b/docs/docs/integrations/vectorstores/elasticsearch.ipynb @@ -148,8 +148,8 @@ }, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] @@ -794,6 +794,7 @@ "outputs": [], "source": [ "from typing import Dict\n", + "\n", "from langchain.docstore.document import Document\n", "\n", "\n", diff --git a/docs/docs/integrations/vectorstores/epsilla.ipynb b/docs/docs/integrations/vectorstores/epsilla.ipynb index 25e081a47a0..330ac6ddf34 100644 --- a/docs/docs/integrations/vectorstores/epsilla.ipynb +++ b/docs/docs/integrations/vectorstores/epsilla.ipynb @@ -37,8 +37,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] diff --git a/docs/docs/integrations/vectorstores/faiss.ipynb b/docs/docs/integrations/vectorstores/faiss.ipynb index d960a32b239..5931d1a6cc4 100644 --- a/docs/docs/integrations/vectorstores/faiss.ipynb +++ b/docs/docs/integrations/vectorstores/faiss.ipynb @@ -45,8 +45,8 @@ }, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "\n", @@ -63,10 +63,10 @@ }, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import FAISS\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import FAISS" ] }, { diff --git a/docs/docs/integrations/vectorstores/hippo.ipynb b/docs/docs/integrations/vectorstores/hippo.ipynb index 950b5cc3ba7..c689c350f57 100644 --- a/docs/docs/integrations/vectorstores/hippo.ipynb +++ b/docs/docs/integrations/vectorstores/hippo.ipynb @@ -81,12 +81,13 @@ }, "outputs": [], "source": [ + "import os\n", + "\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores.hippo import Hippo\n", - "import os" + "from langchain.vectorstores.hippo import Hippo" ] }, { diff --git a/docs/docs/integrations/vectorstores/lancedb.ipynb b/docs/docs/integrations/vectorstores/lancedb.ipynb index 5d2735411c6..3a546a9498e 100644 --- a/docs/docs/integrations/vectorstores/lancedb.ipynb +++ b/docs/docs/integrations/vectorstores/lancedb.ipynb @@ -49,8 +49,8 @@ } ], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] diff --git a/docs/docs/integrations/vectorstores/llm_rails.ipynb b/docs/docs/integrations/vectorstores/llm_rails.ipynb index 24870d9176d..e9a66c2c60e 100644 --- a/docs/docs/integrations/vectorstores/llm_rails.ipynb +++ b/docs/docs/integrations/vectorstores/llm_rails.ipynb @@ -73,9 +73,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import LLMRails\n", "import os\n", "\n", + "from langchain.vectorstores import LLMRails\n", + 
"\n", "os.environ[\"LLM_RAILS_DATASTORE_ID\"] = \"Your datastore id \"\n", "os.environ[\"LLM_RAILS_API_KEY\"] = \"Your API Key\"\n", "\n", diff --git a/docs/docs/integrations/vectorstores/marqo.ipynb b/docs/docs/integrations/vectorstores/marqo.ipynb index 3b8f296cb7b..0e8a6ac02f2 100644 --- a/docs/docs/integrations/vectorstores/marqo.ipynb +++ b/docs/docs/integrations/vectorstores/marqo.ipynb @@ -38,9 +38,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Marqo\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import Marqo" ] }, { @@ -473,12 +473,12 @@ } ], "source": [ + "import getpass\n", + "import os\n", + "\n", "from langchain.chains import RetrievalQAWithSourcesChain\n", "from langchain.llms import OpenAI\n", "\n", - "import os\n", - "import getpass\n", - "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] }, diff --git a/docs/docs/integrations/vectorstores/matchingengine.ipynb b/docs/docs/integrations/vectorstores/matchingengine.ipynb index 0978ccdbbda..90161cec541 100644 --- a/docs/docs/integrations/vectorstores/matchingengine.ipynb +++ b/docs/docs/integrations/vectorstores/matchingengine.ipynb @@ -105,8 +105,8 @@ "source": [ "import json\n", "\n", - "from google.cloud import aiplatform\n", - "import tensorflow_hub as hub" + "import tensorflow_hub as hub\n", + "from google.cloud import aiplatform" ] }, { diff --git a/docs/docs/integrations/vectorstores/meilisearch.ipynb b/docs/docs/integrations/vectorstores/meilisearch.ipynb index 817aeeb5988..fac710acca8 100644 --- a/docs/docs/integrations/vectorstores/meilisearch.ipynb +++ b/docs/docs/integrations/vectorstores/meilisearch.ipynb @@ -88,8 +88,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"MEILI_HTTP_ADDR\"] = getpass.getpass(\"Meilisearch HTTP address and port:\")\n", "os.environ[\"MEILI_MASTER_KEY\"] = getpass.getpass(\"Meilisearch API Key:\")" @@ -126,9 +126,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import Meilisearch\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.vectorstores import Meilisearch\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -216,8 +216,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import Meilisearch\n", "import meilisearch\n", + "from langchain.vectorstores import Meilisearch\n", "\n", "client = meilisearch.Client(url=\"http://127.0.0.1:7700\", api_key=\"***\")\n", "vector_store = Meilisearch(\n", diff --git a/docs/docs/integrations/vectorstores/milvus.ipynb b/docs/docs/integrations/vectorstores/milvus.ipynb index 040fa5fdcc8..47d34dd7b41 100644 --- a/docs/docs/integrations/vectorstores/milvus.ipynb +++ b/docs/docs/integrations/vectorstores/milvus.ipynb @@ -51,8 +51,8 @@ } ], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] @@ -66,10 +66,10 @@ }, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Milvus\n", - "from langchain.document_loaders import TextLoader" + 
"from langchain.vectorstores import Milvus" ] }, { diff --git a/docs/docs/integrations/vectorstores/momento_vector_index.ipynb b/docs/docs/integrations/vectorstores/momento_vector_index.ipynb index 161f482f008..bae01cc3e52 100644 --- a/docs/docs/integrations/vectorstores/momento_vector_index.ipynb +++ b/docs/docs/integrations/vectorstores/momento_vector_index.ipynb @@ -66,8 +66,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", - "import getpass" + "import getpass\n", + "import os" ] }, { @@ -143,10 +143,10 @@ }, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import MomentoVectorIndex\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import MomentoVectorIndex" ] }, { @@ -375,8 +375,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.chains import RetrievalQA" + "from langchain.chains import RetrievalQA\n", + "from langchain.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/vectorstores/myscale.ipynb b/docs/docs/integrations/vectorstores/myscale.ipynb index 7ab41ef85ee..3e1119066b8 100644 --- a/docs/docs/integrations/vectorstores/myscale.ipynb +++ b/docs/docs/integrations/vectorstores/myscale.ipynb @@ -50,8 +50,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "os.environ[\"OPENAI_API_BASE\"] = getpass.getpass(\"OpenAI Base:\")\n", @@ -98,10 +98,10 @@ }, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import MyScale\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import MyScale" ] }, { @@ -219,8 +219,8 @@ } ], "source": [ - "from langchain.vectorstores import MyScale\n", "from langchain.document_loaders import TextLoader\n", + "from langchain.vectorstores import MyScale\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/neo4jvector.ipynb b/docs/docs/integrations/vectorstores/neo4jvector.ipynb index 36bd298f990..5928c407f6b 100644 --- a/docs/docs/integrations/vectorstores/neo4jvector.ipynb +++ b/docs/docs/integrations/vectorstores/neo4jvector.ipynb @@ -58,8 +58,8 @@ } ], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] @@ -72,11 +72,11 @@ }, "outputs": [], "source": [ + "from langchain.docstore.document import Document\n", + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Neo4jVector\n", - "from langchain.document_loaders import TextLoader\n", - "from langchain.docstore.document import Document" + "from langchain.vectorstores import Neo4jVector" ] }, { diff --git a/docs/docs/integrations/vectorstores/opensearch.ipynb b/docs/docs/integrations/vectorstores/opensearch.ipynb index 08dd2920ae1..fdaeef28d99 100644 --- 
a/docs/docs/integrations/vectorstores/opensearch.ipynb +++ b/docs/docs/integrations/vectorstores/opensearch.ipynb @@ -55,8 +55,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] @@ -68,10 +68,10 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import OpenSearchVectorSearch\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import OpenSearchVectorSearch" ] }, { @@ -318,20 +318,27 @@ }, { "cell_type": "markdown", - "source": [ - "## Using AOSS (Amazon OpenSearch Service Serverless)" - ], + "id": "5f590d35", "metadata": { "collapsed": false, "pycharm": { "name": "#%% md\n" } }, - "id": "5f590d35" + "source": [ + "## Using AOSS (Amazon OpenSearch Service Serverless)" + ] }, { "cell_type": "code", "execution_count": null, + "id": "de397be7", + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "# This is just an example to show how to use AOSS with faiss engine and efficient_filter, you need to set proper values.\n", @@ -361,28 +368,28 @@ " efficient_filter=filter,\n", " k=200,\n", ")" - ], + ] + }, + { + "cell_type": "markdown", + "id": "0aa012c8", + "metadata": { + "collapsed": false + }, + "source": [ + "## Using AOS (Amazon OpenSearch Service)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2c47e408", "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } }, - "id": "de397be7" - }, - { - "cell_type": "markdown", - "source": [ - "## Using AOS (Amazon OpenSearch Service)" - ], - "metadata": { - "collapsed": false - }, - "id": "0aa012c8" - }, - { - "cell_type": "code", - "execution_count": null, "outputs": [], "source": [ "# This is just an example to show how to use AOS , you need to set proper values.\n", @@ -410,14 +417,7 @@ " \"What is feature selection\",\n", " k=200,\n", ")" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - }, - "id": "2c47e408" + ] } ], "metadata": { diff --git a/docs/docs/integrations/vectorstores/pgembedding.ipynb b/docs/docs/integrations/vectorstores/pgembedding.ipynb index 34928df3bf7..7ef4f403d93 100644 --- a/docs/docs/integrations/vectorstores/pgembedding.ipynb +++ b/docs/docs/integrations/vectorstores/pgembedding.ipynb @@ -57,8 +57,8 @@ } ], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] @@ -81,11 +81,11 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.docstore.document import Document\n", + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import PGEmbedding\n", - "from langchain.document_loaders import TextLoader\n", - "from langchain.docstore.document import Document" + "from langchain.vectorstores import PGEmbedding" ] }, { diff --git a/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb b/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb index 9b60f7de172..b0be659cc3b 100644 --- a/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb +++ b/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb @@ 
-28,11 +28,12 @@ "outputs": [], "source": [ "from typing import List\n", + "\n", + "from langchain.docstore.document import Document\n", + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores.pgvecto_rs import PGVecto_rs\n", - "from langchain.document_loaders import TextLoader\n", - "from langchain.docstore.document import Document" + "from langchain.vectorstores.pgvecto_rs import PGVecto_rs" ] }, { diff --git a/docs/docs/integrations/vectorstores/pgvector.ipynb b/docs/docs/integrations/vectorstores/pgvector.ipynb index 6c149b5eb1e..f093f36252e 100644 --- a/docs/docs/integrations/vectorstores/pgvector.ipynb +++ b/docs/docs/integrations/vectorstores/pgvector.ipynb @@ -55,8 +55,8 @@ }, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] @@ -100,11 +100,11 @@ }, "outputs": [], "source": [ + "from langchain.docstore.document import Document\n", + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores.pgvector import PGVector\n", - "from langchain.document_loaders import TextLoader\n", - "from langchain.docstore.document import Document" + "from langchain.vectorstores.pgvector import PGVector" ] }, { diff --git a/docs/docs/integrations/vectorstores/pinecone.ipynb b/docs/docs/integrations/vectorstores/pinecone.ipynb index f960eb382a4..762b72e6fb9 100644 --- a/docs/docs/integrations/vectorstores/pinecone.ipynb +++ b/docs/docs/integrations/vectorstores/pinecone.ipynb @@ -35,8 +35,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"PINECONE_API_KEY\"] = getpass.getpass(\"Pinecone API Key:\")" ] @@ -79,10 +79,10 @@ }, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Pinecone\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import Pinecone" ] }, { diff --git a/docs/docs/integrations/vectorstores/qdrant.ipynb b/docs/docs/integrations/vectorstores/qdrant.ipynb index f5f381da12c..bfd17251295 100644 --- a/docs/docs/integrations/vectorstores/qdrant.ipynb +++ b/docs/docs/integrations/vectorstores/qdrant.ipynb @@ -1,742 +1,742 @@ { - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "683953b3", - "metadata": {}, - "source": [ - "# Qdrant\n", - "\n", - ">[Qdrant](https://qdrant.tech/documentation/) (read: quadrant ) is a vector similarity search engine. It provides a production-ready service with a convenient API to store, search, and manage points - vectors with an additional payload. `Qdrant` is tailored to extended filtering support. It makes it useful for all sorts of neural network or semantic-based matching, faceted search, and other applications.\n", - "\n", - "\n", - "This notebook shows how to use functionality related to the `Qdrant` vector database. \n", - "\n", - "There are various modes of how to run `Qdrant`, and depending on the chosen one, there will be some subtle differences. 
The options include:\n", - "- Local mode, no server required\n", - "- On-premise server deployment\n", - "- Qdrant Cloud\n", - "\n", - "See the [installation instructions](https://qdrant.tech/documentation/install/)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e03e8460-8f32-4d1f-bb93-4f7636a476fa", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "!pip install qdrant-client" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "7b2f111b-357a-4f42-9730-ef0603bdc1b5", - "metadata": {}, - "source": [ - "We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "082e7e8b-ac52-430c-98d6-8f0924457642", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "OpenAI API Key: ········\n" - ] - } - ], - "source": [ - "import os\n", - "import getpass\n", - "\n", - "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "aac9563e", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:51:22.282884Z", - "start_time": "2023-04-04T10:51:21.408077Z" - }, - "tags": [] - }, - "outputs": [], - "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Qdrant\n", - "from langchain.document_loaders import TextLoader" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "a3c3999a", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:51:22.520144Z", - "start_time": "2023-04-04T10:51:22.285826Z" - }, - "tags": [] - }, - "outputs": [], - "source": [ - "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", - "documents = loader.load()\n", - "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", - "docs = text_splitter.split_documents(documents)\n", - "\n", - "embeddings = OpenAIEmbeddings()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "eeead681", - "metadata": {}, - "source": [ - "## Connecting to Qdrant from LangChain\n", - "\n", - "### Local mode\n", - "\n", - "Python client allows you to run the same code in local mode without running the Qdrant server. That's great for testing things out and debugging or if you plan to store just a small amount of vectors. The embeddings might be fully kepy in memory or persisted on disk.\n", - "\n", - "#### In-memory\n", - "\n", - "For some testing scenarios and quick experiments, you may prefer to keep all the data in memory only, so it gets lost when the client is destroyed - usually at the end of your script/notebook." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "8429667e", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:51:22.525091Z", - "start_time": "2023-04-04T10:51:22.522015Z" - }, - "tags": [] - }, - "outputs": [], - "source": [ - "qdrant = Qdrant.from_documents(\n", - " docs,\n", - " embeddings,\n", - " location=\":memory:\", # Local mode with in-memory storage only\n", - " collection_name=\"my_documents\",\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "59f0b954", - "metadata": {}, - "source": [ - "#### On-disk storage\n", - "\n", - "Local mode, without using the Qdrant server, may also store your vectors on disk so they're persisted between runs." 
- ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "24b370e2", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:51:24.827567Z", - "start_time": "2023-04-04T10:51:22.529080Z" - }, - "tags": [] - }, - "outputs": [], - "source": [ - "qdrant = Qdrant.from_documents(\n", - " docs,\n", - " embeddings,\n", - " path=\"/tmp/local_qdrant\",\n", - " collection_name=\"my_documents\",\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "749658ce", - "metadata": {}, - "source": [ - "### On-premise server deployment\n", - "\n", - "No matter if you choose to launch Qdrant locally with [a Docker container](https://qdrant.tech/documentation/install/), or select a Kubernetes deployment with [the official Helm chart](https://github.com/qdrant/qdrant-helm), the way you're going to connect to such an instance will be identical. You'll need to provide a URL pointing to the service." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "91e7f5ce", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:51:24.832708Z", - "start_time": "2023-04-04T10:51:24.829905Z" - } - }, - "outputs": [], - "source": [ - "url = \"<---qdrant url here --->\"\n", - "qdrant = Qdrant.from_documents(\n", - " docs,\n", - " embeddings,\n", - " url=url,\n", - " prefer_grpc=True,\n", - " collection_name=\"my_documents\",\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "c9e21ce9", - "metadata": {}, - "source": [ - "### Qdrant Cloud\n", - "\n", - "If you prefer not to keep yourself busy with managing the infrastructure, you can choose to set up a fully-managed Qdrant cluster on [Qdrant Cloud](https://cloud.qdrant.io/). There is a free forever 1GB cluster included for trying out. The main difference with using a managed version of Qdrant is that you'll need to provide an API key to secure your deployment from being accessed publicly." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "dcf88bdf", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:51:24.837599Z", - "start_time": "2023-04-04T10:51:24.834690Z" - } - }, - "outputs": [], - "source": [ - "url = \"<---qdrant cloud cluster url here --->\"\n", - "api_key = \"<---api key here--->\"\n", - "qdrant = Qdrant.from_documents(\n", - " docs,\n", - " embeddings,\n", - " url=url,\n", - " prefer_grpc=True,\n", - " api_key=api_key,\n", - " collection_name=\"my_documents\",\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "93540013", - "metadata": {}, - "source": [ - "## Recreating the collection\n", - "\n", - "Both `Qdrant.from_texts` and `Qdrant.from_documents` methods are great to start using Qdrant with Langchain. In the previous versions the collection was recreated every time you called any of them. That behaviour has changed. Currently, the collection is going to be reused if it already exists. Setting `force_recreate` to `True` allows to remove the old collection and start from scratch." 
- ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "30a87570", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:51:24.854117Z", - "start_time": "2023-04-04T10:51:24.845385Z" - } - }, - "outputs": [], - "source": [ - "url = \"<---qdrant url here --->\"\n", - "qdrant = Qdrant.from_documents(\n", - " docs,\n", - " embeddings,\n", - " url=url,\n", - " prefer_grpc=True,\n", - " collection_name=\"my_documents\",\n", - " force_recreate=True,\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "1f9215c8", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T09:27:29.920258Z", - "start_time": "2023-04-04T09:27:29.913714Z" - } - }, - "source": [ - "## Similarity search\n", - "\n", - "The simplest scenario for using Qdrant vector store is to perform a similarity search. Under the hood, our query will be encoded with the `embedding_function` and used to find similar documents in Qdrant collection." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "a8c513ab", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:51:25.204469Z", - "start_time": "2023-04-04T10:51:24.855618Z" - }, - "tags": [] - }, - "outputs": [], - "source": [ - "query = \"What did the president say about Ketanji Brown Jackson\"\n", - "found_docs = qdrant.similarity_search(query)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "fc516993", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:51:25.220984Z", - "start_time": "2023-04-04T10:51:25.213943Z" - }, - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", - "\n", - "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", - "\n", - "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", - "\n", - "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n" - ] - } - ], - "source": [ - "print(found_docs[0].page_content)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "1bda9bf5", - "metadata": {}, - "source": [ - "## Similarity search with score\n", - "\n", - "Sometimes we might want to perform the search, but also obtain a relevancy score to know how good is a particular result. \n", - "The returned distance score is cosine distance. Therefore, a lower score is better." 
- ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "8804a21d", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:51:25.631585Z", - "start_time": "2023-04-04T10:51:25.227384Z" - } - }, - "outputs": [], - "source": [ - "query = \"What did the president say about Ketanji Brown Jackson\"\n", - "found_docs = qdrant.similarity_search_with_score(query)" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "756a6887", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:51:25.642282Z", - "start_time": "2023-04-04T10:51:25.635947Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", - "\n", - "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", - "\n", - "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", - "\n", - "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n", - "\n", - "Score: 0.8153784913324512\n" - ] - } - ], - "source": [ - "document, score = found_docs[0]\n", - "print(document.page_content)\n", - "print(f\"\\nScore: {score}\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "525e3582", - "metadata": {}, - "source": [ - "### Metadata filtering\n", - "\n", - "Qdrant has an [extensive filtering system](https://qdrant.tech/documentation/concepts/filtering/) with rich type support. It is also possible to use the filters in Langchain, by passing an additional param to both the `similarity_search_with_score` and `similarity_search` methods." - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "1c2c58dc", - "metadata": {}, - "source": [ - "```python\n", - "from qdrant_client.http import models as rest\n", - "\n", - "query = \"What did the president say about Ketanji Brown Jackson\"\n", - "found_docs = qdrant.similarity_search_with_score(query, filter=rest.Filter(...))\n", - "```" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "c58c30bf", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:39:53.032744Z", - "start_time": "2023-04-04T10:39:53.028673Z" - } - }, - "source": [ - "## Maximum marginal relevance search (MMR)\n", - "\n", - "If you'd like to look up for some similar documents, but you'd also like to receive diverse results, MMR is method you should consider. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents." 
- ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "76810fb6", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:51:26.010947Z", - "start_time": "2023-04-04T10:51:25.647687Z" - } - }, - "outputs": [], - "source": [ - "query = \"What did the president say about Ketanji Brown Jackson\"\n", - "found_docs = qdrant.max_marginal_relevance_search(query, k=2, fetch_k=10)" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "80c6db11", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:51:26.016979Z", - "start_time": "2023-04-04T10:51:26.013329Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1. Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", - "\n", - "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", - "\n", - "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", - "\n", - "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. \n", - "\n", - "2. We can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n", - "\n", - "I recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n", - "\n", - "They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n", - "\n", - "Officer Mora was 27 years old. \n", - "\n", - "Officer Rivera was 22. \n", - "\n", - "Both Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n", - "\n", - "I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \n", - "\n", - "I’ve worked on these issues a long time. \n", - "\n", - "I know what works: Investing in crime prevention and community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. \n", - "\n" - ] - } - ], - "source": [ - "for i, doc in enumerate(found_docs):\n", - " print(f\"{i + 1}.\", doc.page_content, \"\\n\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "691a82d6", - "metadata": {}, - "source": [ - "## Qdrant as a Retriever\n", - "\n", - "Qdrant, as all the other vector stores, is a LangChain Retriever, by using cosine similarity. 
" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "9427195f", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:51:26.031451Z", - "start_time": "2023-04-04T10:51:26.018763Z" - } - }, - "outputs": [ - { - "data": { - "text/plain": [ - "VectorStoreRetriever(vectorstore=, search_type='similarity', search_kwargs={})" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "retriever = qdrant.as_retriever()\n", - "retriever" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "0c851b4f", - "metadata": {}, - "source": [ - "It might be also specified to use MMR as a search strategy, instead of similarity." - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "64348f1b", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:51:26.043909Z", - "start_time": "2023-04-04T10:51:26.034284Z" - } - }, - "outputs": [ - { - "data": { - "text/plain": [ - "VectorStoreRetriever(vectorstore=, search_type='mmr', search_kwargs={})" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "retriever = qdrant.as_retriever(search_type=\"mmr\")\n", - "retriever" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "f3c70c31", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T10:51:26.495652Z", - "start_time": "2023-04-04T10:51:26.046407Z" - } - }, - "outputs": [ - { - "data": { - "text/plain": [ - "Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../../state_of_the_union.txt'})" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "query = \"What did the president say about Ketanji Brown Jackson\"\n", - "retriever.get_relevant_documents(query)[0]" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "0358ecde", - "metadata": {}, - "source": [ - "## Customizing Qdrant\n", - "\n", - "There are some options to use an existing Qdrant collection within your Langchain application. In such cases you may need to define how to map Qdrant point into the Langchain `Document`.\n", - "\n", - "### Named vectors\n", - "\n", - "Qdrant supports [multiple vectors per point](https://qdrant.tech/documentation/concepts/collections/#collection-with-multiple-vectors) by named vectors. Langchain requires just a single embedding per document and, by default, uses a single vector. 
However, if you work with a collection created externally or want to have the named vector used, you can configure it by providing its name.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1f11adf8", - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "Qdrant.from_documents(\n", - " docs,\n", - " embeddings,\n", - " location=\":memory:\",\n", - " collection_name=\"my_documents_2\",\n", - " vector_name=\"custom_vector\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "b34f5230", - "metadata": { - "collapsed": false - }, - "source": [ - "As a Langchain user, you won't see any difference whether you use named vectors or not. Qdrant integration will handle the conversion under the hood." - ] - }, - { - "cell_type": "markdown", - "id": "b2350093", - "metadata": { - "collapsed": false - }, - "source": [ - "### Metadata\n", - "\n", - "Qdrant stores your vector embeddings along with the optional JSON-like payload. Payloads are optional, but since LangChain assumes the embeddings are generated from the documents, we keep the context data, so you can extract the original texts as well.\n", - "\n", - "By default, your document is going to be stored in the following payload structure:\n", - "\n", - "```json\n", - "{\n", - " \"page_content\": \"Lorem ipsum dolor sit amet\",\n", - " \"metadata\": {\n", - " \"foo\": \"bar\"\n", - " }\n", - "}\n", - "```\n", - "\n", - "You can, however, decide to use different keys for the page content and metadata. That's useful if you already have a collection that you'd like to reuse." - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "e4d6baf9", - "metadata": { - "ExecuteTime": { - "end_time": "2023-04-04T11:08:31.739141Z", - "start_time": "2023-04-04T11:08:30.229748Z" - } - }, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "Qdrant.from_documents(\n", - " docs,\n", - " embeddings,\n", - " location=\":memory:\",\n", - " collection_name=\"my_documents_2\",\n", - " content_payload_key=\"my_page_content_key\",\n", - " metadata_payload_key=\"my_meta\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2300e785", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "683953b3", + "metadata": {}, + "source": [ + "# Qdrant\n", + "\n", + ">[Qdrant](https://qdrant.tech/documentation/) (read: quadrant) is a vector similarity search engine. It provides a production-ready service with a convenient API to store, search, and manage points - vectors with an additional payload. `Qdrant` is tailored to extended filtering support. This makes it useful for all sorts of neural network or semantic-based matching, faceted search, and other applications.\n", + "\n", + "\n", + "This notebook shows how to use functionality related to the `Qdrant` vector database. \n", + "\n", + "There are various modes of running `Qdrant`, and depending on the chosen one, there will be some subtle differences. 
The options include:\n", + "- Local mode, no server required\n", + "- On-premise server deployment\n", + "- Qdrant Cloud\n", + "\n", + "See the [installation instructions](https://qdrant.tech/documentation/install/)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e03e8460-8f32-4d1f-bb93-4f7636a476fa", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "!pip install qdrant-client" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "7b2f111b-357a-4f42-9730-ef0603bdc1b5", + "metadata": {}, + "source": [ + "We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "082e7e8b-ac52-430c-98d6-8f0924457642", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key: ········\n" + ] + } + ], + "source": [ + "import getpass\n", + "import os\n", + "\n", + "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "aac9563e", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:51:22.282884Z", + "start_time": "2023-04-04T10:51:21.408077Z" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "from langchain.document_loaders import TextLoader\n", + "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.vectorstores import Qdrant" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "a3c3999a", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:51:22.520144Z", + "start_time": "2023-04-04T10:51:22.285826Z" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", + "documents = loader.load()\n", + "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", + "docs = text_splitter.split_documents(documents)\n", + "\n", + "embeddings = OpenAIEmbeddings()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "eeead681", + "metadata": {}, + "source": [ + "## Connecting to Qdrant from LangChain\n", + "\n", + "### Local mode\n", + "\n", + "The Python client allows you to run the same code in local mode without running the Qdrant server. That's great for testing things out and debugging or if you plan to store just a small number of vectors. The embeddings might be fully kept in memory or persisted on disk.\n", + "\n", + "#### In-memory\n", + "\n", + "For some testing scenarios and quick experiments, you may prefer to keep all the data in memory only, so it gets lost when the client is destroyed - usually at the end of your script/notebook." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "8429667e", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:51:22.525091Z", + "start_time": "2023-04-04T10:51:22.522015Z" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "qdrant = Qdrant.from_documents(\n", + " docs,\n", + " embeddings,\n", + " location=\":memory:\", # Local mode with in-memory storage only\n", + " collection_name=\"my_documents\",\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "59f0b954", + "metadata": {}, + "source": [ + "#### On-disk storage\n", + "\n", + "Local mode, without using the Qdrant server, may also store your vectors on disk so they're persisted between runs."
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "24b370e2", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:51:24.827567Z", + "start_time": "2023-04-04T10:51:22.529080Z" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "qdrant = Qdrant.from_documents(\n", + " docs,\n", + " embeddings,\n", + " path=\"/tmp/local_qdrant\",\n", + " collection_name=\"my_documents\",\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "749658ce", + "metadata": {}, + "source": [ + "### On-premise server deployment\n", + "\n", + "Whether you choose to launch Qdrant locally with [a Docker container](https://qdrant.tech/documentation/install/), or select a Kubernetes deployment with [the official Helm chart](https://github.com/qdrant/qdrant-helm), the way you connect to such an instance will be identical. You'll need to provide a URL pointing to the service." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "91e7f5ce", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:51:24.832708Z", + "start_time": "2023-04-04T10:51:24.829905Z" + } + }, + "outputs": [], + "source": [ + "url = \"<---qdrant url here --->\"\n", + "qdrant = Qdrant.from_documents(\n", + " docs,\n", + " embeddings,\n", + " url=url,\n", + " prefer_grpc=True,\n", + " collection_name=\"my_documents\",\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "c9e21ce9", + "metadata": {}, + "source": [ + "### Qdrant Cloud\n", + "\n", + "If you prefer not to keep yourself busy with managing the infrastructure, you can choose to set up a fully-managed Qdrant cluster on [Qdrant Cloud](https://cloud.qdrant.io/). There is a free forever 1GB cluster included for trying it out. The main difference with using a managed version of Qdrant is that you'll need to provide an API key to secure your deployment from being accessed publicly." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "dcf88bdf", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:51:24.837599Z", + "start_time": "2023-04-04T10:51:24.834690Z" + } + }, + "outputs": [], + "source": [ + "url = \"<---qdrant cloud cluster url here --->\"\n", + "api_key = \"<---api key here--->\"\n", + "qdrant = Qdrant.from_documents(\n", + " docs,\n", + " embeddings,\n", + " url=url,\n", + " prefer_grpc=True,\n", + " api_key=api_key,\n", + " collection_name=\"my_documents\",\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "93540013", + "metadata": {}, + "source": [ + "## Recreating the collection\n", + "\n", + "Both the `Qdrant.from_texts` and `Qdrant.from_documents` methods are a great way to start using Qdrant with Langchain. In previous versions, the collection was recreated every time you called either of them. That behaviour has changed. Currently, the collection is reused if it already exists. Setting `force_recreate` to `True` allows you to remove the old collection and start from scratch."
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "30a87570", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:51:24.854117Z", + "start_time": "2023-04-04T10:51:24.845385Z" + } + }, + "outputs": [], + "source": [ + "url = \"<---qdrant url here --->\"\n", + "qdrant = Qdrant.from_documents(\n", + " docs,\n", + " embeddings,\n", + " url=url,\n", + " prefer_grpc=True,\n", + " collection_name=\"my_documents\",\n", + " force_recreate=True,\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "1f9215c8", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T09:27:29.920258Z", + "start_time": "2023-04-04T09:27:29.913714Z" + } + }, + "source": [ + "## Similarity search\n", + "\n", + "The simplest scenario for using the Qdrant vector store is to perform a similarity search. Under the hood, our query will be encoded with the `embedding_function` and used to find similar documents in the Qdrant collection." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "a8c513ab", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:51:25.204469Z", + "start_time": "2023-04-04T10:51:24.855618Z" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "query = \"What did the president say about Ketanji Brown Jackson\"\n", + "found_docs = qdrant.similarity_search(query)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "fc516993", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:51:25.220984Z", + "start_time": "2023-04-04T10:51:25.213943Z" + }, + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", + "\n", + "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", + "\n", + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n" + ] + } + ], + "source": [ + "print(found_docs[0].page_content)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "1bda9bf5", + "metadata": {}, + "source": [ + "## Similarity search with score\n", + "\n", + "Sometimes we might want to perform the search, but also obtain a relevancy score to know how good a particular result is. \n", + "The returned distance score is cosine distance. Therefore, a lower score is better."
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "8804a21d", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:51:25.631585Z", + "start_time": "2023-04-04T10:51:25.227384Z" + } + }, + "outputs": [], + "source": [ + "query = \"What did the president say about Ketanji Brown Jackson\"\n", + "found_docs = qdrant.similarity_search_with_score(query)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "756a6887", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:51:25.642282Z", + "start_time": "2023-04-04T10:51:25.635947Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", + "\n", + "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", + "\n", + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n", + "\n", + "Score: 0.8153784913324512\n" + ] + } + ], + "source": [ + "document, score = found_docs[0]\n", + "print(document.page_content)\n", + "print(f\"\\nScore: {score}\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "525e3582", + "metadata": {}, + "source": [ + "### Metadata filtering\n", + "\n", + "Qdrant has an [extensive filtering system](https://qdrant.tech/documentation/concepts/filtering/) with rich type support. It is also possible to use the filters in Langchain, by passing an additional param to both the `similarity_search_with_score` and `similarity_search` methods." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "1c2c58dc", + "metadata": {}, + "source": [ + "```python\n", + "from qdrant_client.http import models as rest\n", + "\n", + "query = \"What did the president say about Ketanji Brown Jackson\"\n", + "found_docs = qdrant.similarity_search_with_score(query, filter=rest.Filter(...))\n", + "```" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "c58c30bf", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:39:53.032744Z", + "start_time": "2023-04-04T10:39:53.028673Z" + } + }, + "source": [ + "## Maximum marginal relevance search (MMR)\n", + "\n", + "If you'd like to look up some similar documents, but you'd also like to receive diverse results, MMR is a method you should consider. Maximal marginal relevance optimizes for similarity to the query AND diversity among selected documents."
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "76810fb6", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:51:26.010947Z", + "start_time": "2023-04-04T10:51:25.647687Z" + } + }, + "outputs": [], + "source": [ + "query = \"What did the president say about Ketanji Brown Jackson\"\n", + "found_docs = qdrant.max_marginal_relevance_search(query, k=2, fetch_k=10)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "80c6db11", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:51:26.016979Z", + "start_time": "2023-04-04T10:51:26.013329Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1. Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", + "\n", + "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", + "\n", + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. \n", + "\n", + "2. We can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n", + "\n", + "I recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n", + "\n", + "They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n", + "\n", + "Officer Mora was 27 years old. \n", + "\n", + "Officer Rivera was 22. \n", + "\n", + "Both Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n", + "\n", + "I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \n", + "\n", + "I’ve worked on these issues a long time. \n", + "\n", + "I know what works: Investing in crime prevention and community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety. \n", + "\n" + ] + } + ], + "source": [ + "for i, doc in enumerate(found_docs):\n", + " print(f\"{i + 1}.\", doc.page_content, \"\\n\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "691a82d6", + "metadata": {}, + "source": [ + "## Qdrant as a Retriever\n", + "\n", + "Qdrant, like all the other vector stores, is a LangChain Retriever, using cosine similarity. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "9427195f", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:51:26.031451Z", + "start_time": "2023-04-04T10:51:26.018763Z" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "VectorStoreRetriever(vectorstore=, search_type='similarity', search_kwargs={})" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "retriever = qdrant.as_retriever()\n", + "retriever" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "0c851b4f", + "metadata": {}, + "source": [ + "It might be also specified to use MMR as a search strategy, instead of similarity." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "64348f1b", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:51:26.043909Z", + "start_time": "2023-04-04T10:51:26.034284Z" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "VectorStoreRetriever(vectorstore=, search_type='mmr', search_kwargs={})" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "retriever = qdrant.as_retriever(search_type=\"mmr\")\n", + "retriever" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "f3c70c31", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T10:51:26.495652Z", + "start_time": "2023-04-04T10:51:26.046407Z" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../../state_of_the_union.txt'})" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "query = \"What did the president say about Ketanji Brown Jackson\"\n", + "retriever.get_relevant_documents(query)[0]" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "0358ecde", + "metadata": {}, + "source": [ + "## Customizing Qdrant\n", + "\n", + "There are some options to use an existing Qdrant collection within your Langchain application. In such cases you may need to define how to map Qdrant point into the Langchain `Document`.\n", + "\n", + "### Named vectors\n", + "\n", + "Qdrant supports [multiple vectors per point](https://qdrant.tech/documentation/concepts/collections/#collection-with-multiple-vectors) by named vectors. Langchain requires just a single embedding per document and, by default, uses a single vector. 
However, if you work with a collection that was created externally, or if you want to use a named vector, you can configure it by providing its name.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1f11adf8", + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "Qdrant.from_documents(\n", + " docs,\n", + " embeddings,\n", + " location=\":memory:\",\n", + " collection_name=\"my_documents_2\",\n", + " vector_name=\"custom_vector\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "b34f5230", + "metadata": { + "collapsed": false + }, + "source": [ + "As a LangChain user, you won't see any difference whether you use named vectors or not. The Qdrant integration handles the conversion under the hood." + ] + }, + { + "cell_type": "markdown", + "id": "b2350093", + "metadata": { + "collapsed": false + }, + "source": [ + "### Metadata\n", + "\n", + "Qdrant stores your vector embeddings along with an optional JSON-like payload. Payloads are optional, but since LangChain assumes the embeddings are generated from the documents, the context data is kept so you can extract the original texts as well.\n", + "\n", + "By default, your document is going to be stored in the following payload structure:\n", + "\n", + "```json\n", + "{\n", + " \"page_content\": \"Lorem ipsum dolor sit amet\",\n", + " \"metadata\": {\n", + " \"foo\": \"bar\"\n", + " }\n", + "}\n", + "```\n", + "\n", + "You can, however, decide to use different keys for the page content and metadata. That's useful if you already have a collection that you'd like to reuse." + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "e4d6baf9", + "metadata": { + "ExecuteTime": { + "end_time": "2023-04-04T11:08:31.739141Z", + "start_time": "2023-04-04T11:08:30.229748Z" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "Qdrant.from_documents(\n", + " docs,\n", + " embeddings,\n", + " location=\":memory:\",\n", + " collection_name=\"my_documents_2\",\n", + " content_payload_key=\"my_page_content_key\",\n", + " metadata_payload_key=\"my_meta\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2300e785", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 } diff --git a/docs/docs/integrations/vectorstores/redis.ipynb b/docs/docs/integrations/vectorstores/redis.ipynb index 4061c2a5f3d..643ded5eb0e 100644 --- a/docs/docs/integrations/vectorstores/redis.ipynb +++ b/docs/docs/integrations/vectorstores/redis.ipynb @@ -151,8 +151,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] diff --git a/docs/docs/integrations/vectorstores/rockset.ipynb b/docs/docs/integrations/vectorstores/rockset.ipynb index 09c4299983f..8a0406f5659 100644 --- a/docs/docs/integrations/vectorstores/rockset.ipynb +++ b/docs/docs/integrations/vectorstores/rockset.ipynb @@ -79,6 +79,7 @@ "outputs": [], "source": [ "import
os\n", + "\n", "import rockset\n", "\n", "ROCKSET_API_KEY = os.environ.get(\n", @@ -107,9 +108,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.document_loaders import TextLoader\n", "from langchain.vectorstores import Rockset\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", diff --git a/docs/docs/integrations/vectorstores/scann.ipynb b/docs/docs/integrations/vectorstores/scann.ipynb index 37ce6bb89e6..4797110993a 100644 --- a/docs/docs/integrations/vectorstores/scann.ipynb +++ b/docs/docs/integrations/vectorstores/scann.ipynb @@ -59,10 +59,10 @@ } ], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings import HuggingFaceEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import ScaNN\n", - "from langchain.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/singlestoredb.ipynb b/docs/docs/integrations/vectorstores/singlestoredb.ipynb index 85a0beff2f5..8dec60e1ada 100644 --- a/docs/docs/integrations/vectorstores/singlestoredb.ipynb +++ b/docs/docs/integrations/vectorstores/singlestoredb.ipynb @@ -32,8 +32,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "# We want to use OpenAIEmbeddings so we have to get the OpenAI API Key.\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" @@ -46,10 +46,10 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import SingleStoreDB\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import SingleStoreDB" ] }, { diff --git a/docs/docs/integrations/vectorstores/sklearn.ipynb b/docs/docs/integrations/vectorstores/sklearn.ipynb index 319ddf9a51d..8ecd5e724a9 100644 --- a/docs/docs/integrations/vectorstores/sklearn.ipynb +++ b/docs/docs/integrations/vectorstores/sklearn.ipynb @@ -60,10 +60,10 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import SKLearnVectorStore\n", - "from langchain.document_loaders import TextLoader\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/sqlitevss.ipynb b/docs/docs/integrations/vectorstores/sqlitevss.ipynb index 3a35f782ac7..eb8fcce9543 100644 --- a/docs/docs/integrations/vectorstores/sqlitevss.ipynb +++ b/docs/docs/integrations/vectorstores/sqlitevss.ipynb @@ -57,10 +57,10 @@ } ], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import SQLiteVSS\n", - "from langchain.document_loaders import TextLoader\n", "\n", "# load the document and split it into 
chunks\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", @@ -126,10 +126,10 @@ } ], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import SQLiteVSS\n", - "from langchain.document_loaders import TextLoader\n", "\n", "# load the document and split it into chunks\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", diff --git a/docs/docs/integrations/vectorstores/starrocks.ipynb b/docs/docs/integrations/vectorstores/starrocks.ipynb index f5554dbffe0..cad6ee7ced8 100644 --- a/docs/docs/integrations/vectorstores/starrocks.ipynb +++ b/docs/docs/integrations/vectorstores/starrocks.ipynb @@ -57,14 +57,13 @@ } ], "source": [ + "from langchain.chains import RetrievalQA\n", + "from langchain.document_loaders import DirectoryLoader, UnstructuredMarkdownLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.llms import OpenAI\n", + "from langchain.text_splitter import TokenTextSplitter\n", "from langchain.vectorstores import StarRocks\n", "from langchain.vectorstores.starrocks import StarRocksSettings\n", - "from langchain.text_splitter import TokenTextSplitter\n", - "from langchain.llms import OpenAI\n", - "from langchain.document_loaders import DirectoryLoader\n", - "from langchain.chains import RetrievalQA\n", - "from langchain.document_loaders import UnstructuredMarkdownLoader\n", "\n", "update_vectordb = False" ] diff --git a/docs/docs/integrations/vectorstores/supabase.ipynb b/docs/docs/integrations/vectorstores/supabase.ipynb index e6f0431c9d4..8416e1073aa 100644 --- a/docs/docs/integrations/vectorstores/supabase.ipynb +++ b/docs/docs/integrations/vectorstores/supabase.ipynb @@ -97,8 +97,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] @@ -154,9 +154,10 @@ "outputs": [], "source": [ "import os\n", - "from supabase.client import Client, create_client\n", + "\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.vectorstores import SupabaseVectorStore\n", + "from supabase.client import Client, create_client\n", "\n", "supabase_url = os.environ.get(\"SUPABASE_URL\")\n", "supabase_key = os.environ.get(\"SUPABASE_SERVICE_KEY\")\n", @@ -182,8 +183,8 @@ }, "outputs": [], "source": [ - "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.document_loaders import TextLoader\n", + "from langchain.text_splitter import CharacterTextSplitter\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/tencentvectordb.ipynb b/docs/docs/integrations/vectorstores/tencentvectordb.ipynb index fc0b867ac10..ef8e0667b10 100644 --- a/docs/docs/integrations/vectorstores/tencentvectordb.ipynb +++ b/docs/docs/integrations/vectorstores/tencentvectordb.ipynb @@ -33,11 +33,11 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.fake import FakeEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import TencentVectorDB\n", - "from langchain.vectorstores.tencentvectordb import ConnectionParams\n", - "from langchain.document_loaders import 
TextLoader" + "from langchain.vectorstores.tencentvectordb import ConnectionParams" ] }, { diff --git a/docs/docs/integrations/vectorstores/tigris.ipynb b/docs/docs/integrations/vectorstores/tigris.ipynb index c2119ac4704..4b89acdb79f 100644 --- a/docs/docs/integrations/vectorstores/tigris.ipynb +++ b/docs/docs/integrations/vectorstores/tigris.ipynb @@ -65,8 +65,8 @@ }, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "os.environ[\"TIGRIS_PROJECT\"] = getpass.getpass(\"Tigris Project Name:\")\n", @@ -85,10 +85,10 @@ }, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Tigris\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import Tigris" ] }, { diff --git a/docs/docs/integrations/vectorstores/timescalevector.ipynb b/docs/docs/integrations/vectorstores/timescalevector.ipynb index 3377216b2a4..bbd1017925d 100644 --- a/docs/docs/integrations/vectorstores/timescalevector.ipynb +++ b/docs/docs/integrations/vectorstores/timescalevector.ipynb @@ -73,7 +73,7 @@ "\n", "# Run export OPENAI_API_KEY=sk-YOUR_OPENAI_API_KEY...\n", "# Get openAI api key by reading local .env file\n", - "from dotenv import load_dotenv, find_dotenv\n", + "from dotenv import find_dotenv, load_dotenv\n", "\n", "_ = load_dotenv(find_dotenv())\n", "OPENAI_API_KEY = os.environ[\"OPENAI_API_KEY\"]" @@ -119,11 +119,12 @@ "outputs": [], "source": [ "from datetime import datetime, timedelta\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.text_splitter import CharacterTextSplitter\n", + "\n", + "from langchain.docstore.document import Document\n", "from langchain.document_loaders import TextLoader\n", "from langchain.document_loaders.json_loader import JSONLoader\n", - "from langchain.docstore.document import Document\n", + "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores.timescalevector import TimescaleVector" ] }, @@ -1258,9 +1259,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo\n", "\n", "# Give LLM info about the metadata fields\n", "metadata_field_info = [\n", diff --git a/docs/docs/integrations/vectorstores/typesense.ipynb b/docs/docs/integrations/vectorstores/typesense.ipynb index 7bb0c5caf51..89a64d3543e 100644 --- a/docs/docs/integrations/vectorstores/typesense.ipynb +++ b/docs/docs/integrations/vectorstores/typesense.ipynb @@ -63,8 +63,8 @@ }, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] @@ -84,10 +84,10 @@ }, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Typesense\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import Typesense" ] }, { 
diff --git a/docs/docs/integrations/vectorstores/usearch.ipynb b/docs/docs/integrations/vectorstores/usearch.ipynb index b4aefa2a1cc..a8098653c28 100644 --- a/docs/docs/integrations/vectorstores/usearch.ipynb +++ b/docs/docs/integrations/vectorstores/usearch.ipynb @@ -40,8 +40,8 @@ }, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] @@ -55,10 +55,10 @@ }, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import USearch\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import USearch" ] }, { diff --git a/docs/docs/integrations/vectorstores/vearch.ipynb b/docs/docs/integrations/vectorstores/vearch.ipynb index b4965ccafe1..7f6d0205ba5 100644 --- a/docs/docs/integrations/vectorstores/vearch.ipynb +++ b/docs/docs/integrations/vectorstores/vearch.ipynb @@ -19,8 +19,8 @@ "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from transformers import AutoModel, AutoTokenizer\n", "from langchain.vectorstores.vearch import Vearch\n", + "from transformers import AutoModel, AutoTokenizer\n", "\n", "# replace with your local model path\n", "model_path = \"/data/zhx/zhx/langchain-ChatGLM_new/chatglm2-6b\"\n", diff --git a/docs/docs/integrations/vectorstores/vectara.ipynb b/docs/docs/integrations/vectorstores/vectara.ipynb index 1de1bd00400..4d752e76f90 100644 --- a/docs/docs/integrations/vectorstores/vectara.ipynb +++ b/docs/docs/integrations/vectorstores/vectara.ipynb @@ -85,14 +85,13 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import FakeEmbeddings\n", - "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Vectara\n", + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.document_loaders import TextLoader\n", - "\n", + "from langchain.embeddings import FakeEmbeddings\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo" + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.vectorstores import Vectara" ] }, { diff --git a/docs/docs/integrations/vectorstores/vectorstores/semadb.ipynb b/docs/docs/integrations/vectorstores/vectorstores/semadb.ipynb index 93a392f359d..e08424f6a9b 100644 --- a/docs/docs/integrations/vectorstores/vectorstores/semadb.ipynb +++ b/docs/docs/integrations/vectorstores/vectorstores/semadb.ipynb @@ -61,8 +61,8 @@ } ], "source": [ - "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.document_loaders import TextLoader\n", + "from langchain.text_splitter import CharacterTextSplitter\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/weaviate.ipynb b/docs/docs/integrations/vectorstores/weaviate.ipynb index 03b9c933c6c..6f1a7b3fad0 100644 --- a/docs/docs/integrations/vectorstores/weaviate.ipynb +++ b/docs/docs/integrations/vectorstores/weaviate.ipynb @@ -74,8 +74,8 @@ "metadata": {}, "outputs": [], "source": 
[ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] @@ -119,10 +119,10 @@ }, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Weaviate\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import Weaviate" ] }, { @@ -563,8 +563,8 @@ } ], "source": [ - "from langchain.schema.runnable import RunnablePassthrough\n", "from langchain.schema.output_parser import StrOutputParser\n", + "from langchain.schema.runnable import RunnablePassthrough\n", "\n", "rag_chain = (\n", " {\"context\": retriever, \"question\": RunnablePassthrough()}\n", diff --git a/docs/docs/integrations/vectorstores/xata.ipynb b/docs/docs/integrations/vectorstores/xata.ipynb index 45617088679..2636e008b71 100644 --- a/docs/docs/integrations/vectorstores/xata.ipynb +++ b/docs/docs/integrations/vectorstores/xata.ipynb @@ -73,8 +73,8 @@ }, "outputs": [], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] @@ -107,9 +107,9 @@ }, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.document_loaders import TextLoader\n", "from langchain.vectorstores.xata import XataVectorStore" ] }, diff --git a/docs/docs/integrations/vectorstores/zep.ipynb b/docs/docs/integrations/vectorstores/zep.ipynb index 2ad8b1473d1..692c39101fb 100644 --- a/docs/docs/integrations/vectorstores/zep.ipynb +++ b/docs/docs/integrations/vectorstores/zep.ipynb @@ -146,9 +146,10 @@ "\n", "\n", "async def wait_for_ready(collection_name: str) -> None:\n", - " from zep_python import ZepClient\n", " import time\n", "\n", + " from zep_python import ZepClient\n", + "\n", " client = ZepClient(ZEP_API_URL, ZEP_API_KEY)\n", "\n", " while True:\n", diff --git a/docs/docs/integrations/vectorstores/zilliz.ipynb b/docs/docs/integrations/vectorstores/zilliz.ipynb index 6f4fb2edd30..fd3f75ca521 100644 --- a/docs/docs/integrations/vectorstores/zilliz.ipynb +++ b/docs/docs/integrations/vectorstores/zilliz.ipynb @@ -49,8 +49,8 @@ } ], "source": [ - "import os\n", "import getpass\n", + "import os\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] @@ -76,10 +76,10 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Milvus\n", - "from langchain.document_loaders import TextLoader" + "from langchain.vectorstores import Milvus" ] }, { diff --git a/docs/docs/langsmith/walkthrough.ipynb b/docs/docs/langsmith/walkthrough.ipynb index cd55696ff88..eb7df9f805e 100644 --- a/docs/docs/langsmith/walkthrough.ipynb +++ b/docs/docs/langsmith/walkthrough.ipynb @@ -333,12 +333,12 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.agents import AgentType, initialize_agent, load_tools, AgentExecutor\n", + "from langchain import hub\n", + "from langchain.agents import AgentExecutor, AgentType, initialize_agent, load_tools\n", "from 
langchain.agents.format_scratchpad import format_to_openai_function_messages\n", "from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n", + "from langchain.chat_models import ChatOpenAI\n", "from langchain.tools.render import format_tool_to_openai_function\n", - "from langchain import hub\n", "\n", "\n", "# Since chains can be stateful (e.g. they can have memory), we provide\n", @@ -496,6 +496,7 @@ ], "source": [ "import functools\n", + "\n", "from langchain.smith import (\n", " arun_on_dataset,\n", " run_on_dataset,\n", diff --git a/docs/docs/modules/agents/agent_types/chat_conversation_agent.ipynb b/docs/docs/modules/agents/agent_types/chat_conversation_agent.ipynb index 347b6a4b912..2ed15e8252d 100644 --- a/docs/docs/modules/agents/agent_types/chat_conversation_agent.ipynb +++ b/docs/docs/modules/agents/agent_types/chat_conversation_agent.ipynb @@ -20,12 +20,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import Tool\n", - "from langchain.agents import AgentType\n", - "from langchain.memory import ConversationBufferMemory\n", + "from langchain.agents import AgentType, Tool, initialize_agent\n", "from langchain.llms import OpenAI\n", - "from langchain.utilities import SerpAPIWrapper\n", - "from langchain.agents import initialize_agent" + "from langchain.memory import ConversationBufferMemory\n", + "from langchain.utilities import SerpAPIWrapper" ] }, { @@ -72,10 +70,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.tools.render import render_text_description\n", - "from langchain.agents.output_parsers import ReActSingleInputOutputParser\n", + "from langchain import hub\n", "from langchain.agents.format_scratchpad import format_log_to_str\n", - "from langchain import hub" + "from langchain.agents.output_parsers import ReActSingleInputOutputParser\n", + "from langchain.tools.render import render_text_description" ] }, { @@ -303,8 +301,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain import hub" + "from langchain import hub\n", + "from langchain.chat_models import ChatOpenAI" ] }, { @@ -348,8 +346,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents.output_parsers import JSONAgentOutputParser\n", - "from langchain.agents.format_scratchpad import format_log_to_messages" + "from langchain.agents.format_scratchpad import format_log_to_messages\n", + "from langchain.agents.output_parsers import JSONAgentOutputParser" ] }, { @@ -538,8 +536,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.memory import ConversationBufferMemory\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.memory import ConversationBufferMemory" ] }, { diff --git a/docs/docs/modules/agents/agent_types/openai_assistants.ipynb b/docs/docs/modules/agents/agent_types/openai_assistants.ipynb index b24d8edc53b..e5389ca96aa 100644 --- a/docs/docs/modules/agents/agent_types/openai_assistants.ipynb +++ b/docs/docs/modules/agents/agent_types/openai_assistants.ipynb @@ -77,7 +77,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.tools import E2BDataAnalysisTool, DuckDuckGoSearchRun\n", + "from langchain.tools import DuckDuckGoSearchRun, E2BDataAnalysisTool\n", "\n", "tools = [E2BDataAnalysisTool(api_key=\"...\"), DuckDuckGoSearchRun()]" ] diff --git a/docs/docs/modules/agents/agent_types/openai_functions_agent.ipynb 
b/docs/docs/modules/agents/agent_types/openai_functions_agent.ipynb index bd7decaae56..ffbfb1e9aed 100644 --- a/docs/docs/modules/agents/agent_types/openai_functions_agent.ipynb +++ b/docs/docs/modules/agents/agent_types/openai_functions_agent.ipynb @@ -41,7 +41,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import initialize_agent, AgentType, Tool\n", + "from langchain.agents import AgentType, Tool, initialize_agent\n", "from langchain.chains import LLMMathChain\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.utilities import SerpAPIWrapper, SQLDatabase\n", diff --git a/docs/docs/modules/agents/agent_types/openai_multi_functions_agent.ipynb b/docs/docs/modules/agents/agent_types/openai_multi_functions_agent.ipynb index 5a556f1e50d..e7cf779ffc7 100644 --- a/docs/docs/modules/agents/agent_types/openai_multi_functions_agent.ipynb +++ b/docs/docs/modules/agents/agent_types/openai_multi_functions_agent.ipynb @@ -23,10 +23,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.utilities import SerpAPIWrapper\n", - "from langchain.agents import initialize_agent, Tool\n", - "from langchain.agents import AgentType\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain.agents import AgentType, Tool, initialize_agent\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.utilities import SerpAPIWrapper" ] }, { diff --git a/docs/docs/modules/agents/agent_types/openai_tools.ipynb b/docs/docs/modules/agents/agent_types/openai_tools.ipynb index 5613da5e4d4..68e0ac8d0ed 100644 --- a/docs/docs/modules/agents/agent_types/openai_tools.ipynb +++ b/docs/docs/modules/agents/agent_types/openai_tools.ipynb @@ -27,14 +27,14 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import initialize_agent, AgentExecutor, AgentType, Tool\n", + "from langchain.agents import AgentExecutor, AgentType, Tool, initialize_agent\n", "from langchain.agents.format_scratchpad.openai_tools import (\n", " format_to_openai_tool_messages,\n", ")\n", "from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", - "from langchain.tools import DuckDuckGoSearchRun, BearlyInterpreterTool\n", + "from langchain.tools import BearlyInterpreterTool, DuckDuckGoSearchRun\n", "from langchain.tools.render import format_tool_to_openai_tool" ] }, diff --git a/docs/docs/modules/agents/agent_types/react.ipynb b/docs/docs/modules/agents/agent_types/react.ipynb index 15710bd53c0..e95adf33086 100644 --- a/docs/docs/modules/agents/agent_types/react.ipynb +++ b/docs/docs/modules/agents/agent_types/react.ipynb @@ -17,9 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import load_tools\n", - "from langchain.agents import initialize_agent\n", - "from langchain.agents import AgentType\n", + "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.llms import OpenAI" ] }, @@ -76,10 +74,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.tools.render import render_text_description\n", - "from langchain.agents.output_parsers import ReActSingleInputOutputParser\n", + "from langchain import hub\n", "from langchain.agents.format_scratchpad import format_log_to_str\n", - "from langchain import hub" + "from langchain.agents.output_parsers import ReActSingleInputOutputParser\n", + "from langchain.tools.render import 
render_text_description" ] }, { diff --git a/docs/docs/modules/agents/agent_types/react_docstore.ipynb b/docs/docs/modules/agents/agent_types/react_docstore.ipynb index 6cf5ed6dd90..1095e662f01 100644 --- a/docs/docs/modules/agents/agent_types/react_docstore.ipynb +++ b/docs/docs/modules/agents/agent_types/react_docstore.ipynb @@ -17,11 +17,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain.docstore import Wikipedia\n", - "from langchain.agents import initialize_agent, Tool\n", - "from langchain.agents import AgentType\n", + "from langchain.agents import AgentType, Tool, initialize_agent\n", "from langchain.agents.react.base import DocstoreExplorer\n", + "from langchain.docstore import Wikipedia\n", + "from langchain.llms import OpenAI\n", "\n", "docstore = DocstoreExplorer(Wikipedia())\n", "tools = [\n", diff --git a/docs/docs/modules/agents/agent_types/self_ask_with_search.ipynb b/docs/docs/modules/agents/agent_types/self_ask_with_search.ipynb index f253be7a5c0..6fef9f6be36 100644 --- a/docs/docs/modules/agents/agent_types/self_ask_with_search.ipynb +++ b/docs/docs/modules/agents/agent_types/self_ask_with_search.ipynb @@ -17,10 +17,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.agents import AgentType, Tool, initialize_agent\n", "from langchain.llms import OpenAI\n", "from langchain.utilities import SerpAPIWrapper\n", - "from langchain.agents import initialize_agent, Tool\n", - "from langchain.agents import AgentType\n", "\n", "llm = OpenAI(temperature=0)\n", "search = SerpAPIWrapper()\n", @@ -50,9 +49,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents.output_parsers import SelfAskOutputParser\n", + "from langchain import hub\n", "from langchain.agents.format_scratchpad import format_log_to_str\n", - "from langchain import hub" + "from langchain.agents.output_parsers import SelfAskOutputParser" ] }, { diff --git a/docs/docs/modules/agents/agent_types/structured_chat.ipynb b/docs/docs/modules/agents/agent_types/structured_chat.ipynb index a65b47f3ac3..34d5c81b80f 100644 --- a/docs/docs/modules/agents/agent_types/structured_chat.ipynb +++ b/docs/docs/modules/agents/agent_types/structured_chat.ipynb @@ -19,9 +19,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import AgentType\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.agents import initialize_agent" + "from langchain.agents import AgentType, initialize_agent\n", + "from langchain.chat_models import ChatOpenAI" ] }, { @@ -41,14 +40,13 @@ "metadata": {}, "outputs": [], "source": [ + "# This import is required only for jupyter notebooks, since they have their own eventloop\n", + "import nest_asyncio\n", "from langchain.agents.agent_toolkits import PlayWrightBrowserToolkit\n", "from langchain.tools.playwright.utils import (\n", " create_async_playwright_browser, # A synchronous browser is available, though it isn't compatible with jupyter.\n", ")\n", "\n", - "# This import is required only for jupyter notebooks, since they have their own eventloop\n", - "import nest_asyncio\n", - "\n", "nest_asyncio.apply()" ] }, @@ -147,8 +145,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents.output_parsers import JSONAgentOutputParser\n", - "from langchain.agents.format_scratchpad import format_log_to_str" + "from langchain.agents.format_scratchpad import format_log_to_str\n", + "from langchain.agents.output_parsers import JSONAgentOutputParser" ] }, { diff --git 
a/docs/docs/modules/agents/agent_types/xml_agent.ipynb b/docs/docs/modules/agents/agent_types/xml_agent.ipynb index c5a945722ea..82f1ecc9252 100644 --- a/docs/docs/modules/agents/agent_types/xml_agent.ipynb +++ b/docs/docs/modules/agents/agent_types/xml_agent.ipynb @@ -75,10 +75,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.tools.render import render_text_description\n", - "from langchain.agents.output_parsers import XMLAgentOutputParser\n", + "from langchain import hub\n", "from langchain.agents.format_scratchpad import format_xml\n", - "from langchain import hub" + "from langchain.agents.output_parsers import XMLAgentOutputParser\n", + "from langchain.tools.render import render_text_description" ] }, { @@ -205,8 +205,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chains import LLMChain\n", - "from langchain.agents import XMLAgent" + "from langchain.agents import XMLAgent\n", + "from langchain.chains import LLMChain" ] }, { diff --git a/docs/docs/modules/agents/how_to/add_memory_openai_functions.ipynb b/docs/docs/modules/agents/how_to/add_memory_openai_functions.ipynb index 9c20536c2d3..4a2a512f62b 100644 --- a/docs/docs/modules/agents/how_to/add_memory_openai_functions.ipynb +++ b/docs/docs/modules/agents/how_to/add_memory_openai_functions.ipynb @@ -17,13 +17,11 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.agents import AgentType, Tool, initialize_agent\n", "from langchain.chains import LLMMathChain\n", - "from langchain.utilities import SerpAPIWrapper\n", - "from langchain.utilities import SQLDatabase\n", - "from langchain_experimental.sql import SQLDatabaseChain\n", - "from langchain.agents import initialize_agent, Tool\n", - "from langchain.agents import AgentType\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.utilities import SerpAPIWrapper, SQLDatabase\n", + "from langchain_experimental.sql import SQLDatabaseChain" ] }, { @@ -64,8 +62,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import MessagesPlaceholder\n", "from langchain.memory import ConversationBufferMemory\n", + "from langchain.prompts import MessagesPlaceholder\n", "\n", "agent_kwargs = {\n", " \"extra_prompt_messages\": [MessagesPlaceholder(variable_name=\"memory\")],\n", diff --git a/docs/docs/modules/agents/how_to/agent_iter.ipynb b/docs/docs/modules/agents/how_to/agent_iter.ipynb index 89b3c952530..618ed17e1e8 100644 --- a/docs/docs/modules/agents/how_to/agent_iter.ipynb +++ b/docs/docs/modules/agents/how_to/agent_iter.ipynb @@ -23,7 +23,7 @@ "outputs": [], "source": [ "import pydantic\n", - "from langchain.agents import initialize_agent, AgentType\n", + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.agents.tools import Tool\n", "from langchain.chains import LLMMathChain\n", "from langchain.chat_models import ChatOpenAI" diff --git a/docs/docs/modules/agents/how_to/agent_structured.ipynb b/docs/docs/modules/agents/how_to/agent_structured.ipynb index c1b9158b0d7..9f1f5262dea 100644 --- a/docs/docs/modules/agents/how_to/agent_structured.ipynb +++ b/docs/docs/modules/agents/how_to/agent_structured.ipynb @@ -43,10 +43,10 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.vectorstores import Chroma\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from 
langchain.document_loaders import TextLoader" + "from langchain.vectorstores import Chroma" ] }, { @@ -119,9 +119,10 @@ "metadata": {}, "outputs": [], "source": [ - "from pydantic import BaseModel, Field\n", "from typing import List\n", + "\n", "from langchain.utils.openai_functions import convert_pydantic_to_openai_function\n", + "from pydantic import BaseModel, Field\n", "\n", "\n", "class Response(BaseModel):\n", @@ -163,8 +164,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.schema.agent import AgentActionMessageLog, AgentFinish\n", - "import json" + "import json\n", + "\n", + "from langchain.schema.agent import AgentActionMessageLog, AgentFinish" ] }, { @@ -217,11 +219,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.tools.render import format_tool_to_openai_function\n", + "from langchain.agents import AgentExecutor\n", "from langchain.agents.format_scratchpad import format_to_openai_function_messages\n", - "from langchain.agents import AgentExecutor" + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain.tools.render import format_tool_to_openai_function" ] }, { diff --git a/docs/docs/modules/agents/how_to/agent_vectorstore.ipynb b/docs/docs/modules/agents/how_to/agent_vectorstore.ipynb index 072e7bcf172..7f14b74387b 100644 --- a/docs/docs/modules/agents/how_to/agent_vectorstore.ipynb +++ b/docs/docs/modules/agents/how_to/agent_vectorstore.ipynb @@ -28,11 +28,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.vectorstores import Chroma\n", - "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.llms import OpenAI\n", "from langchain.chains import RetrievalQA\n", + "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.llms import OpenAI\n", + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.vectorstores import Chroma\n", "\n", "llm = OpenAI(temperature=0)" ] @@ -162,8 +162,7 @@ "outputs": [], "source": [ "# Import things that are needed generically\n", - "from langchain.agents import initialize_agent, Tool\n", - "from langchain.agents import AgentType\n", + "from langchain.agents import AgentType, Tool, initialize_agent\n", "from langchain.llms import OpenAI" ] }, diff --git a/docs/docs/modules/agents/how_to/async_agent.ipynb b/docs/docs/modules/agents/how_to/async_agent.ipynb index dbbf456108b..e233372f0ee 100644 --- a/docs/docs/modules/agents/how_to/async_agent.ipynb +++ b/docs/docs/modules/agents/how_to/async_agent.ipynb @@ -42,8 +42,7 @@ "import asyncio\n", "import time\n", "\n", - "from langchain.agents import initialize_agent, load_tools\n", - "from langchain.agents import AgentType\n", + "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.llms import OpenAI\n", "\n", "questions = [\n", diff --git a/docs/docs/modules/agents/how_to/chatgpt_clone.ipynb b/docs/docs/modules/agents/how_to/chatgpt_clone.ipynb index 5dbf7db338c..6762f79cc27 100644 --- a/docs/docs/modules/agents/how_to/chatgpt_clone.ipynb +++ b/docs/docs/modules/agents/how_to/chatgpt_clone.ipynb @@ -47,11 +47,10 @@ } ], "source": [ - "from langchain.llms import OpenAI\n", "from langchain.chains import LLMChain\n", - "from langchain.prompts import PromptTemplate\n", + 
"from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferWindowMemory\n", - "\n", + "from langchain.prompts import PromptTemplate\n", "\n", "template = \"\"\"Assistant is a large language model trained by OpenAI.\n", "\n", diff --git a/docs/docs/modules/agents/how_to/custom-functions-with-openai-functions-agent.ipynb b/docs/docs/modules/agents/how_to/custom-functions-with-openai-functions-agent.ipynb index 5505bd4eefd..01b7f845462 100644 --- a/docs/docs/modules/agents/how_to/custom-functions-with-openai-functions-agent.ipynb +++ b/docs/docs/modules/agents/how_to/custom-functions-with-openai-functions-agent.ipynb @@ -41,9 +41,10 @@ }, "outputs": [], "source": [ - "import yfinance as yf\n", "from datetime import datetime, timedelta\n", "\n", + "import yfinance as yf\n", + "\n", "\n", "def get_current_stock_price(ticker):\n", " \"\"\"Method to get current stock price\"\"\"\n", @@ -134,8 +135,9 @@ "outputs": [], "source": [ "from typing import Type\n", - "from pydantic import BaseModel, Field\n", + "\n", "from langchain.tools import BaseTool\n", + "from pydantic import BaseModel, Field\n", "\n", "\n", "class CurrentStockPriceInput(BaseModel):\n", @@ -202,9 +204,8 @@ }, "outputs": [], "source": [ - "from langchain.agents import AgentType\n", + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.chat_models import ChatOpenAI\n", - "from langchain.agents import initialize_agent\n", "\n", "llm = ChatOpenAI(model=\"gpt-3.5-turbo-0613\", temperature=0)\n", "\n", diff --git a/docs/docs/modules/agents/how_to/custom_agent.ipynb b/docs/docs/modules/agents/how_to/custom_agent.ipynb index 77f64f3047f..2afb3f0d28c 100644 --- a/docs/docs/modules/agents/how_to/custom_agent.ipynb +++ b/docs/docs/modules/agents/how_to/custom_agent.ipynb @@ -25,7 +25,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import Tool, AgentExecutor, BaseSingleActionAgent\n", + "from langchain.agents import AgentExecutor, BaseSingleActionAgent, Tool\n", "from langchain.utilities import SerpAPIWrapper" ] }, @@ -54,7 +54,8 @@ "metadata": {}, "outputs": [], "source": [ - "from typing import List, Tuple, Any, Union\n", + "from typing import Any, List, Tuple, Union\n", + "\n", "from langchain.schema import AgentAction, AgentFinish\n", "\n", "\n", diff --git a/docs/docs/modules/agents/how_to/custom_agent_with_tool_retrieval.ipynb b/docs/docs/modules/agents/how_to/custom_agent_with_tool_retrieval.ipynb index f533d121f2d..dd7d041a07b 100644 --- a/docs/docs/modules/agents/how_to/custom_agent_with_tool_retrieval.ipynb +++ b/docs/docs/modules/agents/how_to/custom_agent_with_tool_retrieval.ipynb @@ -31,19 +31,20 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import (\n", - " Tool,\n", - " AgentExecutor,\n", - " LLMSingleActionAgent,\n", - " AgentOutputParser,\n", - ")\n", - "from langchain.prompts import StringPromptTemplate\n", - "from langchain.llms import OpenAI\n", - "from langchain.utilities import SerpAPIWrapper\n", - "from langchain.chains import LLMChain\n", + "import re\n", "from typing import Union\n", + "\n", + "from langchain.agents import (\n", + " AgentExecutor,\n", + " AgentOutputParser,\n", + " LLMSingleActionAgent,\n", + " Tool,\n", + ")\n", + "from langchain.chains import LLMChain\n", + "from langchain.llms import OpenAI\n", + "from langchain.prompts import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", - "import re" + "from langchain.utilities import SerpAPIWrapper" ] }, { @@ -104,9 +105,9 
@@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import FAISS\n", "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.schema import Document" + "from langchain.schema import Document\n", + "from langchain.vectorstores import FAISS" ] }, { diff --git a/docs/docs/modules/agents/how_to/custom_mrkl_agent.ipynb b/docs/docs/modules/agents/how_to/custom_mrkl_agent.ipynb index 7b7ddebe3a2..a6103c67880 100644 --- a/docs/docs/modules/agents/how_to/custom_mrkl_agent.ipynb +++ b/docs/docs/modules/agents/how_to/custom_mrkl_agent.ipynb @@ -47,8 +47,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import ZeroShotAgent, Tool, AgentExecutor\n", - "from langchain.llms import OpenAI\nfrom langchain.utilities import SerpAPIWrapper\nfrom langchain.chains import LLMChain" + "from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n", + "from langchain.chains import LLMChain\n", + "from langchain.llms import OpenAI\n", + "from langchain.utilities import SerpAPIWrapper" ] }, { diff --git a/docs/docs/modules/agents/how_to/custom_multi_action_agent.ipynb b/docs/docs/modules/agents/how_to/custom_multi_action_agent.ipynb index 7b4486815e8..bfe084a280e 100644 --- a/docs/docs/modules/agents/how_to/custom_multi_action_agent.ipynb +++ b/docs/docs/modules/agents/how_to/custom_multi_action_agent.ipynb @@ -25,7 +25,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import Tool, AgentExecutor, BaseMultiActionAgent\n", + "from langchain.agents import AgentExecutor, BaseMultiActionAgent, Tool\n", "from langchain.utilities import SerpAPIWrapper" ] }, @@ -70,7 +70,8 @@ "metadata": {}, "outputs": [], "source": [ - "from typing import List, Tuple, Any, Union\n", + "from typing import Any, List, Tuple, Union\n", + "\n", "from langchain.schema import AgentAction, AgentFinish\n", "\n", "\n", diff --git a/docs/docs/modules/agents/how_to/handle_parsing_errors.ipynb b/docs/docs/modules/agents/how_to/handle_parsing_errors.ipynb index c54a65ddeab..b0ef1d33b26 100644 --- a/docs/docs/modules/agents/how_to/handle_parsing_errors.ipynb +++ b/docs/docs/modules/agents/how_to/handle_parsing_errors.ipynb @@ -25,10 +25,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.utilities import SerpAPIWrapper\n", - "from langchain.agents import initialize_agent, Tool\n", - "from langchain.agents import AgentType\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain.agents import AgentType, Tool, initialize_agent\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.utilities import SerpAPIWrapper" ] }, { diff --git a/docs/docs/modules/agents/how_to/intermediate_steps.ipynb b/docs/docs/modules/agents/how_to/intermediate_steps.ipynb index 3670c715bba..0b0010d6469 100644 --- a/docs/docs/modules/agents/how_to/intermediate_steps.ipynb +++ b/docs/docs/modules/agents/how_to/intermediate_steps.ipynb @@ -17,9 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import load_tools\n", - "from langchain.agents import initialize_agent\n", - "from langchain.agents import AgentType\n", + "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.llms import OpenAI" ] }, diff --git a/docs/docs/modules/agents/how_to/max_iterations.ipynb b/docs/docs/modules/agents/how_to/max_iterations.ipynb index b1c02b1aebd..060194a0e55 100644 --- a/docs/docs/modules/agents/how_to/max_iterations.ipynb +++ b/docs/docs/modules/agents/how_to/max_iterations.ipynb @@ -17,8 +17,7 @@ 
"metadata": {}, "outputs": [], "source": [ - "from langchain.agents import initialize_agent, Tool\n", - "from langchain.agents import AgentType\n", + "from langchain.agents import AgentType, Tool, initialize_agent\n", "from langchain.llms import OpenAI" ] }, diff --git a/docs/docs/modules/agents/how_to/max_time_limit.ipynb b/docs/docs/modules/agents/how_to/max_time_limit.ipynb index 234bd8c48e6..1b052ff98a5 100644 --- a/docs/docs/modules/agents/how_to/max_time_limit.ipynb +++ b/docs/docs/modules/agents/how_to/max_time_limit.ipynb @@ -17,8 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import initialize_agent, Tool\n", - "from langchain.agents import AgentType\n", + "from langchain.agents import AgentType, Tool, initialize_agent\n", "from langchain.llms import OpenAI" ] }, diff --git a/docs/docs/modules/agents/how_to/sharedmemory_for_tools.ipynb b/docs/docs/modules/agents/how_to/sharedmemory_for_tools.ipynb index 7c831554da1..7c1e5df5d55 100644 --- a/docs/docs/modules/agents/how_to/sharedmemory_for_tools.ipynb +++ b/docs/docs/modules/agents/how_to/sharedmemory_for_tools.ipynb @@ -22,10 +22,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import ZeroShotAgent, Tool, AgentExecutor\n", - "from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n", - "from langchain.llms import OpenAI\n", + "from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n", "from langchain.chains import LLMChain\n", + "from langchain.llms import OpenAI\n", + "from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n", "from langchain.prompts import PromptTemplate\n", "from langchain.utilities import GoogleSearchAPIWrapper" ] diff --git a/docs/docs/modules/agents/how_to/streaming_stdout_final_only.ipynb b/docs/docs/modules/agents/how_to/streaming_stdout_final_only.ipynb index 1d9796ebe06..1339a0f5880 100644 --- a/docs/docs/modules/agents/how_to/streaming_stdout_final_only.ipynb +++ b/docs/docs/modules/agents/how_to/streaming_stdout_final_only.ipynb @@ -28,9 +28,7 @@ }, "outputs": [], "source": [ - "from langchain.agents import load_tools\n", - "from langchain.agents import initialize_agent\n", - "from langchain.agents import AgentType\n", + "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.callbacks.streaming_stdout_final_only import (\n", " FinalStreamingStdOutCallbackHandler,\n", ")\n", diff --git a/docs/docs/modules/agents/how_to/use_toolkits_with_openai_functions.ipynb b/docs/docs/modules/agents/how_to/use_toolkits_with_openai_functions.ipynb index def4333b308..358b9fb2545 100644 --- a/docs/docs/modules/agents/how_to/use_toolkits_with_openai_functions.ipynb +++ b/docs/docs/modules/agents/how_to/use_toolkits_with_openai_functions.ipynb @@ -17,12 +17,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.utilities import SQLDatabase\n", - "from langchain.agents import initialize_agent\n", - "from langchain.agents import AgentType\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.agents.agent_toolkits import SQLDatabaseToolkit\n", - "from langchain.schema import SystemMessage" + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.schema import SystemMessage\n", + "from langchain.utilities import SQLDatabase" ] }, { diff --git a/docs/docs/modules/agents/index.ipynb b/docs/docs/modules/agents/index.ipynb index ec321e6a883..78a12b07ba8 100644 --- 
a/docs/docs/modules/agents/index.ipynb +++ b/docs/docs/modules/agents/index.ipynb @@ -519,7 +519,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.schema.messages import HumanMessage, AIMessage\n", + "from langchain.schema.messages import AIMessage, HumanMessage\n", "\n", "chat_history = []" ] diff --git a/docs/docs/modules/agents/tools/custom_tools.ipynb b/docs/docs/modules/agents/tools/custom_tools.ipynb index aa491c269f7..96c00295f3e 100644 --- a/docs/docs/modules/agents/tools/custom_tools.ipynb +++ b/docs/docs/modules/agents/tools/custom_tools.ipynb @@ -29,11 +29,11 @@ "outputs": [], "source": [ "# Import things that are needed generically\n", - "from langchain.chains import LLMMathChain\n", - "from langchain.utilities import SerpAPIWrapper\n", "from langchain.agents import AgentType, initialize_agent\n", + "from langchain.chains import LLMMathChain\n", "from langchain.chat_models import ChatOpenAI\n", - "from langchain.tools import BaseTool, StructuredTool, Tool, tool" + "from langchain.tools import BaseTool, StructuredTool, Tool, tool\n", + "from langchain.utilities import SerpAPIWrapper" ] }, { @@ -772,10 +772,9 @@ "outputs": [], "source": [ "# Import things that are needed generically\n", - "from langchain.agents import initialize_agent, Tool\n", - "from langchain.agents import AgentType\n", - "from langchain.llms import OpenAI\n", + "from langchain.agents import AgentType, Tool, initialize_agent\n", "from langchain.chains import LLMMathChain\n", + "from langchain.llms import OpenAI\n", "from langchain.utilities import SerpAPIWrapper\n", "\n", "search = SerpAPIWrapper()\n", @@ -936,14 +935,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.tools.base import ToolException\n", - "\n", - "from langchain.utilities import SerpAPIWrapper\n", "from langchain.agents import AgentType, initialize_agent\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.tools import Tool\n", - "\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain.tools.base import ToolException\n", + "from langchain.utilities import SerpAPIWrapper\n", "\n", "\n", "def _handle_error(error: ToolException) -> str:\n", diff --git a/docs/docs/modules/agents/tools/human_approval.ipynb b/docs/docs/modules/agents/tools/human_approval.ipynb index b0764feb0fa..a01b7269cef 100644 --- a/docs/docs/modules/agents/tools/human_approval.ipynb +++ b/docs/docs/modules/agents/tools/human_approval.ipynb @@ -158,9 +158,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import load_tools\n", - "from langchain.agents import initialize_agent\n", - "from langchain.agents import AgentType\n", + "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.llms import OpenAI" ] }, diff --git a/docs/docs/modules/agents/tools/multi_input_tool.ipynb b/docs/docs/modules/agents/tools/multi_input_tool.ipynb index 15376d0d985..23105f937e9 100644 --- a/docs/docs/modules/agents/tools/multi_input_tool.ipynb +++ b/docs/docs/modules/agents/tools/multi_input_tool.ipynb @@ -34,8 +34,8 @@ }, "outputs": [], "source": [ + "from langchain.agents import AgentType, initialize_agent\n", "from langchain.llms import OpenAI\n", - "from langchain.agents import initialize_agent, AgentType\n", "\n", "llm = OpenAI(temperature=0)" ] @@ -151,9 +151,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain.agents import initialize_agent, Tool\n", - "from langchain.agents import AgentType" + "from langchain.agents 
import AgentType, Tool, initialize_agent\n", + "from langchain.llms import OpenAI" ] }, { diff --git a/docs/docs/modules/callbacks/async_callbacks.ipynb b/docs/docs/modules/callbacks/async_callbacks.ipynb index d58783a7551..0deb98fcd63 100644 --- a/docs/docs/modules/callbacks/async_callbacks.ipynb +++ b/docs/docs/modules/callbacks/async_callbacks.ipynb @@ -61,9 +61,9 @@ "import asyncio\n", "from typing import Any, Dict, List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.schema import LLMResult, HumanMessage\n", "from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.schema import HumanMessage, LLMResult\n", "\n", "\n", "class MyCustomSyncHandler(BaseCallbackHandler):\n", diff --git a/docs/docs/modules/callbacks/filecallbackhandler.ipynb b/docs/docs/modules/callbacks/filecallbackhandler.ipynb index 53c081e046f..0806affde38 100644 --- a/docs/docs/modules/callbacks/filecallbackhandler.ipynb +++ b/docs/docs/modules/callbacks/filecallbackhandler.ipynb @@ -45,12 +45,11 @@ } ], "source": [ - "from loguru import logger\n", - "\n", "from langchain.callbacks import FileCallbackHandler\n", "from langchain.chains import LLMChain\n", "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from loguru import logger\n", "\n", "logfile = \"output.log\"\n", "\n", @@ -138,8 +137,8 @@ } ], "source": [ - "from IPython.display import display, HTML\n", "from ansi2html import Ansi2HTMLConverter\n", + "from IPython.display import HTML, display\n", "\n", "with open(\"output.log\", \"r\") as f:\n", " content = f.read()\n", diff --git a/docs/docs/modules/callbacks/multiple_callbacks.ipynb b/docs/docs/modules/callbacks/multiple_callbacks.ipynb index 12207a3abf8..4a135acdb4d 100644 --- a/docs/docs/modules/callbacks/multiple_callbacks.ipynb +++ b/docs/docs/modules/callbacks/multiple_callbacks.ipynb @@ -124,12 +124,12 @@ } ], "source": [ - "from typing import Dict, Union, Any, List\n", + "from typing import Any, Dict, List, Union\n", "\n", - "from langchain.callbacks.base import BaseCallbackHandler\n", - "from langchain.schema import AgentAction\n", "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langchain.callbacks.base import BaseCallbackHandler\n", "from langchain.llms import OpenAI\n", + "from langchain.schema import AgentAction\n", "\n", "\n", "# First, define custom callback handler implementations\n", diff --git a/docs/docs/modules/chains/foundational/llm_chain.ipynb b/docs/docs/modules/chains/foundational/llm_chain.ipynb index f8c5ecb0afc..94a335aa920 100644 --- a/docs/docs/modules/chains/foundational/llm_chain.ipynb +++ b/docs/docs/modules/chains/foundational/llm_chain.ipynb @@ -40,8 +40,8 @@ } ], "source": [ - "from langchain.prompts import PromptTemplate\n", "from langchain.chat_models import ChatOpenAI\n", + "from langchain.prompts import PromptTemplate\n", "from langchain.schema import StrOutputParser\n", "\n", "prompt = PromptTemplate.from_template(\n", @@ -93,9 +93,9 @@ } ], "source": [ - "from langchain.prompts import PromptTemplate\n", - "from langchain.llms import OpenAI\n", "from langchain.chains import LLMChain\n", + "from langchain.llms import OpenAI\n", + "from langchain.prompts import PromptTemplate\n", "\n", "prompt_template = \"What is a good name for a company that makes {product}?\"\n", "\n", diff --git a/docs/docs/modules/chains/foundational/router.ipynb 
b/docs/docs/modules/chains/foundational/router.ipynb index 019cee056d2..3b6028c3127 100644 --- a/docs/docs/modules/chains/foundational/router.ipynb +++ b/docs/docs/modules/chains/foundational/router.ipynb @@ -21,7 +21,6 @@ "source": [ "from langchain.prompts import PromptTemplate\n", "\n", - "\n", "physics_template = \"\"\"You are a very smart physics professor. \\\n", "You are great at answering questions about physics in a concise and easy to understand manner. \\\n", "When you don't know the answer to a question you admit that you don't know.\n", @@ -89,8 +88,8 @@ "source": [ "from typing import Literal\n", "\n", - "from langchain.pydantic_v1 import BaseModel\n", "from langchain.output_parsers.openai_functions import PydanticAttrOutputFunctionsParser\n", + "from langchain.pydantic_v1 import BaseModel\n", "from langchain.utils.openai_functions import convert_pydantic_to_openai_function\n", "\n", "\n", @@ -123,7 +122,6 @@ "from langchain.schema.output_parser import StrOutputParser\n", "from langchain.schema.runnable import RunnablePassthrough\n", "\n", - "\n", "final_chain = (\n", " RunnablePassthrough.assign(topic=itemgetter(\"input\") | classifier_chain)\n", " | prompt_branch\n", @@ -192,10 +190,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chains.router import MultiPromptChain\n", - "from langchain.llms import OpenAI\n", "from langchain.chains import ConversationChain\n", - "from langchain.chains.llm import LLMChain" + "from langchain.chains.llm import LLMChain\n", + "from langchain.chains.router import MultiPromptChain\n", + "from langchain.llms import OpenAI" ] }, { diff --git a/docs/docs/modules/chains/foundational/sequential_chains.ipynb b/docs/docs/modules/chains/foundational/sequential_chains.ipynb index d0e6d499740..304f73c2159 100644 --- a/docs/docs/modules/chains/foundational/sequential_chains.ipynb +++ b/docs/docs/modules/chains/foundational/sequential_chains.ipynb @@ -153,8 +153,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", "from langchain.chains import LLMChain\n", + "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", "\n", "# This is an LLMChain to write a synopsis given a title of a play.\n", diff --git a/docs/docs/modules/chains/foundational/transformation.ipynb b/docs/docs/modules/chains/foundational/transformation.ipynb index 4335c86915d..5435cfb6ac3 100644 --- a/docs/docs/modules/chains/foundational/transformation.ipynb +++ b/docs/docs/modules/chains/foundational/transformation.ipynb @@ -100,7 +100,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chains import TransformChain, LLMChain, SimpleSequentialChain\n", + "from langchain.chains import LLMChain, SimpleSequentialChain, TransformChain\n", "from langchain.llms import OpenAI" ] }, diff --git a/docs/docs/modules/chains/how_to/async_chain.ipynb b/docs/docs/modules/chains/how_to/async_chain.ipynb index 1804725a0ed..858c132282f 100644 --- a/docs/docs/modules/chains/how_to/async_chain.ipynb +++ b/docs/docs/modules/chains/how_to/async_chain.ipynb @@ -71,9 +71,9 @@ "import asyncio\n", "import time\n", "\n", + "from langchain.chains import LLMChain\n", "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMChain\n", "\n", "\n", "def generate_serially():\n", diff --git a/docs/docs/modules/chains/how_to/custom_chain.ipynb b/docs/docs/modules/chains/how_to/custom_chain.ipynb index 2e4f4e4115d..ddf268ef771 100644 --- 
a/docs/docs/modules/chains/how_to/custom_chain.ipynb +++ b/docs/docs/modules/chains/how_to/custom_chain.ipynb @@ -23,15 +23,14 @@ "\n", "from typing import Any, Dict, List, Optional\n", "\n", - "from pydantic import Extra\n", - "\n", - "from langchain.schema.language_model import BaseLanguageModel\n", "from langchain.callbacks.manager import (\n", " AsyncCallbackManagerForChainRun,\n", " CallbackManagerForChainRun,\n", ")\n", "from langchain.chains.base import Chain\n", "from langchain.prompts.base import BasePromptTemplate\n", + "from langchain.schema.language_model import BaseLanguageModel\n", + "from pydantic import Extra\n", "\n", "\n", "class MyCustomChain(Chain):\n", @@ -156,7 +155,6 @@ "from langchain.chat_models.openai import ChatOpenAI\n", "from langchain.prompts.prompt import PromptTemplate\n", "\n", - "\n", "chain = MyCustomChain(\n", " prompt=PromptTemplate.from_template(\"tell us a joke about {topic}\"),\n", " llm=ChatOpenAI(),\n", diff --git a/docs/docs/modules/chains/how_to/openai_functions.ipynb b/docs/docs/modules/chains/how_to/openai_functions.ipynb index f0c8ccd6f6d..223668cc167 100644 --- a/docs/docs/modules/chains/how_to/openai_functions.ipynb +++ b/docs/docs/modules/chains/how_to/openai_functions.ipynb @@ -23,8 +23,8 @@ "\n", "from langchain.chains.openai_functions import (\n", " create_openai_fn_chain,\n", - " create_structured_output_chain,\n", " create_openai_fn_runnable,\n", + " create_structured_output_chain,\n", " create_structured_output_runnable,\n", ")\n", "from langchain.chat_models import ChatOpenAI\n", diff --git a/docs/docs/modules/chains/index.ipynb b/docs/docs/modules/chains/index.ipynb index 17e921f33af..deb3f1f79aa 100644 --- a/docs/docs/modules/chains/index.ipynb +++ b/docs/docs/modules/chains/index.ipynb @@ -155,7 +155,6 @@ "source": [ "from langchain.chains import LLMChain\n", "\n", - "\n", "chain = LLMChain(llm=model, prompt=prompt, output_parser=StrOutputParser())\n", "chain.run(question=\"How did Mansa Musa accumulate his wealth?\")" ] diff --git a/docs/docs/modules/data_connection/document_transformers/post_retrieval/long_context_reorder.ipynb b/docs/docs/modules/data_connection/document_transformers/post_retrieval/long_context_reorder.ipynb index 8d0e74db68d..2f54240f710 100644 --- a/docs/docs/modules/data_connection/document_transformers/post_retrieval/long_context_reorder.ipynb +++ b/docs/docs/modules/data_connection/document_transformers/post_retrieval/long_context_reorder.ipynb @@ -42,14 +42,14 @@ } ], "source": [ - "from langchain.vectorstores import Chroma\n", - "from langchain.embeddings import HuggingFaceEmbeddings\n", + "from langchain.chains import LLMChain, StuffDocumentsChain\n", "from langchain.document_transformers import (\n", " LongContextReorder,\n", ")\n", - "from langchain.chains import StuffDocumentsChain, LLMChain\n", - "from langchain.prompts import PromptTemplate\n", + "from langchain.embeddings import HuggingFaceEmbeddings\n", "from langchain.llms import OpenAI\n", + "from langchain.prompts import PromptTemplate\n", + "from langchain.vectorstores import Chroma\n", "\n", "# Get embeddings.\n", "embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n", diff --git a/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb b/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb index 11b21c6af22..77d1293a9e3 100644 --- a/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb +++ b/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb @@ -20,10 
+20,10 @@ "outputs": [], "source": [ "# Build a sample vectorDB\n", - "from langchain.vectorstores import Chroma\n", "from langchain.document_loaders import WebBaseLoader\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "from langchain.vectorstores import Chroma\n", "\n", "# Load blog post\n", "loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n", @@ -126,10 +126,11 @@ "outputs": [], "source": [ "from typing import List\n", + "\n", "from langchain.chains import LLMChain\n", - "from pydantic import BaseModel, Field\n", - "from langchain.prompts import PromptTemplate\n", "from langchain.output_parsers import PydanticOutputParser\n", + "from langchain.prompts import PromptTemplate\n", + "from pydantic import BaseModel, Field\n", "\n", "\n", "# Output parser will split the LLM result into a list of queries\n", diff --git a/docs/docs/modules/data_connection/retrievers/index.ipynb b/docs/docs/modules/data_connection/retrievers/index.ipynb index fc626e134e1..0d40ea7290c 100644 --- a/docs/docs/modules/data_connection/retrievers/index.ipynb +++ b/docs/docs/modules/data_connection/retrievers/index.ipynb @@ -56,7 +56,6 @@ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import Chroma\n", "\n", - "\n", "full_text = open(\"state_of_the_union.txt\", \"r\").read()\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)\n", "texts = text_splitter.split_text(full_text)\n", @@ -119,7 +118,6 @@ "from langchain.schema import StrOutputParser\n", "from langchain.schema.runnable import RunnablePassthrough\n", "\n", - "\n", "template = \"\"\"Answer the question based only on the following context:\n", "\n", "{context}\n", diff --git a/docs/docs/modules/data_connection/retrievers/multi_vector.ipynb b/docs/docs/modules/data_connection/retrievers/multi_vector.ipynb index 3433ea29970..1a74669cd21 100644 --- a/docs/docs/modules/data_connection/retrievers/multi_vector.ipynb +++ b/docs/docs/modules/data_connection/retrievers/multi_vector.ipynb @@ -36,11 +36,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import Chroma\n", + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain.storage import InMemoryStore\n", - "from langchain.document_loaders import TextLoader" + "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "from langchain.vectorstores import Chroma" ] }, { @@ -195,11 +195,12 @@ "metadata": {}, "outputs": [], "source": [ + "import uuid\n", + "\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", - "from langchain.schema.output_parser import StrOutputParser\n", - "import uuid\n", - "from langchain.schema.document import Document" + "from langchain.schema.document import Document\n", + "from langchain.schema.output_parser import StrOutputParser" ] }, { diff --git a/docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb b/docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb index 519c3eb9fe1..a36f9377db6 100644 --- a/docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb +++ b/docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb @@ -42,11 +42,11 @@ "metadata": {}, "outputs": [], "source": [ 
- "from langchain.vectorstores import Chroma\n", + "from langchain.document_loaders import TextLoader\n", "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain.storage import InMemoryStore\n", - "from langchain.document_loaders import TextLoader" + "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "from langchain.vectorstores import Chroma" ] }, { diff --git a/docs/docs/modules/data_connection/retrievers/self_query.ipynb b/docs/docs/modules/data_connection/retrievers/self_query.ipynb index f2c8fa17b56..fcee4a9784e 100644 --- a/docs/docs/modules/data_connection/retrievers/self_query.ipynb +++ b/docs/docs/modules/data_connection/retrievers/self_query.ipynb @@ -42,7 +42,6 @@ "from langchain.schema import Document\n", "from langchain.vectorstores import Chroma\n", "\n", - "\n", "docs = [\n", " Document(\n", " page_content=\"A bunch of scientists bring back dinosaurs and mayhem breaks loose\",\n", @@ -94,11 +93,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains.query_constructor.base import AttributeInfo\n", + "from langchain.chat_models import ChatOpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", "\n", - "\n", "metadata_field_info = [\n", " AttributeInfo(\n", " name=\"genre\",\n", @@ -294,8 +292,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import (\n", - " get_query_constructor_prompt,\n", " StructuredQueryOutputParser,\n", + " get_query_constructor_prompt,\n", ")\n", "\n", "prompt = get_query_constructor_prompt(\n", diff --git a/docs/docs/modules/data_connection/retrievers/web_research.ipynb b/docs/docs/modules/data_connection/retrievers/web_research.ipynb index 192c3484f43..7d70ad487fd 100644 --- a/docs/docs/modules/data_connection/retrievers/web_research.ipynb +++ b/docs/docs/modules/data_connection/retrievers/web_research.ipynb @@ -43,10 +43,11 @@ "outputs": [], "source": [ "import os\n", - "from langchain.vectorstores import Chroma\n", - "from langchain.embeddings import OpenAIEmbeddings\n", + "\n", "from langchain.chat_models.openai import ChatOpenAI\n", + "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.utilities import GoogleSearchAPIWrapper\n", + "from langchain.vectorstores import Chroma\n", "\n", "# Vectorstore\n", "vectorstore = Chroma(\n", @@ -229,10 +230,11 @@ "import os\n", "import re\n", "from typing import List\n", + "\n", "from langchain.chains import LLMChain\n", - "from pydantic import BaseModel, Field\n", - "from langchain.prompts import PromptTemplate\n", "from langchain.output_parsers.pydantic import PydanticOutputParser\n", + "from langchain.prompts import PromptTemplate\n", + "from pydantic import BaseModel, Field\n", "\n", "# LLMChain\n", "search_prompt = PromptTemplate(\n", @@ -432,10 +434,10 @@ } ], "source": [ - "from langchain.llms import LlamaCpp\n", - "from langchain.embeddings import GPT4AllEmbeddings\n", "from langchain.callbacks.manager import CallbackManager\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", + "from langchain.embeddings import GPT4AllEmbeddings\n", + "from langchain.llms import LlamaCpp\n", "\n", "n_gpu_layers = 1 # Metal set to 1 is enough.\n", "n_batch = 512 # Should be between 1 and n_ctx, consider the amount of RAM of your Apple Silicon Chip.\n", diff --git 
a/docs/docs/modules/data_connection/text_embedding/caching_embeddings.ipynb b/docs/docs/modules/data_connection/text_embedding/caching_embeddings.ipynb index 94de55231ee..3b232716501 100644 --- a/docs/docs/modules/data_connection/text_embedding/caching_embeddings.ipynb +++ b/docs/docs/modules/data_connection/text_embedding/caching_embeddings.ipynb @@ -30,13 +30,13 @@ }, "outputs": [], "source": [ + "from langchain.embeddings import CacheBackedEmbeddings, OpenAIEmbeddings\n", "from langchain.storage import (\n", " InMemoryStore,\n", " LocalFileStore,\n", " RedisStore,\n", " UpstashRedisStore,\n", - ")\n", - "from langchain.embeddings import OpenAIEmbeddings, CacheBackedEmbeddings" + ")" ] }, { diff --git a/docs/docs/modules/memory/adding_memory.ipynb b/docs/docs/modules/memory/adding_memory.ipynb index ee2fb83267e..c2168c638ab 100644 --- a/docs/docs/modules/memory/adding_memory.ipynb +++ b/docs/docs/modules/memory/adding_memory.ipynb @@ -177,12 +177,12 @@ "outputs": [], "source": [ "from langchain.chat_models import ChatOpenAI\n", - "from langchain.schema import SystemMessage\n", "from langchain.prompts import (\n", " ChatPromptTemplate,\n", " HumanMessagePromptTemplate,\n", " MessagesPlaceholder,\n", - ")" + ")\n", + "from langchain.schema import SystemMessage" ] }, { diff --git a/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb b/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb index 62eb1eb678b..5d4992b256f 100644 --- a/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb +++ b/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb @@ -78,8 +78,8 @@ "source": [ "from langchain.chains.question_answering import load_qa_chain\n", "from langchain.llms import OpenAI\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.memory import ConversationBufferMemory" + "from langchain.memory import ConversationBufferMemory\n", + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/modules/memory/agent_with_memory.ipynb b/docs/docs/modules/memory/agent_with_memory.ipynb index 5d889a2ca5a..9e00b2863ce 100644 --- a/docs/docs/modules/memory/agent_with_memory.ipynb +++ b/docs/docs/modules/memory/agent_with_memory.ipynb @@ -27,9 +27,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import ZeroShotAgent, Tool, AgentExecutor\n", + "from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n", + "from langchain.chains import LLMChain\n", + "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain.llms import OpenAI\nfrom langchain.chains import LLMChain\n", "from langchain.utilities import GoogleSearchAPIWrapper" ] }, diff --git a/docs/docs/modules/memory/agent_with_memory_in_db.ipynb b/docs/docs/modules/memory/agent_with_memory_in_db.ipynb index eec24d7af9f..01ab5b81a52 100644 --- a/docs/docs/modules/memory/agent_with_memory_in_db.ipynb +++ b/docs/docs/modules/memory/agent_with_memory_in_db.ipynb @@ -33,11 +33,11 @@ }, "outputs": [], "source": [ - "from langchain.agents import ZeroShotAgent, Tool, AgentExecutor\n", + "from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n", + "from langchain.chains import LLMChain\n", + "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferMemory\n", "from langchain.memory.chat_message_histories import RedisChatMessageHistory\n", - "from langchain.llms import OpenAI\n", - "from langchain.chains import LLMChain\n", "from langchain.utilities 
import GoogleSearchAPIWrapper" ] }, diff --git a/docs/docs/modules/memory/conversational_customization.ipynb b/docs/docs/modules/memory/conversational_customization.ipynb index 7dbb48ae599..53efd4f3432 100644 --- a/docs/docs/modules/memory/conversational_customization.ipynb +++ b/docs/docs/modules/memory/conversational_customization.ipynb @@ -17,11 +17,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", "from langchain.chains import ConversationChain\n", + "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferMemory\n", "\n", - "\n", "llm = OpenAI(temperature=0)" ] }, diff --git a/docs/docs/modules/memory/custom_memory.ipynb b/docs/docs/modules/memory/custom_memory.ipynb index 4abc0c094a9..55ac9a591e2 100644 --- a/docs/docs/modules/memory/custom_memory.ipynb +++ b/docs/docs/modules/memory/custom_memory.ipynb @@ -25,10 +25,12 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\nfrom langchain.chains import ConversationChain\n", + "from typing import Any, Dict, List\n", + "\n", + "from langchain.chains import ConversationChain\n", + "from langchain.llms import OpenAI\n", "from langchain.schema import BaseMemory\n", - "from pydantic import BaseModel\n", - "from typing import List, Dict, Any" + "from pydantic import BaseModel" ] }, { diff --git a/docs/docs/modules/memory/multiple_memory.ipynb b/docs/docs/modules/memory/multiple_memory.ipynb index bca559f914a..cca3b11eacc 100644 --- a/docs/docs/modules/memory/multiple_memory.ipynb +++ b/docs/docs/modules/memory/multiple_memory.ipynb @@ -17,15 +17,14 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain.prompts import PromptTemplate\n", "from langchain.chains import ConversationChain\n", + "from langchain.llms import OpenAI\n", "from langchain.memory import (\n", - " ConversationBufferMemory,\n", " CombinedMemory,\n", + " ConversationBufferMemory,\n", " ConversationSummaryMemory,\n", ")\n", - "\n", + "from langchain.prompts import PromptTemplate\n", "\n", "conv_memory = ConversationBufferMemory(\n", " memory_key=\"chat_history_lines\", input_key=\"input\"\n", diff --git a/docs/docs/modules/memory/types/kg.ipynb b/docs/docs/modules/memory/types/kg.ipynb index 7e08615512c..c5838cd8d32 100644 --- a/docs/docs/modules/memory/types/kg.ipynb +++ b/docs/docs/modules/memory/types/kg.ipynb @@ -25,8 +25,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.memory import ConversationKGMemory\n", - "from langchain.llms import OpenAI" + "from langchain.llms import OpenAI\n", + "from langchain.memory import ConversationKGMemory" ] }, { @@ -180,8 +180,8 @@ "outputs": [], "source": [ "llm = OpenAI(temperature=0)\n", - "from langchain.prompts.prompt import PromptTemplate\n", "from langchain.chains import ConversationChain\n", + "from langchain.prompts.prompt import PromptTemplate\n", "\n", "template = \"\"\"The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. \n", "If the AI does not know the answer to a question, it truthfully says it does not know. 
The AI ONLY uses information contained in the \"Relevant Information\" section and does not hallucinate.\n", diff --git a/docs/docs/modules/memory/types/summary_buffer.ipynb b/docs/docs/modules/memory/types/summary_buffer.ipynb index 1a1acad763e..ffcb795eb19 100644 --- a/docs/docs/modules/memory/types/summary_buffer.ipynb +++ b/docs/docs/modules/memory/types/summary_buffer.ipynb @@ -28,8 +28,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.memory import ConversationSummaryBufferMemory\n", "from langchain.llms import OpenAI\n", + "from langchain.memory import ConversationSummaryBufferMemory\n", "\n", "llm = OpenAI()" ] diff --git a/docs/docs/modules/memory/types/token_buffer.ipynb b/docs/docs/modules/memory/types/token_buffer.ipynb index 4432f4d930b..0c117c7f3b6 100644 --- a/docs/docs/modules/memory/types/token_buffer.ipynb +++ b/docs/docs/modules/memory/types/token_buffer.ipynb @@ -27,8 +27,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.memory import ConversationTokenBufferMemory\n", "from langchain.llms import OpenAI\n", + "from langchain.memory import ConversationTokenBufferMemory\n", "\n", "llm = OpenAI()" ] diff --git a/docs/docs/modules/model_io/chat/token_usage_tracking.ipynb b/docs/docs/modules/model_io/chat/token_usage_tracking.ipynb index 3dc55b2d513..4451a4b6ca4 100644 --- a/docs/docs/modules/model_io/chat/token_usage_tracking.ipynb +++ b/docs/docs/modules/model_io/chat/token_usage_tracking.ipynb @@ -19,8 +19,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.callbacks import get_openai_callback" + "from langchain.callbacks import get_openai_callback\n", + "from langchain.chat_models import ChatOpenAI" ] }, { @@ -101,9 +101,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import load_tools\n", - "from langchain.agents import initialize_agent\n", - "from langchain.agents import AgentType\n", + "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.llms import OpenAI\n", "\n", "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n", diff --git a/docs/docs/modules/model_io/llms/async_llm.ipynb b/docs/docs/modules/model_io/llms/async_llm.ipynb index 880fdb74a53..45f7c69ec2a 100644 --- a/docs/docs/modules/model_io/llms/async_llm.ipynb +++ b/docs/docs/modules/model_io/llms/async_llm.ipynb @@ -32,8 +32,8 @@ } ], "source": [ - "import time\n", "import asyncio\n", + "import time\n", "\n", "from langchain.llms import OpenAI\n", "\n", diff --git a/docs/docs/modules/model_io/llms/streaming_llm.ipynb b/docs/docs/modules/model_io/llms/streaming_llm.ipynb index cd75a6abb01..8ede97f441a 100644 --- a/docs/docs/modules/model_io/llms/streaming_llm.ipynb +++ b/docs/docs/modules/model_io/llms/streaming_llm.ipynb @@ -73,7 +73,6 @@ "source": [ "from langchain.llms import OpenAI\n", "\n", - "\n", "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\", temperature=0, max_tokens=512)\n", "for chunk in llm.stream(\"Write me a song about sparkling water.\"):\n", " print(chunk, end=\"\", flush=True)" diff --git a/docs/docs/modules/model_io/llms/token_usage_tracking.ipynb b/docs/docs/modules/model_io/llms/token_usage_tracking.ipynb index 79f4b29e288..f0864108cae 100644 --- a/docs/docs/modules/model_io/llms/token_usage_tracking.ipynb +++ b/docs/docs/modules/model_io/llms/token_usage_tracking.ipynb @@ -19,8 +19,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain.callbacks import get_openai_callback" + 
"from langchain.callbacks import get_openai_callback\n", + "from langchain.llms import OpenAI" ] }, { @@ -101,9 +101,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import load_tools\n", - "from langchain.agents import initialize_agent\n", - "from langchain.agents import AgentType\n", + "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", diff --git a/docs/docs/modules/model_io/output_parsers/datetime.ipynb b/docs/docs/modules/model_io/output_parsers/datetime.ipynb index 187cc473831..f65dc71794a 100644 --- a/docs/docs/modules/model_io/output_parsers/datetime.ipynb +++ b/docs/docs/modules/model_io/output_parsers/datetime.ipynb @@ -17,10 +17,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", - "from langchain.output_parsers import DatetimeOutputParser\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI" + "from langchain.llms import OpenAI\n", + "from langchain.output_parsers import DatetimeOutputParser\n", + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/modules/model_io/output_parsers/index.ipynb b/docs/docs/modules/model_io/output_parsers/index.ipynb index 1ff426c6e2f..917ee5e758c 100644 --- a/docs/docs/modules/model_io/output_parsers/index.ipynb +++ b/docs/docs/modules/model_io/output_parsers/index.ipynb @@ -55,7 +55,6 @@ "from langchain.prompts import PromptTemplate\n", "from langchain.pydantic_v1 import BaseModel, Field, validator\n", "\n", - "\n", "model = OpenAI(model_name=\"text-davinci-003\", temperature=0.0)\n", "\n", "\n", diff --git a/docs/docs/modules/model_io/output_parsers/retry.ipynb b/docs/docs/modules/model_io/output_parsers/retry.ipynb index d9910142a14..f7829327a74 100644 --- a/docs/docs/modules/model_io/output_parsers/retry.ipynb +++ b/docs/docs/modules/model_io/output_parsers/retry.ipynb @@ -17,15 +17,15 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.llms import OpenAI\n", + "from langchain.output_parsers import (\n", + " OutputFixingParser,\n", + " PydanticOutputParser,\n", + ")\n", "from langchain.prompts import (\n", " PromptTemplate,\n", ")\n", - "from langchain.llms import OpenAI\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.output_parsers import (\n", - " PydanticOutputParser,\n", - " OutputFixingParser,\n", - ")\n", "from pydantic import BaseModel, Field" ] }, diff --git a/docs/docs/modules/model_io/output_parsers/xml.ipynb b/docs/docs/modules/model_io/output_parsers/xml.ipynb index 62d14dba7dc..bb1c2b82ed3 100644 --- a/docs/docs/modules/model_io/output_parsers/xml.ipynb +++ b/docs/docs/modules/model_io/output_parsers/xml.ipynb @@ -20,9 +20,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate\n", "from langchain.llms import Anthropic\n", - "from langchain.output_parsers import XMLOutputParser" + "from langchain.output_parsers import XMLOutputParser\n", + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/modules/model_io/prompts/example_selectors/mmr.ipynb b/docs/docs/modules/model_io/prompts/example_selectors/mmr.ipynb index b3f01b65131..35fe667e87b 100644 --- a/docs/docs/modules/model_io/prompts/example_selectors/mmr.ipynb +++ b/docs/docs/modules/model_io/prompts/example_selectors/mmr.ipynb @@ -17,13 +17,13 @@ "metadata": {}, "outputs": [], "source": [ + "from 
langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n", "from langchain.prompts.example_selector import (\n", " MaxMarginalRelevanceExampleSelector,\n", " SemanticSimilarityExampleSelector,\n", ")\n", "from langchain.vectorstores import FAISS\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n", "\n", "example_prompt = PromptTemplate(\n", " input_variables=[\"input\", \"output\"],\n", diff --git a/docs/docs/modules/model_io/prompts/example_selectors/ngram_overlap.ipynb b/docs/docs/modules/model_io/prompts/example_selectors/ngram_overlap.ipynb index 0a8cf4aeeb7..12d07d60cd9 100644 --- a/docs/docs/modules/model_io/prompts/example_selectors/ngram_overlap.ipynb +++ b/docs/docs/modules/model_io/prompts/example_selectors/ngram_overlap.ipynb @@ -19,7 +19,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import PromptTemplate, FewShotPromptTemplate\n", + "from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n", "from langchain.prompts.example_selector.ngram_overlap import NGramOverlapExampleSelector\n", "\n", "example_prompt = PromptTemplate(\n", diff --git a/docs/docs/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store.ipynb b/docs/docs/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store.ipynb index 23bea0f672c..cc6e432df47 100644 --- a/docs/docs/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store.ipynb +++ b/docs/docs/modules/model_io/prompts/prompt_templates/connecting_to_a_feature_store.ipynb @@ -170,8 +170,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.chains import LLMChain" + "from langchain.chains import LLMChain\n", + "from langchain.chat_models import ChatOpenAI" ] }, { @@ -406,8 +406,8 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.chains import LLMChain" + "from langchain.chains import LLMChain\n", + "from langchain.chat_models import ChatOpenAI" ] }, { @@ -574,8 +574,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.chains import LLMChain" + "from langchain.chains import LLMChain\n", + "from langchain.chat_models import ChatOpenAI" ] }, { @@ -652,15 +652,14 @@ "outputs": [], "source": [ "import pandas\n", - "\n", - "from pydantic import Extra\n", - "from langchain.prompts import PromptTemplate, StringPromptTemplate\n", "from azure.identity import AzureCliCredential\n", "from azureml.featurestore import (\n", " FeatureStoreClient,\n", - " init_online_lookup,\n", " get_online_features,\n", + " init_online_lookup,\n", ")\n", + "from langchain.prompts import PromptTemplate, StringPromptTemplate\n", + "from pydantic import Extra\n", "\n", "\n", "class AzureMLFeatureStorePromptTemplate(StringPromptTemplate, extra=Extra.allow):\n", @@ -792,8 +791,8 @@ "source": [ "os.environ[\"OPENAI_API_KEY\"] = \"\" # Fill the open ai key here\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains import LLMChain\n", + "from langchain.chat_models import ChatOpenAI\n", "\n", "chain = LLMChain(llm=ChatOpenAI(), prompt=prompt_template)" ] diff --git a/docs/docs/modules/model_io/prompts/prompt_templates/few_shot_examples_chat.ipynb b/docs/docs/modules/model_io/prompts/prompt_templates/few_shot_examples_chat.ipynb index c6050f3264d..aaf00e2bc40 
100644 --- a/docs/docs/modules/model_io/prompts/prompt_templates/few_shot_examples_chat.ipynb +++ b/docs/docs/modules/model_io/prompts/prompt_templates/few_shot_examples_chat.ipynb @@ -43,8 +43,8 @@ "outputs": [], "source": [ "from langchain.prompts import (\n", - " FewShotChatMessagePromptTemplate,\n", " ChatPromptTemplate,\n", + " FewShotChatMessagePromptTemplate,\n", ")" ] }, @@ -191,8 +191,8 @@ }, "outputs": [], "source": [ - "from langchain.prompts import SemanticSimilarityExampleSelector\n", "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.prompts import SemanticSimilarityExampleSelector\n", "from langchain.vectorstores import Chroma" ] }, @@ -289,8 +289,8 @@ "outputs": [], "source": [ "from langchain.prompts import (\n", - " FewShotChatMessagePromptTemplate,\n", " ChatPromptTemplate,\n", + " FewShotChatMessagePromptTemplate,\n", ")\n", "\n", "# Define the few-shot prompt.\n", diff --git a/docs/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining.ipynb b/docs/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining.ipynb index 2ad420b8b10..23ac81c4c6c 100644 --- a/docs/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining.ipynb +++ b/docs/docs/modules/model_io/prompts/prompt_templates/prompts_pipelining.ipynb @@ -101,8 +101,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.chains import LLMChain" + "from langchain.chains import LLMChain\n", + "from langchain.chat_models import ChatOpenAI" ] }, { @@ -169,7 +169,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.schema import HumanMessage, AIMessage, SystemMessage" + "from langchain.schema import AIMessage, HumanMessage, SystemMessage" ] }, { @@ -258,8 +258,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.chains import LLMChain" + "from langchain.chains import LLMChain\n", + "from langchain.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/apis.ipynb b/docs/docs/use_cases/apis.ipynb index 42944481299..0c8d3cb2394 100644 --- a/docs/docs/use_cases/apis.ipynb +++ b/docs/docs/use_cases/apis.ipynb @@ -234,9 +234,9 @@ } ], "source": [ - "from langchain.llms import OpenAI\n", "from langchain.chains import APIChain\n", "from langchain.chains.api import open_meteo_docs\n", + "from langchain.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "chain = APIChain.from_llm_and_api_docs(\n", @@ -342,9 +342,10 @@ "outputs": [], "source": [ "import os\n", - "from langchain.llms import OpenAI\n", - "from langchain.chains.api import podcast_docs\n", + "\n", "from langchain.chains import APIChain\n", + "from langchain.chains.api import podcast_docs\n", + "from langchain.llms import OpenAI\n", "\n", "listen_api_key = \"xxx\" # Get api key here: https://www.listennotes.com/api/pricing/\n", "llm = OpenAI(temperature=0)\n", @@ -378,9 +379,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains import LLMChain, LLMRequestsChain\n", "from langchain.llms import OpenAI\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.chains import LLMRequestsChain, LLMChain" + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/use_cases/chatbots.ipynb b/docs/docs/use_cases/chatbots.ipynb index 5a5e1cc704d..68a1ffc6de1 100644 --- a/docs/docs/use_cases/chatbots.ipynb +++ b/docs/docs/use_cases/chatbots.ipynb @@ -94,8 +94,8 @@ } ], "source": [ - "from langchain.schema import 
HumanMessage, SystemMessage\n", "from langchain.chat_models import ChatOpenAI\n", + "from langchain.schema import HumanMessage, SystemMessage\n", "\n", "chat = ChatOpenAI()\n", "chat(\n", @@ -417,13 +417,13 @@ } ], "source": [ + "from langchain.chains import LLMChain\n", "from langchain.prompts import (\n", " ChatPromptTemplate,\n", + " HumanMessagePromptTemplate,\n", " MessagesPlaceholder,\n", " SystemMessagePromptTemplate,\n", - " HumanMessagePromptTemplate,\n", ")\n", - "from langchain.chains import LLMChain\n", "\n", "# LLM\n", "llm = ChatOpenAI()\n", @@ -647,8 +647,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains import ConversationalRetrievalChain\n", + "from langchain.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI()\n", "retriever = vectorstore.as_retriever()\n", diff --git a/docs/docs/use_cases/data_generation.ipynb b/docs/docs/use_cases/data_generation.ipynb index ded0e8196e9..e6fa11442fe 100644 --- a/docs/docs/use_cases/data_generation.ipynb +++ b/docs/docs/use_cases/data_generation.ipynb @@ -64,16 +64,16 @@ "# import dotenv\n", "# dotenv.load_dotenv()\n", "\n", - "from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n", "from langchain.chat_models import ChatOpenAI\n", + "from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n", "from langchain.pydantic_v1 import BaseModel\n", "from langchain_experimental.tabular_synthetic_data.openai import (\n", - " create_openai_data_generator,\n", " OPENAI_TEMPLATE,\n", + " create_openai_data_generator,\n", ")\n", "from langchain_experimental.tabular_synthetic_data.prompts import (\n", - " SYNTHETIC_FEW_SHOT_SUFFIX,\n", " SYNTHETIC_FEW_SHOT_PREFIX,\n", + " SYNTHETIC_FEW_SHOT_SUFFIX,\n", ")" ] }, @@ -254,8 +254,8 @@ "source": [ "from langchain.chat_models import ChatOpenAI\n", "from langchain_experimental.synthetic_data import (\n", - " create_data_generation_chain,\n", " DatasetGenerator,\n", + " create_data_generation_chain,\n", ")" ] }, @@ -488,12 +488,13 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.output_parsers import PydanticOutputParser\n", + "from typing import List\n", + "\n", "from langchain.chains import create_extraction_chain_pydantic\n", - "from pydantic import BaseModel, Field\n", - "from typing import List" + "from langchain.llms import OpenAI\n", + "from langchain.output_parsers import PydanticOutputParser\n", + "from langchain.prompts import PromptTemplate\n", + "from pydantic import BaseModel, Field" ] }, { diff --git a/docs/docs/use_cases/extraction.ipynb b/docs/docs/use_cases/extraction.ipynb index 0d720e4c6a1..4b5d580b5d9 100644 --- a/docs/docs/use_cases/extraction.ipynb +++ b/docs/docs/use_cases/extraction.ipynb @@ -104,8 +104,8 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains import create_extraction_chain\n", + "from langchain.chat_models import ChatOpenAI\n", "\n", "# Schema\n", "schema = {\n", @@ -380,8 +380,9 @@ ], "source": [ "from typing import Optional\n", - "from langchain.pydantic_v1 import BaseModel\n", + "\n", "from langchain.chains import create_extraction_chain_pydantic\n", + "from langchain.pydantic_v1 import BaseModel\n", "\n", "\n", "# Pydantic data class\n", @@ -445,13 +446,14 @@ } ], "source": [ - "from typing import Sequence, Optional\n", + "from typing import Optional, Sequence\n", + "\n", + "from langchain.llms import OpenAI\n", + 
"from langchain.output_parsers import PydanticOutputParser\n", "from langchain.prompts import (\n", " PromptTemplate,\n", ")\n", - "from langchain.llms import OpenAI\n", "from pydantic import BaseModel, Field, validator\n", - "from langchain.output_parsers import PydanticOutputParser\n", "\n", "\n", "class Person(BaseModel):\n", @@ -525,12 +527,12 @@ } ], "source": [ + "from langchain.llms import OpenAI\n", + "from langchain.output_parsers import PydanticOutputParser\n", "from langchain.prompts import (\n", " PromptTemplate,\n", ")\n", - "from langchain.llms import OpenAI\n", "from pydantic import BaseModel, Field, validator\n", - "from langchain.output_parsers import PydanticOutputParser\n", "\n", "\n", "# Define your desired data structure.\n", diff --git a/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb b/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb index 411a2ef0568..2e0bf932fc8 100644 --- a/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb @@ -78,8 +78,9 @@ "source": [ "# Instantiate ArangoDB Database\n", "import json\n", - "from arango import ArangoClient\n", + "\n", "from adb_cloud_connector import get_temp_credentials\n", + "from arango import ArangoClient\n", "\n", "con = get_temp_credentials()\n", "\n", @@ -424,8 +425,8 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains import ArangoGraphQAChain\n", + "from langchain.chat_models import ChatOpenAI\n", "\n", "chain = ArangoGraphQAChain.from_llm(\n", " ChatOpenAI(temperature=0), graph=graph, verbose=True\n", diff --git a/docs/docs/use_cases/graph/graph_cypher_qa.ipynb b/docs/docs/use_cases/graph/graph_cypher_qa.ipynb index b33e3d10d8f..daa06069a2a 100644 --- a/docs/docs/use_cases/graph/graph_cypher_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_cypher_qa.ipynb @@ -38,8 +38,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains import GraphCypherQAChain\n", + "from langchain.chat_models import ChatOpenAI\n", "from langchain.graphs import Neo4jGraph" ] }, @@ -394,7 +394,6 @@ "source": [ "from langchain.prompts.prompt import PromptTemplate\n", "\n", - "\n", "CYPHER_GENERATION_TEMPLATE = \"\"\"Task:Generate Cypher statement to query a graph database.\n", "Instructions:\n", "Use only the provided relationship types and properties in the schema.\n", diff --git a/docs/docs/use_cases/graph/graph_falkordb_qa.ipynb b/docs/docs/use_cases/graph/graph_falkordb_qa.ipynb index 38be4c0bd23..6089dadc21c 100644 --- a/docs/docs/use_cases/graph/graph_falkordb_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_falkordb_qa.ipynb @@ -28,9 +28,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains import FalkorDBQAChain\n", "from langchain.chat_models import ChatOpenAI\n", - "from langchain.graphs import FalkorDBGraph\n", - "from langchain.chains import FalkorDBQAChain" + "from langchain.graphs import FalkorDBGraph" ] }, { diff --git a/docs/docs/use_cases/graph/graph_hugegraph_qa.ipynb b/docs/docs/use_cases/graph/graph_hugegraph_qa.ipynb index dfd64125ae8..65b5e6dde4f 100644 --- a/docs/docs/use_cases/graph/graph_hugegraph_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_hugegraph_qa.ipynb @@ -155,8 +155,8 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains import HugeGraphQAChain\n", + "from langchain.chat_models import ChatOpenAI\n", "from langchain.graphs import HugeGraph" ] }, diff --git 
a/docs/docs/use_cases/graph/graph_kuzu_qa.ipynb b/docs/docs/use_cases/graph/graph_kuzu_qa.ipynb index 2604d0b5f2c..e3c576e7bc6 100644 --- a/docs/docs/use_cases/graph/graph_kuzu_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_kuzu_qa.ipynb @@ -130,9 +130,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains import KuzuQAChain\n", "from langchain.chat_models import ChatOpenAI\n", - "from langchain.graphs import KuzuGraph\n", - "from langchain.chains import KuzuQAChain" + "from langchain.graphs import KuzuGraph" ] }, { diff --git a/docs/docs/use_cases/graph/graph_memgraph_qa.ipynb b/docs/docs/use_cases/graph/graph_memgraph_qa.ipynb index e896e8eea07..efe84774c11 100644 --- a/docs/docs/use_cases/graph/graph_memgraph_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_memgraph_qa.ipynb @@ -64,14 +64,13 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.chains import GraphCypherQAChain\n", - "from langchain.graphs import MemgraphGraph\n", - "from langchain.prompts import PromptTemplate\n", + "import os\n", "\n", "from gqlalchemy import Memgraph\n", - "\n", - "import os" + "from langchain.chains import GraphCypherQAChain\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.graphs import MemgraphGraph\n", + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/use_cases/graph/graph_nebula_qa.ipynb b/docs/docs/use_cases/graph/graph_nebula_qa.ipynb index 738fe5c9b0e..9a4dd6a9015 100644 --- a/docs/docs/use_cases/graph/graph_nebula_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_nebula_qa.ipynb @@ -121,8 +121,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains import NebulaGraphQAChain\n", + "from langchain.chat_models import ChatOpenAI\n", "from langchain.graphs import NebulaGraph" ] }, diff --git a/docs/docs/use_cases/graph/graph_sparql_qa.ipynb b/docs/docs/use_cases/graph/graph_sparql_qa.ipynb index 288dc874ecd..3ea9f86dd6b 100644 --- a/docs/docs/use_cases/graph/graph_sparql_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_sparql_qa.ipynb @@ -30,8 +30,8 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains import GraphSparqlQAChain\n", + "from langchain.chat_models import ChatOpenAI\n", "from langchain.graphs import RdfGraph" ] }, @@ -55,13 +55,13 @@ }, { "cell_type": "markdown", - "source": [ - "Note that providing a `local_file` is necessary for storing changes locally if the source is read-only." - ], + "id": "7af596b5", "metadata": { "collapsed": false }, - "id": "7af596b5" + "source": [ + "Note that providing a `local_file` is necessary for storing changes locally if the source is read-only." 
+ ] }, { "cell_type": "markdown", diff --git a/docs/docs/use_cases/graph/neptune_cypher_qa.ipynb b/docs/docs/use_cases/graph/neptune_cypher_qa.ipynb index 68bb4e9e12a..e5dc7edd817 100644 --- a/docs/docs/use_cases/graph/neptune_cypher_qa.ipynb +++ b/docs/docs/use_cases/graph/neptune_cypher_qa.ipynb @@ -16,7 +16,6 @@ "source": [ "from langchain.graphs import NeptuneGraph\n", "\n", - "\n", "host = \"\"\n", "port = 8182\n", "use_https = True\n", @@ -41,8 +40,8 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.chains import NeptuneOpenCypherQAChain\n", + "from langchain.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0, model=\"gpt-4\")\n", "\n", diff --git a/docs/docs/use_cases/qa_structured/sql.ipynb b/docs/docs/use_cases/qa_structured/sql.ipynb index d9947df0bc7..32718d4e1a7 100644 --- a/docs/docs/use_cases/qa_structured/sql.ipynb +++ b/docs/docs/use_cases/qa_structured/sql.ipynb @@ -85,8 +85,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.utilities import SQLDatabase\n", "from langchain.llms import OpenAI\n", + "from langchain.utilities import SQLDatabase\n", "from langchain_experimental.sql import SQLDatabaseChain\n", "\n", "db = SQLDatabase.from_uri(\"sqlite:///Chinook.db\")\n", @@ -160,8 +160,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.chains import create_sql_query_chain" + "from langchain.chains import create_sql_query_chain\n", + "from langchain.chat_models import ChatOpenAI" ] }, { @@ -783,8 +783,8 @@ "outputs": [], "source": [ "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.vectorstores import FAISS\n", "from langchain.schema import Document\n", + "from langchain.vectorstores import FAISS\n", "\n", "embeddings = OpenAIEmbeddings()\n", "\n", @@ -835,10 +835,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import create_sql_agent, AgentType\n", + "from langchain.agents import AgentType, create_sql_agent\n", "from langchain.agents.agent_toolkits import SQLDatabaseToolkit\n", - "from langchain.utilities import SQLDatabase\n", "from langchain.chat_models import ChatOpenAI\n", + "from langchain.utilities import SQLDatabase\n", "\n", "db = SQLDatabase.from_uri(\"sqlite:///Chinook.db\")\n", "llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n", @@ -973,7 +973,6 @@ "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.vectorstores import FAISS\n", "\n", - "\n", "texts = artists + albums\n", "\n", "embeddings = OpenAIEmbeddings()\n", @@ -995,10 +994,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import create_sql_agent, AgentType\n", + "from langchain.agents import AgentType, create_sql_agent\n", "from langchain.agents.agent_toolkits import SQLDatabaseToolkit\n", - "from langchain.utilities import SQLDatabase\n", "from langchain.chat_models import ChatOpenAI\n", + "from langchain.utilities import SQLDatabase\n", "\n", "# db = SQLDatabase.from_uri(\"sqlite:///Chinook.db\")\n", "llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n", @@ -1160,8 +1159,8 @@ "outputs": [], "source": [ "from elasticsearch import Elasticsearch\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain" + "from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain\n", + "from langchain.chat_models import ChatOpenAI" ] }, { diff --git 
a/docs/docs/use_cases/question_answering/code_understanding.ipynb b/docs/docs/use_cases/question_answering/code_understanding.ipynb index d90d73586fe..6162a15ca8f 100644 --- a/docs/docs/use_cases/question_answering/code_understanding.ipynb +++ b/docs/docs/use_cases/question_answering/code_understanding.ipynb @@ -79,9 +79,9 @@ "outputs": [], "source": [ "# from git import Repo\n", - "from langchain.text_splitter import Language\n", "from langchain.document_loaders.generic import GenericLoader\n", - "from langchain.document_loaders.parsers import LanguageParser" + "from langchain.document_loaders.parsers import LanguageParser\n", + "from langchain.text_splitter import Language" ] }, { @@ -200,8 +200,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import Chroma\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.vectorstores import Chroma\n", "\n", "db = Chroma.from_documents(texts, OpenAIEmbeddings(disallowed_special=()))\n", "retriever = db.as_retriever(\n", @@ -231,9 +231,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains import ConversationalRetrievalChain\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.memory import ConversationSummaryMemory\n", - "from langchain.chains import ConversationalRetrievalChain\n", "\n", "llm = ChatOpenAI(model_name=\"gpt-4\")\n", "memory = ConversationSummaryMemory(\n", @@ -365,12 +365,12 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import LlamaCpp\n", - "from langchain.prompts import PromptTemplate\n", "from langchain.callbacks.manager import CallbackManager\n", - "from langchain.memory import ConversationSummaryMemory\n", + "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler" + "from langchain.llms import LlamaCpp\n", + "from langchain.memory import ConversationSummaryMemory\n", + "from langchain.prompts import PromptTemplate" ] }, { diff --git a/docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb b/docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb index 59f5cac9350..3b0aae632ab 100644 --- a/docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb +++ b/docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb @@ -41,9 +41,9 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import FAISS\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "\n", "documents = loader.load()\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", @@ -428,8 +428,8 @@ "outputs": [], "source": [ "from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent\n", - "from langchain.schema.messages import SystemMessage\n", - "from langchain.prompts import MessagesPlaceholder" + "from langchain.prompts import MessagesPlaceholder\n", + "from langchain.schema.messages import SystemMessage" ] }, { diff --git a/docs/docs/use_cases/question_answering/document-context-aware-QA.ipynb b/docs/docs/use_cases/question_answering/document-context-aware-QA.ipynb index f2237c94581..3f16c8dc1bd 100644 --- a/docs/docs/use_cases/question_answering/document-context-aware-QA.ipynb +++ 
b/docs/docs/use_cases/question_answering/document-context-aware-QA.ipynb @@ -137,9 +137,9 @@ "outputs": [], "source": [ "# Create retriever\n", + "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain.chains.query_constructor.base import AttributeInfo\n", "\n", "# Define our metadata\n", "metadata_field_info = [\n", diff --git a/docs/docs/use_cases/question_answering/index.ipynb b/docs/docs/use_cases/question_answering/index.ipynb index 3e95371191c..299f76c5a82 100644 --- a/docs/docs/use_cases/question_answering/index.ipynb +++ b/docs/docs/use_cases/question_answering/index.ipynb @@ -94,8 +94,8 @@ "source": [ "# Embed and store splits\n", "\n", - "from langchain.vectorstores import Chroma\n", "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.vectorstores import Chroma\n", "\n", "vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())\n", "retriever = vectorstore.as_retriever()" @@ -366,6 +366,7 @@ "outputs": [], "source": [ "import logging\n", + "\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.retrievers.multi_query import MultiQueryRetriever\n", "\n", diff --git a/docs/docs/use_cases/question_answering/local_retrieval_qa.ipynb b/docs/docs/use_cases/question_answering/local_retrieval_qa.ipynb index b32315a9102..319a9ef0b4b 100644 --- a/docs/docs/use_cases/question_answering/local_retrieval_qa.ipynb +++ b/docs/docs/use_cases/question_answering/local_retrieval_qa.ipynb @@ -88,8 +88,8 @@ } ], "source": [ - "from langchain.vectorstores import Chroma\n", "from langchain.embeddings import GPT4AllEmbeddings\n", + "from langchain.vectorstores import Chroma\n", "\n", "vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())" ] @@ -209,9 +209,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import LlamaCpp\n", "from langchain.callbacks.manager import CallbackManager\n", - "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler" + "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", + "from langchain.llms import LlamaCpp" ] }, { @@ -413,8 +413,8 @@ } ], "source": [ - "from langchain.prompts import PromptTemplate\n", "from langchain.chains import LLMChain\n", + "from langchain.prompts import PromptTemplate\n", "\n", "# Prompt\n", "prompt = PromptTemplate.from_template(\n", diff --git a/docs/docs/use_cases/question_answering/multiple_retrieval.ipynb b/docs/docs/use_cases/question_answering/multiple_retrieval.ipynb index 53fcb511865..4733167cf0c 100644 --- a/docs/docs/use_cases/question_answering/multiple_retrieval.ipynb +++ b/docs/docs/use_cases/question_answering/multiple_retrieval.ipynb @@ -39,8 +39,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.utilities import SQLDatabase\n", "from langchain.chains import create_sql_query_chain\n", + "from langchain.utilities import SQLDatabase\n", "\n", "db = SQLDatabase.from_uri(\"sqlite:///../../../../../notebooks/Chinook.db\")\n", "query_chain = create_sql_query_chain(ChatOpenAI(temperature=0), db)" diff --git a/docs/docs/use_cases/question_answering/vector_db_text_generation.ipynb b/docs/docs/use_cases/question_answering/vector_db_text_generation.ipynb index 89f2246eedf..a10321575ce 100644 --- a/docs/docs/use_cases/question_answering/vector_db_text_generation.ipynb +++ 
b/docs/docs/use_cases/question_answering/vector_db_text_generation.ipynb @@ -24,15 +24,16 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain.docstore.document import Document\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.vectorstores import Chroma\n", - "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.prompts import PromptTemplate\n", "import pathlib\n", "import subprocess\n", - "import tempfile" + "import tempfile\n", + "\n", + "from langchain.docstore.document import Document\n", + "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.llms import OpenAI\n", + "from langchain.prompts import PromptTemplate\n", + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.vectorstores import Chroma" ] }, { diff --git a/docs/docs/use_cases/summarization.ipynb b/docs/docs/use_cases/summarization.ipynb index 724c66360e1..dde55d81a76 100644 --- a/docs/docs/use_cases/summarization.ipynb +++ b/docs/docs/use_cases/summarization.ipynb @@ -205,9 +205,9 @@ } ], "source": [ + "from langchain.chains.summarize import load_summarize_chain\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.document_loaders import WebBaseLoader\n", - "from langchain.chains.summarize import load_summarize_chain\n", "\n", "loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n", "docs = loader.load()\n", @@ -245,9 +245,9 @@ } ], "source": [ + "from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n", "from langchain.chains.llm import LLMChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n", "\n", "# Define prompt\n", "prompt_template = \"\"\"Write a concise summary of the following:\n", @@ -298,8 +298,8 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain.chains import MapReduceDocumentsChain, ReduceDocumentsChain\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.chains import ReduceDocumentsChain, MapReduceDocumentsChain\n", "\n", "llm = ChatOpenAI(temperature=0)\n", "\n", diff --git a/docs/docs/use_cases/tagging.ipynb b/docs/docs/use_cases/tagging.ipynb index 2c927cf37bc..cffa8bebfd3 100644 --- a/docs/docs/use_cases/tagging.ipynb +++ b/docs/docs/use_cases/tagging.ipynb @@ -63,8 +63,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.chains import create_tagging_chain, create_tagging_chain_pydantic" + "from langchain.chains import create_tagging_chain, create_tagging_chain_pydantic\n", + "from langchain.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/web_scraping.ipynb b/docs/docs/use_cases/web_scraping.ipynb index ef4329aa1a3..62d64d1b333 100644 --- a/docs/docs/use_cases/web_scraping.ipynb +++ b/docs/docs/use_cases/web_scraping.ipynb @@ -407,6 +407,7 @@ ], "source": [ "import pprint\n", + "\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "\n", "\n", @@ -478,11 +479,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.vectorstores import Chroma\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.chat_models.openai import ChatOpenAI\n", + "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.retrievers.web_research import WebResearchRetriever\n", "from langchain.utilities import 
GoogleSearchAPIWrapper\n", - "from langchain.retrievers.web_research import WebResearchRetriever" + "from langchain.vectorstores import Chroma" ] }, { @@ -585,13 +586,13 @@ }, { "cell_type": "markdown", + "id": "7a940df1", "metadata": {}, "source": [ "### Going deeper \n", "\n", "* Here's an [app](https://github.com/langchain-ai/web-explorer/tree/main) that wraps this retriever with a lightweight UI." - ], - "id": "7a940df1" + ] }, { "cell_type": "markdown", diff --git a/docs/scripts/copy_templates.py b/docs/scripts/copy_templates.py index e1f1d6140e1..21b0c7a4f37 100644 --- a/docs/scripts/copy_templates.py +++ b/docs/scripts/copy_templates.py @@ -1,9 +1,8 @@ import glob import os -from pathlib import Path import re import shutil - +from pathlib import Path TEMPLATES_DIR = Path(os.path.abspath(__file__)).parents[2] / "templates" DOCS_TEMPLATES_DIR = Path(os.path.abspath(__file__)).parents[1] / "docs" / "templates"