diff --git a/cookbook/LLaMA2_sql_chat.ipynb b/cookbook/LLaMA2_sql_chat.ipynb index 9316206ad96..edd98ce44a8 100644 --- a/cookbook/LLaMA2_sql_chat.ipynb +++ b/cookbook/LLaMA2_sql_chat.ipynb @@ -61,13 +61,13 @@ ], "source": [ "# Local\n", - "from langchain.chat_models import ChatOllama\n", + "from langchain_community.chat_models import ChatOllama\n", "\n", "llama2_chat = ChatOllama(model=\"llama2:13b-chat\")\n", "llama2_code = ChatOllama(model=\"codellama:7b-instruct\")\n", "\n", "# API\n", - "from langchain.llms import Replicate\n", + "from langchain_community.llms import Replicate\n", "\n", "# REPLICATE_API_TOKEN = getpass()\n", "# os.environ[\"REPLICATE_API_TOKEN\"] = REPLICATE_API_TOKEN\n", diff --git a/cookbook/Multi_modal_RAG.ipynb b/cookbook/Multi_modal_RAG.ipynb index 642e13623b1..d185215be80 100644 --- a/cookbook/Multi_modal_RAG.ipynb +++ b/cookbook/Multi_modal_RAG.ipynb @@ -198,8 +198,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "\n", "\n", @@ -353,10 +353,10 @@ "source": [ "import uuid\n", "\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.documents import Document\n", "\n", "\n", diff --git a/cookbook/Multi_modal_RAG_google.ipynb b/cookbook/Multi_modal_RAG_google.ipynb index 568c14998be..8c8ea6dd71a 100644 --- a/cookbook/Multi_modal_RAG_google.ipynb +++ b/cookbook/Multi_modal_RAG_google.ipynb @@ -158,9 +158,9 @@ } ], "source": [ - "from langchain.chat_models import ChatVertexAI\n", - "from langchain.llms import VertexAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatVertexAI\n", + "from langchain_community.llms import VertexAI\n", "from langchain_core.messages import AIMessage\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnableLambda\n", @@ -342,10 +342,10 @@ "source": [ "import uuid\n", "\n", - "from langchain.embeddings import VertexAIEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import VertexAIEmbeddings\n", "from langchain_core.documents import Document\n", "\n", "\n", diff --git a/cookbook/Semi_Structured_RAG.ipynb b/cookbook/Semi_Structured_RAG.ipynb index 91820b69d2f..e9615c0c688 100644 --- a/cookbook/Semi_Structured_RAG.ipynb +++ b/cookbook/Semi_Structured_RAG.ipynb @@ -235,8 +235,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser" ] }, @@ -318,10 +318,10 @@ "source": [ "import uuid\n", "\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import 
OpenAIEmbeddings\n", "from langchain_core.documents import Document\n", "\n", "# The vectorstore to use to index the child chunks\n", diff --git a/cookbook/Semi_structured_and_multi_modal_RAG.ipynb b/cookbook/Semi_structured_and_multi_modal_RAG.ipynb index 3c618e45d67..e6ba451522a 100644 --- a/cookbook/Semi_structured_and_multi_modal_RAG.ipynb +++ b/cookbook/Semi_structured_and_multi_modal_RAG.ipynb @@ -211,8 +211,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser" ] }, @@ -373,10 +373,10 @@ "source": [ "import uuid\n", "\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.documents import Document\n", "\n", "# The vectorstore to use to index the child chunks\n", diff --git a/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb b/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb index 5d43158455f..7dd9ced8a8f 100644 --- a/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb +++ b/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb @@ -209,8 +209,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOllama\n", "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOllama\n", "from langchain_core.output_parsers import StrOutputParser" ] }, @@ -376,10 +376,10 @@ "source": [ "import uuid\n", "\n", - "from langchain.embeddings import GPT4AllEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import GPT4AllEmbeddings\n", "from langchain_core.documents import Document\n", "\n", "# The vectorstore to use to index the child chunks\n", diff --git a/cookbook/advanced_rag_eval.ipynb b/cookbook/advanced_rag_eval.ipynb index 2ca2048f5db..8e286a17353 100644 --- a/cookbook/advanced_rag_eval.ipynb +++ b/cookbook/advanced_rag_eval.ipynb @@ -132,8 +132,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "baseline = Chroma.from_texts(\n", " texts=all_splits_pypdf_texts,\n", @@ -160,8 +160,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "\n", "# Prompt\n", diff --git a/cookbook/agent_vectorstore.ipynb b/cookbook/agent_vectorstore.ipynb index 1ea603fd68e..38f281e6d20 100644 --- a/cookbook/agent_vectorstore.ipynb +++ b/cookbook/agent_vectorstore.ipynb @@ -28,10 +28,10 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQA\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.llms import OpenAI\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import Chroma\n", + "from 
langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)" ] @@ -161,7 +161,7 @@ "source": [ "# Import things that are needed generically\n", "from langchain.agents import AgentType, Tool, initialize_agent\n", - "from langchain.llms import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { diff --git a/cookbook/analyze_document.ipynb b/cookbook/analyze_document.ipynb index 9bfc43918a6..9b61507c1ec 100644 --- a/cookbook/analyze_document.ipynb +++ b/cookbook/analyze_document.ipynb @@ -29,7 +29,7 @@ "outputs": [], "source": [ "from langchain.chains import AnalyzeDocumentChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)" ] diff --git a/cookbook/autogpt/autogpt.ipynb b/cookbook/autogpt/autogpt.ipynb index 4410ac16c2c..6d1af21ef9d 100644 --- a/cookbook/autogpt/autogpt.ipynb +++ b/cookbook/autogpt/autogpt.ipynb @@ -62,8 +62,8 @@ "outputs": [], "source": [ "from langchain.docstore import InMemoryDocstore\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.vectorstores import FAISS" + "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings import OpenAIEmbeddings" ] }, { @@ -100,7 +100,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_experimental.autonomous_agents import AutoGPT" ] }, diff --git a/cookbook/autogpt/marathon_times.ipynb b/cookbook/autogpt/marathon_times.ipynb index 42f06075920..9141a2c3120 100644 --- a/cookbook/autogpt/marathon_times.ipynb +++ b/cookbook/autogpt/marathon_times.ipynb @@ -40,8 +40,8 @@ "import nest_asyncio\n", "import pandas as pd\n", "from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.docstore.document import Document\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_experimental.autonomous_agents import AutoGPT\n", "\n", "# Needed synce jupyter runs an async eventloop\n", @@ -311,8 +311,8 @@ "# Memory\n", "import faiss\n", "from langchain.docstore import InMemoryDocstore\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "embeddings_model = OpenAIEmbeddings()\n", "embedding_size = 1536\n", diff --git a/cookbook/baby_agi.ipynb b/cookbook/baby_agi.ipynb index a11c998e44c..de8ba80e70f 100644 --- a/cookbook/baby_agi.ipynb +++ b/cookbook/baby_agi.ipynb @@ -31,8 +31,8 @@ "source": [ "from typing import Optional\n", "\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.llms import OpenAI\n", "from langchain_experimental.autonomous_agents import BabyAGI" ] }, diff --git a/cookbook/baby_agi_with_agent.ipynb b/cookbook/baby_agi_with_agent.ipynb index 27fc1ec55a4..8d4f13c1b44 100644 --- a/cookbook/baby_agi_with_agent.ipynb +++ b/cookbook/baby_agi_with_agent.ipynb @@ -28,9 +28,9 @@ "from typing import Optional\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.llms import 
OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.llms import OpenAI\n", "from langchain_experimental.autonomous_agents import BabyAGI" ] }, @@ -108,8 +108,8 @@ "source": [ "from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.utilities import SerpAPIWrapper\n", + "from langchain_community.llms import OpenAI\n", "\n", "todo_prompt = PromptTemplate.from_template(\n", " \"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}\"\n", diff --git a/cookbook/camel_role_playing.ipynb b/cookbook/camel_role_playing.ipynb index 158b231c449..2feffde2007 100644 --- a/cookbook/camel_role_playing.ipynb +++ b/cookbook/camel_role_playing.ipynb @@ -36,7 +36,6 @@ "source": [ "from typing import List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts.chat import (\n", " HumanMessagePromptTemplate,\n", " SystemMessagePromptTemplate,\n", @@ -46,7 +45,8 @@ " BaseMessage,\n", " HumanMessage,\n", " SystemMessage,\n", - ")" + ")\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/causal_program_aided_language_model.ipynb b/cookbook/causal_program_aided_language_model.ipynb index 2c2c1f3d83a..5e5b3c0b574 100644 --- a/cookbook/causal_program_aided_language_model.ipynb +++ b/cookbook/causal_program_aided_language_model.ipynb @@ -47,7 +47,7 @@ "outputs": [], "source": [ "from IPython.display import SVG\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "from langchain_experimental.cpal.base import CPALChain\n", "from langchain_experimental.pal_chain import PALChain\n", "\n", diff --git a/cookbook/code-analysis-deeplake.ipynb b/cookbook/code-analysis-deeplake.ipynb index 50665047663..65c5babe7ed 100644 --- a/cookbook/code-analysis-deeplake.ipynb +++ b/cookbook/code-analysis-deeplake.ipynb @@ -657,7 +657,7 @@ } ], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()\n", "embeddings" @@ -834,7 +834,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(\n", " model_name=\"gpt-3.5-turbo-0613\"\n", diff --git a/cookbook/custom_agent_with_plugin_retrieval.ipynb b/cookbook/custom_agent_with_plugin_retrieval.ipynb index 9b081065fd9..8113d5fcd70 100644 --- a/cookbook/custom_agent_with_plugin_retrieval.ipynb +++ b/cookbook/custom_agent_with_plugin_retrieval.ipynb @@ -42,10 +42,10 @@ ")\n", "from langchain.agents.agent_toolkits import NLAToolkit\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", - "from langchain.tools.plugin import AIPlugin" + "from langchain.tools.plugin import AIPlugin\n", + "from langchain_community.llms import OpenAI" ] }, { @@ -114,9 +114,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.schema import Document\n", - "from langchain.vectorstores import 
FAISS" + "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings import OpenAIEmbeddings" ] }, { diff --git a/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb b/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb index 2937337d0da..ece5f081990 100644 --- a/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb +++ b/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb @@ -67,10 +67,10 @@ ")\n", "from langchain.agents.agent_toolkits import NLAToolkit\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", - "from langchain.tools.plugin import AIPlugin" + "from langchain.tools.plugin import AIPlugin\n", + "from langchain_community.llms import OpenAI" ] }, { @@ -138,9 +138,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.schema import Document\n", - "from langchain.vectorstores import FAISS" + "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings import OpenAIEmbeddings" ] }, { diff --git a/cookbook/custom_agent_with_tool_retrieval.ipynb b/cookbook/custom_agent_with_tool_retrieval.ipynb index 6abd97bcffe..d9259749dd8 100644 --- a/cookbook/custom_agent_with_tool_retrieval.ipynb +++ b/cookbook/custom_agent_with_tool_retrieval.ipynb @@ -39,10 +39,10 @@ " Tool,\n", ")\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", - "from langchain.utilities import SerpAPIWrapper" + "from langchain.utilities import SerpAPIWrapper\n", + "from langchain_community.llms import OpenAI" ] }, { @@ -103,9 +103,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.schema import Document\n", - "from langchain.vectorstores import FAISS" + "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings import OpenAIEmbeddings" ] }, { diff --git a/cookbook/databricks_sql_db.ipynb b/cookbook/databricks_sql_db.ipynb index c37794143b8..f00d9953497 100644 --- a/cookbook/databricks_sql_db.ipynb +++ b/cookbook/databricks_sql_db.ipynb @@ -93,7 +93,7 @@ "outputs": [], "source": [ "# Creating a OpenAI Chat LLM wrapper\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0, model_name=\"gpt-4\")" ] diff --git a/cookbook/deeplake_semantic_search_over_chat.ipynb b/cookbook/deeplake_semantic_search_over_chat.ipynb index aa6eb8c7cc2..6c146cfc060 100644 --- a/cookbook/deeplake_semantic_search_over_chat.ipynb +++ b/cookbook/deeplake_semantic_search_over_chat.ipynb @@ -52,13 +52,13 @@ "import os\n", "\n", "from langchain.chains import RetrievalQA\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.llms import OpenAI\n", "from langchain.text_splitter import (\n", " CharacterTextSplitter,\n", " RecursiveCharacterTextSplitter,\n", ")\n", "from langchain.vectorstores import DeepLake\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.llms import OpenAI\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "activeloop_token = getpass.getpass(\"Activeloop 
Token:\")\n", diff --git a/cookbook/docugami_xml_kg_rag.ipynb b/cookbook/docugami_xml_kg_rag.ipynb index 9e3170179c0..6610e82d186 100644 --- a/cookbook/docugami_xml_kg_rag.ipynb +++ b/cookbook/docugami_xml_kg_rag.ipynb @@ -470,12 +470,12 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import (\n", " ChatPromptTemplate,\n", " HumanMessagePromptTemplate,\n", " SystemMessagePromptTemplate,\n", ")\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser" ] }, @@ -545,10 +545,10 @@ "source": [ "import uuid\n", "\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", "from langchain.vectorstores.chroma import Chroma\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.documents import Document\n", "\n", "\n", diff --git a/cookbook/elasticsearch_db_qa.ipynb b/cookbook/elasticsearch_db_qa.ipynb index 02a8faa77a4..af4bc6d71e6 100644 --- a/cookbook/elasticsearch_db_qa.ipynb +++ b/cookbook/elasticsearch_db_qa.ipynb @@ -39,7 +39,7 @@ "source": [ "from elasticsearch import Elasticsearch\n", "from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/extraction_openai_tools.ipynb b/cookbook/extraction_openai_tools.ipynb index 23a8c1f2d7b..fd251b35d07 100644 --- a/cookbook/extraction_openai_tools.ipynb +++ b/cookbook/extraction_openai_tools.ipynb @@ -22,7 +22,7 @@ "from typing import List, Optional\n", "\n", "from langchain.chains.openai_tools import create_extraction_chain_pydantic\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.pydantic_v1 import BaseModel" ] }, diff --git a/cookbook/fake_llm.ipynb b/cookbook/fake_llm.ipynb index 016f3e9fcce..7d6fb84bb13 100644 --- a/cookbook/fake_llm.ipynb +++ b/cookbook/fake_llm.ipynb @@ -20,7 +20,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms.fake import FakeListLLM" + "from langchain_community.llms.fake import FakeListLLM" ] }, { diff --git a/cookbook/forward_looking_retrieval_augmented_generation.ipynb b/cookbook/forward_looking_retrieval_augmented_generation.ipynb index ff17aac14a8..d6cacd0a3f2 100644 --- a/cookbook/forward_looking_retrieval_augmented_generation.ipynb +++ b/cookbook/forward_looking_retrieval_augmented_generation.ipynb @@ -73,10 +73,10 @@ " AsyncCallbackManagerForRetrieverRun,\n", " CallbackManagerForRetrieverRun,\n", ")\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.llms import OpenAI\n", "from langchain.schema import BaseRetriever, Document\n", - "from langchain.utilities import GoogleSerperAPIWrapper" + "from langchain.utilities import GoogleSerperAPIWrapper\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb b/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb index 8313966cb41..c28ec714465 100644 --- a/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb +++ b/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb @@ -47,11 +47,11 
@@ "from datetime import datetime, timedelta\n", "from typing import List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.docstore import InMemoryDocstore\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers import TimeWeightedVectorStoreRetriever\n", "from langchain.vectorstores import FAISS\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from termcolor import colored" ] }, diff --git a/cookbook/hugginggpt.ipynb b/cookbook/hugginggpt.ipynb index 41fe127f0bc..d94076cbf38 100644 --- a/cookbook/hugginggpt.ipynb +++ b/cookbook/hugginggpt.ipynb @@ -75,7 +75,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "from langchain_experimental.autonomous_agents import HuggingGPT\n", "\n", "# %env OPENAI_API_BASE=http://localhost:8000/v1" diff --git a/cookbook/human_approval.ipynb b/cookbook/human_approval.ipynb index a01b7269cef..aae360250f1 100644 --- a/cookbook/human_approval.ipynb +++ b/cookbook/human_approval.ipynb @@ -159,7 +159,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain.llms import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { diff --git a/cookbook/human_input_chat_model.ipynb b/cookbook/human_input_chat_model.ipynb index 35a2f5969d0..e2ecbfc951f 100644 --- a/cookbook/human_input_chat_model.ipynb +++ b/cookbook/human_input_chat_model.ipynb @@ -20,7 +20,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models.human import HumanInputChatModel" + "from langchain_community.chat_models.human import HumanInputChatModel" ] }, { diff --git a/cookbook/human_input_llm.ipynb b/cookbook/human_input_llm.ipynb index c06da208d38..fa8a8774082 100644 --- a/cookbook/human_input_llm.ipynb +++ b/cookbook/human_input_llm.ipynb @@ -19,7 +19,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms.human import HumanInputLLM" + "from langchain_community.llms.human import HumanInputLLM" ] }, { diff --git a/cookbook/hypothetical_document_embeddings.ipynb b/cookbook/hypothetical_document_embeddings.ipynb index d815aa9c443..b729675dd13 100644 --- a/cookbook/hypothetical_document_embeddings.ipynb +++ b/cookbook/hypothetical_document_embeddings.ipynb @@ -21,9 +21,9 @@ "outputs": [], "source": [ "from langchain.chains import HypotheticalDocumentEmbedder, LLMChain\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.llms import OpenAI\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/cookbook/learned_prompt_optimization.ipynb b/cookbook/learned_prompt_optimization.ipynb index eded9d6804d..3f4d02dd461 100644 --- a/cookbook/learned_prompt_optimization.ipynb +++ b/cookbook/learned_prompt_optimization.ipynb @@ -49,7 +49,7 @@ "source": [ "# pick and configure the LLM of your choice\n", "\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")" ] diff --git a/cookbook/llm_bash.ipynb b/cookbook/llm_bash.ipynb index c247908acf5..9a345df74f4 100644 --- a/cookbook/llm_bash.ipynb +++ b/cookbook/llm_bash.ipynb @@ -43,7 +43,7 @@ } ], "source": [ - "from langchain.llms 
import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "from langchain_experimental.llm_bash.base import LLMBashChain\n", "\n", "llm = OpenAI(temperature=0)\n", diff --git a/cookbook/llm_checker.ipynb b/cookbook/llm_checker.ipynb index eea872bf719..cfc5f2356ab 100644 --- a/cookbook/llm_checker.ipynb +++ b/cookbook/llm_checker.ipynb @@ -42,7 +42,7 @@ ], "source": [ "from langchain.chains import LLMCheckerChain\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0.7)\n", "\n", diff --git a/cookbook/llm_math.ipynb b/cookbook/llm_math.ipynb index 0e2079b9558..e0a026ba35c 100644 --- a/cookbook/llm_math.ipynb +++ b/cookbook/llm_math.ipynb @@ -46,7 +46,7 @@ ], "source": [ "from langchain.chains import LLMMathChain\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "llm_math = LLMMathChain.from_llm(llm, verbose=True)\n", diff --git a/cookbook/llm_summarization_checker.ipynb b/cookbook/llm_summarization_checker.ipynb index f4679f2463d..8501c98daf9 100644 --- a/cookbook/llm_summarization_checker.ipynb +++ b/cookbook/llm_summarization_checker.ipynb @@ -331,7 +331,7 @@ ], "source": [ "from langchain.chains import LLMSummarizationCheckerChain\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=2)\n", @@ -822,7 +822,7 @@ ], "source": [ "from langchain.chains import LLMSummarizationCheckerChain\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=3)\n", @@ -1096,7 +1096,7 @@ ], "source": [ "from langchain.chains import LLMSummarizationCheckerChain\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "checker_chain = LLMSummarizationCheckerChain.from_llm(llm, max_checks=3, verbose=True)\n", diff --git a/cookbook/llm_symbolic_math.ipynb b/cookbook/llm_symbolic_math.ipynb index bcd500b76c8..10275f83ca5 100644 --- a/cookbook/llm_symbolic_math.ipynb +++ b/cookbook/llm_symbolic_math.ipynb @@ -14,7 +14,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "from langchain_experimental.llm_symbolic_math.base import LLMSymbolicMathChain\n", "\n", "llm = OpenAI(temperature=0)\n", diff --git a/cookbook/meta_prompt.ipynb b/cookbook/meta_prompt.ipynb index 2339907a269..f0e78ab197d 100644 --- a/cookbook/meta_prompt.ipynb +++ b/cookbook/meta_prompt.ipynb @@ -57,9 +57,9 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferWindowMemory\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/cookbook/multi_modal_QA.ipynb b/cookbook/multi_modal_QA.ipynb index 2c52034e2c2..1e316cdb07d 100644 --- a/cookbook/multi_modal_QA.ipynb +++ b/cookbook/multi_modal_QA.ipynb @@ -91,7 +91,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from 
langchain_core.messages import HumanMessage, SystemMessage" ] }, diff --git a/cookbook/multi_modal_RAG_chroma.ipynb b/cookbook/multi_modal_RAG_chroma.ipynb index 7df53b8ff9e..c4305d64ae5 100644 --- a/cookbook/multi_modal_RAG_chroma.ipynb +++ b/cookbook/multi_modal_RAG_chroma.ipynb @@ -315,7 +315,7 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.messages import HumanMessage, SystemMessage\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", diff --git a/cookbook/multi_modal_output_agent.ipynb b/cookbook/multi_modal_output_agent.ipynb index 71f39e31a74..8626c3bcf2d 100644 --- a/cookbook/multi_modal_output_agent.ipynb +++ b/cookbook/multi_modal_output_agent.ipynb @@ -43,8 +43,8 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent\n", - "from langchain.llms import OpenAI\n", - "from langchain.tools import SteamshipImageGenerationTool" + "from langchain.tools import SteamshipImageGenerationTool\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/cookbook/multi_player_dnd.ipynb b/cookbook/multi_player_dnd.ipynb index 3a9c3e4b012..c03bb6ad22e 100644 --- a/cookbook/multi_player_dnd.ipynb +++ b/cookbook/multi_player_dnd.ipynb @@ -28,11 +28,11 @@ "source": [ "from typing import Callable, List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import (\n", " HumanMessage,\n", " SystemMessage,\n", - ")" + ")\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/multiagent_authoritarian.ipynb b/cookbook/multiagent_authoritarian.ipynb index 18b2fcb7815..8e9a82a062d 100644 --- a/cookbook/multiagent_authoritarian.ipynb +++ b/cookbook/multiagent_authoritarian.ipynb @@ -33,7 +33,6 @@ "from typing import Callable, List\n", "\n", "import tenacity\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.output_parsers import RegexParser\n", "from langchain.prompts import (\n", " PromptTemplate,\n", @@ -41,7 +40,8 @@ "from langchain.schema import (\n", " HumanMessage,\n", " SystemMessage,\n", - ")" + ")\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/multiagent_bidding.ipynb b/cookbook/multiagent_bidding.ipynb index 7ee0d7321ee..1ee6383d920 100644 --- a/cookbook/multiagent_bidding.ipynb +++ b/cookbook/multiagent_bidding.ipynb @@ -27,13 +27,13 @@ "from typing import Callable, List\n", "\n", "import tenacity\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.output_parsers import RegexParser\n", "from langchain.prompts import PromptTemplate\n", "from langchain.schema import (\n", " HumanMessage,\n", " SystemMessage,\n", - ")" + ")\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/myscale_vector_sql.ipynb b/cookbook/myscale_vector_sql.ipynb index af50a5a154a..b2d9feefb17 100644 --- a/cookbook/myscale_vector_sql.ipynb +++ b/cookbook/myscale_vector_sql.ipynb @@ -31,9 +31,9 @@ "from os import environ\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", "from langchain.utilities import SQLDatabase\n", + "from langchain_community.llms import OpenAI\n", "from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n", "from sqlalchemy import 
MetaData, create_engine\n", "\n", @@ -57,7 +57,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import HuggingFaceInstructEmbeddings\n", + "from langchain_community.embeddings import HuggingFaceInstructEmbeddings\n", "from langchain_experimental.sql.vector_sql import VectorSQLOutputParser\n", "\n", "output_parser = VectorSQLOutputParser.from_embeddings(\n", @@ -75,8 +75,8 @@ "outputs": [], "source": [ "from langchain.callbacks import StdOutCallbackHandler\n", - "from langchain.llms import OpenAI\n", "from langchain.utilities.sql_database import SQLDatabase\n", + "from langchain_community.llms import OpenAI\n", "from langchain_experimental.sql.prompt import MYSCALE_PROMPT\n", "from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n", "\n", @@ -117,7 +117,7 @@ "outputs": [], "source": [ "from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_experimental.retrievers.vector_sql_database import (\n", " VectorSQLDatabaseChainRetriever,\n", ")\n", diff --git a/cookbook/openai_functions_retrieval_qa.ipynb b/cookbook/openai_functions_retrieval_qa.ipynb index f5bce419330..694ae747931 100644 --- a/cookbook/openai_functions_retrieval_qa.ipynb +++ b/cookbook/openai_functions_retrieval_qa.ipynb @@ -21,9 +21,9 @@ "source": [ "from langchain.chains import RetrievalQA\n", "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Chroma" + "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { @@ -52,8 +52,8 @@ "source": [ "from langchain.chains import create_qa_with_sources_chain\n", "from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/openai_v1_cookbook.ipynb b/cookbook/openai_v1_cookbook.ipynb index a8bcc2cf541..8e0b95020ae 100644 --- a/cookbook/openai_v1_cookbook.ipynb +++ b/cookbook/openai_v1_cookbook.ipynb @@ -28,7 +28,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.messages import HumanMessage, SystemMessage" ] }, @@ -414,7 +414,7 @@ "BREAKING CHANGES:\n", "- To use Azure embeddings with OpenAI V1, you'll need to use the new `AzureOpenAIEmbeddings` instead of the existing `OpenAIEmbeddings`. 
`OpenAIEmbeddings` continue to work when using Azure with `openai<1`.\n", "```python\n", - "from langchain.embeddings import AzureOpenAIEmbeddings\n", + "from langchain_community.embeddings import AzureOpenAIEmbeddings\n", "```\n", "\n", "\n", diff --git a/cookbook/petting_zoo.ipynb b/cookbook/petting_zoo.ipynb index bfb8c1a6e90..5c269b1a67c 100644 --- a/cookbook/petting_zoo.ipynb +++ b/cookbook/petting_zoo.ipynb @@ -47,12 +47,12 @@ "import inspect\n", "\n", "import tenacity\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.output_parsers import RegexParser\n", "from langchain.schema import (\n", " HumanMessage,\n", " SystemMessage,\n", - ")" + ")\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/plan_and_execute_agent.ipynb b/cookbook/plan_and_execute_agent.ipynb index e37ee550be1..50e5b1ab65f 100644 --- a/cookbook/plan_and_execute_agent.ipynb +++ b/cookbook/plan_and_execute_agent.ipynb @@ -30,9 +30,9 @@ "outputs": [], "source": [ "from langchain.chains import LLMMathChain\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.llms import OpenAI\n", "from langchain.utilities import DuckDuckGoSearchAPIWrapper\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.llms import OpenAI\n", "from langchain_core.tools import Tool\n", "from langchain_experimental.plan_and_execute import (\n", " PlanAndExecute,\n", diff --git a/cookbook/press_releases.ipynb b/cookbook/press_releases.ipynb index beb89f10c4a..a86927f7afe 100644 --- a/cookbook/press_releases.ipynb +++ b/cookbook/press_releases.ipynb @@ -81,8 +81,8 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.retrievers import KayAiRetriever\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n", "retriever = KayAiRetriever.create(\n", diff --git a/cookbook/program_aided_language_model.ipynb b/cookbook/program_aided_language_model.ipynb index dba6c5eef59..5eed7766eae 100644 --- a/cookbook/program_aided_language_model.ipynb +++ b/cookbook/program_aided_language_model.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "from langchain_experimental.pal_chain import PALChain" ] }, diff --git a/cookbook/qa_citations.ipynb b/cookbook/qa_citations.ipynb index 06754692ddd..2ca389a063e 100644 --- a/cookbook/qa_citations.ipynb +++ b/cookbook/qa_citations.ipynb @@ -27,7 +27,7 @@ ], "source": [ "from langchain.chains import create_citation_fuzzy_match_chain\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/qianfan_baidu_elasticesearch_RAG.ipynb b/cookbook/qianfan_baidu_elasticesearch_RAG.ipynb index f0d1822b1e7..2446c50a465 100644 --- a/cookbook/qianfan_baidu_elasticesearch_RAG.ipynb +++ b/cookbook/qianfan_baidu_elasticesearch_RAG.ipynb @@ -60,10 +60,10 @@ "from baidubce.bce_client_configuration import BceClientConfiguration\n", "from langchain.chains.retrieval_qa import RetrievalQA\n", "from langchain.document_loaders.baiducloud_bos_directory import BaiduBOSDirectoryLoader\n", - "from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n", - "from langchain.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint\n", "from langchain.text_splitter import 
RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import BESVectorStore" + "from langchain.vectorstores import BESVectorStore\n", + "from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings\n", + "from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint" ] }, { diff --git a/cookbook/rag_fusion.ipynb b/cookbook/rag_fusion.ipynb index f7823fb18cb..99d247128ee 100644 --- a/cookbook/rag_fusion.ipynb +++ b/cookbook/rag_fusion.ipynb @@ -30,8 +30,8 @@ "outputs": [], "source": [ "import pinecone\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.vectorstores import Pinecone\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "pinecone.init(api_key=\"...\", environment=\"...\")" ] @@ -86,7 +86,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser" ] }, diff --git a/cookbook/retrieval_in_sql.ipynb b/cookbook/retrieval_in_sql.ipynb index 1a4c27a689d..32d2384cadb 100644 --- a/cookbook/retrieval_in_sql.ipynb +++ b/cookbook/retrieval_in_sql.ipynb @@ -42,8 +42,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.sql_database import SQLDatabase\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "CONNECTION_STRING = \"postgresql+psycopg2://postgres:test@localhost:5432/vectordb\" # Replace with your own\n", "db = SQLDatabase.from_uri(CONNECTION_STRING)" @@ -88,7 +88,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "embeddings_model = OpenAIEmbeddings()" ] @@ -267,7 +267,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough\n", "\n", diff --git a/cookbook/rewrite.ipynb b/cookbook/rewrite.ipynb index 4faf2babcc5..69318f55cf7 100644 --- a/cookbook/rewrite.ipynb +++ b/cookbook/rewrite.ipynb @@ -31,9 +31,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.utilities import DuckDuckGoSearchAPIWrapper\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough" ] diff --git a/cookbook/sales_agent_with_context.ipynb b/cookbook/sales_agent_with_context.ipynb index f402ecb0b92..48baa6f75c3 100644 --- a/cookbook/sales_agent_with_context.ipynb +++ b/cookbook/sales_agent_with_context.ipynb @@ -49,14 +49,14 @@ "from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS\n", "from langchain.chains import LLMChain, RetrievalQA\n", "from langchain.chains.base import Chain\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.llms import BaseLLM, OpenAI\n", "from langchain.prompts import PromptTemplate\n", "from langchain.prompts.base import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", "from langchain.text_splitter import CharacterTextSplitter\n", 
"from langchain.vectorstores import Chroma\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.llms import BaseLLM, OpenAI\n", "from pydantic import BaseModel, Field" ] }, diff --git a/cookbook/selecting_llms_based_on_context_length.ipynb b/cookbook/selecting_llms_based_on_context_length.ipynb index 58976419b53..ae885f5e0b6 100644 --- a/cookbook/selecting_llms_based_on_context_length.ipynb +++ b/cookbook/selecting_llms_based_on_context_length.ipynb @@ -17,8 +17,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.prompt_values import PromptValue" ] diff --git a/cookbook/self_query_hotel_search.ipynb b/cookbook/self_query_hotel_search.ipynb index 5a84a02f999..a3b2f20d2fe 100644 --- a/cookbook/self_query_hotel_search.ipynb +++ b/cookbook/self_query_hotel_search.ipynb @@ -255,7 +255,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(model=\"gpt-4\")\n", "res = model.predict(\n", @@ -1083,8 +1083,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.vectorstores import ElasticsearchStore\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git a/cookbook/sharedmemory_for_tools.ipynb b/cookbook/sharedmemory_for_tools.ipynb index 7c1e5df5d55..1624199fe2f 100644 --- a/cookbook/sharedmemory_for_tools.ipynb +++ b/cookbook/sharedmemory_for_tools.ipynb @@ -24,10 +24,10 @@ "source": [ "from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n", "from langchain.prompts import PromptTemplate\n", - "from langchain.utilities import GoogleSearchAPIWrapper" + "from langchain.utilities import GoogleSearchAPIWrapper\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/cookbook/smart_llm.ipynb b/cookbook/smart_llm.ipynb index b7146daaae5..b8bb31c97e3 100644 --- a/cookbook/smart_llm.ipynb +++ b/cookbook/smart_llm.ipynb @@ -51,8 +51,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_experimental.smart_llm import SmartLLMChain" ] }, diff --git a/cookbook/sql_db_qa.mdx b/cookbook/sql_db_qa.mdx index 39e0a8209bf..edc96480f24 100644 --- a/cookbook/sql_db_qa.mdx +++ b/cookbook/sql_db_qa.mdx @@ -9,7 +9,7 @@ To set it up, follow the instructions on https://database.guide/2-sample-databas ```python -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI from langchain.utilities import SQLDatabase from langchain_experimental.sql import SQLDatabaseChain ``` @@ -200,7 +200,7 @@ result["intermediate_steps"] How to add memory to a SQLDatabaseChain: ```python -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI from langchain.utilities import SQLDatabase from langchain_experimental.sql import 
SQLDatabaseChain ``` @@ -647,7 +647,7 @@ Sometimes you may not have the luxury of using OpenAI or other service-hosted la import logging import torch from transformers import AutoTokenizer, GPT2TokenizerFast, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM -from langchain.llms import HuggingFacePipeline +from langchain_community.llms import HuggingFacePipeline # Note: This model requires a large GPU, e.g. an 80GB A100. See documentation for other ways to run private non-OpenAI models. model_id = "google/flan-ul2" @@ -994,7 +994,7 @@ Now that you have some examples (with manually corrected output SQL), you can do ```python from langchain.prompts import FewShotPromptTemplate, PromptTemplate from langchain.chains.sql_database.prompt import _sqlite_prompt, PROMPT_SUFFIX -from langchain.embeddings.huggingface import HuggingFaceEmbeddings +from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings from langchain.prompts.example_selector.semantic_similarity import SemanticSimilarityExampleSelector from langchain.vectorstores import Chroma diff --git a/cookbook/stepback-qa.ipynb b/cookbook/stepback-qa.ipynb index 920d0001a2b..60a9252b637 100644 --- a/cookbook/stepback-qa.ipynb +++ b/cookbook/stepback-qa.ipynb @@ -23,8 +23,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnableLambda" ] diff --git a/cookbook/tree_of_thought.ipynb b/cookbook/tree_of_thought.ipynb index 7ca32eff7c2..6fead2e0cc8 100644 --- a/cookbook/tree_of_thought.ipynb +++ b/cookbook/tree_of_thought.ipynb @@ -24,7 +24,7 @@ } ], "source": [ - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=1, max_tokens=512, model=\"gpt-3.5-turbo-instruct\")" ] diff --git a/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb b/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb index 0e1998a93cd..23e07c78f41 100644 --- a/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb +++ b/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb @@ -37,8 +37,8 @@ "import getpass\n", "import os\n", "\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.vectorstores import DeepLake\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "activeloop_token = getpass.getpass(\"Activeloop Token:\")\n", @@ -3809,7 +3809,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo-0613\") # switch to 'gpt-4'\n", "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)" diff --git a/cookbook/two_agent_debate_tools.ipynb b/cookbook/two_agent_debate_tools.ipynb index 808053733ed..9fc9d1a757c 100644 --- a/cookbook/two_agent_debate_tools.ipynb +++ b/cookbook/two_agent_debate_tools.ipynb @@ -24,13 +24,13 @@ "source": [ "from typing import Callable, List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.memory import ConversationBufferMemory\n", "from langchain.schema import (\n", " AIMessage,\n", " 
HumanMessage,\n", " SystemMessage,\n", - ")" + ")\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/two_player_dnd.ipynb b/cookbook/two_player_dnd.ipynb index 627a683e1f8..c36a59b4774 100644 --- a/cookbook/two_player_dnd.ipynb +++ b/cookbook/two_player_dnd.ipynb @@ -24,11 +24,11 @@ "source": [ "from typing import Callable, List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import (\n", " HumanMessage,\n", " SystemMessage,\n", - ")" + ")\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/cookbook/wikibase_agent.ipynb b/cookbook/wikibase_agent.ipynb index b44df0c1bc0..71c5294b8cf 100644 --- a/cookbook/wikibase_agent.ipynb +++ b/cookbook/wikibase_agent.ipynb @@ -599,7 +599,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)" ] diff --git a/docs/docs/_templates/integration.mdx b/docs/docs/_templates/integration.mdx index addb12042b9..1ef74269cdc 100644 --- a/docs/docs/_templates/integration.mdx +++ b/docs/docs/_templates/integration.mdx @@ -32,7 +32,7 @@ There isn't any special setup for it. See a [usage example](/docs/integrations/llms/INCLUDE_REAL_NAME). ```python -from langchain.llms import integration_class_REPLACE_ME +from langchain_community.llms import integration_class_REPLACE_ME ``` ## Text Embedding Models @@ -40,7 +40,7 @@ from langchain.llms import integration_class_REPLACE_ME See a [usage example](/docs/integrations/text_embedding/INCLUDE_REAL_NAME) ```python -from langchain.embeddings import integration_class_REPLACE_ME +from langchain_community.embeddings import integration_class_REPLACE_ME ``` ## Chat models @@ -48,7 +48,7 @@ from langchain.embeddings import integration_class_REPLACE_ME See a [usage example](/docs/integrations/chat/INCLUDE_REAL_NAME) ```python -from langchain.chat_models import integration_class_REPLACE_ME +from langchain_community.chat_models import integration_class_REPLACE_ME ``` ## Document Loader diff --git a/docs/docs/expression_language/cookbook/agent.ipynb b/docs/docs/expression_language/cookbook/agent.ipynb index 325ce992c1c..5459c6cc11f 100644 --- a/docs/docs/expression_language/cookbook/agent.ipynb +++ b/docs/docs/expression_language/cookbook/agent.ipynb @@ -20,7 +20,7 @@ "from langchain import hub\n", "from langchain.agents import AgentExecutor, tool\n", "from langchain.agents.output_parsers import XMLAgentOutputParser\n", - "from langchain.chat_models import ChatAnthropic" + "from langchain_community.chat_models import ChatAnthropic" ] }, { diff --git a/docs/docs/expression_language/cookbook/code_writing.ipynb b/docs/docs/expression_language/cookbook/code_writing.ipynb index 5da7992c735..e3f59f39ea2 100644 --- a/docs/docs/expression_language/cookbook/code_writing.ipynb +++ b/docs/docs/expression_language/cookbook/code_writing.ipynb @@ -17,10 +17,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import (\n", " ChatPromptTemplate,\n", ")\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_experimental.utilities import PythonREPL" ] diff --git a/docs/docs/expression_language/cookbook/embedding_router.ipynb b/docs/docs/expression_language/cookbook/embedding_router.ipynb index 2123963b71a..51e193709de 100644 
--- a/docs/docs/expression_language/cookbook/embedding_router.ipynb +++ b/docs/docs/expression_language/cookbook/embedding_router.ipynb @@ -19,10 +19,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.prompts import PromptTemplate\n", "from langchain.utils.math import cosine_similarity\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", "\n", diff --git a/docs/docs/expression_language/cookbook/memory.ipynb b/docs/docs/expression_language/cookbook/memory.ipynb index a5fcbbb8c2e..6fd8a692648 100644 --- a/docs/docs/expression_language/cookbook/memory.ipynb +++ b/docs/docs/expression_language/cookbook/memory.ipynb @@ -19,9 +19,9 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.memory import ConversationBufferMemory\n", "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", "\n", "model = ChatOpenAI()\n", diff --git a/docs/docs/expression_language/cookbook/moderation.ipynb b/docs/docs/expression_language/cookbook/moderation.ipynb index 1d091e3497d..1ab1c117dee 100644 --- a/docs/docs/expression_language/cookbook/moderation.ipynb +++ b/docs/docs/expression_language/cookbook/moderation.ipynb @@ -18,8 +18,8 @@ "outputs": [], "source": [ "from langchain.chains import OpenAIModerationChain\n", - "from langchain.llms import OpenAI\n", - "from langchain.prompts import ChatPromptTemplate" + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/expression_language/cookbook/multiple_chains.ipynb b/docs/docs/expression_language/cookbook/multiple_chains.ipynb index 01a98cea328..2aecbb9cb32 100644 --- a/docs/docs/expression_language/cookbook/multiple_chains.ipynb +++ b/docs/docs/expression_language/cookbook/multiple_chains.ipynb @@ -39,9 +39,9 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.schema import StrOutputParser\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "prompt1 = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n", "prompt2 = ChatPromptTemplate.from_template(\n", diff --git a/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb b/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb index 9f6bae10cfc..6abaf835b33 100644 --- a/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb +++ b/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb @@ -42,8 +42,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "prompt = ChatPromptTemplate.from_template(\"tell me a joke about {foo}\")\n", "model = ChatOpenAI()\n", diff --git a/docs/docs/expression_language/cookbook/prompt_size.ipynb b/docs/docs/expression_language/cookbook/prompt_size.ipynb index 2ee6945c88a..f407ffc0be5 
100644 --- a/docs/docs/expression_language/cookbook/prompt_size.ipynb +++ b/docs/docs/expression_language/cookbook/prompt_size.ipynb @@ -26,12 +26,12 @@ "from langchain.agents import AgentExecutor, load_tools\n", "from langchain.agents.format_scratchpad import format_to_openai_function_messages\n", "from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "from langchain.prompts.chat import ChatPromptValue\n", "from langchain.tools import WikipediaQueryRun\n", "from langchain.tools.render import format_tool_to_openai_function\n", - "from langchain.utilities import WikipediaAPIWrapper" + "from langchain.utilities import WikipediaAPIWrapper\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/expression_language/cookbook/retrieval.ipynb b/docs/docs/expression_language/cookbook/retrieval.ipynb index f914170aa86..7bc6acd3e52 100644 --- a/docs/docs/expression_language/cookbook/retrieval.ipynb +++ b/docs/docs/expression_language/cookbook/retrieval.ipynb @@ -38,10 +38,10 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.vectorstores import FAISS\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough" ] diff --git a/docs/docs/expression_language/cookbook/sql_db.ipynb b/docs/docs/expression_language/cookbook/sql_db.ipynb index 71fe2dcc341..5dc2a31c77b 100644 --- a/docs/docs/expression_language/cookbook/sql_db.ipynb +++ b/docs/docs/expression_language/cookbook/sql_db.ipynb @@ -93,7 +93,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough\n", "\n", diff --git a/docs/docs/expression_language/cookbook/tools.ipynb b/docs/docs/expression_language/cookbook/tools.ipynb index c0f6a99794d..8d5ab060058 100644 --- a/docs/docs/expression_language/cookbook/tools.ipynb +++ b/docs/docs/expression_language/cookbook/tools.ipynb @@ -27,9 +27,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.tools import DuckDuckGoSearchRun\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser" ] }, diff --git a/docs/docs/expression_language/get_started.ipynb b/docs/docs/expression_language/get_started.ipynb index d3533eacd34..63c62052e44 100644 --- a/docs/docs/expression_language/get_started.ipynb +++ b/docs/docs/expression_language/get_started.ipynb @@ -48,8 +48,8 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "\n", "prompt = ChatPromptTemplate.from_template(\"tell me a short joke about {topic}\")\n", @@ -209,7 +209,7 @@ } ], "source": [ 
- "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n", "llm.invoke(prompt_value)" @@ -324,10 +324,10 @@ "# Requires:\n", "# pip install langchain docarray tiktoken\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.vectorstores import DocArrayInMemorySearch\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n", "\n", diff --git a/docs/docs/expression_language/how_to/binding.ipynb b/docs/docs/expression_language/how_to/binding.ipynb index 6f9978bcc10..087850902fd 100644 --- a/docs/docs/expression_language/how_to/binding.ipynb +++ b/docs/docs/expression_language/how_to/binding.ipynb @@ -19,9 +19,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.schema import StrOutputParser\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.runnables import RunnablePassthrough" ] }, diff --git a/docs/docs/expression_language/how_to/configure.ipynb b/docs/docs/expression_language/how_to/configure.ipynb index 5b780469766..9e208ad25ce 100644 --- a/docs/docs/expression_language/how_to/configure.ipynb +++ b/docs/docs/expression_language/how_to/configure.ipynb @@ -41,8 +41,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.runnables import ConfigurableField\n", "\n", "model = ChatOpenAI(temperature=0).configurable_fields(\n", @@ -263,8 +263,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic, ChatOpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatAnthropic, ChatOpenAI\n", "from langchain_core.runnables import ConfigurableField" ] }, diff --git a/docs/docs/expression_language/how_to/fallbacks.ipynb b/docs/docs/expression_language/how_to/fallbacks.ipynb index cc3578106a3..e736c984e48 100644 --- a/docs/docs/expression_language/how_to/fallbacks.ipynb +++ b/docs/docs/expression_language/how_to/fallbacks.ipynb @@ -31,7 +31,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic, ChatOpenAI" + "from langchain_community.chat_models import ChatAnthropic, ChatOpenAI" ] }, { @@ -240,8 +240,8 @@ "outputs": [], "source": [ "# Now lets create a chain with the normal OpenAI model\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "\n", "prompt_template = \"\"\"Instructions: You should always include a compliment in your response.\n", "\n", diff --git a/docs/docs/expression_language/how_to/functions.ipynb b/docs/docs/expression_language/how_to/functions.ipynb index ceeb46102bc..d26caac51cd 100644 --- a/docs/docs/expression_language/how_to/functions.ipynb +++ b/docs/docs/expression_language/how_to/functions.ipynb @@ -33,8 +33,8 @@ "source": [ "from operator import itemgetter\n", "\n", - "from 
langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.runnables import RunnableLambda\n", "\n", "\n", diff --git a/docs/docs/expression_language/how_to/generators.ipynb b/docs/docs/expression_language/how_to/generators.ipynb index c9635c8aacd..caf1bade4b2 100644 --- a/docs/docs/expression_language/how_to/generators.ipynb +++ b/docs/docs/expression_language/how_to/generators.ipynb @@ -32,8 +32,8 @@ "source": [ "from typing import Iterator, List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts.chat import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "\n", "prompt = ChatPromptTemplate.from_template(\n", diff --git a/docs/docs/expression_language/how_to/map.ipynb b/docs/docs/expression_language/how_to/map.ipynb index 02e431899e8..957f3fc3da0 100644 --- a/docs/docs/expression_language/how_to/map.ipynb +++ b/docs/docs/expression_language/how_to/map.ipynb @@ -44,10 +44,10 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.vectorstores import FAISS\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough\n", "\n", @@ -128,10 +128,10 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.vectorstores import FAISS\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough\n", "\n", @@ -192,8 +192,8 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.runnables import RunnableParallel\n", "\n", "model = ChatOpenAI()\n", diff --git a/docs/docs/expression_language/how_to/message_history.ipynb b/docs/docs/expression_language/how_to/message_history.ipynb index d16ead46776..f18796731a4 100644 --- a/docs/docs/expression_language/how_to/message_history.ipynb +++ b/docs/docs/expression_language/how_to/message_history.ipynb @@ -131,9 +131,9 @@ "source": [ "from typing import Optional\n", "\n", - "from langchain.chat_models import ChatAnthropic\n", "from langchain.memory.chat_message_histories import RedisChatMessageHistory\n", "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain_community.chat_models import ChatAnthropic\n", "from langchain_core.chat_history import BaseChatMessageHistory\n", "from langchain_core.runnables.history import RunnableWithMessageHistory" ] diff --git a/docs/docs/expression_language/how_to/passthrough.ipynb b/docs/docs/expression_language/how_to/passthrough.ipynb index 482b8508421..54801656eaf 100644 --- a/docs/docs/expression_language/how_to/passthrough.ipynb +++ b/docs/docs/expression_language/how_to/passthrough.ipynb 
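The map and passthrough hunks that follow all feed a retriever into an LCEL chain; a minimal runnable sketch of that pattern with the new import paths (assumes `faiss-cpu` is installed and `OPENAI_API_KEY` is set; the toy corpus mirrors the docs' own example):

```python
from langchain.prompts import ChatPromptTemplate
from langchain.vectorstores import FAISS
from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

# Tiny in-memory index; from_texts embeds each text once at build time.
vectorstore = FAISS.from_texts(
    ["harrison worked at kensho"], embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()

prompt = ChatPromptTemplate.from_template(
    "Answer based only on the following context:\n{context}\n\nQuestion: {question}"
)

# The dict is coerced to a RunnableParallel: the retriever fills
# {context} while the raw input passes through to {question}.
chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | ChatOpenAI()
    | StrOutputParser()
)

print(chain.invoke("where did harrison work?"))
```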
@@ -97,10 +97,10 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.vectorstores import FAISS\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough\n", "\n", diff --git a/docs/docs/expression_language/how_to/routing.ipynb b/docs/docs/expression_language/how_to/routing.ipynb index b78cff2629e..0738d4889c9 100644 --- a/docs/docs/expression_language/how_to/routing.ipynb +++ b/docs/docs/expression_language/how_to/routing.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "9e45e81c-e16e-4c6c-b6a3-2362e5193827", "metadata": {}, "source": [ "---\n", @@ -51,8 +52,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatAnthropic\n", "from langchain_core.output_parsers import StrOutputParser" ] }, diff --git a/docs/docs/expression_language/interface.ipynb b/docs/docs/expression_language/interface.ipynb index 30432b671b9..b58d1811c72 100644 --- a/docs/docs/expression_language/interface.ipynb +++ b/docs/docs/expression_language/interface.ipynb @@ -57,8 +57,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI()\n", "prompt = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\")\n", @@ -659,8 +659,8 @@ } ], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough\n", "\n", diff --git a/docs/docs/expression_language/why.ipynb b/docs/docs/expression_language/why.ipynb index 2cdc5d63e0c..80f5c40e479 100644 --- a/docs/docs/expression_language/why.ipynb +++ b/docs/docs/expression_language/why.ipynb @@ -42,7 +42,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain_core.output_parsers import StrOutputParser\n", "\n", @@ -389,7 +389,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n", "llm_chain = (\n", @@ -468,7 +468,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic\n", + "from langchain_community.chat_models import ChatAnthropic\n", "\n", "anthropic = ChatAnthropic(model=\"claude-2\")\n", "anthropic_chain = (\n", @@ -1002,8 +1002,8 @@ "source": [ "import os\n", "\n", - "from langchain.chat_models import ChatAnthropic, ChatOpenAI\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.chat_models import ChatAnthropic, ChatOpenAI\n", + "from langchain_community.llms import OpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.prompts import 
ChatPromptTemplate\n", "from langchain_core.runnables import RunnablePassthrough\n", diff --git a/docs/docs/get_started/quickstart.mdx b/docs/docs/get_started/quickstart.mdx index 85247950613..a43b9baa362 100644 --- a/docs/docs/get_started/quickstart.mdx +++ b/docs/docs/get_started/quickstart.mdx @@ -85,7 +85,7 @@ export OPENAI_API_KEY="..." We can then initialize the model: ```python -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI llm = ChatOpenAI() ``` @@ -93,7 +93,7 @@ llm = ChatOpenAI() If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class: ```python -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI llm = ChatOpenAI(openai_api_key="...") ``` @@ -110,7 +110,7 @@ First, follow [these instructions](https://github.com/jmorganca/ollama) to set u Then, make sure the Ollama server is running. After that, you can do: ```python -from langchain.llms import Ollama +from langchain_community.llms import Ollama llm = Ollama(model="llama2") ``` @@ -412,7 +412,7 @@ pip install langchainhub Now we can use it to get a predefined prompt ```python -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI from langchain import hub from langchain.agents import create_openai_functions_agent from langchain.agents import AgentExecutor @@ -476,14 +476,14 @@ from typing import List from fastapi import FastAPI from langchain.prompts import ChatPromptTemplate -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI from langchain_community.document_loaders import WebBaseLoader from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import DocArrayInMemorySearch from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.tools.retriever import create_retriever_tool from langchain_community.tools.tavily_search import TavilySearchResults -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI from langchain import hub from langchain.agents import create_openai_functions_agent from langchain.agents import AgentExecutor diff --git a/docs/docs/guides/debugging.md b/docs/docs/guides/debugging.md index a0ac5a5e894..ba4c3ffc4ca 100644 --- a/docs/docs/guides/debugging.md +++ b/docs/docs/guides/debugging.md @@ -25,7 +25,7 @@ Let's suppose we have a simple agent, and want to visualize the actions it takes ```python from langchain.agents import AgentType, initialize_agent, load_tools -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI llm = ChatOpenAI(model_name="gpt-4", temperature=0) tools = load_tools(["ddg-search", "llm-math"], llm=llm) diff --git a/docs/docs/guides/evaluation/comparison/custom.ipynb b/docs/docs/guides/evaluation/comparison/custom.ipynb index c4ed70bcfb5..5f33bd4f88c 100644 --- a/docs/docs/guides/evaluation/comparison/custom.ipynb +++ b/docs/docs/guides/evaluation/comparison/custom.ipynb @@ -120,8 +120,8 @@ "from typing import Any, Optional\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.chat_models import ChatAnthropic\n", "from langchain.evaluation import PairwiseStringEvaluator\n", + "from langchain_community.chat_models import ChatAnthropic\n", "\n", "\n", "class CustomPreferenceEvaluator(PairwiseStringEvaluator):\n", diff 
--git a/docs/docs/guides/evaluation/comparison/pairwise_embedding_distance.ipynb b/docs/docs/guides/evaluation/comparison/pairwise_embedding_distance.ipynb index e0d5b75febf..7a913ba1be2 100644 --- a/docs/docs/guides/evaluation/comparison/pairwise_embedding_distance.ipynb +++ b/docs/docs/guides/evaluation/comparison/pairwise_embedding_distance.ipynb @@ -156,7 +156,7 @@ }, "outputs": [], "source": [ - "from langchain.embeddings import HuggingFaceEmbeddings\n", + "from langchain_community.embeddings import HuggingFaceEmbeddings\n", "\n", "embedding_model = HuggingFaceEmbeddings()\n", "hf_evaluator = load_evaluator(\"pairwise_embedding_distance\", embeddings=embedding_model)" diff --git a/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb b/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb index cc197bf5e61..259affbd2b2 100644 --- a/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb +++ b/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb @@ -236,7 +236,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic\n", + "from langchain_community.chat_models import ChatAnthropic\n", "\n", "llm = ChatAnthropic(temperature=0)\n", "\n", diff --git a/docs/docs/guides/evaluation/examples/comparisons.ipynb b/docs/docs/guides/evaluation/examples/comparisons.ipynb index 09d44b828c1..eb12a8773d9 100644 --- a/docs/docs/guides/evaluation/examples/comparisons.ipynb +++ b/docs/docs/guides/evaluation/examples/comparisons.ipynb @@ -99,8 +99,8 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, Tool, initialize_agent\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.utilities import SerpAPIWrapper\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "# Initialize the language model\n", "# You can add your own OpenAI API key by adding openai_api_key=\"\"\n", diff --git a/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb b/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb index f09e7c3941c..0f9c49487f6 100644 --- a/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb +++ b/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb @@ -331,7 +331,7 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic\n", + "from langchain_community.chat_models import ChatAnthropic\n", "\n", "llm = ChatAnthropic(temperature=0)\n", "evaluator = load_evaluator(\"criteria\", llm=llm, criteria=\"conciseness\")" diff --git a/docs/docs/guides/evaluation/string/embedding_distance.ipynb b/docs/docs/guides/evaluation/string/embedding_distance.ipynb index 3d9030ddd32..5164469a44c 100644 --- a/docs/docs/guides/evaluation/string/embedding_distance.ipynb +++ b/docs/docs/guides/evaluation/string/embedding_distance.ipynb @@ -142,7 +142,7 @@ }, "outputs": [], "source": [ - "from langchain.embeddings import HuggingFaceEmbeddings\n", + "from langchain_community.embeddings import HuggingFaceEmbeddings\n", "\n", "embedding_model = HuggingFaceEmbeddings()\n", "hf_evaluator = load_evaluator(\"embedding_distance\", embeddings=embedding_model)" diff --git a/docs/docs/guides/evaluation/string/scoring_eval_chain.ipynb b/docs/docs/guides/evaluation/string/scoring_eval_chain.ipynb index bf035262d79..f7c42d5392e 100644 --- a/docs/docs/guides/evaluation/string/scoring_eval_chain.ipynb +++ b/docs/docs/guides/evaluation/string/scoring_eval_chain.ipynb @@ -24,8 +24,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", 
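The evaluation hunks all follow one pattern: build the eval LLM from its new `langchain_community` home, then hand it to `load_evaluator` exactly as before. A short sketch (assumes `ANTHROPIC_API_KEY` is set):

```python
from langchain.evaluation import load_evaluator
from langchain_community.chat_models import ChatAnthropic

# Only the ChatAnthropic import moved; the evaluator API is untouched.
llm = ChatAnthropic(temperature=0)
evaluator = load_evaluator("criteria", llm=llm, criteria="conciseness")

result = evaluator.evaluate_strings(
    prediction="Four. Two plus two is four.",
    input="What is 2 + 2?",
)
print(result)  # dict with "reasoning", "value", and "score" keys
```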
"from langchain.evaluation import load_evaluator\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "evaluator = load_evaluator(\"labeled_score_string\", llm=ChatOpenAI(model=\"gpt-4\"))" ] diff --git a/docs/docs/guides/evaluation/trajectory/custom.ipynb b/docs/docs/guides/evaluation/trajectory/custom.ipynb index 4594f044c16..11823f40354 100644 --- a/docs/docs/guides/evaluation/trajectory/custom.ipynb +++ b/docs/docs/guides/evaluation/trajectory/custom.ipynb @@ -24,9 +24,9 @@ "from typing import Any, Optional, Sequence, Tuple\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.evaluation import AgentTrajectoryEvaluator\n", "from langchain.schema import AgentAction\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "\n", "class StepNecessityEvaluator(AgentTrajectoryEvaluator):\n", diff --git a/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb b/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb index c40bf8dd872..8c7a04cf0c7 100644 --- a/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb +++ b/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb @@ -75,8 +75,8 @@ "from urllib.parse import urlparse\n", "\n", "from langchain.agents import AgentType, initialize_agent\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.tools import tool\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from pydantic import HttpUrl\n", "\n", "\n", @@ -190,7 +190,7 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic\n", + "from langchain_community.chat_models import ChatAnthropic\n", "\n", "eval_llm = ChatAnthropic(temperature=0)\n", "evaluator = load_evaluator(\"trajectory\", llm=eval_llm)" diff --git a/docs/docs/guides/fallbacks.ipynb b/docs/docs/guides/fallbacks.ipynb index cde2b709271..2f354be3c2b 100644 --- a/docs/docs/guides/fallbacks.ipynb +++ b/docs/docs/guides/fallbacks.ipynb @@ -33,7 +33,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic, ChatOpenAI" + "from langchain_community.chat_models import ChatAnthropic, ChatOpenAI" ] }, { @@ -205,8 +205,8 @@ "outputs": [], "source": [ "# Now lets create a chain with the normal OpenAI model\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "\n", "prompt_template = \"\"\"Instructions: You should always include a compliment in your response.\n", "\n", diff --git a/docs/docs/guides/local_llms.ipynb b/docs/docs/guides/local_llms.ipynb index d47ca549eb0..a00f5043d8d 100644 --- a/docs/docs/guides/local_llms.ipynb +++ b/docs/docs/guides/local_llms.ipynb @@ -94,7 +94,7 @@ } ], "source": [ - "from langchain.llms import Ollama\n", + "from langchain_community.llms import Ollama\n", "\n", "llm = Ollama(model=\"llama2\")\n", "llm(\"The first man on the moon was ...\")" @@ -222,7 +222,7 @@ } ], "source": [ - "from langchain.llms import Ollama\n", + "from langchain_community.llms import Ollama\n", "\n", "llm = Ollama(model=\"llama2:13b\")\n", "llm(\"The first man on the moon was ... 
think step by step\")" @@ -289,7 +289,7 @@ "source": [ "from langchain.callbacks.manager import CallbackManager\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", - "from langchain.llms import LlamaCpp\n", + "from langchain_community.llms import LlamaCpp\n", "\n", "llm = LlamaCpp(\n", " model_path=\"/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin\",\n", @@ -400,7 +400,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import GPT4All\n", + "from langchain_community.llms import GPT4All\n", "\n", "llm = GPT4All(\n", " model=\"/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin\"\n", diff --git a/docs/docs/guides/model_laboratory.ipynb b/docs/docs/guides/model_laboratory.ipynb index c3c650feaa5..f82cc207315 100644 --- a/docs/docs/guides/model_laboratory.ipynb +++ b/docs/docs/guides/model_laboratory.ipynb @@ -19,9 +19,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import Cohere, HuggingFaceHub, OpenAI\n", "from langchain.model_laboratory import ModelLaboratory\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import Cohere, HuggingFaceHub, OpenAI" ] }, { diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb index 1d64d64ec55..539658b1ecc 100644 --- a/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb +++ b/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb @@ -129,8 +129,8 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts.prompt import PromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "anonymizer = PresidioAnonymizer()\n", "\n", diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb index fe68b361d79..6aefe9d3419 100644 --- a/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb +++ b/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb @@ -637,9 +637,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "# 2. Load the data: In our case data's already loaded\n", "# 3. 
Anonymize the data before indexing\n", @@ -664,8 +664,8 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.chat_models.openai import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models.openai import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import (\n", " RunnableLambda,\n", @@ -822,7 +822,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import HuggingFaceBgeEmbeddings\n", + "from langchain_community.embeddings import HuggingFaceBgeEmbeddings\n", "\n", "model_name = \"BAAI/bge-base-en-v1.5\"\n", "# model_kwargs = {'device': 'cuda'}\n", diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb index b68e74e0f9e..bcc66c08298 100644 --- a/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb +++ b/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb @@ -207,8 +207,8 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts.prompt import PromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "anonymizer = PresidioReversibleAnonymizer()\n", "\n", diff --git a/docs/docs/guides/safety/amazon_comprehend_chain.ipynb b/docs/docs/guides/safety/amazon_comprehend_chain.ipynb index b4bf7de26c4..a5704125f43 100644 --- a/docs/docs/guides/safety/amazon_comprehend_chain.ipynb +++ b/docs/docs/guides/safety/amazon_comprehend_chain.ipynb @@ -105,8 +105,8 @@ }, "outputs": [], "source": [ - "from langchain.llms.fake import FakeListLLM\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms.fake import FakeListLLM\n", "from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (\n", " ModerationPiiError,\n", ")\n", @@ -242,8 +242,8 @@ }, "outputs": [], "source": [ - "from langchain.llms.fake import FakeListLLM\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms.fake import FakeListLLM\n", "\n", "template = \"\"\"Question: {question}\n", "\n", @@ -405,8 +405,8 @@ }, "outputs": [], "source": [ - "from langchain.llms.fake import FakeListLLM\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms.fake import FakeListLLM\n", "\n", "template = \"\"\"Question: {question}\n", "\n", @@ -566,8 +566,8 @@ }, "outputs": [], "source": [ - "from langchain.llms import HuggingFaceHub\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import HuggingFaceHub\n", "\n", "template = \"\"\"{question}\"\"\"\n", "\n", @@ -696,9 +696,9 @@ "source": [ "import json\n", "\n", - "from langchain.llms import SagemakerEndpoint\n", - "from langchain.llms.sagemaker_endpoint import LLMContentHandler\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import SagemakerEndpoint\n", + "from langchain_community.llms.sagemaker_endpoint import LLMContentHandler\n", "\n", "\n", "class ContentHandler(LLMContentHandler):\n", diff --git a/docs/docs/guides/safety/constitutional_chain.mdx b/docs/docs/guides/safety/constitutional_chain.mdx index 3f96559e981..38356470ada 100644 --- a/docs/docs/guides/safety/constitutional_chain.mdx +++ b/docs/docs/guides/safety/constitutional_chain.mdx @@ -12,7 +12,7 @@ content that may violate guidelines, be offensive, or deviate from the desired c ```python # Imports 
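# A compatibility sketch (an assumption, not part of the original
# imports): a guarded import keeps this snippet running on either side
# of the community split.
try:
    from langchain_community.llms import OpenAI  # post-split location
except ImportError:
    from langchain.llms import OpenAI  # legacy pre-split location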
-from langchain.llms import OpenAI +from langchain_community.llms import OpenAI from langchain.prompts import PromptTemplate from langchain.chains.llm import LLMChain from langchain.chains.constitutional_ai.base import ConstitutionalChain diff --git a/docs/docs/guides/safety/hugging_face_prompt_injection.ipynb b/docs/docs/guides/safety/hugging_face_prompt_injection.ipynb index be41c203346..fc648f81a02 100644 --- a/docs/docs/guides/safety/hugging_face_prompt_injection.ipynb +++ b/docs/docs/guides/safety/hugging_face_prompt_injection.ipynb @@ -206,7 +206,7 @@ ], "source": [ "from langchain.agents import AgentType, initialize_agent\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "agent = initialize_agent(\n", diff --git a/docs/docs/guides/safety/logical_fallacy_chain.mdx b/docs/docs/guides/safety/logical_fallacy_chain.mdx index 1d785623d8b..108a8520616 100644 --- a/docs/docs/guides/safety/logical_fallacy_chain.mdx +++ b/docs/docs/guides/safety/logical_fallacy_chain.mdx @@ -21,7 +21,7 @@ Therefore, it is crucial that model developers proactively address logical falla ```python # Imports -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI from langchain.prompts import PromptTemplate from langchain.chains.llm import LLMChain from langchain_experimental.fallacy_removal.base import FallacyChain diff --git a/docs/docs/guides/safety/moderation.mdx b/docs/docs/guides/safety/moderation.mdx index 8b370158277..72353fa2311 100644 --- a/docs/docs/guides/safety/moderation.mdx +++ b/docs/docs/guides/safety/moderation.mdx @@ -22,7 +22,7 @@ We'll show: ```python -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI from langchain.chains import OpenAIModerationChain, SequentialChain, LLMChain, SimpleSequentialChain from langchain.prompts import PromptTemplate ``` diff --git a/docs/docs/integrations/callbacks/argilla.ipynb b/docs/docs/integrations/callbacks/argilla.ipynb index 9e89cb5da92..0fed211592c 100644 --- a/docs/docs/integrations/callbacks/argilla.ipynb +++ b/docs/docs/integrations/callbacks/argilla.ipynb @@ -215,7 +215,7 @@ ], "source": [ "from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "argilla_callback = ArgillaCallbackHandler(\n", " dataset_name=\"langchain-dataset\",\n", @@ -280,8 +280,8 @@ "source": [ "from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "\n", "argilla_callback = ArgillaCallbackHandler(\n", " dataset_name=\"langchain-dataset\",\n", @@ -363,7 +363,7 @@ "source": [ "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "argilla_callback = ArgillaCallbackHandler(\n", " dataset_name=\"langchain-dataset\",\n", diff --git a/docs/docs/integrations/callbacks/confident.ipynb b/docs/docs/integrations/callbacks/confident.ipynb index b0d56763391..758177dbe7e 100644 --- a/docs/docs/integrations/callbacks/confident.ipynb +++ b/docs/docs/integrations/callbacks/confident.ipynb @@ -152,7 +152,7 @@ } ], "source": [ - "from 
langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(\n", " temperature=0,\n", @@ -216,10 +216,10 @@ "import requests\n", "from langchain.chains import RetrievalQA\n", "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.llms import OpenAI\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.llms import OpenAI\n", "\n", "text_file_url = \"https://raw.githubusercontent.com/hwchase17/chat-your-data/master/state_of_the_union.txt\"\n", "\n", diff --git a/docs/docs/integrations/callbacks/context.ipynb b/docs/docs/integrations/callbacks/context.ipynb index b250f439b61..ed65e084c50 100644 --- a/docs/docs/integrations/callbacks/context.ipynb +++ b/docs/docs/integrations/callbacks/context.ipynb @@ -100,11 +100,11 @@ "import os\n", "\n", "from langchain.callbacks import ContextCallbackHandler\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import (\n", " HumanMessage,\n", " SystemMessage,\n", ")\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "token = os.environ[\"CONTEXT_API_TOKEN\"]\n", "\n", @@ -157,12 +157,12 @@ "\n", "from langchain.callbacks import ContextCallbackHandler\n", "from langchain.chains import LLMChain\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import PromptTemplate\n", "from langchain.prompts.chat import (\n", " ChatPromptTemplate,\n", " HumanMessagePromptTemplate,\n", ")\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "token = os.environ[\"CONTEXT_API_TOKEN\"]\n", "\n", diff --git a/docs/docs/integrations/callbacks/infino.ipynb b/docs/docs/integrations/callbacks/infino.ipynb index 367f3a2f2d8..03553c2a28f 100644 --- a/docs/docs/integrations/callbacks/infino.ipynb +++ b/docs/docs/integrations/callbacks/infino.ipynb @@ -54,7 +54,7 @@ "import matplotlib.pyplot as plt\n", "from infinopy import InfinoClient\n", "from langchain.callbacks import InfinoCallbackHandler\n", - "from langchain.llms import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { @@ -316,8 +316,8 @@ "# os.environ[\"OPENAI_API_KEY\"] = \"YOUR_API_KEY\"\n", "\n", "from langchain.chains.summarize import load_summarize_chain\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.document_loaders import WebBaseLoader\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "# Create callback handler. 
This logs latency, errors, token usage, prompts, as well as prompt responses to Infino.\n", "handler = InfinoCallbackHandler(\n", diff --git a/docs/docs/integrations/callbacks/labelstudio.ipynb b/docs/docs/integrations/callbacks/labelstudio.ipynb index bb733f0dc15..9ca111ffd09 100644 --- a/docs/docs/integrations/callbacks/labelstudio.ipynb +++ b/docs/docs/integrations/callbacks/labelstudio.ipynb @@ -171,7 +171,7 @@ "outputs": [], "source": [ "from langchain.callbacks import LabelStudioCallbackHandler\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(\n", " temperature=0, callbacks=[LabelStudioCallbackHandler(project_name=\"My Project\")]\n", @@ -242,8 +242,8 @@ "outputs": [], "source": [ "from langchain.callbacks import LabelStudioCallbackHandler\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import HumanMessage, SystemMessage\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "chat_llm = ChatOpenAI(\n", " callbacks=[\n", diff --git a/docs/docs/integrations/callbacks/llmonitor.md b/docs/docs/integrations/callbacks/llmonitor.md index 4ee85429f62..ee74df6db1f 100644 --- a/docs/docs/integrations/callbacks/llmonitor.md +++ b/docs/docs/integrations/callbacks/llmonitor.md @@ -27,8 +27,8 @@ handler = LLMonitorCallbackHandler(app_id="...") ## Usage with LLM/Chat models ```python -from langchain.llms import OpenAI -from langchain.chat_models import ChatOpenAI +from langchain_community.llms import OpenAI +from langchain_community.chat_models import ChatOpenAI from langchain.callbacks import LLMonitorCallbackHandler handler = LLMonitorCallbackHandler() @@ -52,7 +52,7 @@ It is also recommended to pass `agent_name` in the metadata to be able to distin Example: ```python -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI from langchain.schema import SystemMessage, HumanMessage from langchain.agents import OpenAIFunctionsAgent, AgentExecutor, tool from langchain.callbacks import LLMonitorCallbackHandler @@ -85,7 +85,7 @@ Another example: ```python from langchain.agents import load_tools, initialize_agent, AgentType -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI from langchain.callbacks import LLMonitorCallbackHandler handler = LLMonitorCallbackHandler() diff --git a/docs/docs/integrations/callbacks/promptlayer.ipynb b/docs/docs/integrations/callbacks/promptlayer.ipynb index 28d5977b748..de7f52ffd31 100644 --- a/docs/docs/integrations/callbacks/promptlayer.ipynb +++ b/docs/docs/integrations/callbacks/promptlayer.ipynb @@ -76,10 +76,10 @@ "source": [ "import promptlayer # Don't forget this 🍰\n", "from langchain.callbacks import PromptLayerCallbackHandler\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import (\n", " HumanMessage,\n", ")\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "chat_llm = ChatOpenAI(\n", " temperature=0,\n", @@ -110,7 +110,7 @@ "source": [ "import promptlayer # Don't forget this 🍰\n", "from langchain.callbacks import PromptLayerCallbackHandler\n", - "from langchain.llms import GPT4All\n", + "from langchain_community.llms import GPT4All\n", "\n", "model = GPT4All(model=\"./models/gpt4all-model.bin\", n_ctx=512, n_threads=8)\n", "\n", @@ -142,7 +142,7 @@ "source": [ "import promptlayer # Don't forget this 🍰\n", "from langchain.callbacks import PromptLayerCallbackHandler\n", - "from langchain.llms import OpenAI\n", + "from 
langchain_community.llms import OpenAI\n", "\n", "\n", "def pl_id_callback(promptlayer_request_id):\n", diff --git a/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb b/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb index 7b88a910b81..82ecc3298b6 100644 --- a/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb +++ b/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb @@ -82,8 +82,8 @@ "from langchain.agents import initialize_agent, load_tools\n", "from langchain.callbacks import SageMakerCallbackHandler\n", "from langchain.chains import LLMChain, SimpleSequentialChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "from sagemaker.analytics import ExperimentAnalytics\n", "from sagemaker.experiments.run import Run\n", "from sagemaker.session import Session" diff --git a/docs/docs/integrations/callbacks/streamlit.md b/docs/docs/integrations/callbacks/streamlit.md index 4704d9d5579..28a83daf3ae 100644 --- a/docs/docs/integrations/callbacks/streamlit.md +++ b/docs/docs/integrations/callbacks/streamlit.md @@ -44,7 +44,7 @@ agent in your Streamlit app and simply pass the `StreamlitCallbackHandler` to `a thoughts and actions live in your app. ```python -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI from langchain.agents import AgentType, initialize_agent, load_tools from langchain.callbacks import StreamlitCallbackHandler import streamlit as st diff --git a/docs/docs/integrations/callbacks/trubrics.ipynb b/docs/docs/integrations/callbacks/trubrics.ipynb index e1d0bf726d5..cc09ad9db7a 100644 --- a/docs/docs/integrations/callbacks/trubrics.ipynb +++ b/docs/docs/integrations/callbacks/trubrics.ipynb @@ -149,7 +149,7 @@ "outputs": [], "source": [ "from langchain.callbacks import TrubricsCallbackHandler\n", - "from langchain.llms import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { @@ -267,8 +267,8 @@ "outputs": [], "source": [ "from langchain.callbacks import TrubricsCallbackHandler\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.schema import HumanMessage, SystemMessage" + "from langchain.schema import HumanMessage, SystemMessage\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/chat/alibaba_cloud_pai_eas.ipynb b/docs/docs/integrations/chat/alibaba_cloud_pai_eas.ipynb index f24bf8a0273..c2eb7675475 100644 --- a/docs/docs/integrations/chat/alibaba_cloud_pai_eas.ipynb +++ b/docs/docs/integrations/chat/alibaba_cloud_pai_eas.ipynb @@ -46,7 +46,7 @@ "source": [ "import os\n", "\n", - "from langchain.chat_models import PaiEasChatEndpoint\n", + "from langchain_community.chat_models import PaiEasChatEndpoint\n", "from langchain_core.language_models.chat_models import HumanMessage\n", "\n", "os.environ[\"EAS_SERVICE_URL\"] = \"Your_EAS_Service_URL\"\n", diff --git a/docs/docs/integrations/chat/anthropic.ipynb b/docs/docs/integrations/chat/anthropic.ipynb index 2a9cfa63e99..680e82f99d4 100644 --- a/docs/docs/integrations/chat/anthropic.ipynb +++ b/docs/docs/integrations/chat/anthropic.ipynb @@ -29,8 +29,8 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic\n", - "from langchain.schema import HumanMessage" + "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models import ChatAnthropic" ] }, { diff --git a/docs/docs/integrations/chat/anyscale.ipynb b/docs/docs/integrations/chat/anyscale.ipynb 
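Most of the callback integrations in this stretch share one shape: construct the handler, then pass it via `callbacks=` when building the model from its new `langchain_community` location. A generic sketch using the built-in streaming handler, so no third-party service is assumed:

```python
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_community.llms import OpenAI

# Handlers still live under langchain.callbacks; only the model moved.
llm = OpenAI(
    temperature=0,
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()],
)
llm.invoke("Tell me a one-line joke.")
```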
index 3d2e9e80cba..46ae43a17cc 100644 --- a/docs/docs/integrations/chat/anyscale.ipynb +++ b/docs/docs/integrations/chat/anyscale.ipynb @@ -80,7 +80,7 @@ } ], "source": [ - "from langchain.chat_models import ChatAnyscale\n", + "from langchain_community.chat_models import ChatAnyscale\n", "\n", "chats = {\n", " model: ChatAnyscale(model_name=model, temperature=1.0)\n", diff --git a/docs/docs/integrations/chat/azure_chat_openai.ipynb b/docs/docs/integrations/chat/azure_chat_openai.ipynb index a6bee8f49cb..cce6508e16b 100644 --- a/docs/docs/integrations/chat/azure_chat_openai.ipynb +++ b/docs/docs/integrations/chat/azure_chat_openai.ipynb @@ -31,8 +31,8 @@ "source": [ "import os\n", "\n", - "from langchain.chat_models import AzureChatOpenAI\n", - "from langchain.schema import HumanMessage" + "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models import AzureChatOpenAI" ] }, { diff --git a/docs/docs/integrations/chat/azureml_chat_endpoint.ipynb b/docs/docs/integrations/chat/azureml_chat_endpoint.ipynb index 2e0a09aff45..61d3cf14cda 100644 --- a/docs/docs/integrations/chat/azureml_chat_endpoint.ipynb +++ b/docs/docs/integrations/chat/azureml_chat_endpoint.ipynb @@ -28,7 +28,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models.azureml_endpoint import AzureMLChatOnlineEndpoint" + "from langchain_community.chat_models.azureml_endpoint import AzureMLChatOnlineEndpoint" ] }, { @@ -71,8 +71,8 @@ } ], "source": [ - "from langchain.chat_models.azureml_endpoint import LlamaContentFormatter\n", "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models.azureml_endpoint import LlamaContentFormatter\n", "\n", "chat = AzureMLChatOnlineEndpoint(\n", " endpoint_url=\"https://..inference.ml.azure.com/score\",\n", diff --git a/docs/docs/integrations/chat/baichuan.ipynb b/docs/docs/integrations/chat/baichuan.ipynb index 462a6aa2bc0..2724727ad75 100644 --- a/docs/docs/integrations/chat/baichuan.ipynb +++ b/docs/docs/integrations/chat/baichuan.ipynb @@ -29,8 +29,8 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatBaichuan\n", - "from langchain.schema import HumanMessage" + "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models import ChatBaichuan" ] }, { diff --git a/docs/docs/integrations/chat/baidu_qianfan_endpoint.ipynb b/docs/docs/integrations/chat/baidu_qianfan_endpoint.ipynb index 9506fa5a77a..decf4413601 100644 --- a/docs/docs/integrations/chat/baidu_qianfan_endpoint.ipynb +++ b/docs/docs/integrations/chat/baidu_qianfan_endpoint.ipynb @@ -70,7 +70,7 @@ "\"\"\"For basic init and call\"\"\"\n", "import os\n", "\n", - "from langchain.chat_models import QianfanChatEndpoint\n", + "from langchain_community.chat_models import QianfanChatEndpoint\n", "from langchain_core.language_models.chat_models import HumanMessage\n", "\n", "os.environ[\"QIANFAN_AK\"] = \"your_ak\"\n", @@ -123,8 +123,8 @@ } ], "source": [ - "from langchain.chat_models import QianfanChatEndpoint\n", "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models import QianfanChatEndpoint\n", "\n", "chatLLM = QianfanChatEndpoint(\n", " streaming=True,\n", diff --git a/docs/docs/integrations/chat/bedrock.ipynb b/docs/docs/integrations/chat/bedrock.ipynb index 3957c9c1e46..6b13600c50f 100644 --- a/docs/docs/integrations/chat/bedrock.ipynb +++ b/docs/docs/integrations/chat/bedrock.ipynb @@ -47,8 +47,8 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import BedrockChat\n", - 
"from langchain.schema import HumanMessage" + "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models import BedrockChat" ] }, { diff --git a/docs/docs/integrations/chat/cohere.ipynb b/docs/docs/integrations/chat/cohere.ipynb index 8f05b1c6673..5787d4f37ce 100644 --- a/docs/docs/integrations/chat/cohere.ipynb +++ b/docs/docs/integrations/chat/cohere.ipynb @@ -29,8 +29,8 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatCohere\n", - "from langchain.schema import HumanMessage" + "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models import ChatCohere" ] }, { diff --git a/docs/docs/integrations/chat/ernie.ipynb b/docs/docs/integrations/chat/ernie.ipynb index d98fcdb592f..17c5fb650cc 100644 --- a/docs/docs/integrations/chat/ernie.ipynb +++ b/docs/docs/integrations/chat/ernie.ipynb @@ -37,8 +37,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ErnieBotChat\n", - "from langchain.schema import HumanMessage" + "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models import ErnieBotChat" ] }, { diff --git a/docs/docs/integrations/chat/everlyai.ipynb b/docs/docs/integrations/chat/everlyai.ipynb index 3f18b36f7e6..6d2f41efa98 100644 --- a/docs/docs/integrations/chat/everlyai.ipynb +++ b/docs/docs/integrations/chat/everlyai.ipynb @@ -73,8 +73,8 @@ } ], "source": [ - "from langchain.chat_models import ChatEverlyAI\n", "from langchain.schema import HumanMessage, SystemMessage\n", + "from langchain_community.chat_models import ChatEverlyAI\n", "\n", "messages = [\n", " SystemMessage(content=\"You are a helpful AI that shares everything you know.\"),\n", @@ -127,8 +127,8 @@ ], "source": [ "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", - "from langchain.chat_models import ChatEverlyAI\n", "from langchain.schema import HumanMessage, SystemMessage\n", + "from langchain_community.chat_models import ChatEverlyAI\n", "\n", "messages = [\n", " SystemMessage(content=\"You are a humorous AI that delights people.\"),\n", @@ -185,8 +185,8 @@ ], "source": [ "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", - "from langchain.chat_models import ChatEverlyAI\n", "from langchain.schema import HumanMessage, SystemMessage\n", + "from langchain_community.chat_models import ChatEverlyAI\n", "\n", "messages = [\n", " SystemMessage(content=\"You are a humorous AI that delights people.\"),\n", diff --git a/docs/docs/integrations/chat/fireworks.ipynb b/docs/docs/integrations/chat/fireworks.ipynb index 5428039f44e..e5a058df5bd 100644 --- a/docs/docs/integrations/chat/fireworks.ipynb +++ b/docs/docs/integrations/chat/fireworks.ipynb @@ -37,8 +37,8 @@ "source": [ "import os\n", "\n", - "from langchain.chat_models.fireworks import ChatFireworks\n", - "from langchain.schema import HumanMessage, SystemMessage" + "from langchain.schema import HumanMessage, SystemMessage\n", + "from langchain_community.chat_models.fireworks import ChatFireworks" ] }, { @@ -156,9 +156,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatFireworks\n", "from langchain.memory import ConversationBufferMemory\n", "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain_community.chat_models import ChatFireworks\n", "from langchain_core.runnables import RunnablePassthrough\n", "\n", "llm = ChatFireworks(\n", diff --git a/docs/docs/integrations/chat/gigachat.ipynb 
b/docs/docs/integrations/chat/gigachat.ipynb index 60fa65376d8..b8cf3c1519c 100644 --- a/docs/docs/integrations/chat/gigachat.ipynb +++ b/docs/docs/integrations/chat/gigachat.ipynb @@ -54,7 +54,7 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import GigaChat\n", + "from langchain_community.chat_models import GigaChat\n", "\n", "chat = GigaChat(verify_ssl_certs=False)" ] diff --git a/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb b/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb index 32ce8ec3f11..3b46fbddc12 100644 --- a/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb +++ b/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb @@ -50,8 +50,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatVertexAI\n", - "from langchain.prompts import ChatPromptTemplate" + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatVertexAI" ] }, { diff --git a/docs/docs/integrations/chat/jinachat.ipynb b/docs/docs/integrations/chat/jinachat.ipynb index 53a29162dc3..672018f4775 100644 --- a/docs/docs/integrations/chat/jinachat.ipynb +++ b/docs/docs/integrations/chat/jinachat.ipynb @@ -19,13 +19,13 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import JinaChat\n", "from langchain.prompts.chat import (\n", " ChatPromptTemplate,\n", " HumanMessagePromptTemplate,\n", " SystemMessagePromptTemplate,\n", ")\n", - "from langchain.schema import HumanMessage, SystemMessage" + "from langchain.schema import HumanMessage, SystemMessage\n", + "from langchain_community.chat_models import JinaChat" ] }, { diff --git a/docs/docs/integrations/chat/konko.ipynb b/docs/docs/integrations/chat/konko.ipynb index 6e4e19bf3dc..95c826a0937 100644 --- a/docs/docs/integrations/chat/konko.ipynb +++ b/docs/docs/integrations/chat/konko.ipynb @@ -42,8 +42,8 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatKonko\n", - "from langchain.schema import HumanMessage, SystemMessage" + "from langchain.schema import HumanMessage, SystemMessage\n", + "from langchain_community.chat_models import ChatKonko" ] }, { diff --git a/docs/docs/integrations/chat/litellm.ipynb b/docs/docs/integrations/chat/litellm.ipynb index a93d595bfbd..1de1f3b9540 100644 --- a/docs/docs/integrations/chat/litellm.ipynb +++ b/docs/docs/integrations/chat/litellm.ipynb @@ -32,8 +32,8 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatLiteLLM\n", - "from langchain.schema import HumanMessage" + "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models import ChatLiteLLM" ] }, { diff --git a/docs/docs/integrations/chat/llama2_chat.ipynb b/docs/docs/integrations/chat/llama2_chat.ipynb index 98cce09dfa0..de1e773e34b 100644 --- a/docs/docs/integrations/chat/llama2_chat.ipynb +++ b/docs/docs/integrations/chat/llama2_chat.ipynb @@ -122,7 +122,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import HuggingFaceTextGenInference\n", + "from langchain_community.llms import HuggingFaceTextGenInference\n", "\n", "llm = HuggingFaceTextGenInference(\n", " inference_server_url=\"http://127.0.0.1:8080/\",\n", @@ -597,7 +597,7 @@ "source": [ "from os.path import expanduser\n", "\n", - "from langchain.llms import LlamaCpp\n", + "from langchain_community.llms import LlamaCpp\n", "\n", "model_path = expanduser(\"~/Models/llama-2-7b-chat.Q4_0.gguf\")\n", "\n", diff --git a/docs/docs/integrations/chat/minimax.ipynb b/docs/docs/integrations/chat/minimax.ipynb index 
e10eeb0d2a7..9b0735f7f27 100644 --- a/docs/docs/integrations/chat/minimax.ipynb +++ b/docs/docs/integrations/chat/minimax.ipynb @@ -39,8 +39,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import MiniMaxChat\n", - "from langchain.schema import HumanMessage" + "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models import MiniMaxChat" ] }, { diff --git a/docs/docs/integrations/chat/ollama.ipynb b/docs/docs/integrations/chat/ollama.ipynb index 99b6fba3a0f..d8fa6db3568 100644 --- a/docs/docs/integrations/chat/ollama.ipynb +++ b/docs/docs/integrations/chat/ollama.ipynb @@ -72,7 +72,7 @@ "source": [ "from langchain.callbacks.manager import CallbackManager\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", - "from langchain.chat_models import ChatOllama\n", + "from langchain_community.chat_models import ChatOllama\n", "\n", "chat_model = ChatOllama(\n", " model=\"llama2:7b-chat\",\n", @@ -137,7 +137,7 @@ "source": [ "from langchain.callbacks.manager import CallbackManager\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", - "from langchain.chat_models import ChatOllama\n", + "from langchain_community.chat_models import ChatOllama\n", "\n", "chat_model = ChatOllama(\n", " model=\"llama2\",\n", @@ -343,7 +343,7 @@ } ], "source": [ - "from langchain.chat_models import ChatOllama\n", + "from langchain_community.chat_models import ChatOllama\n", "from langchain_core.messages import HumanMessage\n", "\n", "chat_model = ChatOllama(\n", diff --git a/docs/docs/integrations/chat/openai.ipynb b/docs/docs/integrations/chat/openai.ipynb index 5fa123d0c98..8b9619279ba 100644 --- a/docs/docs/integrations/chat/openai.ipynb +++ b/docs/docs/integrations/chat/openai.ipynb @@ -29,13 +29,13 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts.chat import (\n", " ChatPromptTemplate,\n", " HumanMessagePromptTemplate,\n", " SystemMessagePromptTemplate,\n", ")\n", - "from langchain.schema import HumanMessage, SystemMessage" + "from langchain.schema import HumanMessage, SystemMessage\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/chat/promptlayer_chatopenai.ipynb b/docs/docs/integrations/chat/promptlayer_chatopenai.ipynb index 2b53e329e4c..de2cc76d707 100644 --- a/docs/docs/integrations/chat/promptlayer_chatopenai.ipynb +++ b/docs/docs/integrations/chat/promptlayer_chatopenai.ipynb @@ -62,8 +62,8 @@ "source": [ "import os\n", "\n", - "from langchain.chat_models import PromptLayerChatOpenAI\n", - "from langchain.schema import HumanMessage" + "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models import PromptLayerChatOpenAI" ] }, { diff --git a/docs/docs/integrations/chat/tencent_hunyuan.ipynb b/docs/docs/integrations/chat/tencent_hunyuan.ipynb index 3c6a9ef3425..d184784f66f 100644 --- a/docs/docs/integrations/chat/tencent_hunyuan.ipynb +++ b/docs/docs/integrations/chat/tencent_hunyuan.ipynb @@ -36,8 +36,8 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatHunyuan\n", - "from langchain.schema import HumanMessage" + "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models import ChatHunyuan" ] }, { diff --git a/docs/docs/integrations/chat/tongyi.ipynb b/docs/docs/integrations/chat/tongyi.ipynb index 3de68b1e5ce..1180d6b982b 100644 --- a/docs/docs/integrations/chat/tongyi.ipynb +++ 
b/docs/docs/integrations/chat/tongyi.ipynb @@ -100,8 +100,8 @@ } ], "source": [ - "from langchain.chat_models.tongyi import ChatTongyi\n", "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models.tongyi import ChatTongyi\n", "\n", "chatLLM = ChatTongyi(\n", " streaming=True,\n", diff --git a/docs/docs/integrations/chat/vllm.ipynb b/docs/docs/integrations/chat/vllm.ipynb index 11023a201b0..08bdeb6967c 100644 --- a/docs/docs/integrations/chat/vllm.ipynb +++ b/docs/docs/integrations/chat/vllm.ipynb @@ -31,13 +31,13 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts.chat import (\n", " ChatPromptTemplate,\n", " HumanMessagePromptTemplate,\n", " SystemMessagePromptTemplate,\n", ")\n", - "from langchain.schema import HumanMessage, SystemMessage" + "from langchain.schema import HumanMessage, SystemMessage\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/chat/volcengine_maas.ipynb b/docs/docs/integrations/chat/volcengine_maas.ipynb index e7c39c6b6f6..6c3824591fe 100644 --- a/docs/docs/integrations/chat/volcengine_maas.ipynb +++ b/docs/docs/integrations/chat/volcengine_maas.ipynb @@ -48,8 +48,8 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import VolcEngineMaasChat\n", - "from langchain.schema import HumanMessage" + "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models import VolcEngineMaasChat" ] }, { diff --git a/docs/docs/integrations/chat/yandex.ipynb b/docs/docs/integrations/chat/yandex.ipynb index 8e9d81462e7..40e3a330cec 100644 --- a/docs/docs/integrations/chat/yandex.ipynb +++ b/docs/docs/integrations/chat/yandex.ipynb @@ -58,8 +58,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatYandexGPT\n", - "from langchain.schema import HumanMessage, SystemMessage" + "from langchain.schema import HumanMessage, SystemMessage\n", + "from langchain_community.chat_models import ChatYandexGPT" ] }, { diff --git a/docs/docs/integrations/chat_loaders/discord.ipynb b/docs/docs/integrations/chat_loaders/discord.ipynb index 39f92b9a3c3..f6e0ef89488 100644 --- a/docs/docs/integrations/chat_loaders/discord.ipynb +++ b/docs/docs/integrations/chat_loaders/discord.ipynb @@ -284,7 +284,7 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI()\n", "\n", diff --git a/docs/docs/integrations/chat_loaders/facebook.ipynb b/docs/docs/integrations/chat_loaders/facebook.ipynb index 92a3ef63e2e..9062b8cc09e 100644 --- a/docs/docs/integrations/chat_loaders/facebook.ipynb +++ b/docs/docs/integrations/chat_loaders/facebook.ipynb @@ -500,7 +500,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(\n", " model=job.fine_tuned_model,\n", diff --git a/docs/docs/integrations/chat_loaders/imessage.ipynb b/docs/docs/integrations/chat_loaders/imessage.ipynb index ffd6f2f3c4f..b69ff54e701 100644 --- a/docs/docs/integrations/chat_loaders/imessage.ipynb +++ b/docs/docs/integrations/chat_loaders/imessage.ipynb @@ -344,7 +344,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(\n", " model=job.fine_tuned_model,\n", diff --git 
a/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb b/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb index 7782f03bbf1..cc7995ba077 100644 --- a/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb +++ b/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb @@ -230,7 +230,7 @@ "model_id = job.fine_tuned_model\n", "\n", "# Use the fine-tuned model in LangChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(\n", " model=model_id,\n", diff --git a/docs/docs/integrations/chat_loaders/langsmith_llm_runs.ipynb b/docs/docs/integrations/chat_loaders/langsmith_llm_runs.ipynb index a911e141ee8..77349e664d3 100644 --- a/docs/docs/integrations/chat_loaders/langsmith_llm_runs.ipynb +++ b/docs/docs/integrations/chat_loaders/langsmith_llm_runs.ipynb @@ -149,9 +149,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.output_parsers.openai_functions import PydanticOutputFunctionsParser\n", "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "prompt = ChatPromptTemplate.from_messages(\n", " [\n", @@ -365,7 +365,7 @@ "model_id = job.fine_tuned_model\n", "\n", "# Use the fine-tuned model in LangChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(\n", " model=model_id,\n", diff --git a/docs/docs/integrations/chat_loaders/slack.ipynb b/docs/docs/integrations/chat_loaders/slack.ipynb index 044eb4a3854..0a68c1cd52c 100644 --- a/docs/docs/integrations/chat_loaders/slack.ipynb +++ b/docs/docs/integrations/chat_loaders/slack.ipynb @@ -133,7 +133,7 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI()\n", "\n", diff --git a/docs/docs/integrations/chat_loaders/telegram.ipynb b/docs/docs/integrations/chat_loaders/telegram.ipynb index cc0269d51fc..d6b908f4afb 100644 --- a/docs/docs/integrations/chat_loaders/telegram.ipynb +++ b/docs/docs/integrations/chat_loaders/telegram.ipynb @@ -176,7 +176,7 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI()\n", "\n", diff --git a/docs/docs/integrations/chat_loaders/wechat.ipynb b/docs/docs/integrations/chat_loaders/wechat.ipynb index 4d901386f52..c586b2dc360 100644 --- a/docs/docs/integrations/chat_loaders/wechat.ipynb +++ b/docs/docs/integrations/chat_loaders/wechat.ipynb @@ -263,7 +263,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI()\n", "\n", diff --git a/docs/docs/integrations/chat_loaders/whatsapp.ipynb b/docs/docs/integrations/chat_loaders/whatsapp.ipynb index 0cda70c1698..9c59486bc5e 100644 --- a/docs/docs/integrations/chat_loaders/whatsapp.ipynb +++ b/docs/docs/integrations/chat_loaders/whatsapp.ipynb @@ -166,7 +166,7 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI()\n", "\n", diff --git a/docs/docs/integrations/document_loaders/amazon_textract.ipynb b/docs/docs/integrations/document_loaders/amazon_textract.ipynb index 392d08b3bdc..208d0cbec22 100644 --- 
a/docs/docs/integrations/document_loaders/amazon_textract.ipynb +++ b/docs/docs/integrations/document_loaders/amazon_textract.ipynb @@ -267,7 +267,7 @@ ], "source": [ "from langchain.chains.question_answering import load_qa_chain\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "chain = load_qa_chain(llm=OpenAI(), chain_type=\"map_reduce\")\n", "query = [\"Who are the authors?\"]\n", diff --git a/docs/docs/integrations/document_loaders/docugami.ipynb b/docs/docs/integrations/document_loaders/docugami.ipynb index ee36e8f2a59..48edcb2e0d0 100644 --- a/docs/docs/integrations/document_loaders/docugami.ipynb +++ b/docs/docs/integrations/document_loaders/docugami.ipynb @@ -211,9 +211,9 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQA\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.llms.openai import OpenAI\n", "from langchain.vectorstores.chroma import Chroma\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.llms.openai import OpenAI\n", "\n", "embedding = OpenAIEmbeddings()\n", "vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)\n", @@ -539,10 +539,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType\n", "from langchain.storage import InMemoryStore\n", "from langchain.vectorstores.chroma import Chroma\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "# The vectorstore to use to index the child chunks\n", "vectorstore = Chroma(collection_name=\"big2small\", embedding_function=OpenAIEmbeddings())\n", diff --git a/docs/docs/integrations/document_loaders/figma.ipynb b/docs/docs/integrations/document_loaders/figma.ipynb index e7739a37cb5..1b6db34f2ad 100644 --- a/docs/docs/integrations/document_loaders/figma.ipynb +++ b/docs/docs/integrations/document_loaders/figma.ipynb @@ -23,14 +23,14 @@ "source": [ "import os\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.document_loaders.figma import FigmaFileLoader\n", "from langchain.indexes import VectorstoreIndexCreator\n", "from langchain.prompts.chat import (\n", " ChatPromptTemplate,\n", " HumanMessagePromptTemplate,\n", " SystemMessagePromptTemplate,\n", - ")" + ")\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/document_loaders/github.ipynb b/docs/docs/integrations/document_loaders/github.ipynb index 3582385aeb1..3d194bb8033 100644 --- a/docs/docs/integrations/document_loaders/github.ipynb +++ b/docs/docs/integrations/document_loaders/github.ipynb @@ -203,7 +203,7 @@ "os.environ[\"OPENAI_API_KEY\"] = \"...\"\r\n", "\r\n", "from langchain.chains import LLMChain\r\n", - "from langchain.chat_models import ChatOpenAI\r\n", + "from langchain_community.chat_models import ChatOpenAI\r\n", "from langchain.prompts import PromptTemplate\r\n", "from langchain.prompts.chat import ChatPromptTemplate\r\n", "from langchain.schema import messages_from_dict\r\n", diff --git a/docs/docs/integrations/document_loaders/larksuite.ipynb b/docs/docs/integrations/document_loaders/larksuite.ipynb index 363d0540f05..1ecdc762d87 100644 --- a/docs/docs/integrations/document_loaders/larksuite.ipynb +++ b/docs/docs/integrations/document_loaders/larksuite.ipynb @@ -74,7 +74,7 @@ "source": [ "# see https://python.langchain.com/docs/use_cases/summarization for more 
details\n", "from langchain.chains.summarize import load_summarize_chain\n", - "from langchain.llms.fake import FakeListLLM\n", + "from langchain_community.llms.fake import FakeListLLM\n", "\n", "llm = FakeListLLM()\n", "chain = load_summarize_chain(llm, chain_type=\"map_reduce\")\n", diff --git a/docs/docs/integrations/document_loaders/psychic.ipynb b/docs/docs/integrations/document_loaders/psychic.ipynb index 7778d1049d0..bb2b7cfe7d5 100644 --- a/docs/docs/integrations/document_loaders/psychic.ipynb +++ b/docs/docs/integrations/document_loaders/psychic.ipynb @@ -78,10 +78,10 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQAWithSourcesChain\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.llms import OpenAI\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Chroma" + "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/integrations/document_loaders/youtube_audio.ipynb b/docs/docs/integrations/document_loaders/youtube_audio.ipynb index ae1f89009fd..6ed1a7b086c 100644 --- a/docs/docs/integrations/document_loaders/youtube_audio.ipynb +++ b/docs/docs/integrations/document_loaders/youtube_audio.ipynb @@ -166,10 +166,10 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQA\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import FAISS" + "from langchain.vectorstores import FAISS\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/document_transformers/openai_metadata_tagger.ipynb b/docs/docs/integrations/document_transformers/openai_metadata_tagger.ipynb index b304fbb9cb0..abc62c8c78c 100644 --- a/docs/docs/integrations/document_transformers/openai_metadata_tagger.ipynb +++ b/docs/docs/integrations/document_transformers/openai_metadata_tagger.ipynb @@ -21,9 +21,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.document_transformers.openai_functions import create_metadata_tagger\n", - "from langchain.schema import Document" + "from langchain.schema import Document\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/llms/ai21.ipynb b/docs/docs/integrations/llms/ai21.ipynb index b54df8529aa..5d7b6a25494 100644 --- a/docs/docs/integrations/llms/ai21.ipynb +++ b/docs/docs/integrations/llms/ai21.ipynb @@ -59,8 +59,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import AI21\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import AI21" ] }, { diff --git a/docs/docs/integrations/llms/aleph_alpha.ipynb b/docs/docs/integrations/llms/aleph_alpha.ipynb index 4e4c23bd354..20c1ad47ad9 100644 --- a/docs/docs/integrations/llms/aleph_alpha.ipynb +++ b/docs/docs/integrations/llms/aleph_alpha.ipynb @@ -59,8 +59,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import AlephAlpha\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts 
import PromptTemplate\n", + "from langchain_community.llms import AlephAlpha" ] }, { diff --git a/docs/docs/integrations/llms/amazon_api_gateway.ipynb b/docs/docs/integrations/llms/amazon_api_gateway.ipynb index a4046548e55..86dc9e73195 100644 --- a/docs/docs/integrations/llms/amazon_api_gateway.ipynb +++ b/docs/docs/integrations/llms/amazon_api_gateway.ipynb @@ -29,7 +29,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import AmazonAPIGateway" + "from langchain_community.llms import AmazonAPIGateway" ] }, { diff --git a/docs/docs/integrations/llms/anyscale.ipynb b/docs/docs/integrations/llms/anyscale.ipynb index d18434a0c20..e297cfd9492 100644 --- a/docs/docs/integrations/llms/anyscale.ipynb +++ b/docs/docs/integrations/llms/anyscale.ipynb @@ -49,8 +49,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import Anyscale\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import Anyscale" ] }, { diff --git a/docs/docs/integrations/llms/arcee.ipynb b/docs/docs/integrations/llms/arcee.ipynb index f035278033e..4e73a0a92c6 100644 --- a/docs/docs/integrations/llms/arcee.ipynb +++ b/docs/docs/integrations/llms/arcee.ipynb @@ -23,7 +23,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import Arcee\n", + "from langchain_community.llms import Arcee\n", "\n", "# Create an instance of the Arcee class\n", "arcee = Arcee(\n", diff --git a/docs/docs/integrations/llms/azure_ml.ipynb b/docs/docs/integrations/llms/azure_ml.ipynb index f6e5c56f64e..63aa8a2cb06 100644 --- a/docs/docs/integrations/llms/azure_ml.ipynb +++ b/docs/docs/integrations/llms/azure_ml.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms.azureml_endpoint import AzureMLOnlineEndpoint" + "from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint" ] }, { @@ -76,7 +76,10 @@ "import os\n", "from typing import Dict\n", "\n", - "from langchain.llms.azureml_endpoint import AzureMLOnlineEndpoint, ContentFormatterBase\n", + "from langchain_community.llms.azureml_endpoint import (\n", + " AzureMLOnlineEndpoint,\n", + " ContentFormatterBase,\n", + ")\n", "\n", "\n", "class CustomFormatter(ContentFormatterBase):\n", @@ -155,8 +158,8 @@ ], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms.azureml_endpoint import DollyContentFormatter\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms.azureml_endpoint import DollyContentFormatter\n", "\n", "formatter_template = \"Write a {word_count} word essay about {topic}.\"\n", "\n", @@ -200,7 +203,7 @@ } ], "source": [ - "from langchain.llms.loading import load_llm\n", + "from langchain_community.llms.loading import load_llm\n", "\n", "save_llm = AzureMLOnlineEndpoint(\n", " deployment_name=\"databricks-dolly-v2-12b-4\",\n", diff --git a/docs/docs/integrations/llms/azure_openai.ipynb b/docs/docs/integrations/llms/azure_openai.ipynb index c7b1e59f665..b501f91c962 100644 --- a/docs/docs/integrations/llms/azure_openai.ipynb +++ b/docs/docs/integrations/llms/azure_openai.ipynb @@ -124,7 +124,7 @@ "outputs": [], "source": [ "# Import Azure OpenAI\n", - "from langchain.llms import AzureOpenAI" + "from langchain_community.llms import AzureOpenAI" ] }, { diff --git a/docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb b/docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb index 5b3e823fb6b..36a2149e152 100644 --- 
a/docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb +++ b/docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb @@ -74,7 +74,7 @@ "\"\"\"For basic init and call\"\"\"\n", "import os\n", "\n", - "from langchain.llms import QianfanLLMEndpoint\n", + "from langchain_community.llms import QianfanLLMEndpoint\n", "\n", "os.environ[\"QIANFAN_AK\"] = \"your_ak\"\n", "os.environ[\"QIANFAN_SK\"] = \"your_sk\"\n", diff --git a/docs/docs/integrations/llms/banana.ipynb b/docs/docs/integrations/llms/banana.ipynb index ab95acdd2e0..72e9b5488e4 100644 --- a/docs/docs/integrations/llms/banana.ipynb +++ b/docs/docs/integrations/llms/banana.ipynb @@ -52,8 +52,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import Banana\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import Banana" ] }, { diff --git a/docs/docs/integrations/llms/beam.ipynb b/docs/docs/integrations/llms/beam.ipynb index 091ae1da98e..3d93912848d 100644 --- a/docs/docs/integrations/llms/beam.ipynb +++ b/docs/docs/integrations/llms/beam.ipynb @@ -111,7 +111,7 @@ }, "outputs": [], "source": [ - "from langchain.llms.beam import Beam\n", + "from langchain_community.llms.beam import Beam\n", "\n", "llm = Beam(\n", " model_name=\"gpt2\",\n", diff --git a/docs/docs/integrations/llms/bedrock.ipynb b/docs/docs/integrations/llms/bedrock.ipynb index 6439815a9ac..2c26650f513 100644 --- a/docs/docs/integrations/llms/bedrock.ipynb +++ b/docs/docs/integrations/llms/bedrock.ipynb @@ -39,7 +39,7 @@ }, "outputs": [], "source": [ - "from langchain.llms import Bedrock\n", + "from langchain_community.llms import Bedrock\n", "\n", "llm = Bedrock(\n", " credentials_profile_name=\"bedrock-admin\", model_id=\"amazon.titan-text-express-v1\"\n", @@ -84,7 +84,7 @@ "outputs": [], "source": [ "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", - "from langchain.llms import Bedrock\n", + "from langchain_community.llms import Bedrock\n", "\n", "llm = Bedrock(\n", " credentials_profile_name=\"bedrock-admin\",\n", diff --git a/docs/docs/integrations/llms/bittensor.ipynb b/docs/docs/integrations/llms/bittensor.ipynb index d00b4a9982b..200a0af3e54 100644 --- a/docs/docs/integrations/llms/bittensor.ipynb +++ b/docs/docs/integrations/llms/bittensor.ipynb @@ -34,7 +34,7 @@ "from pprint import pprint\n", "\n", "from langchain.globals import set_debug\n", - "from langchain.llms import NIBittensorLLM\n", + "from langchain_community.llms import NIBittensorLLM\n", "\n", "set_debug(True)\n", "\n", @@ -82,8 +82,8 @@ "source": [ "from langchain.chains import LLMChain\n", "from langchain.globals import set_debug\n", - "from langchain.llms import NIBittensorLLM\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import NIBittensorLLM\n", "\n", "set_debug(True)\n", "\n", @@ -141,9 +141,9 @@ " ZeroShotAgent,\n", ")\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import NIBittensorLLM\n", "from langchain.memory import ConversationBufferMemory\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import NIBittensorLLM\n", "\n", "memory = ConversationBufferMemory(memory_key=\"chat_history\")\n", "\n", diff --git a/docs/docs/integrations/llms/cerebriumai.ipynb b/docs/docs/integrations/llms/cerebriumai.ipynb index 051a82cdcc1..a557a7c2a7c 100644 --- a/docs/docs/integrations/llms/cerebriumai.ipynb +++ 
b/docs/docs/integrations/llms/cerebriumai.ipynb @@ -45,8 +45,8 @@ "import os\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import CerebriumAI\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import CerebriumAI" ] }, { diff --git a/docs/docs/integrations/llms/chatglm.ipynb b/docs/docs/integrations/llms/chatglm.ipynb index 82867b0f091..53153a184ac 100644 --- a/docs/docs/integrations/llms/chatglm.ipynb +++ b/docs/docs/integrations/llms/chatglm.ipynb @@ -22,8 +22,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import ChatGLM\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import ChatGLM\n", "\n", "# import os" ] diff --git a/docs/docs/integrations/llms/clarifai.ipynb b/docs/docs/integrations/llms/clarifai.ipynb index bac65941b39..16b03b2f33c 100644 --- a/docs/docs/integrations/llms/clarifai.ipynb +++ b/docs/docs/integrations/llms/clarifai.ipynb @@ -87,8 +87,8 @@ "source": [ "# Import the required modules\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import Clarifai\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import Clarifai" ] }, { diff --git a/docs/docs/integrations/llms/cloudflare_workersai.ipynb b/docs/docs/integrations/llms/cloudflare_workersai.ipynb index 3f12130b201..a4cf57440a0 100644 --- a/docs/docs/integrations/llms/cloudflare_workersai.ipynb +++ b/docs/docs/integrations/llms/cloudflare_workersai.ipynb @@ -19,8 +19,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms.cloudflare_workersai import CloudflareWorkersAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI\n", "\n", "template = \"\"\"Human: {question}\n", "\n", diff --git a/docs/docs/integrations/llms/cohere.ipynb b/docs/docs/integrations/llms/cohere.ipynb index 7f0786d33e2..a4f93a1fd59 100644 --- a/docs/docs/integrations/llms/cohere.ipynb +++ b/docs/docs/integrations/llms/cohere.ipynb @@ -59,8 +59,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import Cohere\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import Cohere" ] }, { diff --git a/docs/docs/integrations/llms/ctransformers.ipynb b/docs/docs/integrations/llms/ctransformers.ipynb index 424be57c9dc..a304f9ed7f3 100644 --- a/docs/docs/integrations/llms/ctransformers.ipynb +++ b/docs/docs/integrations/llms/ctransformers.ipynb @@ -43,7 +43,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import CTransformers\n", + "from langchain_community.llms import CTransformers\n", "\n", "llm = CTransformers(model=\"marella/gpt-2-ggml\")" ] diff --git a/docs/docs/integrations/llms/ctranslate2.ipynb b/docs/docs/integrations/llms/ctranslate2.ipynb index ebf20e4bd74..e38e9e8a5dd 100644 --- a/docs/docs/integrations/llms/ctranslate2.ipynb +++ b/docs/docs/integrations/llms/ctranslate2.ipynb @@ -60,7 +60,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import CTranslate2\n", + "from langchain_community.llms import CTranslate2\n", "\n", "llm = CTranslate2(\n", " # output_dir from above:\n", diff --git a/docs/docs/integrations/llms/databricks.ipynb 
b/docs/docs/integrations/llms/databricks.ipynb index 0773eff8d20..7531c781025 100644 --- a/docs/docs/integrations/llms/databricks.ipynb +++ b/docs/docs/integrations/llms/databricks.ipynb @@ -70,7 +70,7 @@ } ], "source": [ - "from langchain.chat_models import ChatDatabricks\n", + "from langchain_community.chat_models import ChatDatabricks\n", "from langchain_core.messages import HumanMessage\n", "from mlflow.deployments import get_deploy_client\n", "\n", @@ -128,7 +128,7 @@ } ], "source": [ - "from langchain.embeddings import DatabricksEmbeddings\n", + "from langchain_community.embeddings import DatabricksEmbeddings\n", "\n", "embeddings = DatabricksEmbeddings(endpoint=\"databricks-bge-large-en\")\n", "embeddings.embed_query(\"hello\")[:3]" @@ -171,7 +171,7 @@ } ], "source": [ - "from langchain.llms import Databricks\n", + "from langchain_community.llms import Databricks\n", "\n", "# If running a Databricks notebook attached to an interactive cluster in \"single user\"\n", "# or \"no isolation shared\" mode, you only need to specify the endpoint name to create\n", diff --git a/docs/docs/integrations/llms/deepinfra.ipynb b/docs/docs/integrations/llms/deepinfra.ipynb index 257e7ed1278..14d2730a093 100644 --- a/docs/docs/integrations/llms/deepinfra.ipynb +++ b/docs/docs/integrations/llms/deepinfra.ipynb @@ -70,7 +70,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import DeepInfra\n", + "from langchain_community.llms import DeepInfra\n", "\n", "llm = DeepInfra(model_id=\"meta-llama/Llama-2-70b-chat-hf\")\n", "llm.model_kwargs = {\n", diff --git a/docs/docs/integrations/llms/deepsparse.ipynb b/docs/docs/integrations/llms/deepsparse.ipynb index 886a147ebb1..bdec459e733 100644 --- a/docs/docs/integrations/llms/deepsparse.ipynb +++ b/docs/docs/integrations/llms/deepsparse.ipynb @@ -26,7 +26,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import DeepSparse\n", + "from langchain_community.llms import DeepSparse\n", "\n", "llm = DeepSparse(\n", " model=\"zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base-none\"\n", diff --git a/docs/docs/integrations/llms/edenai.ipynb b/docs/docs/integrations/llms/edenai.ipynb index a4267511b7b..b42fc5d9875 100644 --- a/docs/docs/integrations/llms/edenai.ipynb +++ b/docs/docs/integrations/llms/edenai.ipynb @@ -54,7 +54,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import EdenAI" + "from langchain_community.llms import EdenAI" ] }, { @@ -190,7 +190,7 @@ "outputs": [], "source": [ "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", - "from langchain.llms import EdenAI\n", + "from langchain_community.llms import EdenAI\n", "\n", "llm = EdenAI(\n", " callbacks=[StreamingStdOutCallbackHandler()],\n", diff --git a/docs/docs/integrations/llms/fireworks.ipynb b/docs/docs/integrations/llms/fireworks.ipynb index 5144e7a52f6..0782474a626 100644 --- a/docs/docs/integrations/llms/fireworks.ipynb +++ b/docs/docs/integrations/llms/fireworks.ipynb @@ -21,8 +21,8 @@ "source": [ "import os\n", "\n", - "from langchain.llms.fireworks import Fireworks\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms.fireworks import Fireworks" ] }, { @@ -205,8 +205,8 @@ } ], "source": [ - "from langchain.llms.fireworks import Fireworks\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms.fireworks import Fireworks\n", "\n", "llm = 
Fireworks(\n", " model=\"accounts/fireworks/models/llama-v2-13b\",\n", diff --git a/docs/docs/integrations/llms/forefrontai.ipynb b/docs/docs/integrations/llms/forefrontai.ipynb index ea3b9fb32e6..075436f7d8b 100644 --- a/docs/docs/integrations/llms/forefrontai.ipynb +++ b/docs/docs/integrations/llms/forefrontai.ipynb @@ -28,8 +28,8 @@ "import os\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import ForefrontAI\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import ForefrontAI" ] }, { diff --git a/docs/docs/integrations/llms/gigachat.ipynb b/docs/docs/integrations/llms/gigachat.ipynb index 617cb46d579..8e61763f74e 100644 --- a/docs/docs/integrations/llms/gigachat.ipynb +++ b/docs/docs/integrations/llms/gigachat.ipynb @@ -54,7 +54,7 @@ }, "outputs": [], "source": [ - "from langchain.llms import GigaChat\n", + "from langchain_community.llms import GigaChat\n", "\n", "llm = GigaChat(verify_ssl_certs=False)" ] diff --git a/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb b/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb index c61a6ed04f2..217691f6daf 100644 --- a/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb +++ b/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb @@ -76,7 +76,7 @@ } ], "source": [ - "from langchain.llms import VertexAI\n", + "from langchain_community.llms import VertexAI\n", "\n", "llm = VertexAI()\n", "print(llm(\"What are some of the pros and cons of Python as a programming language?\"))" @@ -414,7 +414,7 @@ } ], "source": [ - "from langchain.chat_models import ChatVertexAI\n", + "from langchain_community.chat_models import ChatVertexAI\n", "from langchain_core.messages import HumanMessage\n", "\n", "llm = ChatVertexAI(model_name=\"gemini-ultra-vision\")\n", @@ -592,7 +592,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import VertexAIModelGarden" + "from langchain_community.llms import VertexAIModelGarden" ] }, { diff --git a/docs/docs/integrations/llms/gooseai.ipynb b/docs/docs/integrations/llms/gooseai.ipynb index 30b2221373a..c2a15c50a7a 100644 --- a/docs/docs/integrations/llms/gooseai.ipynb +++ b/docs/docs/integrations/llms/gooseai.ipynb @@ -44,8 +44,8 @@ "import os\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import GooseAI\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import GooseAI" ] }, { diff --git a/docs/docs/integrations/llms/gpt4all.ipynb b/docs/docs/integrations/llms/gpt4all.ipynb index 5b0b59f6ac9..7e1004c982b 100644 --- a/docs/docs/integrations/llms/gpt4all.ipynb +++ b/docs/docs/integrations/llms/gpt4all.ipynb @@ -49,8 +49,8 @@ "source": [ "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import GPT4All\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import GPT4All" ] }, { diff --git a/docs/docs/integrations/llms/gradient.ipynb b/docs/docs/integrations/llms/gradient.ipynb index 4e769ea576a..372255b1f9a 100644 --- a/docs/docs/integrations/llms/gradient.ipynb +++ b/docs/docs/integrations/llms/gradient.ipynb @@ -25,8 +25,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import GradientLLM\n", - "from langchain.prompts import 
PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import GradientLLM" ] }, { diff --git a/docs/docs/integrations/llms/huggingface_hub.ipynb b/docs/docs/integrations/llms/huggingface_hub.ipynb index 2e5992ac21c..6294967180d 100644 --- a/docs/docs/integrations/llms/huggingface_hub.ipynb +++ b/docs/docs/integrations/llms/huggingface_hub.ipynb @@ -91,7 +91,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import HuggingFaceHub" + "from langchain_community.llms import HuggingFaceHub" ] }, { diff --git a/docs/docs/integrations/llms/huggingface_pipelines.ipynb b/docs/docs/integrations/llms/huggingface_pipelines.ipynb index b6d41475ddd..59dfceafdf8 100644 --- a/docs/docs/integrations/llms/huggingface_pipelines.ipynb +++ b/docs/docs/integrations/llms/huggingface_pipelines.ipynb @@ -55,7 +55,7 @@ }, "outputs": [], "source": [ - "from langchain.llms.huggingface_pipeline import HuggingFacePipeline\n", + "from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline\n", "\n", "hf = HuggingFacePipeline.from_model_id(\n", " model_id=\"gpt2\",\n", @@ -78,7 +78,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms.huggingface_pipeline import HuggingFacePipeline\n", + "from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline\n", "from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n", "\n", "model_id = \"gpt2\"\n", diff --git a/docs/docs/integrations/llms/huggingface_textgen_inference.ipynb b/docs/docs/integrations/llms/huggingface_textgen_inference.ipynb index 395a5d8bad9..e9b5e31c386 100644 --- a/docs/docs/integrations/llms/huggingface_textgen_inference.ipynb +++ b/docs/docs/integrations/llms/huggingface_textgen_inference.ipynb @@ -35,7 +35,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import HuggingFaceTextGenInference\n", + "from langchain_community.llms import HuggingFaceTextGenInference\n", "\n", "llm = HuggingFaceTextGenInference(\n", " inference_server_url=\"http://localhost:8010/\",\n", @@ -63,7 +63,7 @@ "outputs": [], "source": [ "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", - "from langchain.llms import HuggingFaceTextGenInference\n", + "from langchain_community.llms import HuggingFaceTextGenInference\n", "\n", "llm = HuggingFaceTextGenInference(\n", " inference_server_url=\"http://localhost:8010/\",\n", diff --git a/docs/docs/integrations/llms/javelin.ipynb b/docs/docs/integrations/llms/javelin.ipynb index e067a066db5..935dd0c6e10 100644 --- a/docs/docs/integrations/llms/javelin.ipynb +++ b/docs/docs/integrations/llms/javelin.ipynb @@ -92,8 +92,8 @@ ], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import JavelinAIGateway\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import JavelinAIGateway\n", "\n", "route_completions = \"eng_dept03\"\n", "\n", @@ -140,7 +140,7 @@ } ], "source": [ - "from langchain.embeddings import JavelinAIGatewayEmbeddings\n", + "from langchain_community.embeddings import JavelinAIGatewayEmbeddings\n", "\n", "embeddings = JavelinAIGatewayEmbeddings(\n", " gateway_uri=\"http://localhost:8000\", # replace with service URL or host/port of Javelin\n", @@ -180,8 +180,8 @@ } ], "source": [ - "from langchain.chat_models import ChatJavelinAIGateway\n", "from langchain.schema import HumanMessage, SystemMessage\n", + "from langchain_community.chat_models import ChatJavelinAIGateway\n", "\n", "messages = [\n", 
" SystemMessage(\n", diff --git a/docs/docs/integrations/llms/jsonformer_experimental.ipynb b/docs/docs/integrations/llms/jsonformer_experimental.ipynb index 8acf15618be..496e4af94c3 100644 --- a/docs/docs/integrations/llms/jsonformer_experimental.ipynb +++ b/docs/docs/integrations/llms/jsonformer_experimental.ipynb @@ -152,7 +152,7 @@ } ], "source": [ - "from langchain.llms import HuggingFacePipeline\n", + "from langchain_community.llms import HuggingFacePipeline\n", "from transformers import pipeline\n", "\n", "hf_model = pipeline(\n", diff --git a/docs/docs/integrations/llms/koboldai.ipynb b/docs/docs/integrations/llms/koboldai.ipynb index 8cdc2752918..d55a73efc26 100644 --- a/docs/docs/integrations/llms/koboldai.ipynb +++ b/docs/docs/integrations/llms/koboldai.ipynb @@ -23,7 +23,7 @@ }, "outputs": [], "source": [ - "from langchain.llms import KoboldApiLLM" + "from langchain_community.llms import KoboldApiLLM" ] }, { diff --git a/docs/docs/integrations/llms/llamacpp.ipynb b/docs/docs/integrations/llms/llamacpp.ipynb index b0e814166a8..192c842dc48 100644 --- a/docs/docs/integrations/llms/llamacpp.ipynb +++ b/docs/docs/integrations/llms/llamacpp.ipynb @@ -195,8 +195,8 @@ "from langchain.callbacks.manager import CallbackManager\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import LlamaCpp\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import LlamaCpp" ] }, { diff --git a/docs/docs/integrations/llms/llm_caching.ipynb b/docs/docs/integrations/llms/llm_caching.ipynb index 70cbff7fad2..f0de67588e8 100644 --- a/docs/docs/integrations/llms/llm_caching.ipynb +++ b/docs/docs/integrations/llms/llm_caching.ipynb @@ -18,7 +18,7 @@ "outputs": [], "source": [ "from langchain.globals import set_llm_cache\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "# To make the caching really obvious, lets use a slower model.\n", "llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)" @@ -415,7 +415,7 @@ "outputs": [], "source": [ "from langchain.cache import RedisSemanticCache\n", - "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "set_llm_cache(\n", " RedisSemanticCache(redis_url=\"redis://localhost:6379\", embedding=OpenAIEmbeddings())\n", @@ -1059,7 +1059,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "embedding = OpenAIEmbeddings()" ] @@ -1267,7 +1267,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "embedding = OpenAIEmbeddings()" ] diff --git a/docs/docs/integrations/llms/lmformatenforcer_experimental.ipynb b/docs/docs/integrations/llms/lmformatenforcer_experimental.ipynb index d648064f970..7e363131d21 100644 --- a/docs/docs/integrations/llms/lmformatenforcer_experimental.ipynb +++ b/docs/docs/integrations/llms/lmformatenforcer_experimental.ipynb @@ -195,7 +195,7 @@ } ], "source": [ - "from langchain.llms import HuggingFacePipeline\n", + "from langchain_community.llms import HuggingFacePipeline\n", "from transformers import pipeline\n", "\n", "hf_model = pipeline(\n", diff --git 
a/docs/docs/integrations/llms/manifest.ipynb b/docs/docs/integrations/llms/manifest.ipynb index 9ab68ad626d..2553c107c25 100644 --- a/docs/docs/integrations/llms/manifest.ipynb +++ b/docs/docs/integrations/llms/manifest.ipynb @@ -41,7 +41,7 @@ }, "outputs": [], "source": [ - "from langchain.llms.manifest import ManifestWrapper\n", + "from langchain_community.llms.manifest import ManifestWrapper\n", "from manifest import Manifest" ] }, diff --git a/docs/docs/integrations/llms/minimax.ipynb b/docs/docs/integrations/llms/minimax.ipynb index 428cf8e54f0..efb3a924b5d 100644 --- a/docs/docs/integrations/llms/minimax.ipynb +++ b/docs/docs/integrations/llms/minimax.ipynb @@ -36,7 +36,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import Minimax" + "from langchain_community.llms import Minimax" ] }, { @@ -97,8 +97,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import Minimax\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import Minimax" ] }, { diff --git a/docs/docs/integrations/llms/modal.ipynb b/docs/docs/integrations/llms/modal.ipynb index 27f2534c191..81dafed6ac7 100644 --- a/docs/docs/integrations/llms/modal.ipynb +++ b/docs/docs/integrations/llms/modal.ipynb @@ -108,8 +108,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import Modal\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import Modal" ] }, { diff --git a/docs/docs/integrations/llms/mosaicml.ipynb b/docs/docs/integrations/llms/mosaicml.ipynb index 04011ada9a2..c20bf8c4f2c 100644 --- a/docs/docs/integrations/llms/mosaicml.ipynb +++ b/docs/docs/integrations/llms/mosaicml.ipynb @@ -43,8 +43,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import MosaicML\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import MosaicML" ] }, { diff --git a/docs/docs/integrations/llms/nlpcloud.ipynb b/docs/docs/integrations/llms/nlpcloud.ipynb index f5e42eaa68b..f647b3e8180 100644 --- a/docs/docs/integrations/llms/nlpcloud.ipynb +++ b/docs/docs/integrations/llms/nlpcloud.ipynb @@ -73,8 +73,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import NLPCloud\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import NLPCloud" ] }, { diff --git a/docs/docs/integrations/llms/octoai.ipynb b/docs/docs/integrations/llms/octoai.ipynb index bfed1da79fe..aceeee284c5 100644 --- a/docs/docs/integrations/llms/octoai.ipynb +++ b/docs/docs/integrations/llms/octoai.ipynb @@ -43,8 +43,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms.octoai_endpoint import OctoAIEndpoint\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms.octoai_endpoint import OctoAIEndpoint" ] }, { diff --git a/docs/docs/integrations/llms/ollama.ipynb b/docs/docs/integrations/llms/ollama.ipynb index 6b427cac174..9637c965ccd 100644 --- a/docs/docs/integrations/llms/ollama.ipynb +++ b/docs/docs/integrations/llms/ollama.ipynb @@ -59,7 +59,7 @@ "source": [ "from langchain.callbacks.manager import 
CallbackManager\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", - "from langchain.llms import Ollama\n", + "from langchain_community.llms import Ollama\n", "\n", "llm = Ollama(model=\"llama2\")" ] @@ -119,7 +119,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import Ollama\n", + "from langchain_community.llms import Ollama\n", "\n", "bakllava = Ollama(model=\"bakllava\")" ] diff --git a/docs/docs/integrations/llms/opaqueprompts.ipynb b/docs/docs/integrations/llms/opaqueprompts.ipynb index 7af61e5ab7f..936b5d339ea 100644 --- a/docs/docs/integrations/llms/opaqueprompts.ipynb +++ b/docs/docs/integrations/llms/opaqueprompts.ipynb @@ -61,9 +61,9 @@ "from langchain.callbacks.stdout import StdOutCallbackHandler\n", "from langchain.chains import LLMChain\n", "from langchain.globals import set_debug, set_verbose\n", - "from langchain.llms import OpaquePrompts, OpenAI\n", "from langchain.memory import ConversationBufferWindowMemory\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpaquePrompts, OpenAI\n", "\n", "set_debug(True)\n", "set_verbose(True)\n", diff --git a/docs/docs/integrations/llms/openai.ipynb b/docs/docs/integrations/llms/openai.ipynb index 3f3bbed3b64..72ad79b1436 100644 --- a/docs/docs/integrations/llms/openai.ipynb +++ b/docs/docs/integrations/llms/openai.ipynb @@ -67,8 +67,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/integrations/llms/openllm.ipynb b/docs/docs/integrations/llms/openllm.ipynb index e5c019b2b66..2adbda71e43 100644 --- a/docs/docs/integrations/llms/openllm.ipynb +++ b/docs/docs/integrations/llms/openllm.ipynb @@ -54,7 +54,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenLLM\n", + "from langchain_community.llms import OpenLLM\n", "\n", "server_url = \"http://localhost:3000\" # Replace with remote host if you are running on a remote server\n", "llm = OpenLLM(server_url=server_url)" @@ -81,7 +81,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenLLM\n", + "from langchain_community.llms import OpenLLM\n", "\n", "llm = OpenLLM(\n", " model_name=\"dolly-v2\",\n", diff --git a/docs/docs/integrations/llms/openlm.ipynb b/docs/docs/integrations/llms/openlm.ipynb index a992bd442d2..0ccb64de79f 100644 --- a/docs/docs/integrations/llms/openlm.ipynb +++ b/docs/docs/integrations/llms/openlm.ipynb @@ -69,8 +69,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenLM\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenLM" ] }, { diff --git a/docs/docs/integrations/llms/pai_eas_endpoint.ipynb b/docs/docs/integrations/llms/pai_eas_endpoint.ipynb index 2f5baee551a..6fd85b7d58f 100644 --- a/docs/docs/integrations/llms/pai_eas_endpoint.ipynb +++ b/docs/docs/integrations/llms/pai_eas_endpoint.ipynb @@ -15,8 +15,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms.pai_eas_endpoint import PaiEasEndpoint\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint\n", "\n", "template = \"\"\"Question: {question}\n", 
"\n", diff --git a/docs/docs/integrations/llms/petals.ipynb b/docs/docs/integrations/llms/petals.ipynb index 582a982d8fe..4d0900a53ee 100644 --- a/docs/docs/integrations/llms/petals.ipynb +++ b/docs/docs/integrations/llms/petals.ipynb @@ -46,8 +46,8 @@ "import os\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import Petals\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import Petals" ] }, { diff --git a/docs/docs/integrations/llms/pipelineai.ipynb b/docs/docs/integrations/llms/pipelineai.ipynb index cb7225cc251..8045413aa41 100644 --- a/docs/docs/integrations/llms/pipelineai.ipynb +++ b/docs/docs/integrations/llms/pipelineai.ipynb @@ -51,8 +51,8 @@ "import os\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import PipelineAI\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import PipelineAI" ] }, { diff --git a/docs/docs/integrations/llms/predibase.ipynb b/docs/docs/integrations/llms/predibase.ipynb index ec1924b12a1..e5e18279b5e 100644 --- a/docs/docs/integrations/llms/predibase.ipynb +++ b/docs/docs/integrations/llms/predibase.ipynb @@ -47,7 +47,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import Predibase\n", + "from langchain_community.llms import Predibase\n", "\n", "model = Predibase(\n", " model=\"vicuna-13b\", predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\")\n", @@ -166,7 +166,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import Predibase\n", + "from langchain_community.llms import Predibase\n", "\n", "model = Predibase(\n", " model=\"my-finetuned-LLM\", predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\")\n", diff --git a/docs/docs/integrations/llms/predictionguard.ipynb b/docs/docs/integrations/llms/predictionguard.ipynb index dfdba4af199..d0f3dd6d4bb 100644 --- a/docs/docs/integrations/llms/predictionguard.ipynb +++ b/docs/docs/integrations/llms/predictionguard.ipynb @@ -32,8 +32,8 @@ "import os\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import PredictionGuard\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import PredictionGuard" ] }, { diff --git a/docs/docs/integrations/llms/promptlayer_openai.ipynb b/docs/docs/integrations/llms/promptlayer_openai.ipynb index eea93414457..1a6049a5d01 100644 --- a/docs/docs/integrations/llms/promptlayer_openai.ipynb +++ b/docs/docs/integrations/llms/promptlayer_openai.ipynb @@ -61,7 +61,7 @@ "import os\n", "\n", "import promptlayer\n", - "from langchain.llms import PromptLayerOpenAI" + "from langchain_community.llms import PromptLayerOpenAI" ] }, { diff --git a/docs/docs/integrations/llms/rellm_experimental.ipynb b/docs/docs/integrations/llms/rellm_experimental.ipynb index c783e434c16..08e9c4c6b75 100644 --- a/docs/docs/integrations/llms/rellm_experimental.ipynb +++ b/docs/docs/integrations/llms/rellm_experimental.ipynb @@ -92,7 +92,7 @@ } ], "source": [ - "from langchain.llms import HuggingFacePipeline\n", + "from langchain_community.llms import HuggingFacePipeline\n", "from transformers import pipeline\n", "\n", "hf_model = pipeline(\n", diff --git a/docs/docs/integrations/llms/replicate.ipynb b/docs/docs/integrations/llms/replicate.ipynb index 9b9fc37b473..bb3d1f6d866 100644 --- a/docs/docs/integrations/llms/replicate.ipynb 
+++ b/docs/docs/integrations/llms/replicate.ipynb @@ -104,8 +104,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import Replicate\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import Replicate" ] }, { diff --git a/docs/docs/integrations/llms/runhouse.ipynb b/docs/docs/integrations/llms/runhouse.ipynb index a536214ffc7..59bd92ef60b 100644 --- a/docs/docs/integrations/llms/runhouse.ipynb +++ b/docs/docs/integrations/llms/runhouse.ipynb @@ -45,8 +45,8 @@ "source": [ "import runhouse as rh\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import SelfHostedHuggingFaceLLM, SelfHostedPipeline\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import SelfHostedHuggingFaceLLM, SelfHostedPipeline" ] }, { diff --git a/docs/docs/integrations/llms/sagemaker.ipynb b/docs/docs/integrations/llms/sagemaker.ipynb index 101f9d81ab0..594f39e186f 100644 --- a/docs/docs/integrations/llms/sagemaker.ipynb +++ b/docs/docs/integrations/llms/sagemaker.ipynb @@ -104,9 +104,9 @@ "\n", "import boto3\n", "from langchain.chains.question_answering import load_qa_chain\n", - "from langchain.llms import SagemakerEndpoint\n", - "from langchain.llms.sagemaker_endpoint import LLMContentHandler\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import SagemakerEndpoint\n", + "from langchain_community.llms.sagemaker_endpoint import LLMContentHandler\n", "\n", "query = \"\"\"How long was Elizabeth hospitalized?\n", "\"\"\"\n", @@ -174,9 +174,9 @@ "from typing import Dict\n", "\n", "from langchain.chains.question_answering import load_qa_chain\n", - "from langchain.llms import SagemakerEndpoint\n", - "from langchain.llms.sagemaker_endpoint import LLMContentHandler\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import SagemakerEndpoint\n", + "from langchain_community.llms.sagemaker_endpoint import LLMContentHandler\n", "\n", "query = \"\"\"How long was Elizabeth hospitalized?\n", "\"\"\"\n", diff --git a/docs/docs/integrations/llms/stochasticai.ipynb b/docs/docs/integrations/llms/stochasticai.ipynb index 1b226861b26..1f3ecd98d0d 100644 --- a/docs/docs/integrations/llms/stochasticai.ipynb +++ b/docs/docs/integrations/llms/stochasticai.ipynb @@ -80,8 +80,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import StochasticAI\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import StochasticAI" ] }, { diff --git a/docs/docs/integrations/llms/symblai_nebula.ipynb b/docs/docs/integrations/llms/symblai_nebula.ipynb index 3e3f97cd782..cbea2f6e9cb 100644 --- a/docs/docs/integrations/llms/symblai_nebula.ipynb +++ b/docs/docs/integrations/llms/symblai_nebula.ipynb @@ -34,7 +34,7 @@ }, "outputs": [], "source": [ - "from langchain.llms.symblai_nebula import Nebula\n", + "from langchain_community.llms.symblai_nebula import Nebula\n", "\n", "llm = Nebula(nebula_api_key=\"\")" ] diff --git a/docs/docs/integrations/llms/textgen.ipynb b/docs/docs/integrations/llms/textgen.ipynb index 3c56d91179f..b7bf0941c05 100644 --- a/docs/docs/integrations/llms/textgen.ipynb +++ b/docs/docs/integrations/llms/textgen.ipynb @@ -43,8 +43,8 @@ "source": [ "from langchain.chains import LLMChain\n", "from 
langchain.globals import set_debug\n", - "from langchain.llms import TextGen\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import TextGen\n", "\n", "set_debug(True)\n", "\n", @@ -94,8 +94,8 @@ "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", "from langchain.chains import LLMChain\n", "from langchain.globals import set_debug\n", - "from langchain.llms import TextGen\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import TextGen\n", "\n", "set_debug(True)\n", "\n", diff --git a/docs/docs/integrations/llms/titan_takeoff.ipynb b/docs/docs/integrations/llms/titan_takeoff.ipynb index 533f7b8e5b7..966870610c3 100644 --- a/docs/docs/integrations/llms/titan_takeoff.ipynb +++ b/docs/docs/integrations/llms/titan_takeoff.ipynb @@ -87,7 +87,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import TitanTakeoff\n", + "from langchain_community.llms import TitanTakeoff\n", "\n", "llm = TitanTakeoff(\n", " base_url=\"http://localhost:8000\", generate_max_length=128, temperature=1.0\n", diff --git a/docs/docs/integrations/llms/titan_takeoff_pro.ipynb b/docs/docs/integrations/llms/titan_takeoff_pro.ipynb index 3e5a922c746..37d108fe3b3 100644 --- a/docs/docs/integrations/llms/titan_takeoff_pro.ipynb +++ b/docs/docs/integrations/llms/titan_takeoff_pro.ipynb @@ -32,8 +32,8 @@ "source": [ "from langchain.callbacks.manager import CallbackManager\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", - "from langchain.llms import TitanTakeoffPro\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import TitanTakeoffPro\n", "\n", "# Example 1: Basic use\n", "llm = TitanTakeoffPro()\n", diff --git a/docs/docs/integrations/llms/together.ipynb b/docs/docs/integrations/llms/together.ipynb index d6e89e4c1b2..cb6aa56d3cc 100644 --- a/docs/docs/integrations/llms/together.ipynb +++ b/docs/docs/integrations/llms/together.ipynb @@ -37,7 +37,7 @@ } ], "source": [ - "from langchain.llms import Together\n", + "from langchain_community.llms import Together\n", "\n", "llm = Together(\n", " model=\"togethercomputer/RedPajama-INCITE-7B-Base\",\n", diff --git a/docs/docs/integrations/llms/tongyi.ipynb b/docs/docs/integrations/llms/tongyi.ipynb index fbdfdad836f..010584097f5 100644 --- a/docs/docs/integrations/llms/tongyi.ipynb +++ b/docs/docs/integrations/llms/tongyi.ipynb @@ -76,8 +76,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import Tongyi\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import Tongyi" ] }, { diff --git a/docs/docs/integrations/llms/vllm.ipynb b/docs/docs/integrations/llms/vllm.ipynb index d95b2129cc4..0bd25873bab 100644 --- a/docs/docs/integrations/llms/vllm.ipynb +++ b/docs/docs/integrations/llms/vllm.ipynb @@ -71,7 +71,7 @@ } ], "source": [ - "from langchain.llms import VLLM\n", + "from langchain_community.llms import VLLM\n", "\n", "llm = VLLM(\n", " model=\"mosaicml/mpt-7b\",\n", @@ -164,7 +164,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import VLLM\n", + "from langchain_community.llms import VLLM\n", "\n", "llm = VLLM(\n", " model=\"mosaicml/mpt-30b\",\n", @@ -208,7 +208,7 @@ } ], "source": [ - "from langchain.llms import VLLMOpenAI\n", + "from langchain_community.llms import VLLMOpenAI\n", "\n", "llm = VLLMOpenAI(\n", " 
openai_api_key=\"EMPTY\",\n", diff --git a/docs/docs/integrations/llms/volcengine_maas.ipynb b/docs/docs/integrations/llms/volcengine_maas.ipynb index c78d8d82cff..dba36dcd18a 100644 --- a/docs/docs/integrations/llms/volcengine_maas.ipynb +++ b/docs/docs/integrations/llms/volcengine_maas.ipynb @@ -38,8 +38,8 @@ }, "outputs": [], "source": [ - "from langchain.llms import VolcEngineMaasLLM\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import VolcEngineMaasLLM\n", "from langchain_core.output_parsers import StrOutputParser" ] }, diff --git a/docs/docs/integrations/llms/watsonxllm.ipynb b/docs/docs/integrations/llms/watsonxllm.ipynb index 7a2676f5ee5..00dd88d5304 100644 --- a/docs/docs/integrations/llms/watsonxllm.ipynb +++ b/docs/docs/integrations/llms/watsonxllm.ipynb @@ -105,7 +105,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import WatsonxLLM\n", + "from langchain_community.llms import WatsonxLLM\n", "\n", "watsonx_llm = WatsonxLLM(\n", " model_id=\"google/flan-ul2\",\n", diff --git a/docs/docs/integrations/llms/writer.ipynb b/docs/docs/integrations/llms/writer.ipynb index a74fc0564a3..fed4af16690 100644 --- a/docs/docs/integrations/llms/writer.ipynb +++ b/docs/docs/integrations/llms/writer.ipynb @@ -56,8 +56,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import Writer\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import Writer" ] }, { diff --git a/docs/docs/integrations/llms/xinference.ipynb b/docs/docs/integrations/llms/xinference.ipynb index 5d5af24c716..377b65ace81 100644 --- a/docs/docs/integrations/llms/xinference.ipynb +++ b/docs/docs/integrations/llms/xinference.ipynb @@ -87,7 +87,7 @@ } ], "source": [ - "from langchain.llms import Xinference\n", + "from langchain_community.llms import Xinference\n", "\n", "llm = Xinference(\n", " server_url=\"http://0.0.0.0:9997\", model_uid=\"7167b2b0-2a04-11ee-83f0-d29396a3f064\"\n", diff --git a/docs/docs/integrations/llms/yandex.ipynb b/docs/docs/integrations/llms/yandex.ipynb index adddce4d8b2..4b31fb76e16 100644 --- a/docs/docs/integrations/llms/yandex.ipynb +++ b/docs/docs/integrations/llms/yandex.ipynb @@ -45,8 +45,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import YandexGPT\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import YandexGPT" ] }, { diff --git a/docs/docs/integrations/memory/aws_dynamodb.ipynb b/docs/docs/integrations/memory/aws_dynamodb.ipynb index 942ea065fb0..93931d98db1 100644 --- a/docs/docs/integrations/memory/aws_dynamodb.ipynb +++ b/docs/docs/integrations/memory/aws_dynamodb.ipynb @@ -248,8 +248,8 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, Tool, initialize_agent\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.memory import ConversationBufferMemory\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_experimental.utilities import PythonREPL" ] }, diff --git a/docs/docs/integrations/memory/motorhead_memory.ipynb b/docs/docs/integrations/memory/motorhead_memory.ipynb index e30a30f36e8..7468a69a943 100644 --- a/docs/docs/integrations/memory/motorhead_memory.ipynb +++ b/docs/docs/integrations/memory/motorhead_memory.ipynb @@ -36,8 +36,8 @@ "outputs": [], "source": [ "from langchain.chains import 
LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "\n", "template = \"\"\"You are a chatbot having a conversation with a human.\n", "\n", diff --git a/docs/docs/integrations/memory/remembrall.md b/docs/docs/integrations/memory/remembrall.md index e68f21c0ade..dd46138d740 100644 --- a/docs/docs/integrations/memory/remembrall.md +++ b/docs/docs/integrations/memory/remembrall.md @@ -21,7 +21,7 @@ Any request that you send with the modified `openai_api_base` (see below) and Re In addition to setting the `openai_api_base` and Remembrall API key via `x-gp-api-key`, you should specify a UID to maintain memory for. This will usually be a unique user identifier (like email). ```python -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI chat_model = ChatOpenAI(openai_api_base="https://remembrall.dev/api/openai/v1", model_kwargs={ "headers":{ @@ -40,7 +40,7 @@ print(chat_model.predict("What is my favorite color?")) First, create a document context in the [Remembrall dashboard](https://remembrall.dev/dashboard/spells). Paste in the document texts or upload documents as PDFs to be processed. Save the Document Context ID and insert it as shown below. ```python -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI chat_model = ChatOpenAI(openai_api_base="https://remembrall.dev/api/openai/v1", model_kwargs={ "headers":{ diff --git a/docs/docs/integrations/memory/sqlite.ipynb b/docs/docs/integrations/memory/sqlite.ipynb index 09b9ca4a892..8a4be179741 100644 --- a/docs/docs/integrations/memory/sqlite.ipynb +++ b/docs/docs/integrations/memory/sqlite.ipynb @@ -34,10 +34,10 @@ "outputs": [], "source": [ "from langchain.chains import ConversationChain\n", - "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationEntityMemory\n", "from langchain.memory.entity import SQLiteEntityStore\n", - "from langchain.memory.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE" + "from langchain.memory.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/integrations/memory/streamlit_chat_message_history.ipynb b/docs/docs/integrations/memory/streamlit_chat_message_history.ipynb index 8c865f921db..078d107ae40 100644 --- a/docs/docs/integrations/memory/streamlit_chat_message_history.ipynb +++ b/docs/docs/integrations/memory/streamlit_chat_message_history.ipynb @@ -83,8 +83,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "\n", "template = \"\"\"You are an AI chatbot having a conversation with a human.\n", "\n", diff --git a/docs/docs/integrations/memory/xata_chat_message_history.ipynb b/docs/docs/integrations/memory/xata_chat_message_history.ipynb index 48fd22ce714..e058d185ac3 100644 --- a/docs/docs/integrations/memory/xata_chat_message_history.ipynb +++ b/docs/docs/integrations/memory/xata_chat_message_history.ipynb @@ -154,8 +154,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.vectorstores.xata import XataVectorStore\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()\n", "\n", @@ -220,7 +220,7 @@ "source": [ "from 
langchain.agents import AgentType, initialize_agent\n", "from langchain.agents.agent_toolkits import create_retriever_tool\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "tool = create_retriever_tool(\n", " vector_store.as_retriever(),\n", diff --git a/docs/docs/integrations/memory/zep_memory.ipynb b/docs/docs/integrations/memory/zep_memory.ipynb index 1e90ee93a2a..08da5ef96cf 100644 --- a/docs/docs/integrations/memory/zep_memory.ipynb +++ b/docs/docs/integrations/memory/zep_memory.ipynb @@ -50,11 +50,11 @@ "from uuid import uuid4\n", "\n", "from langchain.agents import AgentType, Tool, initialize_agent\n", - "from langchain.llms import OpenAI\n", "from langchain.memory import ZepMemory\n", "from langchain.retrievers import ZepRetriever\n", "from langchain.schema import AIMessage, HumanMessage\n", "from langchain.utilities import WikipediaAPIWrapper\n", + "from langchain_community.llms import OpenAI\n", "\n", "# Set this to your Zep server URL\n", "ZEP_API_URL = \"http://localhost:8000\"\n", diff --git a/docs/docs/integrations/platforms/anthropic.mdx b/docs/docs/integrations/platforms/anthropic.mdx index 06b00efa945..31eae89cba6 100644 --- a/docs/docs/integrations/platforms/anthropic.mdx +++ b/docs/docs/integrations/platforms/anthropic.mdx @@ -52,7 +52,7 @@ This will return a completion like this `A gummy bear!` instead of a whole new a You can import this wrapper with the following code: ``` -from langchain.chat_models import ChatAnthropic +from langchain_community.chat_models import ChatAnthropic model = ChatAnthropic() ``` @@ -120,7 +120,7 @@ This `Anthropic` wrapper is subclassed from `LLM`. We can import it with: ``` -from langchain.llms import Anthropic +from langchain_community.llms import Anthropic model = Anthropic() ``` diff --git a/docs/docs/integrations/platforms/aws.mdx b/docs/docs/integrations/platforms/aws.mdx index cb9d9c1d048..2f374da8dd6 100644 --- a/docs/docs/integrations/platforms/aws.mdx +++ b/docs/docs/integrations/platforms/aws.mdx @@ -20,7 +20,7 @@ The `LangChain` integrations related to [Amazon AWS](https://aws.amazon.com/) pl See a [usage example](/docs/integrations/llms/bedrock). ```python -from langchain.llms.bedrock import Bedrock +from langchain_community.llms.bedrock import Bedrock ``` ### Amazon API Gateway @@ -40,7 +40,7 @@ from langchain.llms.bedrock import Bedrock See a [usage example](/docs/integrations/llms/amazon_api_gateway). ```python -from langchain.llms import AmazonAPIGateway +from langchain_community.llms import AmazonAPIGateway ``` ### SageMaker Endpoint @@ -53,8 +53,8 @@ We use `SageMaker` to host our model and expose it as the `SageMaker Endpoint`. See a [usage example](/docs/integrations/llms/sagemaker). ```python -from langchain.llms import SagemakerEndpoint -from langchain.llms.sagemaker_endpoint import LLMContentHandler +from langchain_community.llms import SagemakerEndpoint +from langchain_community.llms.sagemaker_endpoint import LLMContentHandler ``` ## Chat models @@ -64,7 +64,7 @@ from langchain.llms.sagemaker_endpoint import LLMContentHandler See a [usage example](/docs/integrations/chat/bedrock). ```python -from langchain.chat_models import BedrockChat +from langchain_community.chat_models import BedrockChat ``` ## Text Embedding Models @@ -73,15 +73,15 @@ from langchain.chat_models import BedrockChat See a [usage example](/docs/integrations/text_embedding/bedrock). 
```python -from langchain.embeddings import BedrockEmbeddings +from langchain_community.embeddings import BedrockEmbeddings ``` ### SageMaker Endpoint See a [usage example](/docs/integrations/text_embedding/sagemaker-endpoint). ```python -from langchain.embeddings import SagemakerEndpointEmbeddings -from langchain.llms.sagemaker_endpoint import ContentHandlerBase +from langchain_community.embeddings import SagemakerEndpointEmbeddings +from langchain_community.llms.sagemaker_endpoint import ContentHandlerBase ``` ## Chains diff --git a/docs/docs/integrations/platforms/google.mdx b/docs/docs/integrations/platforms/google.mdx index 350ff563bae..314128af8ef 100644 --- a/docs/docs/integrations/platforms/google.mdx +++ b/docs/docs/integrations/platforms/google.mdx @@ -66,7 +66,7 @@ pip install google-cloud-aiplatform See a [usage example](/docs/integrations/chat/google_vertex_ai_palm). ```python -from langchain.chat_models import ChatVertexAI +from langchain_community.chat_models import ChatVertexAI ``` ## Document Loaders @@ -102,7 +102,7 @@ pip install google-cloud-aiplatform See a [usage example](/docs/integrations/llms/google_vertex_ai_palm). ```python -from langchain.llms import VertexAI +from langchain_community.llms import VertexAI ``` ### Model Garden @@ -118,7 +118,7 @@ pip install google-cloud-aiplatform See a [usage example](/docs/integrations/llms/google_vertex_ai_palm#vertex-model-garden). ```python -from langchain.llms import VertexAIModelGarden +from langchain_community.llms import VertexAIModelGarden ``` diff --git a/docs/docs/integrations/platforms/huggingface.mdx b/docs/docs/integrations/platforms/huggingface.mdx index 5f768c6d5bf..4d215f3f3e1 100644 --- a/docs/docs/integrations/platforms/huggingface.mdx +++ b/docs/docs/integrations/platforms/huggingface.mdx @@ -21,7 +21,7 @@ pip install huggingface_hub See a [usage example](/docs/integrations/llms/huggingface_hub). ```python -from langchain.llms import HuggingFaceHub +from langchain_community.llms import HuggingFaceHub ``` ### Hugging Face Local Pipelines @@ -37,7 +37,7 @@ pip install transformers See a [usage example](/docs/integrations/llms/huggingface_pipelines). ```python -from langchain.llms.huggingface_pipeline import HuggingFacePipeline +from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline ``` ### Hugging Face TextGen Inference @@ -55,7 +55,7 @@ pip install text_generation See a [usage example](/docs/integrations/llms/huggingface_textgen_inference). ```python -from langchain.llms import HuggingFaceTextGenInference +from langchain_community.llms import HuggingFaceTextGenInference ``` @@ -105,14 +105,14 @@ pip install sentence_transformers See a [usage example](/docs/integrations/text_embedding/huggingfacehub). ```python -from langchain.embeddings import HuggingFaceEmbeddings +from langchain_community.embeddings import HuggingFaceEmbeddings ``` #### HuggingFaceInstructEmbeddings See a [usage example](/docs/integrations/text_embedding/instruct_embeddings). ```python -from langchain.embeddings import HuggingFaceInstructEmbeddings +from langchain_community.embeddings import HuggingFaceInstructEmbeddings ``` #### HuggingFaceBgeEmbeddings @@ -123,7 +123,7 @@ from langchain.embeddings import HuggingFaceInstructEmbeddings See a [usage example](/docs/integrations/text_embedding/bge_huggingface). 
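(For orientation alongside this hunk: a minimal sketch of the migrated class in use. The model name and encode kwargs below are illustrative defaults, not values this changeset prescribes.)

```python
from langchain_community.embeddings import HuggingFaceBgeEmbeddings

# Assumed example values: "BAAI/bge-small-en" is a commonly used BGE
# checkpoint, and normalizing embeddings is the usual recommendation
# when scoring with cosine similarity.
embeddings = HuggingFaceBgeEmbeddings(
    model_name="BAAI/bge-small-en",
    encode_kwargs={"normalize_embeddings": True},
)
vector = embeddings.embed_query("What is BGE?")
```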
```python -from langchain.embeddings import HuggingFaceBgeEmbeddings +from langchain_community.embeddings import HuggingFaceBgeEmbeddings ``` diff --git a/docs/docs/integrations/platforms/microsoft.mdx b/docs/docs/integrations/platforms/microsoft.mdx index 2da31c143ae..153d19ac068 100644 --- a/docs/docs/integrations/platforms/microsoft.mdx +++ b/docs/docs/integrations/platforms/microsoft.mdx @@ -26,7 +26,7 @@ See a [usage example](/docs/integrations/chat/azure_chat_openai) ```python -from langchain.chat_models import AzureChatOpenAI +from langchain_community.chat_models import AzureChatOpenAI ``` ## Text Embedding Models @@ -35,7 +35,7 @@ from langchain.chat_models import AzureChatOpenAI See a [usage example](/docs/integrations/text_embedding/azureopenai) ```python -from langchain.embeddings import AzureOpenAIEmbeddings +from langchain_community.embeddings import AzureOpenAIEmbeddings ``` ## LLMs @@ -44,7 +44,7 @@ from langchain.embeddings import AzureOpenAIEmbeddings See a [usage example](/docs/integrations/llms/azure_openai_example). ```python -from langchain.llms import AzureOpenAI +from langchain_community.llms import AzureOpenAI ``` ## Document loaders diff --git a/docs/docs/integrations/platforms/openai.mdx b/docs/docs/integrations/platforms/openai.mdx index 5ca287f3f24..4c5f2e70cd4 100644 --- a/docs/docs/integrations/platforms/openai.mdx +++ b/docs/docs/integrations/platforms/openai.mdx @@ -29,12 +29,12 @@ pip install tiktoken See a [usage example](/docs/integrations/llms/openai). ```python -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI ``` If you are using a model hosted on `Azure`, you should use different wrapper for that: ```python -from langchain.llms import AzureOpenAI +from langchain_community.llms import AzureOpenAI ``` For a more detailed walkthrough of the `Azure` wrapper, see [here](/docs/integrations/llms/azure_openai_example) @@ -44,12 +44,12 @@ For a more detailed walkthrough of the `Azure` wrapper, see [here](/docs/integra See a [usage example](/docs/integrations/chat/openai). 
```python -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI ``` If you are using a model hosted on `Azure`, you should use different wrapper for that: ```python -from langchain.llms import AzureChatOpenAI +from langchain_community.chat_models import AzureChatOpenAI ``` For a more detailed walkthrough of the `Azure` wrapper, see [here](/docs/integrations/chat/azure_chat_openai) @@ -59,7 +59,7 @@ For a more detailed walkthrough of the `Azure` wrapper, see [here](/docs/integra ### Text Embedding Models See a [usage example](/docs/integrations/text_embedding/openai) ```python -from langchain.embeddings import OpenAIEmbeddings +from langchain_community.embeddings import OpenAIEmbeddings ``` diff --git a/docs/docs/integrations/providers/ai21.mdx b/docs/docs/integrations/providers/ai21.mdx index fb675ab5668..6d9e3abfa61 100644 --- a/docs/docs/integrations/providers/ai21.mdx +++ b/docs/docs/integrations/providers/ai21.mdx @@ -12,5 +12,5 @@ It is broken into two parts: installation and setup, and then references to spec There exists an AI21 LLM wrapper, which you can access with ```python -from langchain.llms import AI21 +from langchain_community.llms import AI21 ``` diff --git a/docs/docs/integrations/providers/aim_tracking.ipynb b/docs/docs/integrations/providers/aim_tracking.ipynb index fda118a40cc..9f094db78fd 100644 --- a/docs/docs/integrations/providers/aim_tracking.ipynb +++ b/docs/docs/integrations/providers/aim_tracking.ipynb @@ -66,7 +66,7 @@ "from datetime import datetime\n", "\n", "from langchain.callbacks import AimCallbackHandler, StdOutCallbackHandler\n", - "from langchain.llms import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/integrations/providers/aleph_alpha.mdx b/docs/docs/integrations/providers/aleph_alpha.mdx index edb3813670a..4f8a5d0e086 100644 --- a/docs/docs/integrations/providers/aleph_alpha.mdx +++ b/docs/docs/integrations/providers/aleph_alpha.mdx @@ -24,7 +24,7 @@ ALEPH_ALPHA_API_KEY = getpass() See a [usage example](/docs/integrations/llms/aleph_alpha). ```python -from langchain.llms import AlephAlpha +from langchain_community.llms import AlephAlpha ``` ## Text Embedding Models @@ -32,5 +32,5 @@ from langchain.llms import AlephAlpha See a [usage example](/docs/integrations/text_embedding/aleph_alpha). ```python -from langchain.embeddings import AlephAlphaSymmetricSemanticEmbedding, AlephAlphaAsymmetricSemanticEmbedding +from langchain_community.embeddings import AlephAlphaSymmetricSemanticEmbedding, AlephAlphaAsymmetricSemanticEmbedding ``` diff --git a/docs/docs/integrations/providers/alibaba_cloud.mdx b/docs/docs/integrations/providers/alibaba_cloud.mdx index e2ccdc08b13..f210ce44deb 100644 --- a/docs/docs/integrations/providers/alibaba_cloud.mdx +++ b/docs/docs/integrations/providers/alibaba_cloud.mdx @@ -15,7 +15,7 @@ See [installation instructions and a usage example](/docs/integrations/chat/alibaba_cloud_pai_eas). 
```python -from langchain.chat_models import PaiEasChatEndpoint +from langchain_community.chat_models import PaiEasChatEndpoint ``` ## Vectorstore diff --git a/docs/docs/integrations/providers/anyscale.mdx b/docs/docs/integrations/providers/anyscale.mdx index 4d98dd31f03..a3b8c4cc4e3 100644 --- a/docs/docs/integrations/providers/anyscale.mdx +++ b/docs/docs/integrations/providers/anyscale.mdx @@ -13,5 +13,5 @@ It is broken into two parts: installation and setup, and then references to spec There exists an Anyscale LLM wrapper, which you can access with ```python -from langchain.llms import Anyscale +from langchain_community.llms import Anyscale ``` diff --git a/docs/docs/integrations/providers/arthur_tracking.ipynb b/docs/docs/integrations/providers/arthur_tracking.ipynb index 10deb5dd8b5..bbf042f000a 100644 --- a/docs/docs/integrations/providers/arthur_tracking.ipynb +++ b/docs/docs/integrations/providers/arthur_tracking.ipynb @@ -28,8 +28,8 @@ "source": [ "from langchain.callbacks import ArthurCallbackHandler\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.schema import HumanMessage" + "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/providers/awadb.md b/docs/docs/integrations/providers/awadb.md index be6d4d66fe1..01a59147172 100644 --- a/docs/docs/integrations/providers/awadb.md +++ b/docs/docs/integrations/providers/awadb.md @@ -22,7 +22,7 @@ See a [usage example](/docs/integrations/vectorstores/awadb). ## Text Embedding Model ```python -from langchain.embeddings import AwaEmbeddings +from langchain_community.embeddings import AwaEmbeddings ``` See a [usage example](/docs/integrations/text_embedding/awadb). diff --git a/docs/docs/integrations/providers/bananadev.mdx b/docs/docs/integrations/providers/bananadev.mdx index ee7992be74c..ebc39bcf560 100644 --- a/docs/docs/integrations/providers/bananadev.mdx +++ b/docs/docs/integrations/providers/bananadev.mdx @@ -62,7 +62,7 @@ This example is from the `app.py` file in [CodeLlama-7B-Instruct-GPTQ](https://g Within Langchain, there exists a Banana LLM wrapper, which you can access with ```python -from langchain.llms import Banana +from langchain_community.llms import Banana ``` You need to provide a model key and model url slug, which you can get from the model's details page in the [Banana.dev dashboard](https://app.banana.dev). diff --git a/docs/docs/integrations/providers/beam.mdx b/docs/docs/integrations/providers/beam.mdx index ec5ac205c56..329e22276a2 100644 --- a/docs/docs/integrations/providers/beam.mdx +++ b/docs/docs/integrations/providers/beam.mdx @@ -18,7 +18,7 @@ It is broken into two parts: installation and setup, and then references to spec There exists a Beam LLM wrapper, which you can access with ```python -from langchain.llms.beam import Beam +from langchain_community.llms.beam import Beam ``` ## Define your Beam app. 
@@ -64,7 +64,7 @@ response = llm._call("Running machine learning on a remote GPU") An example script which deploys the model and calls it would be: ```python -from langchain.llms.beam import Beam +from langchain_community.llms.beam import Beam import time llm = Beam(model_name="gpt2", diff --git a/docs/docs/integrations/providers/bittensor.mdx b/docs/docs/integrations/providers/bittensor.mdx index 501a1b52f03..b818ae885ef 100644 --- a/docs/docs/integrations/providers/bittensor.mdx +++ b/docs/docs/integrations/providers/bittensor.mdx @@ -14,7 +14,7 @@ It is broken into two parts: installation and setup, and then examples of NIBitt There exists a NIBittensor LLM wrapper, which you can access with: ```python -from langchain.llms import NIBittensorLLM +from langchain_community.llms import NIBittensorLLM ``` It provides a unified interface for all models: diff --git a/docs/docs/integrations/providers/cerebriumai.mdx b/docs/docs/integrations/providers/cerebriumai.mdx index a92312be868..733b5f9a0a5 100644 --- a/docs/docs/integrations/providers/cerebriumai.mdx +++ b/docs/docs/integrations/providers/cerebriumai.mdx @@ -13,5 +13,5 @@ It is broken into two parts: installation and setup, and then references to spec There exists an CerebriumAI LLM wrapper, which you can access with ```python -from langchain.llms import CerebriumAI +from langchain_community.llms import CerebriumAI ``` \ No newline at end of file diff --git a/docs/docs/integrations/providers/clarifai.mdx b/docs/docs/integrations/providers/clarifai.mdx index b54e096a58d..56c702c1479 100644 --- a/docs/docs/integrations/providers/clarifai.mdx +++ b/docs/docs/integrations/providers/clarifai.mdx @@ -21,7 +21,7 @@ Also note that given there are many models for images, video, text and audio und To find the selection of LLMs in the Clarifai platform you can select the text to text model type [here](https://clarifai.com/explore/models?filterData=%5B%7B%22field%22%3A%22model_type_id%22%2C%22value%22%3A%5B%22text-to-text%22%5D%7D%5D&page=1&perPage=24). ```python -from langchain.llms import Clarifai +from langchain_community.llms import Clarifai llm = Clarifai(pat=CLARIFAI_PAT, user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID) ``` @@ -34,7 +34,7 @@ To find the selection of text embeddings models in the Clarifai platform you can There is a Clarifai Embedding model in LangChain, which you can access with: ```python -from langchain.embeddings import ClarifaiEmbeddings +from langchain_community.embeddings import ClarifaiEmbeddings embeddings = ClarifaiEmbeddings(pat=CLARIFAI_PAT, user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID) ``` For more details, the docs on the Clarifai Embeddings wrapper provide a [detailed walkthrough](/docs/integrations/text_embedding/clarifai). 
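All of these hunks apply the same mechanical rewrite: the class keeps its name and only its import path moves from `langchain.*` to the matching `langchain_community.*` module. A minimal sketch of a compatibility shim for code that must run on both sides of the split (illustrative only, not part of this changeset):

```python
# Prefer the new langchain_community location; fall back to the
# legacy langchain path on installs that predate the package split.
try:
    from langchain_community.llms import OpenAI
except ImportError:
    from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
```

The same try/except pattern applies to the `chat_models` and `embeddings` moves shown throughout this diff.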
diff --git a/docs/docs/integrations/providers/clearml_tracking.ipynb b/docs/docs/integrations/providers/clearml_tracking.ipynb index 0b0050a851c..3362826e11b 100644 --- a/docs/docs/integrations/providers/clearml_tracking.ipynb +++ b/docs/docs/integrations/providers/clearml_tracking.ipynb @@ -106,7 +106,7 @@ ], "source": [ "from langchain.callbacks import StdOutCallbackHandler\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "# Setup and use the ClearML Callback\n", "clearml_callback = ClearMLCallbackHandler(\n", diff --git a/docs/docs/integrations/providers/cloudflare.mdx b/docs/docs/integrations/providers/cloudflare.mdx index bbc124bb0f2..ad1223ddc15 100644 --- a/docs/docs/integrations/providers/cloudflare.mdx +++ b/docs/docs/integrations/providers/cloudflare.mdx @@ -14,5 +14,5 @@ See [installation instructions and usage example](/docs/integrations/text_embedding/cloudflare_workersai). ```python -from langchain.embeddings.cloudflare_workersai import CloudflareWorkersAIEmbeddings +from langchain_community.embeddings.cloudflare_workersai import CloudflareWorkersAIEmbeddings ``` diff --git a/docs/docs/integrations/providers/cnosdb.mdx b/docs/docs/integrations/providers/cnosdb.mdx index d93f2e0f4c3..29c5362f065 100644 --- a/docs/docs/integrations/providers/cnosdb.mdx +++ b/docs/docs/integrations/providers/cnosdb.mdx @@ -37,7 +37,7 @@ db = SQLDatabase.from_cnosdb() ``` ```python # Creating a OpenAI Chat LLM Wrapper -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo") ``` diff --git a/docs/docs/integrations/providers/cohere.mdx b/docs/docs/integrations/providers/cohere.mdx index de409d08f80..04ed089e609 100644 --- a/docs/docs/integrations/providers/cohere.mdx +++ b/docs/docs/integrations/providers/cohere.mdx @@ -15,10 +15,10 @@ Get a [Cohere api key](https://dashboard.cohere.ai/) and set it as an environmen |API|description|Endpoint docs|Import|Example usage| |---|---|---|---|---| -|Chat|Build chat bots|[chat](https://docs.cohere.com/reference/chat)|`from langchain.chat_models import ChatCohere`|[cohere.ipynb](/docs/docs/integrations/chat/cohere.ipynb)| -|LLM|Generate text|[generate](https://docs.cohere.com/reference/generate)|`from langchain.llms import Cohere`|[cohere.ipynb](/docs/docs/integrations/llms/cohere.ipynb)| +|Chat|Build chat bots|[chat](https://docs.cohere.com/reference/chat)|`from langchain_community.chat_models import ChatCohere`|[cohere.ipynb](/docs/docs/integrations/chat/cohere.ipynb)| +|LLM|Generate text|[generate](https://docs.cohere.com/reference/generate)|`from langchain_community.llms import Cohere`|[cohere.ipynb](/docs/docs/integrations/llms/cohere.ipynb)| |RAG Retriever|Connect to external data sources|[chat + rag](https://docs.cohere.com/reference/chat)|`from langchain.retrievers import CohereRagRetriever`|[cohere.ipynb](/docs/docs/integrations/retrievers/cohere.ipynb)| -|Text Embedding|Embed strings to vectors|[embed](https://docs.cohere.com/reference/embed)|`from langchain.embeddings import CohereEmbeddings`|[cohere.ipynb](/docs/docs/integrations/text_embedding/cohere.ipynb)| +|Text Embedding|Embed strings to vectors|[embed](https://docs.cohere.com/reference/embed)|`from langchain_community.embeddings import CohereEmbeddings`|[cohere.ipynb](/docs/docs/integrations/text_embedding/cohere.ipynb)| |Rerank Retriever|Rank strings based on relevance|[rerank](https://docs.cohere.com/reference/rerank)|`from 
langchain.retrievers.document_compressors import CohereRerank`|[cohere.ipynb](/docs/docs/integrations/retrievers/cohere-reranker.ipynb)| ## Quick copy examples @@ -26,7 +26,7 @@ Get a [Cohere api key](https://dashboard.cohere.ai/) and set it as an environmen ### Chat ```python -from langchain.chat_models import ChatCohere +from langchain_community.chat_models import ChatCohere from langchain.schema import HumanMessage chat = ChatCohere() messages = [HumanMessage(content="knock knock")] @@ -37,7 +37,7 @@ print(chat(messages)) ```python -from langchain.llms import Cohere +from langchain_community.llms import Cohere llm = Cohere(model="command") print(llm.invoke("Come up with a pet name")) @@ -47,7 +47,7 @@ print(llm.invoke("Come up with a pet name")) ### RAG Retriever ```python -from langchain.chat_models import ChatCohere +from langchain_community.chat_models import ChatCohere from langchain.retrievers import CohereRagRetriever from langchain_core.documents import Document @@ -58,7 +58,7 @@ print(rag.get_relevant_documents("What is cohere ai?")) ### Text Embedding ```python -from langchain.chat_models import ChatCohere +from langchain_community.chat_models import ChatCohere from langchain.retrievers import CohereRagRetriever from langchain_core.documents import Document diff --git a/docs/docs/integrations/providers/comet_tracking.ipynb b/docs/docs/integrations/providers/comet_tracking.ipynb index cd119f06f88..7fb7c974147 100644 --- a/docs/docs/integrations/providers/comet_tracking.ipynb +++ b/docs/docs/integrations/providers/comet_tracking.ipynb @@ -119,7 +119,7 @@ "outputs": [], "source": [ "from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "comet_callback = CometCallbackHandler(\n", " project_name=\"comet-example-langchain\",\n", @@ -151,8 +151,8 @@ "source": [ "from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "\n", "comet_callback = CometCallbackHandler(\n", " complexity_metrics=True,\n", @@ -189,7 +189,7 @@ "source": [ "from langchain.agents import initialize_agent, load_tools\n", "from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "comet_callback = CometCallbackHandler(\n", " project_name=\"comet-example-langchain\",\n", @@ -248,8 +248,8 @@ "source": [ "from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "from rouge_score import rouge_scorer\n", "\n", "\n", diff --git a/docs/docs/integrations/providers/ctransformers.mdx b/docs/docs/integrations/providers/ctransformers.mdx index 35dc7725207..a787a7ec2fc 100644 --- a/docs/docs/integrations/providers/ctransformers.mdx +++ b/docs/docs/integrations/providers/ctransformers.mdx @@ -15,7 +15,7 @@ It is broken into two parts: installation and setup, and then references to spec There exists a CTransformers LLM wrapper, which you can access with: ```python -from langchain.llms import CTransformers +from langchain_community.llms import CTransformers ``` It provides a unified 
interface for all models: diff --git a/docs/docs/integrations/providers/databricks.md b/docs/docs/integrations/providers/databricks.md index 96911ab02f5..db419069891 100644 --- a/docs/docs/integrations/providers/databricks.md +++ b/docs/docs/integrations/providers/databricks.md @@ -30,7 +30,7 @@ Databricks External Models [Databricks External Models](https://docs.databricks.com/generative-ai/external-models/index.html) is a service that is designed to streamline the usage and management of various large language model (LLM) providers, such as OpenAI and Anthropic, within an organization. It offers a high-level interface that simplifies the interaction with these services by providing a unified endpoint to handle specific LLM related requests. The following example creates an endpoint that serves OpenAI's GPT-4 model and generates a chat response from it: ```python -from langchain.chat_models import ChatDatabricks +from langchain_community.chat_models import ChatDatabricks from langchain_core.messages import HumanMessage from mlflow.deployments import get_deploy_client @@ -66,7 +66,7 @@ Databricks Foundation Model APIs [Databricks Foundation Model APIs](https://docs.databricks.com/machine-learning/foundation-models/index.html) allow you to access and query state-of-the-art open source models from dedicated serving endpoints. With Foundation Model APIs, developers can quickly and easily build applications that leverage a high-quality generative AI model without maintaining their own model deployment. The following example uses the `databricks-bge-large-en` endpoint to generate embeddings from text: ```python -from langchain.embeddings import DatabricksEmbeddings +from langchain_community.embeddings import DatabricksEmbeddings embeddings = DatabricksEmbeddings(endpoint="databricks-bge-large-en") diff --git a/docs/docs/integrations/providers/deepinfra.mdx b/docs/docs/integrations/providers/deepinfra.mdx index da8787fc104..d370862aa65 100644 --- a/docs/docs/integrations/providers/deepinfra.mdx +++ b/docs/docs/integrations/providers/deepinfra.mdx @@ -24,7 +24,7 @@ You can view a [list of request and response parameters](https://deepinfra.com/m There exists an DeepInfra LLM wrapper, which you can access with ```python -from langchain.llms import DeepInfra +from langchain_community.llms import DeepInfra ``` ### Embeddings @@ -32,5 +32,5 @@ from langchain.llms import DeepInfra There is also an DeepInfra Embeddings wrapper, you can access with ```python -from langchain.embeddings import DeepInfraEmbeddings +from langchain_community.embeddings import DeepInfraEmbeddings ``` diff --git a/docs/docs/integrations/providers/deepsparse.mdx b/docs/docs/integrations/providers/deepsparse.mdx index 1682f757fe0..aa6905a1eb0 100644 --- a/docs/docs/integrations/providers/deepsparse.mdx +++ b/docs/docs/integrations/providers/deepsparse.mdx @@ -15,7 +15,7 @@ It is broken into two parts: installation and setup, and then examples of DeepSp There exists a DeepSparse LLM wrapper, which you can access with: ```python -from langchain.llms import DeepSparse +from langchain_community.llms import DeepSparse ``` It provides a unified interface for all models: diff --git a/docs/docs/integrations/providers/fireworks.md b/docs/docs/integrations/providers/fireworks.md index 86735092325..ca77f6bf852 100644 --- a/docs/docs/integrations/providers/fireworks.md +++ b/docs/docs/integrations/providers/fireworks.md @@ -36,7 +36,7 @@ Fireworks integrates with Langchain through the LLM module. 
In this example, we will work with the llama-v2-13b-chat model. ```python -from langchain.llms.fireworks import Fireworks +from langchain_community.llms.fireworks import Fireworks llm = Fireworks( fireworks_api_key="", diff --git a/docs/docs/integrations/providers/flyte.mdx b/docs/docs/integrations/providers/flyte.mdx index dcb521e8b47..04bc45cfb32 100644 --- a/docs/docs/integrations/providers/flyte.mdx +++ b/docs/docs/integrations/providers/flyte.mdx @@ -28,7 +28,7 @@ from flytekit import ImageSpec, task from langchain.agents import AgentType, initialize_agent, load_tools from langchain.callbacks import FlyteCallbackHandler from langchain.chains import LLMChain -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI from langchain.prompts import PromptTemplate from langchain.schema import HumanMessage ``` diff --git a/docs/docs/integrations/providers/forefrontai.mdx b/docs/docs/integrations/providers/forefrontai.mdx index c738c62d6f1..a0045f75a41 100644 --- a/docs/docs/integrations/providers/forefrontai.mdx +++ b/docs/docs/integrations/providers/forefrontai.mdx @@ -12,5 +12,5 @@ It is broken into two parts: installation and setup, and then references to spec There exists a ForefrontAI LLM wrapper, which you can access with ```python -from langchain.llms import ForefrontAI +from langchain_community.llms import ForefrontAI ``` \ No newline at end of file diff --git a/docs/docs/integrations/providers/google_serper.mdx b/docs/docs/integrations/providers/google_serper.mdx index b44065372dd..f091d95ebe5 100644 --- a/docs/docs/integrations/providers/google_serper.mdx +++ b/docs/docs/integrations/providers/google_serper.mdx @@ -21,7 +21,7 @@ You can use it as part of a Self Ask chain: ```python from langchain.utilities import GoogleSerperAPIWrapper -from langchain.llms.openai import OpenAI +from langchain_community.llms.openai import OpenAI from langchain.agents import initialize_agent, Tool from langchain.agents import AgentType diff --git a/docs/docs/integrations/providers/gooseai.mdx b/docs/docs/integrations/providers/gooseai.mdx index f0d93fa0815..49909481a00 100644 --- a/docs/docs/integrations/providers/gooseai.mdx +++ b/docs/docs/integrations/providers/gooseai.mdx @@ -19,5 +19,5 @@ os.environ["GOOSEAI_API_KEY"] = "YOUR_API_KEY" There exists a GooseAI LLM wrapper, which you can access with: ```python -from langchain.llms import GooseAI +from langchain_community.llms import GooseAI ``` \ No newline at end of file diff --git a/docs/docs/integrations/providers/gpt4all.mdx b/docs/docs/integrations/providers/gpt4all.mdx index 01cc169803b..1050015961a 100644 --- a/docs/docs/integrations/providers/gpt4all.mdx +++ b/docs/docs/integrations/providers/gpt4all.mdx @@ -21,7 +21,7 @@ wget https://gpt4all.io/models/gguf/mistral-7b-openorca.Q4_0.gguf -O models/mist To use the GPT4All wrapper, you need to provide the path to the pre-trained model file and the model's configuration. ```python -from langchain.llms import GPT4All +from langchain_community.llms import GPT4All # Instantiate the model. Callbacks support token-wise streaming model = GPT4All(model="./models/mistral-7b-openorca.Q4_0.gguf", n_threads=8) @@ -35,7 +35,7 @@ You can also customize the generation parameters, such as n_predict, temp, top_p To stream the model's predictions, add in a CallbackManager. 
```python -from langchain.llms import GPT4All +from langchain_community.llms import GPT4All from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler # There are many CallbackHandlers supported, such as diff --git a/docs/docs/integrations/providers/gradient.mdx b/docs/docs/integrations/providers/gradient.mdx index d143d595293..37cd04e91ec 100644 --- a/docs/docs/integrations/providers/gradient.mdx +++ b/docs/docs/integrations/providers/gradient.mdx @@ -15,13 +15,13 @@ There exists an Gradient LLM wrapper, which you can access with See a [usage example](/docs/integrations/llms/gradient). ```python -from langchain.llms import GradientLLM +from langchain_community.llms import GradientLLM ``` ## Text Embedding Model There exists an Gradient Embedding model, which you can access with ```python -from langchain.embeddings import GradientEmbeddings +from langchain_community.embeddings import GradientEmbeddings ``` For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/gradient) diff --git a/docs/docs/integrations/providers/hazy_research.mdx b/docs/docs/integrations/providers/hazy_research.mdx index 5e04760f516..13cbda6b8ee 100644 --- a/docs/docs/integrations/providers/hazy_research.mdx +++ b/docs/docs/integrations/providers/hazy_research.mdx @@ -15,5 +15,5 @@ There exists an LLM wrapper around Hazy Research's `manifest` library. To use this wrapper: ```python -from langchain.llms.manifest import ManifestWrapper +from langchain_community.llms.manifest import ManifestWrapper ``` diff --git a/docs/docs/integrations/providers/helicone.mdx b/docs/docs/integrations/providers/helicone.mdx index 0e76b9604be..7adabba080a 100644 --- a/docs/docs/integrations/providers/helicone.mdx +++ b/docs/docs/integrations/providers/helicone.mdx @@ -23,7 +23,7 @@ Now head over to [helicone.ai](https://helicone.ai/onboarding?step=2) to create ## How to enable Helicone caching ```python -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI import openai openai.api_base = "https://oai.hconeai.com/v1" @@ -37,7 +37,7 @@ print(llm(text)) ## How to use Helicone custom properties ```python -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI import openai openai.api_base = "https://oai.hconeai.com/v1" diff --git a/docs/docs/integrations/providers/infinity.mdx b/docs/docs/integrations/providers/infinity.mdx index 8efef25aa2a..887a8584036 100644 --- a/docs/docs/integrations/providers/infinity.mdx +++ b/docs/docs/integrations/providers/infinity.mdx @@ -6,6 +6,6 @@ There exists an infinity Embedding model, which you can access with ```python -from langchain.embeddings import InfinityEmbeddings +from langchain_community.embeddings import InfinityEmbeddings ``` For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/infinity) diff --git a/docs/docs/integrations/providers/javelin_ai_gateway.mdx b/docs/docs/integrations/providers/javelin_ai_gateway.mdx index 7aae14365b4..7b1c4ede217 100644 --- a/docs/docs/integrations/providers/javelin_ai_gateway.mdx +++ b/docs/docs/integrations/providers/javelin_ai_gateway.mdx @@ -30,7 +30,7 @@ export JAVELIN_API_KEY=... 
```python from langchain.chains import LLMChain -from langchain.llms import JavelinAIGateway +from langchain_community.llms import JavelinAIGateway from langchain.prompts import PromptTemplate route_completions = "eng_dept03" @@ -51,8 +51,8 @@ print(result) ## Embeddings Example ```python -from langchain.embeddings import JavelinAIGatewayEmbeddings -from langchain.embeddings.openai import OpenAIEmbeddings +from langchain_community.embeddings import JavelinAIGatewayEmbeddings +from langchain_community.embeddings.openai import OpenAIEmbeddings embeddings = JavelinAIGatewayEmbeddings( gateway_uri="http://localhost:8000", @@ -65,7 +65,7 @@ print(embeddings.embed_documents(["hello"])) ## Chat Example ```python -from langchain.chat_models import ChatJavelinAIGateway +from langchain_community.chat_models import ChatJavelinAIGateway from langchain.schema import HumanMessage, SystemMessage messages = [ diff --git a/docs/docs/integrations/providers/jina.mdx b/docs/docs/integrations/providers/jina.mdx index 76420a890a0..057ace079fa 100644 --- a/docs/docs/integrations/providers/jina.mdx +++ b/docs/docs/integrations/providers/jina.mdx @@ -9,7 +9,7 @@ It is broken into two parts: installation and setup, and then references to spec There exists a Jina Embeddings wrapper, which you can access with ```python -from langchain.embeddings import JinaEmbeddings +from langchain_community.embeddings import JinaEmbeddings # you can pass jina_api_key; if none is passed, it will be taken from the `JINA_API_TOKEN` environment variable embeddings = JinaEmbeddings(jina_api_key='jina_**', model_name='jina-embeddings-v2-base-en') diff --git a/docs/docs/integrations/providers/langchain_decorators.mdx b/docs/docs/integrations/providers/langchain_decorators.mdx index b93db6bb4ef..bab28f2617d 100644 --- a/docs/docs/integrations/providers/langchain_decorators.mdx +++ b/docs/docs/integrations/providers/langchain_decorators.mdx @@ -91,7 +91,7 @@ def write_a_complicated_code(app_idea:str)->str: 3. 
Define the settings **directly in the decorator** ``` python -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI @llm_prompt( llm=OpenAI(temperature=0.7), diff --git a/docs/docs/integrations/providers/llamacpp.mdx b/docs/docs/integrations/providers/llamacpp.mdx index 53c29b6ff31..48b12913c2c 100644 --- a/docs/docs/integrations/providers/llamacpp.mdx +++ b/docs/docs/integrations/providers/llamacpp.mdx @@ -13,7 +13,7 @@ It is broken into two parts: installation and setup, and then references to spec There exists a LlamaCpp LLM wrapper, which you can access with ```python -from langchain.llms import LlamaCpp +from langchain_community.llms import LlamaCpp ``` For a more detailed walkthrough of this, see [this notebook](/docs/integrations/llms/llamacpp) @@ -21,6 +21,6 @@ For a more detailed walkthrough of this, see [this notebook](/docs/integrations/ There exists a LlamaCpp Embeddings wrapper, which you can access with ```python -from langchain.embeddings import LlamaCppEmbeddings +from langchain_community.embeddings import LlamaCppEmbeddings ``` For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/llamacpp) diff --git a/docs/docs/integrations/providers/log10.mdx b/docs/docs/integrations/providers/log10.mdx index 4f12b11ef86..b5fe4ce4f35 100644 --- a/docs/docs/integrations/providers/log10.mdx +++ b/docs/docs/integrations/providers/log10.mdx @@ -17,7 +17,7 @@ Log10 is an [open-source](https://github.com/log10-io/log10) proxiless LLM data Integration with log10 is a simple one-line `log10_callback` integration as shown below: ```python -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI from langchain.schema import HumanMessage from log10.langchain import Log10Callback @@ -40,9 +40,9 @@ llm = ChatOpenAI(model_name="gpt-3.5-turbo", callbacks=[log10_callback]) ## How to use tags with Log10 ```python -from langchain.llms import OpenAI -from langchain.chat_models import ChatAnthropic -from langchain.chat_models import ChatOpenAI +from langchain_community.llms import OpenAI +from langchain_community.chat_models import ChatAnthropic +from langchain_community.chat_models import ChatOpenAI from langchain.schema import HumanMessage from log10.langchain import Log10Callback @@ -74,7 +74,7 @@ You can also intermix direct OpenAI calls and Langchain LLM calls: import os from log10.load import log10, log10_session import openai -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI log10(openai) diff --git a/docs/docs/integrations/providers/minimax.mdx b/docs/docs/integrations/providers/minimax.mdx index 0f64856a18b..d1e08af0531 100644 --- a/docs/docs/integrations/providers/minimax.mdx +++ b/docs/docs/integrations/providers/minimax.mdx @@ -14,7 +14,7 @@ There exists a Minimax LLM wrapper, which you can access with See a [usage example](/docs/modules/model_io/llms/integrations/minimax). 
```python -from langchain.llms import Minimax +from langchain_community.llms import Minimax ``` ## Chat Models @@ -22,12 +22,12 @@ from langchain.llms import Minimax See a [usage example](/docs/modules/model_io/chat/integrations/minimax) ```python -from langchain.chat_models import MiniMaxChat +from langchain_community.chat_models import MiniMaxChat ``` ## Text Embedding Model There exists a Minimax Embedding model, which you can access with ```python -from langchain.embeddings import MiniMaxEmbeddings +from langchain_community.embeddings import MiniMaxEmbeddings ``` diff --git a/docs/docs/integrations/providers/mlflow.mdx b/docs/docs/integrations/providers/mlflow.mdx index 159a693cc09..5219f0b7a9c 100644 --- a/docs/docs/integrations/providers/mlflow.mdx +++ b/docs/docs/integrations/providers/mlflow.mdx @@ -58,7 +58,7 @@ See the [API documentation and examples](https://www.mlflow.org/docs/latest/pyth ```python import mlflow from langchain.chains import LLMChain, PromptTemplate -from langchain.llms import Mlflow +from langchain_community.llms import Mlflow llm = Mlflow( target_uri="http://127.0.0.1:5000", @@ -85,7 +85,7 @@ print(model.predict([{"adjective": "funny"}])) ## Embeddings Example ```python -from langchain.embeddings import MlflowEmbeddings +from langchain_community.embeddings import MlflowEmbeddings embeddings = MlflowEmbeddings( target_uri="http://127.0.0.1:5000", @@ -99,7 +99,7 @@ print(embeddings.embed_documents(["hello"])) ## Chat Example ```python -from langchain.chat_models import ChatMlflow +from langchain_community.chat_models import ChatMlflow from langchain.schema import HumanMessage, SystemMessage chat = ChatMlflow( diff --git a/docs/docs/integrations/providers/mlflow_ai_gateway.mdx b/docs/docs/integrations/providers/mlflow_ai_gateway.mdx index 3c716724c3c..dccabba4945 100644 --- a/docs/docs/integrations/providers/mlflow_ai_gateway.mdx +++ b/docs/docs/integrations/providers/mlflow_ai_gateway.mdx @@ -67,7 +67,7 @@ See the [API documentation and examples](https://www.mlflow.org/docs/latest/pyth ```python import mlflow from langchain.chains import LLMChain, PromptTemplate -from langchain.llms import MlflowAIGateway +from langchain_community.llms import MlflowAIGateway gateway = MlflowAIGateway( gateway_uri="http://127.0.0.1:5000", @@ -98,7 +98,7 @@ print(model.predict([{"adjective": "funny"}])) ## Embeddings Example ```python -from langchain.embeddings import MlflowAIGatewayEmbeddings +from langchain_community.embeddings import MlflowAIGatewayEmbeddings embeddings = MlflowAIGatewayEmbeddings( gateway_uri="http://127.0.0.1:5000", @@ -112,7 +112,7 @@ print(embeddings.embed_documents(["hello"])) ## Chat Example ```python -from langchain.chat_models import ChatMLflowAIGateway +from langchain_community.chat_models import ChatMLflowAIGateway from langchain.schema import HumanMessage, SystemMessage chat = ChatMLflowAIGateway( @@ -142,7 +142,7 @@ Please contact a Databricks representative to enroll in the preview. 
```python from langchain.chains import LLMChain from langchain.prompts import PromptTemplate -from langchain.llms import MlflowAIGateway +from langchain_community.llms import MlflowAIGateway gateway = MlflowAIGateway( gateway_uri="databricks", diff --git a/docs/docs/integrations/providers/mlflow_tracking.ipynb b/docs/docs/integrations/providers/mlflow_tracking.ipynb index 4403ce33029..4b4dffd03f8 100644 --- a/docs/docs/integrations/providers/mlflow_tracking.ipynb +++ b/docs/docs/integrations/providers/mlflow_tracking.ipynb @@ -78,7 +78,7 @@ "outputs": [], "source": [ "from langchain.callbacks import MlflowCallbackHandler\n", - "from langchain.llms import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/integrations/providers/modal.mdx b/docs/docs/integrations/providers/modal.mdx index 6d6854c92a7..7e02799d717 100644 --- a/docs/docs/integrations/providers/modal.mdx +++ b/docs/docs/integrations/providers/modal.mdx @@ -81,7 +81,7 @@ Your web endpoint will acquire a persistent URL under the `modal.run` domain. The `Modal` LLM wrapper class which will accept your deployed web endpoint's URL. ```python -from langchain.llms import Modal +from langchain_community.llms import Modal endpoint_url = "https://ecorp--custom-llm-endpoint.modal.run" # REPLACE ME with your deployed Modal web endpoint's URL diff --git a/docs/docs/integrations/providers/modelscope.mdx b/docs/docs/integrations/providers/modelscope.mdx index df6add2bb1b..34c421ea707 100644 --- a/docs/docs/integrations/providers/modelscope.mdx +++ b/docs/docs/integrations/providers/modelscope.mdx @@ -18,7 +18,7 @@ pip install modelscope ```python -from langchain.embeddings import ModelScopeEmbeddings +from langchain_community.embeddings import ModelScopeEmbeddings ``` For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/modelscope_hub) diff --git a/docs/docs/integrations/providers/motherduck.mdx b/docs/docs/integrations/providers/motherduck.mdx index a072dacf8b9..17ea1205e39 100644 --- a/docs/docs/integrations/providers/motherduck.mdx +++ b/docs/docs/integrations/providers/motherduck.mdx @@ -26,7 +26,7 @@ conn_str = f"duckdb:///md:{token}@my_db" You can use the SQLChain to query data in your Motherduck instance in natural language. ``` -from langchain.llms import OpenAI, SQLDatabase, SQLDatabaseChain +from langchain_community.llms import OpenAI, SQLDatabase, SQLDatabaseChain db = SQLDatabase.from_uri(conn_str) db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db, verbose=True) ``` diff --git a/docs/docs/integrations/providers/nlpcloud.mdx b/docs/docs/integrations/providers/nlpcloud.mdx index e401faeb5aa..f6d664833a1 100644 --- a/docs/docs/integrations/providers/nlpcloud.mdx +++ b/docs/docs/integrations/providers/nlpcloud.mdx @@ -19,7 +19,7 @@ pip install nlpcloud See a [usage example](/docs/integrations/llms/nlpcloud). 
```python -from langchain.llms import NLPCloud +from langchain_community.llms import NLPCloud ``` ## Text Embedding Models @@ -27,5 +27,5 @@ from langchain.llms import NLPCloud See a [usage example](/docs/integrations/text_embedding/nlp_cloud) ```python -from langchain.embeddings import NLPCloudEmbeddings +from langchain_community.embeddings import NLPCloudEmbeddings ``` diff --git a/docs/docs/integrations/providers/ollama.mdx b/docs/docs/integrations/providers/ollama.mdx index da10174a168..ba00e2a2469 100644 --- a/docs/docs/integrations/providers/ollama.mdx +++ b/docs/docs/integrations/providers/ollama.mdx @@ -21,7 +21,7 @@ To use, you should set up the environment variables `ANYSCALE_API_BASE` and ## LLM ```python -from langchain.llms import Ollama +from langchain_community.llms import Ollama ``` See the notebook example [here](/docs/integrations/llms/ollama). @@ -31,7 +31,7 @@ See the notebook example [here](/docs/integrations/llms/ollama). ### Chat Ollama ```python -from langchain.chat_models import ChatOllama +from langchain_community.chat_models import ChatOllama ``` See the notebook example [here](/docs/integrations/chat/ollama). @@ -47,7 +47,7 @@ See the notebook example [here](/docs/integrations/chat/ollama_functions). ## Embedding models ```python -from langchain.embeddings import OllamaEmbeddings +from langchain_community.embeddings import OllamaEmbeddings ``` See the notebook example [here](/docs/integrations/text_embedding/ollama). diff --git a/docs/docs/integrations/providers/openllm.mdx b/docs/docs/integrations/providers/openllm.mdx index 1f24af8ed22..92bdca1242c 100644 --- a/docs/docs/integrations/providers/openllm.mdx +++ b/docs/docs/integrations/providers/openllm.mdx @@ -27,7 +27,7 @@ There is a OpenLLM Wrapper which supports loading LLM in-process or accessing a remote OpenLLM server: ```python -from langchain.llms import OpenLLM +from langchain_community.llms import OpenLLM ``` ### Wrapper for OpenLLM server @@ -44,7 +44,7 @@ openllm start flan-t5 Wrapper usage: ```python -from langchain.llms import OpenLLM +from langchain_community.llms import OpenLLM llm = OpenLLM(server_url='http://localhost:3000') @@ -57,7 +57,7 @@ You can also use the OpenLLM wrapper to load LLM in current Python process for running inference. 
```python -from langchain.llms import OpenLLM +from langchain_community.llms import OpenLLM llm = OpenLLM(model_name="dolly-v2", model_id='databricks/dolly-v2-7b') diff --git a/docs/docs/integrations/providers/petals.mdx b/docs/docs/integrations/providers/petals.mdx index 2f6db15cb97..db85c3cfc80 100644 --- a/docs/docs/integrations/providers/petals.mdx +++ b/docs/docs/integrations/providers/petals.mdx @@ -13,5 +13,5 @@ It is broken into two parts: installation and setup, and then references to spec There exists an Petals LLM wrapper, which you can access with ```python -from langchain.llms import Petals +from langchain_community.llms import Petals ``` diff --git a/docs/docs/integrations/providers/pipelineai.mdx b/docs/docs/integrations/providers/pipelineai.mdx index eef57eb5b57..e13f6cffc5c 100644 --- a/docs/docs/integrations/providers/pipelineai.mdx +++ b/docs/docs/integrations/providers/pipelineai.mdx @@ -15,5 +15,5 @@ It is broken into two parts: installation and setup, and then references to spec There exists a PipelineAI LLM wrapper, which you can access with ```python -from langchain.llms import PipelineAI +from langchain_community.llms import PipelineAI ``` diff --git a/docs/docs/integrations/providers/portkey/index.md b/docs/docs/integrations/providers/portkey/index.md index daefe35a225..bed90b08586 100644 --- a/docs/docs/integrations/providers/portkey/index.md +++ b/docs/docs/integrations/providers/portkey/index.md @@ -20,7 +20,7 @@ To start, get your Portkey API key by [signing up here](https://app.portkey.ai/l For OpenAI, a simple integration with logging feature would look like this: ```python -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI from langchain.utilities import Portkey # Add the Portkey API Key from your account @@ -39,7 +39,7 @@ A common Portkey X Langchain use case is to **trace a chain or an agent** and vi ```python from langchain.agents import AgentType, initialize_agent, load_tools -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI from langchain.utilities import Portkey # Add the Portkey API Key from your account diff --git a/docs/docs/integrations/providers/portkey/logging_tracing_portkey.ipynb b/docs/docs/integrations/providers/portkey/logging_tracing_portkey.ipynb index 7fd2cd41611..7cb1e83091c 100644 --- a/docs/docs/integrations/providers/portkey/logging_tracing_portkey.ipynb +++ b/docs/docs/integrations/providers/portkey/logging_tracing_portkey.ipynb @@ -27,8 +27,8 @@ "import os\n", "\n", "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain.llms import OpenAI\n", - "from langchain.utilities import Portkey" + "from langchain.utilities import Portkey\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/integrations/providers/predibase.md b/docs/docs/integrations/providers/predibase.md index 79e55dcf5ec..31a445e99ad 100644 --- a/docs/docs/integrations/providers/predibase.md +++ b/docs/docs/integrations/providers/predibase.md @@ -15,7 +15,7 @@ Predibase integrates with LangChain by implementing LLM module. 
You can see a sh import os os.environ["PREDIBASE_API_TOKEN"] = "{PREDIBASE_API_TOKEN}" -from langchain.llms import Predibase +from langchain_community.llms import Predibase model = Predibase(model = 'vicuna-13b', predibase_api_key=os.environ.get('PREDIBASE_API_TOKEN')) diff --git a/docs/docs/integrations/providers/predictionguard.mdx b/docs/docs/integrations/providers/predictionguard.mdx index 09482cdb056..72d36bc0193 100644 --- a/docs/docs/integrations/providers/predictionguard.mdx +++ b/docs/docs/integrations/providers/predictionguard.mdx @@ -11,7 +11,7 @@ It is broken into two parts: installation and setup, and then references to spec There exists a Prediction Guard LLM wrapper, which you can access with ```python -from langchain.llms import PredictionGuard +from langchain_community.llms import PredictionGuard ``` You can provide the name of the Prediction Guard model as an argument when initializing the LLM: @@ -36,7 +36,7 @@ Basic usage of the controlled or guarded LLM wrapper: import os import predictionguard as pg -from langchain.llms import PredictionGuard +from langchain_community.llms import PredictionGuard from langchain.prompts import PromptTemplate from langchain.chains import LLMChain @@ -79,7 +79,7 @@ import os from langchain.prompts import PromptTemplate from langchain.chains import LLMChain -from langchain.llms import PredictionGuard +from langchain_community.llms import PredictionGuard # Optional, add your OpenAI API Key. This is optional, as Prediction Guard allows # you to access all the latest open access models (see https://docs.predictionguard.com) diff --git a/docs/docs/integrations/providers/promptlayer.mdx b/docs/docs/integrations/providers/promptlayer.mdx index 44c724d5505..550ff28f35b 100644 --- a/docs/docs/integrations/providers/promptlayer.mdx +++ b/docs/docs/integrations/providers/promptlayer.mdx @@ -35,7 +35,7 @@ from langchain.callbacks import PromptLayerCallbackHandler See a [usage example](/docs/integrations/llms/promptlayer_openai). ```python -from langchain.llms import PromptLayerOpenAI +from langchain_community.llms import PromptLayerOpenAI ``` @@ -44,6 +44,6 @@ from langchain.llms import PromptLayerOpenAI See a [usage example](/docs/integrations/chat/promptlayer_chatopenai). 
```python -from langchain.chat_models import PromptLayerChatOpenAI +from langchain_community.chat_models import PromptLayerChatOpenAI ``` diff --git a/docs/docs/integrations/providers/ray_serve.ipynb b/docs/docs/integrations/providers/ray_serve.ipynb index db17b4acaa9..f8988c64c01 100644 --- a/docs/docs/integrations/providers/ray_serve.ipynb +++ b/docs/docs/integrations/providers/ray_serve.ipynb @@ -108,8 +108,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/integrations/providers/rebuff.ipynb b/docs/docs/integrations/providers/rebuff.ipynb index 1ee61311e3c..540e68ed515 100644 --- a/docs/docs/integrations/providers/rebuff.ipynb +++ b/docs/docs/integrations/providers/rebuff.ipynb @@ -104,8 +104,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "\n", "# Set up the LangChain SDK with the environment variable\n", "llm = OpenAI(temperature=0)" diff --git a/docs/docs/integrations/providers/runhouse.mdx b/docs/docs/integrations/providers/runhouse.mdx index 8039882b0bc..d0b63ed4905 100644 --- a/docs/docs/integrations/providers/runhouse.mdx +++ b/docs/docs/integrations/providers/runhouse.mdx @@ -12,7 +12,7 @@ For a basic self-hosted LLM, you can use the `SelfHostedHuggingFaceLLM` class. F custom LLMs, you can use the `SelfHostedPipeline` parent class. ```python -from langchain.llms import SelfHostedPipeline, SelfHostedHuggingFaceLLM +from langchain_community.llms import SelfHostedPipeline, SelfHostedHuggingFaceLLM ``` For a more detailed walkthrough of the Self-hosted LLMs, see [this notebook](/docs/integrations/llms/runhouse) @@ -23,7 +23,7 @@ There are several ways to use self-hosted embeddings with LangChain via Runhouse For a basic self-hosted embedding from a Hugging Face Transformers model, you can use the `SelfHostedEmbedding` class. ```python -from langchain.llms import SelfHostedPipeline, SelfHostedHuggingFaceLLM +from langchain_community.llms import SelfHostedPipeline, SelfHostedHuggingFaceLLM ``` For a more detailed walkthrough of the Self-hosted Embeddings, see [this notebook](/docs/integrations/text_embedding/self-hosted) diff --git a/docs/docs/integrations/providers/rwkv.mdx b/docs/docs/integrations/providers/rwkv.mdx index 82a3c35e529..2b5f827aa22 100644 --- a/docs/docs/integrations/providers/rwkv.mdx +++ b/docs/docs/integrations/providers/rwkv.mdx @@ -15,7 +15,7 @@ It is broken into two parts: installation and setup, and then usage with an exam To use the RWKV wrapper, you need to provide the path to the pre-trained model file and the tokenizer's configuration. ```python -from langchain.llms import RWKV +from langchain_community.llms import RWKV # Test the model diff --git a/docs/docs/integrations/providers/salute_devices.mdx b/docs/docs/integrations/providers/salute_devices.mdx index 2ab03926722..b35adf02449 100644 --- a/docs/docs/integrations/providers/salute_devices.mdx +++ b/docs/docs/integrations/providers/salute_devices.mdx @@ -17,7 +17,7 @@ pip install gigachat See a [usage example](/docs/integrations/llms/gigachat). 
```python -from langchain.llms import GigaChat +from langchain_community.llms import GigaChat ``` ## Chat models @@ -25,5 +25,5 @@ from langchain.llms import GigaChat See a [usage example](/docs/integrations/chat/gigachat). ```python -from langchain.chat_models import GigaChat +from langchain_community.chat_models import GigaChat ``` \ No newline at end of file diff --git a/docs/docs/integrations/providers/searchapi.mdx b/docs/docs/integrations/providers/searchapi.mdx index f79602f98ec..29d59b49b1a 100644 --- a/docs/docs/integrations/providers/searchapi.mdx +++ b/docs/docs/integrations/providers/searchapi.mdx @@ -21,7 +21,7 @@ You can use it as part of a Self Ask chain: ```python from langchain.utilities import SearchApiAPIWrapper -from langchain.llms.openai import OpenAI +from langchain_community.llms.openai import OpenAI from langchain.agents import initialize_agent, Tool from langchain.agents import AgentType diff --git a/docs/docs/integrations/providers/shaleprotocol.md b/docs/docs/integrations/providers/shaleprotocol.md index d2b1bfe3681..bef048986d2 100644 --- a/docs/docs/integrations/providers/shaleprotocol.md +++ b/docs/docs/integrations/providers/shaleprotocol.md @@ -19,7 +19,7 @@ As of June 2023, the API supports Vicuna-13B by default. We are going to support For example ```python -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI from langchain.prompts import PromptTemplate from langchain.chains import LLMChain diff --git a/docs/docs/integrations/providers/spacy.mdx b/docs/docs/integrations/providers/spacy.mdx index 4ff18381697..13fa9bce050 100644 --- a/docs/docs/integrations/providers/spacy.mdx +++ b/docs/docs/integrations/providers/spacy.mdx @@ -24,5 +24,5 @@ from langchain.text_splitter import SpacyTextSplitter See a [usage example](/docs/integrations/text_embedding/spacy_embedding) ```python -from langchain.embeddings.spacy_embeddings import SpacyEmbeddings +from langchain_community.embeddings.spacy_embeddings import SpacyEmbeddings ``` diff --git a/docs/docs/integrations/providers/stochasticai.mdx b/docs/docs/integrations/providers/stochasticai.mdx index 75891103962..bd0b5484bb2 100644 --- a/docs/docs/integrations/providers/stochasticai.mdx +++ b/docs/docs/integrations/providers/stochasticai.mdx @@ -13,5 +13,5 @@ It is broken into two parts: installation and setup, and then references to spec There exists an StochasticAI LLM wrapper, which you can access with ```python -from langchain.llms import StochasticAI +from langchain_community.llms import StochasticAI ``` \ No newline at end of file diff --git a/docs/docs/integrations/providers/symblai_nebula.mdx b/docs/docs/integrations/providers/symblai_nebula.mdx index b716af6ff0c..57c27f6a249 100644 --- a/docs/docs/integrations/providers/symblai_nebula.mdx +++ b/docs/docs/integrations/providers/symblai_nebula.mdx @@ -13,6 +13,6 @@ It is broken into two parts: installation and setup, and then references to spec There exists an Nebula LLM wrapper, which you can access with ```python -from langchain.llms import Nebula +from langchain_community.llms import Nebula llm = Nebula() ``` diff --git a/docs/docs/integrations/providers/tencent.mdx b/docs/docs/integrations/providers/tencent.mdx index 1d75b4e1f45..731ee68ab8f 100644 --- a/docs/docs/integrations/providers/tencent.mdx +++ b/docs/docs/integrations/providers/tencent.mdx @@ -20,7 +20,7 @@ For more information, see [this notebook](/docs/integrations/chat/tencent_hunyuan) ```python -from langchain.chat_models import ChatHunyuan +from 
langchain_community.chat_models import ChatHunyuan ``` ## Vector Store diff --git a/docs/docs/integrations/providers/vectara/vectara_chat.ipynb b/docs/docs/integrations/providers/vectara/vectara_chat.ipynb index 665f4f5b644..17a4a99d651 100644 --- a/docs/docs/integrations/providers/vectara/vectara_chat.ipynb +++ b/docs/docs/integrations/providers/vectara/vectara_chat.ipynb @@ -61,8 +61,8 @@ "import os\n", "\n", "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain.llms import OpenAI\n", - "from langchain.vectorstores import Vectara" + "from langchain.vectorstores import Vectara\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/integrations/providers/vectara/vectara_summary.ipynb b/docs/docs/integrations/providers/vectara/vectara_summary.ipynb index 837341b43c0..2830e8de435 100644 --- a/docs/docs/integrations/providers/vectara/vectara_summary.ipynb +++ b/docs/docs/integrations/providers/vectara/vectara_summary.ipynb @@ -74,9 +74,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import FakeEmbeddings\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.vectorstores import Vectara\n", + "from langchain_community.embeddings import FakeEmbeddings\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough" ] @@ -242,8 +242,8 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.retrievers.multi_query import MultiQueryRetriever\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0)\n", "mqr = MultiQueryRetriever.from_llm(retriever=retriever, llm=llm)\n", diff --git a/docs/docs/integrations/providers/wandb_tracing.ipynb b/docs/docs/integrations/providers/wandb_tracing.ipynb index fb60e19a79f..da4cd81d4f8 100644 --- a/docs/docs/integrations/providers/wandb_tracing.ipynb +++ b/docs/docs/integrations/providers/wandb_tracing.ipynb @@ -39,7 +39,7 @@ "\n", "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.callbacks import wandb_tracing_enabled\n", - "from langchain.llms import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/integrations/providers/wandb_tracking.ipynb b/docs/docs/integrations/providers/wandb_tracking.ipynb index 9f9a23bf4f5..12b056b8cf6 100644 --- a/docs/docs/integrations/providers/wandb_tracking.ipynb +++ b/docs/docs/integrations/providers/wandb_tracking.ipynb @@ -63,7 +63,7 @@ "from datetime import datetime\n", "\n", "from langchain.callbacks import StdOutCallbackHandler, WandbCallbackHandler\n", - "from langchain.llms import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/integrations/providers/whylabs_profiling.ipynb b/docs/docs/integrations/providers/whylabs_profiling.ipynb index 69e4d8da1fc..c8ec8ac818e 100644 --- a/docs/docs/integrations/providers/whylabs_profiling.ipynb +++ b/docs/docs/integrations/providers/whylabs_profiling.ipynb @@ -100,7 +100,7 @@ } ], "source": [ - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "whylabs = WhyLabsCallbackHandler.from_params()\n", "llm = OpenAI(temperature=0, callbacks=[whylabs])\n", diff --git a/docs/docs/integrations/providers/writer.mdx b/docs/docs/integrations/providers/writer.mdx index 7b38c1ca027..52ff0723ee6 100644 --- a/docs/docs/integrations/providers/writer.mdx +++ 
b/docs/docs/integrations/providers/writer.mdx @@ -12,5 +12,5 @@ It is broken into two parts: installation and setup, and then references to spec There exists a Writer LLM wrapper, which you can access with ```python -from langchain.llms import Writer +from langchain_community.llms import Writer ``` \ No newline at end of file diff --git a/docs/docs/integrations/providers/xinference.mdx b/docs/docs/integrations/providers/xinference.mdx index 0029deef21c..07aefb3b952 100644 --- a/docs/docs/integrations/providers/xinference.mdx +++ b/docs/docs/integrations/providers/xinference.mdx @@ -76,7 +76,7 @@ A model uid will be returned. Example usage: ```python -from langchain.llms import Xinference +from langchain_community.llms import Xinference llm = Xinference( server_url="http://0.0.0.0:9997", diff --git a/docs/docs/integrations/providers/yandex.mdx b/docs/docs/integrations/providers/yandex.mdx index 8b539e54a48..06d381a5e78 100644 --- a/docs/docs/integrations/providers/yandex.mdx +++ b/docs/docs/integrations/providers/yandex.mdx @@ -19,7 +19,7 @@ pip install yandexcloud See a [usage example](/docs/integrations/llms/yandex). ```python -from langchain.llms import YandexGPT +from langchain_community.llms import YandexGPT ``` ## Chat models @@ -29,5 +29,5 @@ from langchain.llms import YandexGPT See a [usage example](/docs/integrations/chat/yandex). ```python -from langchain.chat_models import ChatYandexGPT +from langchain_community.chat_models import ChatYandexGPT ``` diff --git a/docs/docs/integrations/retrievers/arxiv.ipynb b/docs/docs/integrations/retrievers/arxiv.ipynb index 95b65ba49d6..ee6d406cb2c 100644 --- a/docs/docs/integrations/retrievers/arxiv.ipynb +++ b/docs/docs/integrations/retrievers/arxiv.ipynb @@ -201,7 +201,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\") # switch to 'gpt-4'\n", "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)" diff --git a/docs/docs/integrations/retrievers/bedrock.ipynb b/docs/docs/integrations/retrievers/bedrock.ipynb index 36c5e2f42aa..17efd839e99 100644 --- a/docs/docs/integrations/retrievers/bedrock.ipynb +++ b/docs/docs/integrations/retrievers/bedrock.ipynb @@ -78,7 +78,7 @@ "source": [ "from botocore.client import Config\n", "from langchain.chains import RetrievalQA\n", - "from langchain.llms import Bedrock\n", + "from langchain_community.llms import Bedrock\n", "\n", "model_kwargs_claude = {\"temperature\": 0, \"top_k\": 10, \"max_tokens_to_sample\": 3000}\n", "\n", diff --git a/docs/docs/integrations/retrievers/cohere-reranker.ipynb b/docs/docs/integrations/retrievers/cohere-reranker.ipynb index 8e973b0331a..c5cce3feed5 100644 --- a/docs/docs/integrations/retrievers/cohere-reranker.ipynb +++ b/docs/docs/integrations/retrievers/cohere-reranker.ipynb @@ -326,9 +326,9 @@ ], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "documents = TextLoader(\"../../modules/state_of_the_union.txt\").load()\n", "text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)\n", @@ -386,9 +386,9 @@ } ], "source": [ - "from langchain.llms import OpenAI\n",
"from langchain.retrievers import ContextualCompressionRetriever\n", "from langchain.retrievers.document_compressors import CohereRerank\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "compressor = CohereRerank()\n", diff --git a/docs/docs/integrations/retrievers/cohere.ipynb b/docs/docs/integrations/retrievers/cohere.ipynb index 3562bb6b7af..b6dec184e1d 100644 --- a/docs/docs/integrations/retrievers/cohere.ipynb +++ b/docs/docs/integrations/retrievers/cohere.ipynb @@ -19,8 +19,8 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatCohere\n", "from langchain.retrievers import CohereRagRetriever\n", + "from langchain_community.chat_models import ChatCohere\n", "from langchain_core.documents import Document" ] }, diff --git a/docs/docs/integrations/retrievers/docarray_retriever.ipynb b/docs/docs/integrations/retrievers/docarray_retriever.ipynb index 458915ffcd5..e02d1a7743e 100644 --- a/docs/docs/integrations/retrievers/docarray_retriever.ipynb +++ b/docs/docs/integrations/retrievers/docarray_retriever.ipynb @@ -34,8 +34,8 @@ "\n", "from docarray import BaseDoc\n", "from docarray.typing import NdArray\n", - "from langchain.embeddings import FakeEmbeddings\n", "from langchain.retrievers import DocArrayRetriever\n", + "from langchain_community.embeddings import FakeEmbeddings\n", "\n", "embeddings = FakeEmbeddings(size=32)" ] @@ -569,7 +569,7 @@ "source": [ "from docarray import BaseDoc, DocList\n", "from docarray.typing import NdArray\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "\n", "# define schema for your movie documents\n", diff --git a/docs/docs/integrations/retrievers/fleet_context.ipynb b/docs/docs/integrations/retrievers/fleet_context.ipynb index f15645fdbb6..ca8f6e6c4b1 100644 --- a/docs/docs/integrations/retrievers/fleet_context.ipynb +++ b/docs/docs/integrations/retrievers/fleet_context.ipynb @@ -33,10 +33,10 @@ "from typing import Any, Optional, Type\n", "\n", "import pandas as pd\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers import MultiVectorRetriever\n", "from langchain.schema import Document\n", "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.stores import BaseStore\n", "from langchain_core.vectorstores import VectorStore\n", "\n", @@ -191,9 +191,9 @@ { "data": { "text/plain": [ - "[Document(page_content='Vector store-backed retriever | 🦜️🔗 Langchain\\n# Vector store-backed retriever A vector store retriever is a retriever that uses a vector store to retrieve documents. It is a lightweight wrapper around the vector store class to make it conform to the retriever interface. It uses the search methods implemented by a vector store, like similarity search and MMR, to query the texts in the vector store. Once you construct a vector store, it\\'s very easy to construct a retriever. Let\\'s walk through an example.Once you construct a vector store, it\\'s very easy to construct a retriever. Let\\'s walk through an example. 
``` from langchain.document_loaders import TextLoaderloader = TextLoader(\\'../../../state_of_the_union.txt\\') ``` ``` from langchain.text_splitter import CharacterTextSplitterfrom langchain.vectorstores import FAISSfrom langchain.embeddings import OpenAIEmbeddingsdocuments = loader.load()text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)texts = text_splitter.split_documents(documents)embeddings = OpenAIEmbeddings()db = FAISS.from_documents(texts, embeddings) ``` ``` Exiting: Cleaning up .chroma directory ``` ``` retriever = db.as_retriever() ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Maximum marginal relevance retrieval[\\u200b](#maximum-marginal-relevance-retrieval) By default, the vector store retriever uses similarity search.If the underlying vector store supports maximum marginal relevance search, you can specify that as the search type. ``` retriever = db.as_retriever(search_type=\"mmr\") ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Similarity score threshold retrieval[\\u200b](#similarity-score-threshold-retrieval) You can also a retrieval method that sets a similarity score threshold and only returns documents with a score above that threshold. ``` retriever = db.as_retriever(search_type=\"similarity_score_threshold\", search_kwargs={\"score_threshold\": .5}) ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Specifying top k[\\u200b](#specifying-top-k) You can also specify search kwargs like `k` to use when doing retrieval.``` retriever = db.as_retriever(search_kwargs={\"k\": 1}) ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ``` len(docs) ``` ``` 1 ```', metadata={'title': 'Vector store-backed retriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/vectorstore', 'id': 'c153ebd9-2611-4a43-9db6-daa1f5f214f6'}),\n", - " Document(page_content='MultiVector Retriever | 🦜️🔗 Langchain\\n# MultiVector Retriever It can often be beneficial to store multiple vectors per document. There are multiple use cases where this is beneficial. LangChain has a base `MultiVectorRetriever` which makes querying this type of setup easy. A lot of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`. The methods to create multiple vectors per document include: - Smaller chunks: split a document into smaller chunks, and embed those (this is ParentDocumentRetriever). - Summary: create a summary for each document, embed that along with (or instead of) the document. - Hypothetical questions: create hypothetical questions that each document would be appropriate to answer, embed those along with (or instead of) the document. Note that this also enables another method of adding embeddings - manually.Note that this also enables another method of adding embeddings - manually. This is great because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control. 
``` from langchain.retrievers.multi_vector import MultiVectorRetriever ``` ``` from langchain.vectorstores import Chromafrom langchain.embeddings import OpenAIEmbeddingsfrom langchain.text_splitter import RecursiveCharacterTextSplitterfrom langchain.storage import InMemoryStorefrom langchain.document_loaders import TextLoader ``` ``` loaders = [ TextLoader(\\'../../paul_graham_essay.txt\\'), TextLoader(\\'../../state_of_the_union.txt\\'),]docs = []for l in loaders: docs.extend(l.load())text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)docs = text_splitter.split_documents(docs) ``` ## Smaller chunks[\\u200b](#smaller-chunks) Often times it can be useful to retrieve larger chunks of information, but embed smaller chunks.This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the `ParentDocumentRetriever` does. Here we show what is going on under the hood.``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"full_documents\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)import uuiddoc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` # The splitter to use to create smaller chunkschild_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400) ``` ``` sub_docs = []for i, doc in enumerate(docs): _id = doc_ids[i] _sub_docs = child_text_splitter.split_documents([doc]) for _doc in _sub_docs: _doc.metadata[id_key] = _id sub_docs.extend(_sub_docs) ``` ``` retriever.vectorstore.add_documents(sub_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` # Vectorstore alone retrieves the small chunksretriever.vectorstore.similarity_search(\"justice breyer\")[0] ``` ``` Document(page_content=\\'Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court.Justice Breyer, thank you for your service. \\\\n\\\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\', metadata={\\'doc_id\\': \\'10e9cbc0-4ba5-4d79-a09b-c033d1ba7b01\\', \\'source\\': \\'../../state_of_the_union.txt\\'}) ``` ``` # Retriever returns larger chunkslen(retriever.get_relevant_documents(\"justice breyer\")[0].page_content) ``` ``` 9874 ``` ## Summary[\\u200b](#summary) Oftentimes a summary may be able to distill more accurately what a chunk is about, leading to better retrieval. 
Here we show how to create summaries, and then embed those.``` from langchain.chat_models import ChatOpenAIfrom langchain.prompts import ChatPromptTemplatefrom langchain_core.output_parsers import StrOutputParserimport uuidfrom langchain_core.documents import Document ``` ``` chain = ( {\"doc\": lambda x: x.page_content} | ChatPromptTemplate.from_template(\"Summarize the following document:\\\\n\\\\n{doc}\") | ChatOpenAI(max_retries=0) | StrOutputParser()) ``` ``` summaries = chain.batch(docs, {\"max_concurrency\": 5}) ``` ``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` summary_docs = [Document(page_content=s,metadata={id_key: doc_ids[i]}) for i, s in enumerate(summaries)] ``` ``` retriever.vectorstore.add_documents(summary_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` # # We can also add the original chunks to the vectorstore if we so want# for i, doc in enumerate(docs):# doc.metadata[id_key] = doc_ids[i]# retriever.vectorstore.add_documents(docs) ``` ``` sub_docs = vectorstore.similarity_search(\"justice breyer\") ``` ``` sub_docs[0] ``` ``` Document(page_content=\"The document is a transcript of a speech given by the President of the United States.The President discusses several important issues and initiatives, including the nomination of a Supreme Court Justice, border security and immigration reform, protecting women\\'s rights, advancing LGBTQ+ equality, bipartisan legislation, addressing the opioid epidemic and mental health, supporting veterans, investigating the health effects of burn pits on military personnel, ending cancer, and the strength and resilience of the American people. \", metadata={\\'doc_id\\': \\'79fa2e9f-28d9-4372-8af3-2caf4f1de312\\'}) ``` ``` retrieved_docs = retriever.get_relevant_documents(\"justice breyer\") ``` ``` len(retrieved_docs[0].page_content) ``` ``` 9194 ``` ## Hypothetical Queries[\\u200b](#hypothetical-queries) An LLM can also be used to generate a list of hypothetical questions that could be asked of a particular document.These questions can then be embedded ``` functions = [ { \"name\": \"hypothetical_questions\", \"description\": \"Generate hypothetical questions\", \"parameters\": { \"type\": \"object\", \"properties\": { \"questions\": { \"type\": \"array\", \"items\": { \"type\": \"string\" }, }, }, \"required\": [\"questions\"] } } ] ``` ``` from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParserchain = ( {\"doc\": lambda x: x.page_content} # Only asking for 3 hypothetical questions, but this could be adjusted | ChatPromptTemplate.from_template(\"Generate a list of 3 hypothetical questions that the below document could be used to answer:\\\\n\\\\n{doc}\") | ChatOpenAI(max_retries=0, model=\"gpt-4\").bind(functions=functions, function_call={\"name\": \"hypothetical_questions\"}) | JsonKeyOutputFunctionsParser(key_name=\"questions\")) ``` ``` chain.invoke(docs[0]) ``` ``` [\"What was the author\\'s initial impression of philosophy as a field of study, and how did it change when they got to college?\", \\'Why did the author decide to switch their focus to Artificial Intelligence (AI)? 
\\', \"What led to the author\\'s disillusionment with the field of AI as it was practiced at the time?\"]``` ``` hypothetical_questions = chain.batch(docs, {\"max_concurrency\": 5}) ``` ``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"hypo-questions\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` question_docs = []for i, question_list in enumerate(hypothetical_questions): question_docs.extend([Document(page_content=s,metadata={id_key: doc_ids[i]}) for s in question_list]) ``` ``` retriever.vectorstore.add_documents(question_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` sub_docs = vectorstore.similarity_search(\"justice breyer\") ``` ``` sub_docs ``` ``` [Document(page_content=\"What is the President\\'s stance on immigration reform?\", metadata={\\'doc_id\\': \\'505d73e3-8350-46ec-a58e-3af032f04ab3\\'}), Document(page_content=\"What is the President\\'s stance on immigration reform? \", metadata={\\'doc_id\\': \\'1c9618f0-7660-4b4f-a37c-509cbbbf6dba\\'}), Document(page_content=\"What is the President\\'s stance on immigration reform? \", metadata={\\'doc_id\\': \\'82c08209-b904-46a8-9532-edd2380950b7\\'}), Document(page_content=\\'What measures is the President proposing to protect the rights of LGBTQ+ Americans? \\', metadata={\\'doc_id\\': \\'82c08209-b904-46a8-9532-edd2380950b7\\'})] ``` ``` retrieved_docs = retriever.get_relevant_documents(\"justice breyer\") ``` ``` len(retrieved_docs[0].page_content) ``` ``` 9194 ```', metadata={'title': 'MultiVector Retriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector', 'id': 'beec5531-16a7-453c-80ab-c5628e0236ce'}),\n", - " Document(page_content='MultiQueryRetriever | 🦜️🔗 Langchain\\n# MultiQueryRetriever Distance-based vector database retrieval embeds (represents) queries in high-dimensional space and finds similar embedded documents based on \"distance\". But, retrieval may produce different results with subtle changes in query wording or if the embeddings do not capture the semantics of the data well. Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious. The `MultiQueryRetriever` automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query. For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents. By generating multiple perspectives on the same question, the `MultiQueryRetriever` might be able to overcome some of the limitations of the distance-based retrieval and get a richer set of results.By generating multiple perspectives on the same question, the `MultiQueryRetriever` might be able to overcome some of the limitations of the distance-based retrieval and get a richer set of results. 
``` # Build a sample vectorDBfrom langchain.vectorstores import Chromafrom langchain.document_loaders import WebBaseLoaderfrom langchain.embeddings.openai import OpenAIEmbeddingsfrom langchain.text_splitter import RecursiveCharacterTextSplitter# Load blog postloader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")data = loader.load()# Splittext_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)splits = text_splitter.split_documents(data)# VectorDBembedding = OpenAIEmbeddings()vectordb = Chroma.from_documents(documents=splits, embedding=embedding) ``` #### Simple usage[\\u200b](#simple-usage) Specify the LLM to use for query generation, and the retriever will do the rest.``` from langchain.chat_models import ChatOpenAIfrom langchain.retrievers.multi_query import MultiQueryRetrieverquestion = \"What are the approaches to Task Decomposition? \"llm = ChatOpenAI(temperature=0)retriever_from_llm = MultiQueryRetriever.from_llm( retriever=vectordb.as_retriever(), llm=llm) ``` ``` # Set logging for the queriesimport logginglogging.basicConfig()logging.getLogger(\"langchain.retrievers.multi_query\").setLevel(logging.INFO) ``` ``` unique_docs = retriever_from_llm.get_relevant_documents(query=question)len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\\'1. How can Task Decomposition be approached? \\', \\'2. What are the different methods for Task Decomposition? \\', \\'3. What are the various approaches to decomposing tasks?\\'] 5 ``` #### Supplying your own prompt[\\u200b](#supplying-your-own-prompt) You can also supply a prompt along with an output parser to split the results into a list of queries.5 ``` #### Supplying your own prompt[\\u200b](#supplying-your-own-prompt) You can also supply a prompt along with an output parser to split the results into a list of queries. ``` from typing import Listfrom langchain.chains import LLMChainfrom pydantic import BaseModel, Fieldfrom langchain.prompts import PromptTemplatefrom langchain.output_parsers import PydanticOutputParser# Output parser will split the LLM result into a list of queriesclass LineList(BaseModel): # \"lines\" is the key (attribute name) of the parsed output lines: List[str] = Field(description=\"Lines of text\")class LineListOutputParser(PydanticOutputParser): def __init__(self) -> None: super().__init__(pydantic_object=LineList) def parse(self, text: str) -> LineList: lines = text.strip().split(\"\\\\n\") return LineList(lines=lines)output_parser = LineListOutputParser()QUERY_PROMPT = PromptTemplate( input_variables=[\"question\"], template=\"\"\"You are an AI language model assistant.Your task is to generate five different versions of the given user question to retrieve relevant documents from a vector database. By generating multiple perspectives on the user question, your goal is to help the user overcome some of the limitations of the distance-based similarity search. Provide these alternative questions separated by newlines. Original question: {question}\"\"\",)llm = ChatOpenAI(temperature=0)# Chainllm_chain = LLMChain(llm=llm, prompt=QUERY_PROMPT, output_parser=output_parser)# Other inputsquestion = \"What are the approaches to Task Decomposition?\" ``` ``` # Runretriever = MultiQueryRetriever( retriever=vectordb.as_retriever(), llm_chain=llm_chain, parser_key=\"lines\") # \"lines\" is the key (attribute name) of the parsed output# Resultsunique_docs = retriever.get_relevant_documents( query=\"What does the course say about regression? 
\")len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\"1.\")len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\"1. What is the course\\'s perspective on regression? \", \\'2. Can you provide information on regression as discussed in the course? \\', \\'3. How does the course cover the topic of regression? \\', \"4. What are the course\\'s teachings on regression? \", \\'5. In relation to the course, what is mentioned about regression?\\'] 11 ```', metadata={'title': 'MultiQueryRetriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever', 'id': 'f7c20633-6a60-4ca3-96b1-13fee66e321d'}),\n", + "[Document(page_content='Vector store-backed retriever | 🦜️🔗 Langchain\\n# Vector store-backed retriever A vector store retriever is a retriever that uses a vector store to retrieve documents. It is a lightweight wrapper around the vector store class to make it conform to the retriever interface. It uses the search methods implemented by a vector store, like similarity search and MMR, to query the texts in the vector store. Once you construct a vector store, it\\'s very easy to construct a retriever. Let\\'s walk through an example.Once you construct a vector store, it\\'s very easy to construct a retriever. Let\\'s walk through an example. ``` from langchain.document_loaders import TextLoaderloader = TextLoader(\\'../../../state_of_the_union.txt\\') ``` ``` from langchain.text_splitter import CharacterTextSplitterfrom langchain.vectorstores import FAISSfrom langchain_community.embeddings import OpenAIEmbeddingsdocuments = loader.load()text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)texts = text_splitter.split_documents(documents)embeddings = OpenAIEmbeddings()db = FAISS.from_documents(texts, embeddings) ``` ``` Exiting: Cleaning up .chroma directory ``` ``` retriever = db.as_retriever() ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Maximum marginal relevance retrieval[\\u200b](#maximum-marginal-relevance-retrieval) By default, the vector store retriever uses similarity search.If the underlying vector store supports maximum marginal relevance search, you can specify that as the search type. ``` retriever = db.as_retriever(search_type=\"mmr\") ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Similarity score threshold retrieval[\\u200b](#similarity-score-threshold-retrieval) You can also a retrieval method that sets a similarity score threshold and only returns documents with a score above that threshold. 
``` retriever = db.as_retriever(search_type=\"similarity_score_threshold\", search_kwargs={\"score_threshold\": .5}) ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Specifying top k[\\u200b](#specifying-top-k) You can also specify search kwargs like `k` to use when doing retrieval.``` retriever = db.as_retriever(search_kwargs={\"k\": 1}) ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ``` len(docs) ``` ``` 1 ```', metadata={'title': 'Vector store-backed retriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/vectorstore', 'id': 'c153ebd9-2611-4a43-9db6-daa1f5f214f6'}),\n", + " Document(page_content='MultiVector Retriever | 🦜️🔗 Langchain\\n# MultiVector Retriever It can often be beneficial to store multiple vectors per document. There are multiple use cases where this is beneficial. LangChain has a base `MultiVectorRetriever` which makes querying this type of setup easy. A lot of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`. The methods to create multiple vectors per document include: - Smaller chunks: split a document into smaller chunks, and embed those (this is ParentDocumentRetriever). - Summary: create a summary for each document, embed that along with (or instead of) the document. - Hypothetical questions: create hypothetical questions that each document would be appropriate to answer, embed those along with (or instead of) the document. Note that this also enables another method of adding embeddings - manually.Note that this also enables another method of adding embeddings - manually. This is great because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control. ``` from langchain.retrievers.multi_vector import MultiVectorRetriever ``` ``` from langchain.vectorstores import Chromafrom langchain_community.embeddings import OpenAIEmbeddingsfrom langchain.text_splitter import RecursiveCharacterTextSplitterfrom langchain.storage import InMemoryStorefrom langchain.document_loaders import TextLoader ``` ``` loaders = [ TextLoader(\\'../../paul_graham_essay.txt\\'), TextLoader(\\'../../state_of_the_union.txt\\'),]docs = []for l in loaders: docs.extend(l.load())text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)docs = text_splitter.split_documents(docs) ``` ## Smaller chunks[\\u200b](#smaller-chunks) Often times it can be useful to retrieve larger chunks of information, but embed smaller chunks.This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the `ParentDocumentRetriever` does. 
Here we show what is going on under the hood.``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"full_documents\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)import uuiddoc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` # The splitter to use to create smaller chunkschild_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400) ``` ``` sub_docs = []for i, doc in enumerate(docs): _id = doc_ids[i] _sub_docs = child_text_splitter.split_documents([doc]) for _doc in _sub_docs: _doc.metadata[id_key] = _id sub_docs.extend(_sub_docs) ``` ``` retriever.vectorstore.add_documents(sub_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` # Vectorstore alone retrieves the small chunksretriever.vectorstore.similarity_search(\"justice breyer\")[0] ``` ``` Document(page_content=\\'Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court.Justice Breyer, thank you for your service. \\\\n\\\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\', metadata={\\'doc_id\\': \\'10e9cbc0-4ba5-4d79-a09b-c033d1ba7b01\\', \\'source\\': \\'../../state_of_the_union.txt\\'}) ``` ``` # Retriever returns larger chunkslen(retriever.get_relevant_documents(\"justice breyer\")[0].page_content) ``` ``` 9874 ``` ## Summary[\\u200b](#summary) Oftentimes a summary may be able to distill more accurately what a chunk is about, leading to better retrieval. 
Here we show how to create summaries, and then embed those.``` from langchain_community.chat_models import ChatOpenAIfrom langchain.prompts import ChatPromptTemplatefrom langchain_core.output_parsers import StrOutputParserimport uuidfrom langchain_core.documents import Document ``` ``` chain = ( {\"doc\": lambda x: x.page_content} | ChatPromptTemplate.from_template(\"Summarize the following document:\\\\n\\\\n{doc}\") | ChatOpenAI(max_retries=0) | StrOutputParser()) ``` ``` summaries = chain.batch(docs, {\"max_concurrency\": 5}) ``` ``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` summary_docs = [Document(page_content=s,metadata={id_key: doc_ids[i]}) for i, s in enumerate(summaries)] ``` ``` retriever.vectorstore.add_documents(summary_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` # # We can also add the original chunks to the vectorstore if we so want# for i, doc in enumerate(docs):# doc.metadata[id_key] = doc_ids[i]# retriever.vectorstore.add_documents(docs) ``` ``` sub_docs = vectorstore.similarity_search(\"justice breyer\") ``` ``` sub_docs[0] ``` ``` Document(page_content=\"The document is a transcript of a speech given by the President of the United States.The President discusses several important issues and initiatives, including the nomination of a Supreme Court Justice, border security and immigration reform, protecting women\\'s rights, advancing LGBTQ+ equality, bipartisan legislation, addressing the opioid epidemic and mental health, supporting veterans, investigating the health effects of burn pits on military personnel, ending cancer, and the strength and resilience of the American people. \", metadata={\\'doc_id\\': \\'79fa2e9f-28d9-4372-8af3-2caf4f1de312\\'}) ``` ``` retrieved_docs = retriever.get_relevant_documents(\"justice breyer\") ``` ``` len(retrieved_docs[0].page_content) ``` ``` 9194 ``` ## Hypothetical Queries[\\u200b](#hypothetical-queries) An LLM can also be used to generate a list of hypothetical questions that could be asked of a particular document.These questions can then be embedded ``` functions = [ { \"name\": \"hypothetical_questions\", \"description\": \"Generate hypothetical questions\", \"parameters\": { \"type\": \"object\", \"properties\": { \"questions\": { \"type\": \"array\", \"items\": { \"type\": \"string\" }, }, }, \"required\": [\"questions\"] } } ] ``` ``` from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParserchain = ( {\"doc\": lambda x: x.page_content} # Only asking for 3 hypothetical questions, but this could be adjusted | ChatPromptTemplate.from_template(\"Generate a list of 3 hypothetical questions that the below document could be used to answer:\\\\n\\\\n{doc}\") | ChatOpenAI(max_retries=0, model=\"gpt-4\").bind(functions=functions, function_call={\"name\": \"hypothetical_questions\"}) | JsonKeyOutputFunctionsParser(key_name=\"questions\")) ``` ``` chain.invoke(docs[0]) ``` ``` [\"What was the author\\'s initial impression of philosophy as a field of study, and how did it change when they got to college?\", \\'Why did the author decide to switch their focus to Artificial Intelligence (AI)? 
\\', \"What led to the author\\'s disillusionment with the field of AI as it was practiced at the time?\"]``` ``` hypothetical_questions = chain.batch(docs, {\"max_concurrency\": 5}) ``` ``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"hypo-questions\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` question_docs = []for i, question_list in enumerate(hypothetical_questions): question_docs.extend([Document(page_content=s,metadata={id_key: doc_ids[i]}) for s in question_list]) ``` ``` retriever.vectorstore.add_documents(question_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` sub_docs = vectorstore.similarity_search(\"justice breyer\") ``` ``` sub_docs ``` ``` [Document(page_content=\"What is the President\\'s stance on immigration reform?\", metadata={\\'doc_id\\': \\'505d73e3-8350-46ec-a58e-3af032f04ab3\\'}), Document(page_content=\"What is the President\\'s stance on immigration reform? \", metadata={\\'doc_id\\': \\'1c9618f0-7660-4b4f-a37c-509cbbbf6dba\\'}), Document(page_content=\"What is the President\\'s stance on immigration reform? \", metadata={\\'doc_id\\': \\'82c08209-b904-46a8-9532-edd2380950b7\\'}), Document(page_content=\\'What measures is the President proposing to protect the rights of LGBTQ+ Americans? \\', metadata={\\'doc_id\\': \\'82c08209-b904-46a8-9532-edd2380950b7\\'})] ``` ``` retrieved_docs = retriever.get_relevant_documents(\"justice breyer\") ``` ``` len(retrieved_docs[0].page_content) ``` ``` 9194 ```', metadata={'title': 'MultiVector Retriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector', 'id': 'beec5531-16a7-453c-80ab-c5628e0236ce'}),\n", + " Document(page_content='MultiQueryRetriever | 🦜️🔗 Langchain\\n# MultiQueryRetriever Distance-based vector database retrieval embeds (represents) queries in high-dimensional space and finds similar embedded documents based on \"distance\". But, retrieval may produce different results with subtle changes in query wording or if the embeddings do not capture the semantics of the data well. Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious. The `MultiQueryRetriever` automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query. For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents. By generating multiple perspectives on the same question, the `MultiQueryRetriever` might be able to overcome some of the limitations of the distance-based retrieval and get a richer set of results.By generating multiple perspectives on the same question, the `MultiQueryRetriever` might be able to overcome some of the limitations of the distance-based retrieval and get a richer set of results. 
``` # Build a sample vectorDBfrom langchain.vectorstores import Chromafrom langchain.document_loaders import WebBaseLoaderfrom langchain_community.embeddings.openai import OpenAIEmbeddingsfrom langchain.text_splitter import RecursiveCharacterTextSplitter# Load blog postloader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")data = loader.load()# Splittext_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)splits = text_splitter.split_documents(data)# VectorDBembedding = OpenAIEmbeddings()vectordb = Chroma.from_documents(documents=splits, embedding=embedding) ``` #### Simple usage[\\u200b](#simple-usage) Specify the LLM to use for query generation, and the retriever will do the rest.``` from langchain_community.chat_models import ChatOpenAIfrom langchain.retrievers.multi_query import MultiQueryRetrieverquestion = \"What are the approaches to Task Decomposition? \"llm = ChatOpenAI(temperature=0)retriever_from_llm = MultiQueryRetriever.from_llm( retriever=vectordb.as_retriever(), llm=llm) ``` ``` # Set logging for the queriesimport logginglogging.basicConfig()logging.getLogger(\"langchain.retrievers.multi_query\").setLevel(logging.INFO) ``` ``` unique_docs = retriever_from_llm.get_relevant_documents(query=question)len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\\'1. How can Task Decomposition be approached? \\', \\'2. What are the different methods for Task Decomposition? \\', \\'3. What are the various approaches to decomposing tasks?\\'] 5 ``` #### Supplying your own prompt[\\u200b](#supplying-your-own-prompt) You can also supply a prompt along with an output parser to split the results into a list of queries.5 ``` #### Supplying your own prompt[\\u200b](#supplying-your-own-prompt) You can also supply a prompt along with an output parser to split the results into a list of queries. ``` from typing import Listfrom langchain.chains import LLMChainfrom pydantic import BaseModel, Fieldfrom langchain.prompts import PromptTemplatefrom langchain.output_parsers import PydanticOutputParser# Output parser will split the LLM result into a list of queriesclass LineList(BaseModel): # \"lines\" is the key (attribute name) of the parsed output lines: List[str] = Field(description=\"Lines of text\")class LineListOutputParser(PydanticOutputParser): def __init__(self) -> None: super().__init__(pydantic_object=LineList) def parse(self, text: str) -> LineList: lines = text.strip().split(\"\\\\n\") return LineList(lines=lines)output_parser = LineListOutputParser()QUERY_PROMPT = PromptTemplate( input_variables=[\"question\"], template=\"\"\"You are an AI language model assistant.Your task is to generate five different versions of the given user question to retrieve relevant documents from a vector database. By generating multiple perspectives on the user question, your goal is to help the user overcome some of the limitations of the distance-based similarity search. Provide these alternative questions separated by newlines. 
Original question: {question}\"\"\",)llm = ChatOpenAI(temperature=0)# Chainllm_chain = LLMChain(llm=llm, prompt=QUERY_PROMPT, output_parser=output_parser)# Other inputsquestion = \"What are the approaches to Task Decomposition?\" ``` ``` # Runretriever = MultiQueryRetriever( retriever=vectordb.as_retriever(), llm_chain=llm_chain, parser_key=\"lines\") # \"lines\" is the key (attribute name) of the parsed output# Resultsunique_docs = retriever.get_relevant_documents( query=\"What does the course say about regression? \")len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\"1.\")len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\"1. What is the course\\'s perspective on regression? \", \\'2. Can you provide information on regression as discussed in the course? \\', \\'3. How does the course cover the topic of regression? \\', \"4. What are the course\\'s teachings on regression? \", \\'5. In relation to the course, what is mentioned about regression?\\'] 11 ```', metadata={'title': 'MultiQueryRetriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever', 'id': 'f7c20633-6a60-4ca3-96b1-13fee66e321d'}),\n", " Document(page_content='langchain.retrievers.multi_vector.MultiVectorRetriever — 🦜🔗 LangChain 0.0.322\\n# `langchain.retrievers.multi_vector`.MultiVectorRetriever[¶](#langchain-retrievers-multi-vector-multivectorretriever) *class *langchain.retrievers.multi_vector.MultiVectorRetriever[[source]](../_modules/langchain/retrievers/multi_vector.html#MultiVectorRetriever)[¶](#langchain.retrievers.multi_vector.MultiVectorRetriever) # Examples using MultiVectorRetriever[¶](#langchain-retrievers-multi-vector-multivectorretriever) - [MultiVector Retriever](https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector)', metadata={'title': 'langchain.retrievers.multi_vector.MultiVectorRetriever — 🦜🔗 LangChain 0.0.322', 'type': None, 'url': 'https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_vector.MultiVectorRetriever.html#langchain-retrievers-multi-vector-multivectorretriever', 'id': '1820c44d-7783-4846-a11c-106b18da015d'})]" ] }, @@ -223,9 +223,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.schema import StrOutputParser\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.runnables import RunnablePassthrough\n", "\n", "prompt = ChatPromptTemplate.from_messages(\n", @@ -275,7 +275,7 @@ "To create a FAISS vector store retriever that returns 10 documents per search query, you can use the following code:\n", "\n", "```python\n", - "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain.vectorstores import FAISS\n", "\n", "# Assuming you have already loaded and split your documents\n", diff --git a/docs/docs/integrations/retrievers/jaguar.ipynb b/docs/docs/integrations/retrievers/jaguar.ipynb index 35f43d94616..62a83758ce2 100644 --- a/docs/docs/integrations/retrievers/jaguar.ipynb +++ b/docs/docs/integrations/retrievers/jaguar.ipynb @@ -53,8 +53,8 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", + "from 
langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores.jaguar import Jaguar\n", "\n", "\"\"\" \n", @@ -147,7 +147,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores.jaguar import Jaguar\n", "\n", "# Instantiate a Jaguar vector store object\n", diff --git a/docs/docs/integrations/retrievers/kay.ipynb b/docs/docs/integrations/retrievers/kay.ipynb index b61f4b9b6e4..b0f6237591e 100644 --- a/docs/docs/integrations/retrievers/kay.ipynb +++ b/docs/docs/integrations/retrievers/kay.ipynb @@ -151,7 +151,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n", "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)" diff --git a/docs/docs/integrations/retrievers/knn.ipynb b/docs/docs/integrations/retrievers/knn.ipynb index fe3b2c3d1ab..89c31026d0e 100644 --- a/docs/docs/integrations/retrievers/knn.ipynb +++ b/docs/docs/integrations/retrievers/knn.ipynb @@ -21,8 +21,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.retrievers import KNNRetriever" + "from langchain.retrievers import KNNRetriever\n", + "from langchain_community.embeddings import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/retrievers/merger_retriever.ipynb b/docs/docs/integrations/retrievers/merger_retriever.ipynb index 1d2b9e3102b..77824115a83 100644 --- a/docs/docs/integrations/retrievers/merger_retriever.ipynb +++ b/docs/docs/integrations/retrievers/merger_retriever.ipynb @@ -27,11 +27,11 @@ " EmbeddingsClusteringFilter,\n", " EmbeddingsRedundantFilter,\n", ")\n", - "from langchain.embeddings import HuggingFaceEmbeddings, OpenAIEmbeddings\n", "from langchain.retrievers import ContextualCompressionRetriever\n", "from langchain.retrievers.document_compressors import DocumentCompressorPipeline\n", "from langchain.retrievers.merger_retriever import MergerRetriever\n", "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import HuggingFaceEmbeddings, OpenAIEmbeddings\n", "\n", "# Get 3 diff embeddings.\n", "all_mini = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n", diff --git a/docs/docs/integrations/retrievers/outline.ipynb b/docs/docs/integrations/retrievers/outline.ipynb index 80ad74516cd..a47267de3cb 100644 --- a/docs/docs/integrations/retrievers/outline.ipynb +++ b/docs/docs/integrations/retrievers/outline.ipynb @@ -89,9 +89,9 @@ { "data": { "text/plain": [ - "[Document(page_content='This walkthrough demonstrates how to use an agent optimized for conversation. Other agents are often optimized for using tools to figure out the best response, which is not ideal in a conversational setting where you may want the agent to be able to chat with the user as well.\\n\\nIf we compare it to the standard ReAct agent, the main difference is the prompt. 
We want it to be much more conversational.\\n\\nfrom langchain.agents import AgentType, Tool, initialize_agent\\n\\nfrom langchain.llms import OpenAI\\n\\nfrom langchain.memory import ConversationBufferMemory\\n\\nfrom langchain.utilities import SerpAPIWrapper\\n\\nsearch = SerpAPIWrapper() tools = \\\\[ Tool( name=\"Current Search\", func=search.run, description=\"useful for when you need to answer questions about current events or the current state of the world\", ), \\\\]\\n\\n\\\\\\nllm = OpenAI(temperature=0)\\n\\nUsing LCEL\\n\\nWe will first show how to create this agent using LCEL\\n\\nfrom langchain import hub\\n\\nfrom langchain.agents.format_scratchpad import format_log_to_str\\n\\nfrom langchain.agents.output_parsers import ReActSingleInputOutputParser\\n\\nfrom langchain.tools.render import render_text_description\\n\\nprompt = hub.pull(\"hwchase17/react-chat\")\\n\\nprompt = prompt.partial( tools=render_text_description(tools), tool_names=\", \".join(\\\\[[t.name](http://t.name) for t in tools\\\\]), )\\n\\nllm_with_stop = llm.bind(stop=\\\\[\"\\\\nObservation\"\\\\])\\n\\nagent = ( { \"input\": lambda x: x\\\\[\"input\"\\\\], \"agent_scratchpad\": lambda x: format_log_to_str(x\\\\[\"intermediate_steps\"\\\\]), \"chat_history\": lambda x: x\\\\[\"chat_history\"\\\\], } | prompt | llm_with_stop | ReActSingleInputOutputParser() )\\n\\nfrom langchain.agents import AgentExecutor\\n\\nmemory = ConversationBufferMemory(memory_key=\"chat_history\") agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)\\n\\nagent_executor.invoke({\"input\": \"hi, i am bob\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? No\\nFinal Answer: Hi Bob, nice to meet you! How can I help you today?\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'Hi Bob, nice to meet you! How can I help you today?\\'\\n\\nagent_executor.invoke({\"input\": \"whats my name?\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? No\\nFinal Answer: Your name is Bob.\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'Your name is Bob.\\'\\n\\nagent_executor.invoke({\"input\": \"what are some movies showing 9/21/2023?\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? Yes\\nAction: Current Search\\nAction Input: Movies showing 9/21/2023[\\'September 2023 Movies: The Creator • Dumb Money • Expend4bles • The Kill Room • The Inventor • The Equalizer 3 • PAW Patrol: The Mighty Movie, ...\\'] Do I need to use a tool? No\\nFinal Answer: According to current search, some movies showing on 9/21/2023 are The Creator, Dumb Money, Expend4bles, The Kill Room, The Inventor, The Equalizer 3, and PAW Patrol: The Mighty Movie.\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'According to current search, some movies showing on 9/21/2023 are The Creator, Dumb Money, Expend4bles, The Kill Room, The Inventor, The Equalizer 3, and PAW Patrol: The Mighty Movie.\\'\\n\\n\\\\\\nUse the off-the-shelf agent\\n\\nWe can also create this agent using the off-the-shelf agent class\\n\\nagent_executor = initialize_agent( tools, llm, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory, )\\n\\nUse a chat model\\n\\nWe can also use a chat model here. 
The main difference here is in the prompts used.\\n\\nfrom langchain import hub\\n\\nfrom langchain.chat_models import ChatOpenAI\\n\\nprompt = hub.pull(\"hwchase17/react-chat-json\") chat_model = ChatOpenAI(temperature=0, model=\"gpt-4\")\\n\\nprompt = prompt.partial( tools=render_text_description(tools), tool_names=\", \".join(\\\\[[t.name](http://t.name) for t in tools\\\\]), )\\n\\nchat_model_with_stop = chat_model.bind(stop=\\\\[\"\\\\nObservation\"\\\\])\\n\\nfrom langchain.agents.format_scratchpad import format_log_to_messages\\n\\nfrom langchain.agents.output_parsers import JSONAgentOutputParser\\n\\n# We need some extra steering, or the c', metadata={'title': 'Conversational', 'source': 'https://d01.getoutline.com/doc/conversational-B5dBkUgQ4b'}),\n", - " Document(page_content='Quickstart\\n\\nIn this quickstart we\\'ll show you how to:\\n\\nGet setup with LangChain, LangSmith and LangServe\\n\\nUse the most basic and common components of LangChain: prompt templates, models, and output parsers\\n\\nUse LangChain Expression Language, the protocol that LangChain is built on and which facilitates component chaining\\n\\nBuild a simple application with LangChain\\n\\nTrace your application with LangSmith\\n\\nServe your application with LangServe\\n\\nThat\\'s a fair amount to cover! Let\\'s dive in.\\n\\nSetup\\n\\nInstallation\\n\\nTo install LangChain run:\\n\\nPip\\n\\nConda\\n\\npip install langchain\\n\\nFor more details, see our Installation guide.\\n\\nEnvironment\\n\\nUsing LangChain will usually require integrations with one or more model providers, data stores, APIs, etc. For this example, we\\'ll use OpenAI\\'s model APIs.\\n\\nFirst we\\'ll need to install their Python package:\\n\\npip install openai\\n\\nAccessing the API requires an API key, which you can get by creating an account and heading here. Once we have a key we\\'ll want to set it as an environment variable by running:\\n\\nexport OPENAI_API_KEY=\"...\"\\n\\nIf you\\'d prefer not to set an environment variable you can pass the key in directly via the openai_api_key named parameter when initiating the OpenAI LLM class:\\n\\nfrom langchain.chat_models import ChatOpenAI\\n\\nllm = ChatOpenAI(openai_api_key=\"...\")\\n\\nLangSmith\\n\\nMany of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with LangSmith.\\n\\nNote that LangSmith is not needed, but it is helpful. If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\\n\\nexport LANGCHAIN_TRACING_V2=\"true\" export LANGCHAIN_API_KEY=...\\n\\nLangServe\\n\\nLangServe helps developers deploy LangChain chains as a REST API. You do not need to use LangServe to use LangChain, but in this guide we\\'ll show how you can deploy your app with LangServe.\\n\\nInstall with:\\n\\npip install \"langserve\\\\[all\\\\]\"\\n\\nBuilding with LangChain\\n\\nLangChain provides many modules that can be used to build language model applications. Modules can be used as standalones in simple applications and they can be composed for more complex use cases. 
Composition is powered by LangChain Expression Language (LCEL), which defines a unified Runnable interface that many modules implement, making it possible to seamlessly chain components.\\n\\nThe simplest and most common chain contains three things:\\n\\nLLM/Chat Model: The language model is the core reasoning engine here. In order to work with LangChain, you need to understand the different types of language models and how to work with them. Prompt Template: This provides instructions to the language model. This controls what the language model outputs, so understanding how to construct prompts and different prompting strategies is crucial. Output Parser: These translate the raw response from the language model to a more workable format, making it easy to use the output downstream. In this guide we\\'ll cover those three components individually, and then go over how to combine them. Understanding these concepts will set you up well for being able to use and customize LangChain applications. Most LangChain applications allow you to configure the model and/or the prompt, so knowing how to take advantage of this will be a big enabler.\\n\\nLLM / Chat Model\\n\\nThere are two types of language models:\\n\\nLLM: underlying model takes a string as input and returns a string\\n\\nChatModel: underlying model takes a list of messages as input and returns a message\\n\\nStrings are simple, but what exactly are messages? The base message interface is defined by BaseMessage, which has two required attributes:\\n\\ncontent: The content of the message. Usually a string. role: The entity from which the BaseMessage is coming. LangChain provides several ob', metadata={'title': 'Quick Start', 'source': 'https://d01.getoutline.com/doc/quick-start-jGuGGGOTuL'}),\n", - " Document(page_content='This walkthrough showcases using an agent to implement the [ReAct](https://react-lm.github.io/) logic.\\n\\n```javascript\\nfrom langchain.agents import AgentType, initialize_agent, load_tools\\nfrom langchain.llms import OpenAI\\n```\\n\\nFirst, let\\'s load the language model we\\'re going to use to control the agent.\\n\\n```javascript\\nllm = OpenAI(temperature=0)\\n```\\n\\nNext, let\\'s load some tools to use. 
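The quickstart excerpt above draws the LLM-versus-ChatModel distinction in prose; a minimal sketch of the two calling conventions, assuming an OpenAI key in the environment:

```python
from langchain_community.chat_models import ChatOpenAI
from langchain_community.llms import OpenAI
from langchain_core.messages import HumanMessage

llm = OpenAI()             # LLM: takes a string, returns a string
chat_model = ChatOpenAI()  # ChatModel: takes messages, returns a message

llm.invoke("Say hello")                                  # -> str
chat_model.invoke([HumanMessage(content="Say hello")])   # -> AIMessage
```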
Note that the llm-math tool uses an LLM, so we need to pass that in.\\n\\n```javascript\\ntools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\\n```\\n\\n## Using LCEL[\\u200b](https://python.langchain.com/docs/modules/agents/agent_types/react#using-lcel \"Direct link to Using LCEL\")\\n\\nWe will first show how to create the agent using LCEL\\n\\n```javascript\\nfrom langchain import hub\\nfrom langchain.agents.format_scratchpad import format_log_to_str\\nfrom langchain.agents.output_parsers import ReActSingleInputOutputParser\\nfrom langchain.tools.render import render_text_description\\n```\\n\\n```javascript\\nprompt = hub.pull(\"hwchase17/react\")\\nprompt = prompt.partial(\\n tools=render_text_description(tools),\\n tool_names=\", \".join([t.name for t in tools]),\\n)\\n```\\n\\n```javascript\\nllm_with_stop = llm.bind(stop=[\"\\\\nObservation\"])\\n```\\n\\n```javascript\\nagent = (\\n {\\n \"input\": lambda x: x[\"input\"],\\n \"agent_scratchpad\": lambda x: format_log_to_str(x[\"intermediate_steps\"]),\\n }\\n | prompt\\n | llm_with_stop\\n | ReActSingleInputOutputParser()\\n)\\n```\\n\\n```javascript\\nfrom langchain.agents import AgentExecutor\\n```\\n\\n```javascript\\nagent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)\\n```\\n\\n```javascript\\nagent_executor.invoke(\\n {\\n \"input\": \"Who is Leo DiCaprio\\'s girlfriend? What is her current age raised to the 0.43 power?\"\\n }\\n)\\n```\\n\\n```javascript\\n \\n \\n > Entering new AgentExecutor chain...\\n I need to find out who Leo DiCaprio\\'s girlfriend is and then calculate her age raised to the 0.43 power.\\n Action: Search\\n Action Input: \"Leo DiCaprio girlfriend\"model Vittoria Ceretti I need to find out Vittoria Ceretti\\'s age\\n Action: Search\\n Action Input: \"Vittoria Ceretti age\"25 years I need to calculate 25 raised to the 0.43 power\\n Action: Calculator\\n Action Input: 25^0.43Answer: 3.991298452658078 I now know the final answer\\n Final Answer: Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\\n \\n > Finished chain.\\n\\n\\n\\n\\n\\n {\\'input\\': \"Who is Leo DiCaprio\\'s girlfriend? What is her current age raised to the 0.43 power?\",\\n \\'output\\': \"Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\"}\\n```\\n\\n## Using ZeroShotReactAgent[\\u200b](https://python.langchain.com/docs/modules/agents/agent_types/react#using-zeroshotreactagent \"Direct link to Using ZeroShotReactAgent\")\\n\\nWe will now show how to use the agent with an off-the-shelf agent implementation\\n\\n```javascript\\nagent_executor = initialize_agent(\\n tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\\n)\\n```\\n\\n```javascript\\nagent_executor.invoke(\\n {\\n \"input\": \"Who is Leo DiCaprio\\'s girlfriend? 
What is her current age raised to the 0.43 power?\"\\n }\\n)\\n```\\n\\n```javascript\\n \\n \\n > Entering new AgentExecutor chain...\\n I need to find out who Leo DiCaprio\\'s girlfriend is and then calculate her age raised to the 0.43 power.\\n Action: Search\\n Action Input: \"Leo DiCaprio girlfriend\"\\n Observation: model Vittoria Ceretti\\n Thought: I need to find out Vittoria Ceretti\\'s age\\n Action: Search\\n Action Input: \"Vittoria Ceretti age\"\\n Observation: 25 years\\n Thought: I need to calculate 25 raised to the 0.43 power\\n Action: Calculator\\n Action Input: 25^0.43\\n Observation: Answer: 3.991298452658078\\n Thought: I now know the final answer\\n Final Answer: Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\\n \\n > Finished chain.\\n\\n\\n\\n\\n\\n {\\'input\\': \"Who is L', metadata={'title': 'ReAct', 'source': 'https://d01.getoutline.com/doc/react-d6rxRS1MHk'})]" + "[Document(page_content='This walkthrough demonstrates how to use an agent optimized for conversation. Other agents are often optimized for using tools to figure out the best response, which is not ideal in a conversational setting where you may want the agent to be able to chat with the user as well.\\n\\nIf we compare it to the standard ReAct agent, the main difference is the prompt. We want it to be much more conversational.\\n\\nfrom langchain.agents import AgentType, Tool, initialize_agent\\n\\nfrom langchain_community.llms import OpenAI\\n\\nfrom langchain.memory import ConversationBufferMemory\\n\\nfrom langchain.utilities import SerpAPIWrapper\\n\\nsearch = SerpAPIWrapper() tools = \\\\[ Tool( name=\"Current Search\", func=search.run, description=\"useful for when you need to answer questions about current events or the current state of the world\", ), \\\\]\\n\\n\\\\\\nllm = OpenAI(temperature=0)\\n\\nUsing LCEL\\n\\nWe will first show how to create this agent using LCEL\\n\\nfrom langchain import hub\\n\\nfrom langchain.agents.format_scratchpad import format_log_to_str\\n\\nfrom langchain.agents.output_parsers import ReActSingleInputOutputParser\\n\\nfrom langchain.tools.render import render_text_description\\n\\nprompt = hub.pull(\"hwchase17/react-chat\")\\n\\nprompt = prompt.partial( tools=render_text_description(tools), tool_names=\", \".join(\\\\[[t.name](http://t.name) for t in tools\\\\]), )\\n\\nllm_with_stop = llm.bind(stop=\\\\[\"\\\\nObservation\"\\\\])\\n\\nagent = ( { \"input\": lambda x: x\\\\[\"input\"\\\\], \"agent_scratchpad\": lambda x: format_log_to_str(x\\\\[\"intermediate_steps\"\\\\]), \"chat_history\": lambda x: x\\\\[\"chat_history\"\\\\], } | prompt | llm_with_stop | ReActSingleInputOutputParser() )\\n\\nfrom langchain.agents import AgentExecutor\\n\\nmemory = ConversationBufferMemory(memory_key=\"chat_history\") agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)\\n\\nagent_executor.invoke({\"input\": \"hi, i am bob\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? No\\nFinal Answer: Hi Bob, nice to meet you! How can I help you today?\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'Hi Bob, nice to meet you! How can I help you today?\\'\\n\\nagent_executor.invoke({\"input\": \"whats my name?\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? 
No\\nFinal Answer: Your name is Bob.\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'Your name is Bob.\\'\\n\\nagent_executor.invoke({\"input\": \"what are some movies showing 9/21/2023?\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? Yes\\nAction: Current Search\\nAction Input: Movies showing 9/21/2023[\\'September 2023 Movies: The Creator • Dumb Money • Expend4bles • The Kill Room • The Inventor • The Equalizer 3 • PAW Patrol: The Mighty Movie, ...\\'] Do I need to use a tool? No\\nFinal Answer: According to current search, some movies showing on 9/21/2023 are The Creator, Dumb Money, Expend4bles, The Kill Room, The Inventor, The Equalizer 3, and PAW Patrol: The Mighty Movie.\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'According to current search, some movies showing on 9/21/2023 are The Creator, Dumb Money, Expend4bles, The Kill Room, The Inventor, The Equalizer 3, and PAW Patrol: The Mighty Movie.\\'\\n\\n\\\\\\nUse the off-the-shelf agent\\n\\nWe can also create this agent using the off-the-shelf agent class\\n\\nagent_executor = initialize_agent( tools, llm, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory, )\\n\\nUse a chat model\\n\\nWe can also use a chat model here. The main difference here is in the prompts used.\\n\\nfrom langchain import hub\\n\\nfrom langchain_community.chat_models import ChatOpenAI\\n\\nprompt = hub.pull(\"hwchase17/react-chat-json\") chat_model = ChatOpenAI(temperature=0, model=\"gpt-4\")\\n\\nprompt = prompt.partial( tools=render_text_description(tools), tool_names=\", \".join(\\\\[[t.name](http://t.name) for t in tools\\\\]), )\\n\\nchat_model_with_stop = chat_model.bind(stop=\\\\[\"\\\\nObservation\"\\\\])\\n\\nfrom langchain.agents.format_scratchpad import format_log_to_messages\\n\\nfrom langchain.agents.output_parsers import JSONAgentOutputParser\\n\\n# We need some extra steering, or the c', metadata={'title': 'Conversational', 'source': 'https://d01.getoutline.com/doc/conversational-B5dBkUgQ4b'}),\n", + " Document(page_content='Quickstart\\n\\nIn this quickstart we\\'ll show you how to:\\n\\nGet setup with LangChain, LangSmith and LangServe\\n\\nUse the most basic and common components of LangChain: prompt templates, models, and output parsers\\n\\nUse LangChain Expression Language, the protocol that LangChain is built on and which facilitates component chaining\\n\\nBuild a simple application with LangChain\\n\\nTrace your application with LangSmith\\n\\nServe your application with LangServe\\n\\nThat\\'s a fair amount to cover! Let\\'s dive in.\\n\\nSetup\\n\\nInstallation\\n\\nTo install LangChain run:\\n\\nPip\\n\\nConda\\n\\npip install langchain\\n\\nFor more details, see our Installation guide.\\n\\nEnvironment\\n\\nUsing LangChain will usually require integrations with one or more model providers, data stores, APIs, etc. For this example, we\\'ll use OpenAI\\'s model APIs.\\n\\nFirst we\\'ll need to install their Python package:\\n\\npip install openai\\n\\nAccessing the API requires an API key, which you can get by creating an account and heading here. 
Once we have a key we\\'ll want to set it as an environment variable by running:\\n\\nexport OPENAI_API_KEY=\"...\"\\n\\nIf you\\'d prefer not to set an environment variable you can pass the key in directly via the openai_api_key named parameter when initiating the OpenAI LLM class:\\n\\nfrom langchain_community.chat_models import ChatOpenAI\\n\\nllm = ChatOpenAI(openai_api_key=\"...\")\\n\\nLangSmith\\n\\nMany of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with LangSmith.\\n\\nNote that LangSmith is not needed, but it is helpful. If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\\n\\nexport LANGCHAIN_TRACING_V2=\"true\" export LANGCHAIN_API_KEY=...\\n\\nLangServe\\n\\nLangServe helps developers deploy LangChain chains as a REST API. You do not need to use LangServe to use LangChain, but in this guide we\\'ll show how you can deploy your app with LangServe.\\n\\nInstall with:\\n\\npip install \"langserve\\\\[all\\\\]\"\\n\\nBuilding with LangChain\\n\\nLangChain provides many modules that can be used to build language model applications. Modules can be used as standalones in simple applications and they can be composed for more complex use cases. Composition is powered by LangChain Expression Language (LCEL), which defines a unified Runnable interface that many modules implement, making it possible to seamlessly chain components.\\n\\nThe simplest and most common chain contains three things:\\n\\nLLM/Chat Model: The language model is the core reasoning engine here. In order to work with LangChain, you need to understand the different types of language models and how to work with them. Prompt Template: This provides instructions to the language model. This controls what the language model outputs, so understanding how to construct prompts and different prompting strategies is crucial. Output Parser: These translate the raw response from the language model to a more workable format, making it easy to use the output downstream. In this guide we\\'ll cover those three components individually, and then go over how to combine them. Understanding these concepts will set you up well for being able to use and customize LangChain applications. Most LangChain applications allow you to configure the model and/or the prompt, so knowing how to take advantage of this will be a big enabler.\\n\\nLLM / Chat Model\\n\\nThere are two types of language models:\\n\\nLLM: underlying model takes a string as input and returns a string\\n\\nChatModel: underlying model takes a list of messages as input and returns a message\\n\\nStrings are simple, but what exactly are messages? The base message interface is defined by BaseMessage, which has two required attributes:\\n\\ncontent: The content of the message. Usually a string. role: The entity from which the BaseMessage is coming. 
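The "prompt template | model | output parser" triple described above is the quickstart's core chain; a minimal LCEL sketch of it, again assuming an OpenAI key is set:

```python
from langchain.prompts import ChatPromptTemplate
from langchain_community.chat_models import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser

prompt = ChatPromptTemplate.from_template("Tell me a short joke about {topic}")
model = ChatOpenAI()
output_parser = StrOutputParser()  # turns the returned message into a plain string

# The pipe operator chains Runnables: prompt -> model -> parser.
chain = prompt | model | output_parser
chain.invoke({"topic": "bears"})
```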
LangChain provides several ob', metadata={'title': 'Quick Start', 'source': 'https://d01.getoutline.com/doc/quick-start-jGuGGGOTuL'}),\n", + " Document(page_content='This walkthrough showcases using an agent to implement the [ReAct](https://react-lm.github.io/) logic.\\n\\n```javascript\\nfrom langchain.agents import AgentType, initialize_agent, load_tools\\nfrom langchain_community.llms import OpenAI\\n```\\n\\nFirst, let\\'s load the language model we\\'re going to use to control the agent.\\n\\n```javascript\\nllm = OpenAI(temperature=0)\\n```\\n\\nNext, let\\'s load some tools to use. Note that the llm-math tool uses an LLM, so we need to pass that in.\\n\\n```javascript\\ntools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\\n```\\n\\n## Using LCEL[\\u200b](https://python.langchain.com/docs/modules/agents/agent_types/react#using-lcel \"Direct link to Using LCEL\")\\n\\nWe will first show how to create the agent using LCEL\\n\\n```javascript\\nfrom langchain import hub\\nfrom langchain.agents.format_scratchpad import format_log_to_str\\nfrom langchain.agents.output_parsers import ReActSingleInputOutputParser\\nfrom langchain.tools.render import render_text_description\\n```\\n\\n```javascript\\nprompt = hub.pull(\"hwchase17/react\")\\nprompt = prompt.partial(\\n tools=render_text_description(tools),\\n tool_names=\", \".join([t.name for t in tools]),\\n)\\n```\\n\\n```javascript\\nllm_with_stop = llm.bind(stop=[\"\\\\nObservation\"])\\n```\\n\\n```javascript\\nagent = (\\n {\\n \"input\": lambda x: x[\"input\"],\\n \"agent_scratchpad\": lambda x: format_log_to_str(x[\"intermediate_steps\"]),\\n }\\n | prompt\\n | llm_with_stop\\n | ReActSingleInputOutputParser()\\n)\\n```\\n\\n```javascript\\nfrom langchain.agents import AgentExecutor\\n```\\n\\n```javascript\\nagent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)\\n```\\n\\n```javascript\\nagent_executor.invoke(\\n {\\n \"input\": \"Who is Leo DiCaprio\\'s girlfriend? What is her current age raised to the 0.43 power?\"\\n }\\n)\\n```\\n\\n```javascript\\n \\n \\n > Entering new AgentExecutor chain...\\n I need to find out who Leo DiCaprio\\'s girlfriend is and then calculate her age raised to the 0.43 power.\\n Action: Search\\n Action Input: \"Leo DiCaprio girlfriend\"model Vittoria Ceretti I need to find out Vittoria Ceretti\\'s age\\n Action: Search\\n Action Input: \"Vittoria Ceretti age\"25 years I need to calculate 25 raised to the 0.43 power\\n Action: Calculator\\n Action Input: 25^0.43Answer: 3.991298452658078 I now know the final answer\\n Final Answer: Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\\n \\n > Finished chain.\\n\\n\\n\\n\\n\\n {\\'input\\': \"Who is Leo DiCaprio\\'s girlfriend? What is her current age raised to the 0.43 power?\",\\n \\'output\\': \"Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\"}\\n```\\n\\n## Using ZeroShotReactAgent[\\u200b](https://python.langchain.com/docs/modules/agents/agent_types/react#using-zeroshotreactagent \"Direct link to Using ZeroShotReactAgent\")\\n\\nWe will now show how to use the agent with an off-the-shelf agent implementation\\n\\n```javascript\\nagent_executor = initialize_agent(\\n tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\\n)\\n```\\n\\n```javascript\\nagent_executor.invoke(\\n {\\n \"input\": \"Who is Leo DiCaprio\\'s girlfriend? 
What is her current age raised to the 0.43 power?\"\\n }\\n)\\n```\\n\\n```javascript\\n \\n \\n > Entering new AgentExecutor chain...\\n I need to find out who Leo DiCaprio\\'s girlfriend is and then calculate her age raised to the 0.43 power.\\n Action: Search\\n Action Input: \"Leo DiCaprio girlfriend\"\\n Observation: model Vittoria Ceretti\\n Thought: I need to find out Vittoria Ceretti\\'s age\\n Action: Search\\n Action Input: \"Vittoria Ceretti age\"\\n Observation: 25 years\\n Thought: I need to calculate 25 raised to the 0.43 power\\n Action: Calculator\\n Action Input: 25^0.43\\n Observation: Answer: 3.991298452658078\\n Thought: I now know the final answer\\n Final Answer: Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\\n \\n > Finished chain.\\n\\n\\n\\n\\n\\n {\\'input\\': \"Who is L', metadata={'title': 'ReAct', 'source': 'https://d01.getoutline.com/doc/react-d6rxRS1MHk'})]" ] }, "execution_count": 4, @@ -129,7 +129,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n", "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)" diff --git a/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb b/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb index ce5362743a2..32937b79622 100644 --- a/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb +++ b/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb @@ -180,7 +180,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git a/docs/docs/integrations/retrievers/re_phrase.ipynb b/docs/docs/integrations/retrievers/re_phrase.ipynb index 07ad112fb31..8199ba41853 100644 --- a/docs/docs/integrations/retrievers/re_phrase.ipynb +++ b/docs/docs/integrations/retrievers/re_phrase.ipynb @@ -27,12 +27,12 @@ "source": [ "import logging\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.document_loaders import WebBaseLoader\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers import RePhraseQueryRetriever\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import Chroma" + "from langchain.vectorstores import Chroma\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/retrievers/sec_filings.ipynb b/docs/docs/integrations/retrievers/sec_filings.ipynb index 3edd9d6ca1a..43958a32864 100644 --- a/docs/docs/integrations/retrievers/sec_filings.ipynb +++ b/docs/docs/integrations/retrievers/sec_filings.ipynb @@ -78,8 +78,8 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.retrievers import KayAiRetriever\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n", "retriever = KayAiRetriever.create(\n", diff --git a/docs/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query.ipynb 
b/docs/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query.ipynb index 825cd0125ff..44c41a6324b 100644 --- a/docs/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query.ipynb @@ -83,9 +83,9 @@ }, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.schema import Document\n", "from langchain.vectorstores import DeepLake\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -193,8 +193,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.llms import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/chroma_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/chroma_self_query.ipynb index 67890db8bdb..d6f3e17af43 100644 --- a/docs/docs/integrations/retrievers/self_query/chroma_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/chroma_self_query.ipynb @@ -87,9 +87,9 @@ }, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.schema import Document\n", "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -164,8 +164,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.llms import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/dashvector.ipynb b/docs/docs/integrations/retrievers/self_query/dashvector.ipynb index 772c8ed0723..07e2e18bb19 100644 --- a/docs/docs/integrations/retrievers/self_query/dashvector.ipynb +++ b/docs/docs/integrations/retrievers/self_query/dashvector.ipynb @@ -92,9 +92,9 @@ }, "outputs": [], "source": [ - "from langchain.embeddings import DashScopeEmbeddings\n", "from langchain.schema import Document\n", "from langchain.vectorstores import DashVector\n", + "from langchain_community.embeddings import DashScopeEmbeddings\n", "\n", "embeddings = DashScopeEmbeddings()\n", "\n", @@ -186,8 +186,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.llms import Tongyi\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.llms import Tongyi\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb index c20fc557cae..cb7d6e42421 100644 --- a/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb @@ -60,9 +60,9 @@ "import getpass\n", "import os\n", "\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.schema import Document\n", "from langchain.vectorstores import ElasticsearchStore\n", + "from 
langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "\n", @@ -136,8 +136,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.llms import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb index 3640b55cc30..5257022076f 100644 --- a/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb @@ -67,9 +67,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.schema import Document\n", "from langchain.vectorstores import Milvus\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -129,8 +129,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.llms import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/mongodb_atlas.ipynb b/docs/docs/integrations/retrievers/self_query/mongodb_atlas.ipynb index 3ee3b28c1c4..50d769216ca 100644 --- a/docs/docs/integrations/retrievers/self_query/mongodb_atlas.ipynb +++ b/docs/docs/integrations/retrievers/self_query/mongodb_atlas.ipynb @@ -57,9 +57,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.schema import Document\n", "from langchain.vectorstores import MongoDBAtlasVectorSearch\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from pymongo import MongoClient\n", "\n", "CONNECTION_STRING = \"Use your MongoDB Atlas connection string\"\n", @@ -161,8 +161,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.llms import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb index b38e2901ab2..ae09425718d 100644 --- a/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb @@ -78,9 +78,9 @@ }, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.schema import Document\n", "from langchain.vectorstores import MyScale\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -161,8 +161,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.llms import OpenAI\n", "\n", 
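From here through the weaviate hunk, the self-query notebooks all make the same two moves: the embeddings class and the query-constructing LLM shift to `langchain_community`, while the retriever wiring stays in `langchain`. A minimal Chroma-backed sketch of that shared pattern; the sample document and metadata fields are illustrative, and it assumes an OpenAI key plus the `lark` parser installed:

```python
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.schema import Document
from langchain.vectorstores import Chroma
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.llms import OpenAI

# Illustrative corpus: page content plus filterable metadata.
docs = [
    Document(
        page_content="A thief steals corporate secrets inside shared dreams",
        metadata={"year": 2010, "genre": "science fiction"},
    ),
]
vectorstore = Chroma.from_documents(docs, OpenAIEmbeddings())

# Describe the metadata so the LLM can translate questions into filters.
metadata_field_info = [
    AttributeInfo(name="year", description="Release year", type="integer"),
    AttributeInfo(name="genre", description="Movie genre", type="string"),
]
retriever = SelfQueryRetriever.from_llm(
    OpenAI(temperature=0),
    vectorstore,
    "Brief summary of a movie",
    metadata_field_info,
    verbose=True,
)
retriever.get_relevant_documents("dream movies released after 2005")
```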
"metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/opensearch_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/opensearch_self_query.ipynb index 7e7fb8940cd..0d9e01853a3 100644 --- a/docs/docs/integrations/retrievers/self_query/opensearch_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/opensearch_self_query.ipynb @@ -59,9 +59,9 @@ "import getpass\n", "import os\n", "\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.schema import Document\n", "from langchain.vectorstores import OpenSearchVectorSearch\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "\n", @@ -135,8 +135,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.llms import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/pinecone.ipynb b/docs/docs/integrations/retrievers/self_query/pinecone.ipynb index 04214a77d37..845e2feede9 100644 --- a/docs/docs/integrations/retrievers/self_query/pinecone.ipynb +++ b/docs/docs/integrations/retrievers/self_query/pinecone.ipynb @@ -77,9 +77,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.schema import Document\n", "from langchain.vectorstores import Pinecone\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()\n", "# create new index\n", @@ -146,8 +146,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.llms import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/qdrant_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/qdrant_self_query.ipynb index 14c063b71f8..08b68335d10 100644 --- a/docs/docs/integrations/retrievers/self_query/qdrant_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/qdrant_self_query.ipynb @@ -70,9 +70,9 @@ }, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.schema import Document\n", "from langchain.vectorstores import Qdrant\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -145,8 +145,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.llms import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/redis_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/redis_self_query.ipynb index 41090e04023..bb9534c8529 100644 --- a/docs/docs/integrations/retrievers/self_query/redis_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/redis_self_query.ipynb @@ -67,9 +67,9 @@ }, "outputs": [], "source": [ - "from 
langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.schema import Document\n", "from langchain.vectorstores import Redis\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -194,8 +194,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.llms import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/supabase_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/supabase_self_query.ipynb index f072394390c..26c4e316f13 100644 --- a/docs/docs/integrations/retrievers/self_query/supabase_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/supabase_self_query.ipynb @@ -217,9 +217,9 @@ "source": [ "import os\n", "\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.schema import Document\n", "from langchain.vectorstores import SupabaseVectorStore\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from supabase.client import Client, create_client\n", "\n", "supabase_url = os.environ.get(\"SUPABASE_URL\")\n", @@ -306,8 +306,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.llms import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/timescalevector_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/timescalevector_self_query.ipynb index fe5cfc64572..fd6882d7a12 100644 --- a/docs/docs/integrations/retrievers/self_query/timescalevector_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/timescalevector_self_query.ipynb @@ -143,9 +143,9 @@ }, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.schema import Document\n", "from langchain.vectorstores.timescalevector import TimescaleVector\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -246,8 +246,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.llms import OpenAI\n", "\n", "# Give LLM info about the metadata fields\n", "metadata_field_info = [\n", diff --git a/docs/docs/integrations/retrievers/self_query/vectara_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/vectara_self_query.ipynb index 1bc910351a6..fc0885a7d8d 100644 --- a/docs/docs/integrations/retrievers/self_query/vectara_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/vectara_self_query.ipynb @@ -89,12 +89,12 @@ "from langchain.chains import ConversationalRetrievalChain\n", "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings import FakeEmbeddings\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", "from langchain.schema import Document\n", "from 
langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Vectara" + "from langchain.vectorstores import Vectara\n", + "from langchain_community.embeddings import FakeEmbeddings\n", + "from langchain_community.llms import OpenAI" ] }, { @@ -166,8 +166,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.llms import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/weaviate_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/weaviate_self_query.ipynb index b2c7a087aea..347d4a3d244 100644 --- a/docs/docs/integrations/retrievers/self_query/weaviate_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/weaviate_self_query.ipynb @@ -45,9 +45,9 @@ }, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.schema import Document\n", "from langchain.vectorstores import Weaviate\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -116,8 +116,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.llms import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/singlestoredb.ipynb b/docs/docs/integrations/retrievers/singlestoredb.ipynb index d510adae241..13843727ecc 100644 --- a/docs/docs/integrations/retrievers/singlestoredb.ipynb +++ b/docs/docs/integrations/retrievers/singlestoredb.ipynb @@ -51,9 +51,9 @@ "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "\n", "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import SingleStoreDB\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/retrievers/svm.ipynb b/docs/docs/integrations/retrievers/svm.ipynb index 66ca2bd0012..9704aba73eb 100644 --- a/docs/docs/integrations/retrievers/svm.ipynb +++ b/docs/docs/integrations/retrievers/svm.ipynb @@ -80,8 +80,8 @@ }, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.retrievers import SVMRetriever" + "from langchain.retrievers import SVMRetriever\n", + "from langchain_community.embeddings import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/retrievers/wikipedia.ipynb b/docs/docs/integrations/retrievers/wikipedia.ipynb index 75a9c9c0770..8efd5809d60 100644 --- a/docs/docs/integrations/retrievers/wikipedia.ipynb +++ b/docs/docs/integrations/retrievers/wikipedia.ipynb @@ -200,7 +200,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\") # switch to 'gpt-4'\n", "qa = ConversationalRetrievalChain.from_llm(model, 
retriever=retriever)" diff --git a/docs/docs/integrations/retrievers/you-retriever.ipynb b/docs/docs/integrations/retrievers/you-retriever.ipynb index 8c2da49df01..36d31378269 100644 --- a/docs/docs/integrations/retrievers/you-retriever.ipynb +++ b/docs/docs/integrations/retrievers/you-retriever.ipynb @@ -19,8 +19,8 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQA\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.you_retriever import YouRetriever\n", + "from langchain_community.llms import OpenAI\n", "\n", "yr = YouRetriever()\n", "qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"map_reduce\", retriever=yr)" diff --git a/docs/docs/integrations/text_embedding/aleph_alpha.ipynb b/docs/docs/integrations/text_embedding/aleph_alpha.ipynb index dc1f9d0aae8..5b9c38ae595 100644 --- a/docs/docs/integrations/text_embedding/aleph_alpha.ipynb +++ b/docs/docs/integrations/text_embedding/aleph_alpha.ipynb @@ -25,7 +25,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import AlephAlphaAsymmetricSemanticEmbedding" + "from langchain_community.embeddings import AlephAlphaAsymmetricSemanticEmbedding" ] }, { @@ -84,7 +84,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import AlephAlphaSymmetricSemanticEmbedding" + "from langchain_community.embeddings import AlephAlphaSymmetricSemanticEmbedding" ] }, { diff --git a/docs/docs/integrations/text_embedding/awadb.ipynb b/docs/docs/integrations/text_embedding/awadb.ipynb index f2c1e733923..b387d575a12 100644 --- a/docs/docs/integrations/text_embedding/awadb.ipynb +++ b/docs/docs/integrations/text_embedding/awadb.ipynb @@ -37,7 +37,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import AwaEmbeddings" + "from langchain_community.embeddings import AwaEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/azureopenai.ipynb b/docs/docs/integrations/text_embedding/azureopenai.ipynb index 214be3eca26..162eee99dd4 100644 --- a/docs/docs/integrations/text_embedding/azureopenai.ipynb +++ b/docs/docs/integrations/text_embedding/azureopenai.ipynb @@ -40,7 +40,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import AzureOpenAIEmbeddings\n", + "from langchain_community.embeddings import AzureOpenAIEmbeddings\n", "\n", "embeddings = AzureOpenAIEmbeddings(\n", " azure_deployment=\"\",\n", @@ -134,7 +134,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings(deployment=\"your-embeddings-deployment-name\")" ] diff --git a/docs/docs/integrations/text_embedding/baidu_qianfan_endpoint.ipynb b/docs/docs/integrations/text_embedding/baidu_qianfan_endpoint.ipynb index ea930cd4120..4301012e78f 100644 --- a/docs/docs/integrations/text_embedding/baidu_qianfan_endpoint.ipynb +++ b/docs/docs/integrations/text_embedding/baidu_qianfan_endpoint.ipynb @@ -62,7 +62,7 @@ "\"\"\"For basic init and call\"\"\"\n", "import os\n", "\n", - "from langchain.embeddings import QianfanEmbeddingsEndpoint\n", + "from langchain_community.embeddings import QianfanEmbeddingsEndpoint\n", "\n", "os.environ[\"QIANFAN_AK\"] = \"your_ak\"\n", "os.environ[\"QIANFAN_SK\"] = \"your_sk\"\n", diff --git a/docs/docs/integrations/text_embedding/bedrock.ipynb b/docs/docs/integrations/text_embedding/bedrock.ipynb index 8a0b1508692..3bcca03e1f5 100644 --- 
a/docs/docs/integrations/text_embedding/bedrock.ipynb +++ b/docs/docs/integrations/text_embedding/bedrock.ipynb @@ -36,7 +36,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import BedrockEmbeddings\n", + "from langchain_community.embeddings import BedrockEmbeddings\n", "\n", "embeddings = BedrockEmbeddings(\n", " credentials_profile_name=\"bedrock-admin\", region_name=\"us-east-1\"\n", diff --git a/docs/docs/integrations/text_embedding/bge_huggingface.ipynb b/docs/docs/integrations/text_embedding/bge_huggingface.ipynb index 4695370d56b..1d39c6d5a32 100644 --- a/docs/docs/integrations/text_embedding/bge_huggingface.ipynb +++ b/docs/docs/integrations/text_embedding/bge_huggingface.ipynb @@ -32,7 +32,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import HuggingFaceBgeEmbeddings\n", + "from langchain_community.embeddings import HuggingFaceBgeEmbeddings\n", "\n", "model_name = \"BAAI/bge-small-en\"\n", "model_kwargs = {\"device\": \"cpu\"}\n", diff --git a/docs/docs/integrations/text_embedding/bookend.ipynb b/docs/docs/integrations/text_embedding/bookend.ipynb index 3277d49a6c8..4f28ec48078 100644 --- a/docs/docs/integrations/text_embedding/bookend.ipynb +++ b/docs/docs/integrations/text_embedding/bookend.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import BookendEmbeddings" + "from langchain_community.embeddings import BookendEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/clarifai.ipynb b/docs/docs/integrations/text_embedding/clarifai.ipynb index 335f597ddac..eba15116c9f 100644 --- a/docs/docs/integrations/text_embedding/clarifai.ipynb +++ b/docs/docs/integrations/text_embedding/clarifai.ipynb @@ -82,8 +82,8 @@ "source": [ "# Import the required modules\n", "from langchain.chains import LLMChain\n", - "from langchain.embeddings import ClarifaiEmbeddings\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.embeddings import ClarifaiEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/cloudflare_workersai.ipynb b/docs/docs/integrations/text_embedding/cloudflare_workersai.ipynb index 603ab1f3815..97dbdf465ef 100644 --- a/docs/docs/integrations/text_embedding/cloudflare_workersai.ipynb +++ b/docs/docs/integrations/text_embedding/cloudflare_workersai.ipynb @@ -47,7 +47,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings.cloudflare_workersai import CloudflareWorkersAIEmbeddings" + "from langchain_community.embeddings.cloudflare_workersai import (\n", + " CloudflareWorkersAIEmbeddings,\n", + ")" ] }, { diff --git a/docs/docs/integrations/text_embedding/cohere.ipynb b/docs/docs/integrations/text_embedding/cohere.ipynb index 5b0fbbdae1b..fb245bfb578 100644 --- a/docs/docs/integrations/text_embedding/cohere.ipynb +++ b/docs/docs/integrations/text_embedding/cohere.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import CohereEmbeddings" + "from langchain_community.embeddings import CohereEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/dashscope.ipynb b/docs/docs/integrations/text_embedding/dashscope.ipynb index 2df8fac827d..d38b910f3d2 100644 --- a/docs/docs/integrations/text_embedding/dashscope.ipynb +++ b/docs/docs/integrations/text_embedding/dashscope.ipynb @@ -16,7 +16,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import DashScopeEmbeddings" + "from 
langchain_community.embeddings import DashScopeEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/deepinfra.ipynb b/docs/docs/integrations/text_embedding/deepinfra.ipynb index 9fadfbcf3b4..d83bb6baf30 100644 --- a/docs/docs/integrations/text_embedding/deepinfra.ipynb +++ b/docs/docs/integrations/text_embedding/deepinfra.ipynb @@ -47,7 +47,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import DeepInfraEmbeddings" + "from langchain_community.embeddings import DeepInfraEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/edenai.ipynb b/docs/docs/integrations/text_embedding/edenai.ipynb index 1b8983d21b6..1da964ba321 100644 --- a/docs/docs/integrations/text_embedding/edenai.ipynb +++ b/docs/docs/integrations/text_embedding/edenai.ipynb @@ -54,7 +54,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings.edenai import EdenAiEmbeddings" + "from langchain_community.embeddings.edenai import EdenAiEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/elasticsearch.ipynb b/docs/docs/integrations/text_embedding/elasticsearch.ipynb index e7e376249d7..a50048363ce 100644 --- a/docs/docs/integrations/text_embedding/elasticsearch.ipynb +++ b/docs/docs/integrations/text_embedding/elasticsearch.ipynb @@ -36,7 +36,7 @@ }, "outputs": [], "source": [ - "from langchain.embeddings.elasticsearch import ElasticsearchEmbeddings" + "from langchain_community.embeddings.elasticsearch import ElasticsearchEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/embaas.ipynb b/docs/docs/integrations/text_embedding/embaas.ipynb index 85abc083fd3..78b33bf227e 100644 --- a/docs/docs/integrations/text_embedding/embaas.ipynb +++ b/docs/docs/integrations/text_embedding/embaas.ipynb @@ -34,7 +34,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import EmbaasEmbeddings" + "from langchain_community.embeddings import EmbaasEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/ernie.ipynb b/docs/docs/integrations/text_embedding/ernie.ipynb index 7b2cde487e3..fb1d0d523ba 100644 --- a/docs/docs/integrations/text_embedding/ernie.ipynb +++ b/docs/docs/integrations/text_embedding/ernie.ipynb @@ -16,7 +16,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import ErnieEmbeddings" + "from langchain_community.embeddings import ErnieEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/fake.ipynb b/docs/docs/integrations/text_embedding/fake.ipynb index 3ab3b1ee8f5..8512f798b2c 100644 --- a/docs/docs/integrations/text_embedding/fake.ipynb +++ b/docs/docs/integrations/text_embedding/fake.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import FakeEmbeddings" + "from langchain_community.embeddings import FakeEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/fastembed.ipynb b/docs/docs/integrations/text_embedding/fastembed.ipynb index 8b85ebf94eb..72454833ba7 100644 --- a/docs/docs/integrations/text_embedding/fastembed.ipynb +++ b/docs/docs/integrations/text_embedding/fastembed.ipynb @@ -56,7 +56,7 @@ }, "outputs": [], "source": [ - "from langchain.embeddings.fastembed import FastEmbedEmbeddings" + "from langchain_community.embeddings.fastembed import FastEmbedEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/google_vertex_ai_palm.ipynb b/docs/docs/integrations/text_embedding/google_vertex_ai_palm.ipynb index 10883967908..66832aa870d 100644 --- 
a/docs/docs/integrations/text_embedding/google_vertex_ai_palm.ipynb +++ b/docs/docs/integrations/text_embedding/google_vertex_ai_palm.ipynb @@ -41,7 +41,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import VertexAIEmbeddings" + "from langchain_community.embeddings import VertexAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/gpt4all.ipynb b/docs/docs/integrations/text_embedding/gpt4all.ipynb index 67ebc9c5843..6234406c1e0 100644 --- a/docs/docs/integrations/text_embedding/gpt4all.ipynb +++ b/docs/docs/integrations/text_embedding/gpt4all.ipynb @@ -48,7 +48,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import GPT4AllEmbeddings" + "from langchain_community.embeddings import GPT4AllEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/gradient.ipynb b/docs/docs/integrations/text_embedding/gradient.ipynb index 63426b56dcd..d2b89191105 100644 --- a/docs/docs/integrations/text_embedding/gradient.ipynb +++ b/docs/docs/integrations/text_embedding/gradient.ipynb @@ -24,7 +24,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import GradientEmbeddings" + "from langchain_community.embeddings import GradientEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/huggingfacehub.ipynb b/docs/docs/integrations/text_embedding/huggingfacehub.ipynb index 4c3b1ed2c7f..d28fdfdac8f 100644 --- a/docs/docs/integrations/text_embedding/huggingfacehub.ipynb +++ b/docs/docs/integrations/text_embedding/huggingfacehub.ipynb @@ -26,7 +26,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import HuggingFaceEmbeddings" + "from langchain_community.embeddings import HuggingFaceEmbeddings" ] }, { @@ -139,7 +139,7 @@ } ], "source": [ - "from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings\n", + "from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings\n", "\n", "embeddings = HuggingFaceInferenceAPIEmbeddings(\n", " api_key=inference_api_key, model_name=\"sentence-transformers/all-MiniLM-l6-v2\"\n", diff --git a/docs/docs/integrations/text_embedding/infinity.ipynb b/docs/docs/integrations/text_embedding/infinity.ipynb index dc340135519..27ba45c8999 100644 --- a/docs/docs/integrations/text_embedding/infinity.ipynb +++ b/docs/docs/integrations/text_embedding/infinity.ipynb @@ -24,7 +24,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import InfinityEmbeddings" + "from langchain_community.embeddings import InfinityEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/instruct_embeddings.ipynb b/docs/docs/integrations/text_embedding/instruct_embeddings.ipynb index f4c99631c02..7b75008d763 100644 --- a/docs/docs/integrations/text_embedding/instruct_embeddings.ipynb +++ b/docs/docs/integrations/text_embedding/instruct_embeddings.ipynb @@ -18,7 +18,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import HuggingFaceInstructEmbeddings" + "from langchain_community.embeddings import HuggingFaceInstructEmbeddings" ] }, { diff --git a/docs/docs/integrations/text_embedding/jina.ipynb b/docs/docs/integrations/text_embedding/jina.ipynb index 7eb75678764..111181ad45d 100644 --- a/docs/docs/integrations/text_embedding/jina.ipynb +++ b/docs/docs/integrations/text_embedding/jina.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import JinaEmbeddings" + "from langchain_community.embeddings import JinaEmbeddings" ] }, { diff --git 
a/docs/docs/integrations/text_embedding/johnsnowlabs_embedding.ipynb b/docs/docs/integrations/text_embedding/johnsnowlabs_embedding.ipynb
index e46f5da2cba..29c7f1b0dab 100644
--- a/docs/docs/integrations/text_embedding/johnsnowlabs_embedding.ipynb
+++ b/docs/docs/integrations/text_embedding/johnsnowlabs_embedding.ipynb
@@ -72,7 +72,7 @@
 },
 "outputs": [],
 "source": [
- "from langchain.embeddings.johnsnowlabs import JohnSnowLabsEmbeddings"
+ "from langchain_community.embeddings.johnsnowlabs import JohnSnowLabsEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/text_embedding/llamacpp.ipynb b/docs/docs/integrations/text_embedding/llamacpp.ipynb
index 24b8179f105..81820bbbec2 100644
--- a/docs/docs/integrations/text_embedding/llamacpp.ipynb
+++ b/docs/docs/integrations/text_embedding/llamacpp.ipynb
@@ -24,7 +24,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings import LlamaCppEmbeddings"
+ "from langchain_community.embeddings import LlamaCppEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/text_embedding/llm_rails.ipynb b/docs/docs/integrations/text_embedding/llm_rails.ipynb
index e5e45beb050..72b902bc0d6 100644
--- a/docs/docs/integrations/text_embedding/llm_rails.ipynb
+++ b/docs/docs/integrations/text_embedding/llm_rails.ipynb
@@ -20,7 +20,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings import LLMRailsEmbeddings"
+ "from langchain_community.embeddings import LLMRailsEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/text_embedding/localai.ipynb b/docs/docs/integrations/text_embedding/localai.ipynb
index 97def1feeab..308d1ca67de 100644
--- a/docs/docs/integrations/text_embedding/localai.ipynb
+++ b/docs/docs/integrations/text_embedding/localai.ipynb
@@ -18,7 +18,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings import LocalAIEmbeddings"
+ "from langchain_community.embeddings import LocalAIEmbeddings"
 ]
 },
 {
@@ -79,7 +79,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings import LocalAIEmbeddings"
+ "from langchain_community.embeddings import LocalAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/text_embedding/minimax.ipynb b/docs/docs/integrations/text_embedding/minimax.ipynb
index 4ccb22d472a..083022acf89 100644
--- a/docs/docs/integrations/text_embedding/minimax.ipynb
+++ b/docs/docs/integrations/text_embedding/minimax.ipynb
@@ -39,7 +39,7 @@
 },
 "outputs": [],
 "source": [
- "from langchain.embeddings import MiniMaxEmbeddings"
+ "from langchain_community.embeddings import MiniMaxEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/text_embedding/modelscope_hub.ipynb b/docs/docs/integrations/text_embedding/modelscope_hub.ipynb
index e2f47c4f3a4..b7d404e7beb 100644
--- a/docs/docs/integrations/text_embedding/modelscope_hub.ipynb
+++ b/docs/docs/integrations/text_embedding/modelscope_hub.ipynb
@@ -17,7 +17,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings import ModelScopeEmbeddings"
+ "from langchain_community.embeddings import ModelScopeEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/text_embedding/mosaicml.ipynb b/docs/docs/integrations/text_embedding/mosaicml.ipynb
index 8119d9f5f5c..6fe640a0f5b 100644
--- a/docs/docs/integrations/text_embedding/mosaicml.ipynb
+++ b/docs/docs/integrations/text_embedding/mosaicml.ipynb
@@ -41,7 +41,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings import MosaicMLInstructorEmbeddings"
+ "from langchain_community.embeddings import MosaicMLInstructorEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/text_embedding/nlp_cloud.ipynb b/docs/docs/integrations/text_embedding/nlp_cloud.ipynb
index 9567d59c4be..0258b83a5ff 100644
--- a/docs/docs/integrations/text_embedding/nlp_cloud.ipynb
+++ b/docs/docs/integrations/text_embedding/nlp_cloud.ipynb
@@ -31,7 +31,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings import NLPCloudEmbeddings"
+ "from langchain_community.embeddings import NLPCloudEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/text_embedding/ollama.ipynb b/docs/docs/integrations/text_embedding/ollama.ipynb
index 7481214f17e..915f9edab02 100644
--- a/docs/docs/integrations/text_embedding/ollama.ipynb
+++ b/docs/docs/integrations/text_embedding/ollama.ipynb
@@ -17,7 +17,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings import OllamaEmbeddings"
+ "from langchain_community.embeddings import OllamaEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/text_embedding/openai.ipynb b/docs/docs/integrations/text_embedding/openai.ipynb
index d25005c302b..0fa46f73764 100644
--- a/docs/docs/integrations/text_embedding/openai.ipynb
+++ b/docs/docs/integrations/text_embedding/openai.ipynb
@@ -17,7 +17,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings import OpenAIEmbeddings"
+ "from langchain_community.embeddings import OpenAIEmbeddings"
 ]
 },
 {
@@ -125,7 +125,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings.openai import OpenAIEmbeddings"
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/text_embedding/sagemaker-endpoint.ipynb b/docs/docs/integrations/text_embedding/sagemaker-endpoint.ipynb
index 17d746a4b7a..184c5c8f538 100644
--- a/docs/docs/integrations/text_embedding/sagemaker-endpoint.ipynb
+++ b/docs/docs/integrations/text_embedding/sagemaker-endpoint.ipynb
@@ -42,8 +42,8 @@
 "import json\n",
 "from typing import Dict, List\n",
 "\n",
- "from langchain.embeddings import SagemakerEndpointEmbeddings\n",
- "from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler\n",
+ "from langchain_community.embeddings import SagemakerEndpointEmbeddings\n",
+ "from langchain_community.embeddings.sagemaker_endpoint import EmbeddingsContentHandler\n",
 "\n",
 "\n",
 "class ContentHandler(EmbeddingsContentHandler):\n",
diff --git a/docs/docs/integrations/text_embedding/self-hosted.ipynb b/docs/docs/integrations/text_embedding/self-hosted.ipynb
index 55a99e442d4..34499e62d75 100644
--- a/docs/docs/integrations/text_embedding/self-hosted.ipynb
+++ b/docs/docs/integrations/text_embedding/self-hosted.ipynb
@@ -19,7 +19,7 @@
 "outputs": [],
 "source": [
 "import runhouse as rh\n",
- "from langchain.embeddings import (\n",
+ "from langchain_community.embeddings import (\n",
 "    SelfHostedEmbeddings,\n",
 "    SelfHostedHuggingFaceEmbeddings,\n",
 "    SelfHostedHuggingFaceInstructEmbeddings,\n",
diff --git a/docs/docs/integrations/text_embedding/sentence_transformers.ipynb b/docs/docs/integrations/text_embedding/sentence_transformers.ipynb
index cf68ad95966..864649cb028 100644
--- a/docs/docs/integrations/text_embedding/sentence_transformers.ipynb
+++ b/docs/docs/integrations/text_embedding/sentence_transformers.ipynb
@@ -41,7 +41,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings import HuggingFaceEmbeddings"
+ "from langchain_community.embeddings import HuggingFaceEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/text_embedding/spacy_embedding.ipynb b/docs/docs/integrations/text_embedding/spacy_embedding.ipynb
index edda4828b47..2dba5bc10ec 100644
--- a/docs/docs/integrations/text_embedding/spacy_embedding.ipynb
+++ b/docs/docs/integrations/text_embedding/spacy_embedding.ipynb
@@ -34,7 +34,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings.spacy_embeddings import SpacyEmbeddings"
+ "from langchain_community.embeddings.spacy_embeddings import SpacyEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/text_embedding/tensorflowhub.ipynb b/docs/docs/integrations/text_embedding/tensorflowhub.ipynb
index 135ca60e717..e2e7ff0a33e 100644
--- a/docs/docs/integrations/text_embedding/tensorflowhub.ipynb
+++ b/docs/docs/integrations/text_embedding/tensorflowhub.ipynb
@@ -20,7 +20,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings import TensorflowHubEmbeddings"
+ "from langchain_community.embeddings import TensorflowHubEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/text_embedding/text_embeddings_inference.ipynb b/docs/docs/integrations/text_embedding/text_embeddings_inference.ipynb
index 29c5b67ee21..10b09159ba7 100644
--- a/docs/docs/integrations/text_embedding/text_embeddings_inference.ipynb
+++ b/docs/docs/integrations/text_embedding/text_embeddings_inference.ipynb
@@ -57,7 +57,7 @@
 },
 "outputs": [],
 "source": [
- "from langchain.embeddings import HuggingFaceHubEmbeddings"
+ "from langchain_community.embeddings import HuggingFaceHubEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/text_embedding/voyageai.ipynb b/docs/docs/integrations/text_embedding/voyageai.ipynb
index 2cca49cc139..8dd992f16a7 100644
--- a/docs/docs/integrations/text_embedding/voyageai.ipynb
+++ b/docs/docs/integrations/text_embedding/voyageai.ipynb
@@ -19,7 +19,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings import VoyageEmbeddings"
+ "from langchain_community.embeddings import VoyageEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/text_embedding/xinference.ipynb b/docs/docs/integrations/text_embedding/xinference.ipynb
index dd4f0ee39e9..d95e5664fb7 100644
--- a/docs/docs/integrations/text_embedding/xinference.ipynb
+++ b/docs/docs/integrations/text_embedding/xinference.ipynb
@@ -76,7 +76,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings import XinferenceEmbeddings\n",
+ "from langchain_community.embeddings import XinferenceEmbeddings\n",
 "\n",
 "xinference = XinferenceEmbeddings(\n",
 "    server_url=\"http://0.0.0.0:9997\", model_uid=\"915845ee-2a04-11ee-8ed4-d29396a3f064\"\n",
diff --git a/docs/docs/integrations/toolkits/ainetwork.ipynb b/docs/docs/integrations/toolkits/ainetwork.ipynb
index f533c57d1bd..37ded7824ce 100644
--- a/docs/docs/integrations/toolkits/ainetwork.ipynb
+++ b/docs/docs/integrations/toolkits/ainetwork.ipynb
@@ -131,7 +131,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent\n",
- "from langchain.chat_models import ChatOpenAI\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
 "\n",
 "llm = ChatOpenAI(temperature=0)\n",
 "agent = initialize_agent(\n",
diff --git a/docs/docs/integrations/toolkits/airbyte_structured_qa.ipynb b/docs/docs/integrations/toolkits/airbyte_structured_qa.ipynb
index 0e7d82404e8..b36cd423583 100644
--- a/docs/docs/integrations/toolkits/airbyte_structured_qa.ipynb
+++ b/docs/docs/integrations/toolkits/airbyte_structured_qa.ipynb
@@ -29,8 +29,8 @@
 "\n",
 "import pandas as pd\n",
 "from langchain.agents import AgentType, create_pandas_dataframe_agent\n",
- "from langchain.chat_models.openai import ChatOpenAI\n",
 "from langchain.document_loaders.airbyte import AirbyteStripeLoader\n",
+ "from langchain_community.chat_models.openai import ChatOpenAI\n",
 "\n",
 "stream_name = \"customers\"\n",
 "config = {\n",
diff --git a/docs/docs/integrations/toolkits/amadeus.ipynb b/docs/docs/integrations/toolkits/amadeus.ipynb
index a05604def37..7f52f25c97b 100644
--- a/docs/docs/integrations/toolkits/amadeus.ipynb
+++ b/docs/docs/integrations/toolkits/amadeus.ipynb
@@ -87,7 +87,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent\n",
- "from langchain.llms import OpenAI"
+ "from langchain_community.llms import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/toolkits/azure_cognitive_services.ipynb b/docs/docs/integrations/toolkits/azure_cognitive_services.ipynb
index 8e9eccebc6a..614e258f6fb 100644
--- a/docs/docs/integrations/toolkits/azure_cognitive_services.ipynb
+++ b/docs/docs/integrations/toolkits/azure_cognitive_services.ipynb
@@ -108,7 +108,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent\n",
- "from langchain.llms import OpenAI"
+ "from langchain_community.llms import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/toolkits/clickup.ipynb b/docs/docs/integrations/toolkits/clickup.ipynb
index 13f28086d7f..5cec8d11002 100644
--- a/docs/docs/integrations/toolkits/clickup.ipynb
+++ b/docs/docs/integrations/toolkits/clickup.ipynb
@@ -23,8 +23,8 @@
 "\n",
 "from langchain.agents import AgentType, initialize_agent\n",
 "from langchain.agents.agent_toolkits.clickup.toolkit import ClickupToolkit\n",
- "from langchain.llms import OpenAI\n",
- "from langchain.utilities.clickup import ClickupAPIWrapper"
+ "from langchain.utilities.clickup import ClickupAPIWrapper\n",
+ "from langchain_community.llms import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/toolkits/csv.ipynb b/docs/docs/integrations/toolkits/csv.ipynb
index effe3a73ae9..7e766e2df6e 100644
--- a/docs/docs/integrations/toolkits/csv.ipynb
+++ b/docs/docs/integrations/toolkits/csv.ipynb
@@ -21,8 +21,8 @@
 "outputs": [],
 "source": [
 "from langchain.agents.agent_types import AgentType\n",
- "from langchain.chat_models import ChatOpenAI\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "from langchain_experimental.agents.agent_toolkits import create_csv_agent"
 ]
 },
diff --git a/docs/docs/integrations/toolkits/document_comparison_toolkit.ipynb b/docs/docs/integrations/toolkits/document_comparison_toolkit.ipynb
index a9edf0ecaed..fadfa3c7ea7 100644
--- a/docs/docs/integrations/toolkits/document_comparison_toolkit.ipynb
+++ b/docs/docs/integrations/toolkits/document_comparison_toolkit.ipynb
@@ -21,11 +21,11 @@
 "source": [
 "from langchain.agents import Tool\n",
 "from langchain.chains import RetrievalQA\n",
- "from langchain.chat_models import ChatOpenAI\n",
 "from langchain.document_loaders import PyPDFLoader\n",
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
 "from langchain.vectorstores import FAISS\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
 "from pydantic import BaseModel, Field"
 ]
 },
diff --git a/docs/docs/integrations/toolkits/github.ipynb b/docs/docs/integrations/toolkits/github.ipynb
index 4949baa1a4b..0cdfd7822f4 100644
--- a/docs/docs/integrations/toolkits/github.ipynb
+++ b/docs/docs/integrations/toolkits/github.ipynb
@@ -108,8 +108,8 @@
 "\n",
 "from langchain.agents import AgentType, initialize_agent\n",
 "from langchain.agents.agent_toolkits.github.toolkit import GitHubToolkit\n",
- "from langchain.chat_models import ChatOpenAI\n",
- "from langchain.utilities.github import GitHubAPIWrapper"
+ "from langchain.utilities.github import GitHubAPIWrapper\n",
+ "from langchain_community.chat_models import ChatOpenAI"
 ]
 },
 {
@@ -702,8 +702,8 @@
 "outputs": [],
 "source": [
 "from langchain.agents import Tool\n",
- "from langchain.chat_models import ChatOpenAI\n",
 "from langchain.tools import DuckDuckGoSearchRun\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
 "\n",
 "tools = []\n",
 "unwanted_tools = [\"Get Issue\", \"Delete File\", \"Create File\", \"Create Pull Request\"]\n",
diff --git a/docs/docs/integrations/toolkits/gitlab.ipynb b/docs/docs/integrations/toolkits/gitlab.ipynb
index 0e62f90e557..9d325c54105 100644
--- a/docs/docs/integrations/toolkits/gitlab.ipynb
+++ b/docs/docs/integrations/toolkits/gitlab.ipynb
@@ -102,8 +102,8 @@
 "\n",
 "from langchain.agents import AgentType, initialize_agent\n",
 "from langchain.agents.agent_toolkits.gitlab.toolkit import GitLabToolkit\n",
- "from langchain.llms import OpenAI\n",
- "from langchain.utilities.gitlab import GitLabAPIWrapper"
+ "from langchain.utilities.gitlab import GitLabAPIWrapper\n",
+ "from langchain_community.llms import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/toolkits/gmail.ipynb b/docs/docs/integrations/toolkits/gmail.ipynb
index 632c7047935..c9c3d5aa871 100644
--- a/docs/docs/integrations/toolkits/gmail.ipynb
+++ b/docs/docs/integrations/toolkits/gmail.ipynb
@@ -120,7 +120,7 @@
 "source": [
 "from langchain import hub\n",
 "from langchain.agents import AgentExecutor, create_openai_functions_agent\n",
- "from langchain.chat_models import ChatOpenAI"
+ "from langchain_community.chat_models import ChatOpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/toolkits/jira.ipynb b/docs/docs/integrations/toolkits/jira.ipynb
index ff977eab229..69d47af1da2 100644
--- a/docs/docs/integrations/toolkits/jira.ipynb
+++ b/docs/docs/integrations/toolkits/jira.ipynb
@@ -51,8 +51,8 @@
 "\n",
 "from langchain.agents import AgentType, initialize_agent\n",
 "from langchain.agents.agent_toolkits.jira.toolkit import JiraToolkit\n",
- "from langchain.llms import OpenAI\n",
- "from langchain.utilities.jira import JiraAPIWrapper"
+ "from langchain.utilities.jira import JiraAPIWrapper\n",
+ "from langchain_community.llms import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/toolkits/json.ipynb b/docs/docs/integrations/toolkits/json.ipynb
index b826916c7e4..e495e9244f3 100644
--- a/docs/docs/integrations/toolkits/json.ipynb
+++ b/docs/docs/integrations/toolkits/json.ipynb
@@ -35,8 +35,8 @@
 "import yaml\n",
 "from langchain.agents import create_json_agent\n",
 "from langchain.agents.agent_toolkits import JsonToolkit\n",
- "from langchain.llms.openai import OpenAI\n",
- "from langchain.tools.json.tool import JsonSpec"
+ "from langchain.tools.json.tool import JsonSpec\n",
+ "from langchain_community.llms.openai import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/toolkits/multion.ipynb b/docs/docs/integrations/toolkits/multion.ipynb
index 0dc0a1ee9b0..2df08656948 100644
--- a/docs/docs/integrations/toolkits/multion.ipynb
+++ b/docs/docs/integrations/toolkits/multion.ipynb
@@ -80,7 +80,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "llm = OpenAI(temperature=0)\n",
 "from langchain.agents.agent_toolkits import MultionToolkit\n",
diff --git a/docs/docs/integrations/toolkits/nasa.ipynb b/docs/docs/integrations/toolkits/nasa.ipynb
index 09580d2396e..06759ffaedd 100644
--- a/docs/docs/integrations/toolkits/nasa.ipynb
+++ b/docs/docs/integrations/toolkits/nasa.ipynb
@@ -31,8 +31,8 @@
 "source": [
 "from langchain.agents import AgentType, initialize_agent\n",
 "from langchain.agents.agent_toolkits.nasa.toolkit import NasaToolkit\n",
- "from langchain.llms import OpenAI\n",
 "from langchain.utilities.nasa import NasaAPIWrapper\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "llm = OpenAI(temperature=0, openai_api_key=\"\")\n",
 "nasa = NasaAPIWrapper()\n",
diff --git a/docs/docs/integrations/toolkits/office365.ipynb b/docs/docs/integrations/toolkits/office365.ipynb
index 0b25a4a9b6f..bee5462b175 100644
--- a/docs/docs/integrations/toolkits/office365.ipynb
+++ b/docs/docs/integrations/toolkits/office365.ipynb
@@ -100,7 +100,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent\n",
- "from langchain.llms import OpenAI"
+ "from langchain_community.llms import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/toolkits/openapi.ipynb b/docs/docs/integrations/toolkits/openapi.ipynb
index 696e940b7ef..5078ef2026e 100644
--- a/docs/docs/integrations/toolkits/openapi.ipynb
+++ b/docs/docs/integrations/toolkits/openapi.ipynb
@@ -252,16 +252,16 @@
 "name": "stderr",
 "output_type": "stream",
 "text": [
- "/Users/jeremywelborn/src/langchain/langchain/llms/openai.py:169: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain.chat_models import ChatOpenAI`\n",
+ "/Users/jeremywelborn/src/langchain/langchain/llms/openai.py:169: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain_community.chat_models import ChatOpenAI`\n",
 "  warnings.warn(\n",
- "/Users/jeremywelborn/src/langchain/langchain/llms/openai.py:608: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain.chat_models import ChatOpenAI`\n",
+ "/Users/jeremywelborn/src/langchain/langchain/llms/openai.py:608: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain_community.chat_models import ChatOpenAI`\n",
 "  warnings.warn(\n"
 ]
 }
 ],
 "source": [
 "from langchain.agents.agent_toolkits.openapi import planner\n",
- "from langchain.llms.openai import OpenAI\n",
+ "from langchain_community.llms.openai import OpenAI\n",
 "\n",
 "llm = OpenAI(model_name=\"gpt-4\", temperature=0.0)"
 ]
@@ -584,8 +584,8 @@
 "source": [
 "from langchain.agents import create_openapi_agent\n",
 "from langchain.agents.agent_toolkits import OpenAPIToolkit\n",
- "from langchain.llms.openai import OpenAI\n",
- "from langchain.tools.json.tool import JsonSpec"
+ "from langchain.tools.json.tool import JsonSpec\n",
+ "from langchain_community.llms.openai import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/toolkits/openapi_nla.ipynb b/docs/docs/integrations/toolkits/openapi_nla.ipynb
index 221db17c857..a4df00f3be2 100644
--- a/docs/docs/integrations/toolkits/openapi_nla.ipynb
+++ b/docs/docs/integrations/toolkits/openapi_nla.ipynb
@@ -27,8 +27,8 @@
 "source": [
 "from langchain.agents import AgentType, initialize_agent\n",
 "from langchain.agents.agent_toolkits import NLAToolkit\n",
- "from langchain.llms import OpenAI\n",
- "from langchain.requests import Requests"
+ "from langchain.requests import Requests\n",
+ "from langchain_community.llms import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/toolkits/pandas.ipynb b/docs/docs/integrations/toolkits/pandas.ipynb
index 66af16ef8f1..312e88ec6c6 100644
--- a/docs/docs/integrations/toolkits/pandas.ipynb
+++ b/docs/docs/integrations/toolkits/pandas.ipynb
@@ -20,7 +20,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents.agent_types import AgentType\n",
- "from langchain.chat_models import ChatOpenAI\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
 "from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent"
 ]
 },
@@ -32,7 +32,7 @@
 "outputs": [],
 "source": [
 "import pandas as pd\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "df = pd.read_csv(\"titanic.csv\")"
 ]
diff --git a/docs/docs/integrations/toolkits/playwright.ipynb b/docs/docs/integrations/toolkits/playwright.ipynb
index f68be061c31..6eb4e583514 100644
--- a/docs/docs/integrations/toolkits/playwright.ipynb
+++ b/docs/docs/integrations/toolkits/playwright.ipynb
@@ -221,7 +221,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent\n",
- "from langchain.chat_models import ChatAnthropic\n",
+ "from langchain_community.chat_models import ChatAnthropic\n",
 "\n",
 "llm = ChatAnthropic(temperature=0)  # or any other LLM, e.g., ChatOpenAI(), OpenAI()\n",
 "\n",
diff --git a/docs/docs/integrations/toolkits/powerbi.ipynb b/docs/docs/integrations/toolkits/powerbi.ipynb
index 0ed596be97f..ae6f0a9409b 100644
--- a/docs/docs/integrations/toolkits/powerbi.ipynb
+++ b/docs/docs/integrations/toolkits/powerbi.ipynb
@@ -39,8 +39,8 @@
 "source": [
 "from azure.identity import DefaultAzureCredential\n",
 "from langchain.agents.agent_toolkits import PowerBIToolkit, create_pbi_agent\n",
- "from langchain.chat_models import ChatOpenAI\n",
- "from langchain.utilities.powerbi import PowerBIDataset"
+ "from langchain.utilities.powerbi import PowerBIDataset\n",
+ "from langchain_community.chat_models import ChatOpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/toolkits/python.ipynb b/docs/docs/integrations/toolkits/python.ipynb
index b7cb60d100d..e3328b5ae85 100644
--- a/docs/docs/integrations/toolkits/python.ipynb
+++ b/docs/docs/integrations/toolkits/python.ipynb
@@ -70,7 +70,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import create_openai_functions_agent\n",
- "from langchain.chat_models import ChatOpenAI"
+ "from langchain_community.chat_models import ChatOpenAI"
 ]
 },
 {
@@ -129,7 +129,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import create_react_agent\n",
- "from langchain.chat_models import ChatAnthropic"
+ "from langchain_community.chat_models import ChatAnthropic"
 ]
 },
 {
diff --git a/docs/docs/integrations/toolkits/slack.ipynb b/docs/docs/integrations/toolkits/slack.ipynb
index ece8d8a59d9..bdf207d6bdc 100644
--- a/docs/docs/integrations/toolkits/slack.ipynb
+++ b/docs/docs/integrations/toolkits/slack.ipynb
@@ -75,7 +75,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent\n",
- "from langchain.llms import OpenAI"
+ "from langchain_community.llms import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/toolkits/spark.ipynb b/docs/docs/integrations/toolkits/spark.ipynb
index 341d4edee09..d5fe6b1cf8b 100644
--- a/docs/docs/integrations/toolkits/spark.ipynb
+++ b/docs/docs/integrations/toolkits/spark.ipynb
@@ -79,7 +79,7 @@
 }
 ],
 "source": [
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "from langchain_experimental.agents.agent_toolkits import create_spark_dataframe_agent\n",
 "from pyspark.sql import SparkSession\n",
 "\n",
@@ -335,7 +335,7 @@
 "import os\n",
 "\n",
 "from langchain.agents import create_spark_dataframe_agent\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "os.environ[\"OPENAI_API_KEY\"] = \"...input your openai api key here...\"\n",
 "\n",
diff --git a/docs/docs/integrations/toolkits/spark_sql.ipynb b/docs/docs/integrations/toolkits/spark_sql.ipynb
index b321dfa5e33..7249763dde9 100644
--- a/docs/docs/integrations/toolkits/spark_sql.ipynb
+++ b/docs/docs/integrations/toolkits/spark_sql.ipynb
@@ -26,8 +26,8 @@
 "source": [
 "from langchain.agents import create_spark_sql_agent\n",
 "from langchain.agents.agent_toolkits import SparkSQLToolkit\n",
- "from langchain.chat_models import ChatOpenAI\n",
- "from langchain.utilities.spark_sql import SparkSQL"
+ "from langchain.utilities.spark_sql import SparkSQL\n",
+ "from langchain_community.chat_models import ChatOpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/toolkits/sql_database.ipynb b/docs/docs/integrations/toolkits/sql_database.ipynb
index 51e356e2c86..412168a0f75 100644
--- a/docs/docs/integrations/toolkits/sql_database.ipynb
+++ b/docs/docs/integrations/toolkits/sql_database.ipynb
@@ -37,8 +37,8 @@
 "from langchain.agents import create_sql_agent\n",
 "from langchain.agents.agent_toolkits import SQLDatabaseToolkit\n",
 "from langchain.agents.agent_types import AgentType\n",
- "from langchain.llms.openai import OpenAI\n",
- "from langchain.sql_database import SQLDatabase"
+ "from langchain.sql_database import SQLDatabase\n",
+ "from langchain_community.llms.openai import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/toolkits/steam.ipynb b/docs/docs/integrations/toolkits/steam.ipynb
index d0293ce87d6..44e28b6fd1c 100644
--- a/docs/docs/integrations/toolkits/steam.ipynb
+++ b/docs/docs/integrations/toolkits/steam.ipynb
@@ -76,8 +76,8 @@
 "source": [
 "from langchain.agents import AgentType, initialize_agent\n",
 "from langchain.agents.agent_toolkits.steam.toolkit import SteamToolkit\n",
- "from langchain.llms import OpenAI\n",
- "from langchain.utilities.steam import SteamWebAPIWrapper"
+ "from langchain.utilities.steam import SteamWebAPIWrapper\n",
+ "from langchain_community.llms import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/toolkits/xorbits.ipynb b/docs/docs/integrations/toolkits/xorbits.ipynb
index c5f8331f888..44fc61496da 100644
--- a/docs/docs/integrations/toolkits/xorbits.ipynb
+++ b/docs/docs/integrations/toolkits/xorbits.ipynb
@@ -35,7 +35,7 @@
 "outputs": [],
 "source": [
 "import xorbits.pandas as pd\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "from langchain_experimental.agents.agent_toolkits import create_xorbits_agent"
 ]
 },
@@ -381,7 +381,7 @@
 "source": [
 "import xorbits.numpy as np\n",
 "from langchain.agents import create_xorbits_agent\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "arr = np.array([1, 2, 3, 4, 5, 6])\n",
 "agent = create_xorbits_agent(OpenAI(temperature=0), arr, verbose=True)"
diff --git a/docs/docs/integrations/tools/arxiv.ipynb b/docs/docs/integrations/tools/arxiv.ipynb
index 2210cc2a740..e9eb2bd0818 100644
--- a/docs/docs/integrations/tools/arxiv.ipynb
+++ b/docs/docs/integrations/tools/arxiv.ipynb
@@ -37,7 +37,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent, load_tools\n",
- "from langchain.chat_models import ChatOpenAI\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
 "\n",
 "llm = ChatOpenAI(temperature=0.0)\n",
 "tools = load_tools(\n",
diff --git a/docs/docs/integrations/tools/awslambda.ipynb b/docs/docs/integrations/tools/awslambda.ipynb
index 79bea437206..d88083eec3e 100644
--- a/docs/docs/integrations/tools/awslambda.ipynb
+++ b/docs/docs/integrations/tools/awslambda.ipynb
@@ -62,7 +62,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent, load_tools\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "llm = OpenAI(temperature=0)\n",
 "\n",
diff --git a/docs/docs/integrations/tools/bash.ipynb b/docs/docs/integrations/tools/bash.ipynb
index c5c51a5bc36..5b3d3110401 100644
--- a/docs/docs/integrations/tools/bash.ipynb
+++ b/docs/docs/integrations/tools/bash.ipynb
@@ -145,7 +145,7 @@
 ],
 "source": [
 "from langchain.agents import AgentType, initialize_agent\n",
- "from langchain.chat_models import ChatOpenAI\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
 "\n",
 "llm = ChatOpenAI(temperature=0)\n",
 "\n",
diff --git a/docs/docs/integrations/tools/bearly.ipynb b/docs/docs/integrations/tools/bearly.ipynb
index b99d7a3513e..1580ff6c3fc 100644
--- a/docs/docs/integrations/tools/bearly.ipynb
+++ b/docs/docs/integrations/tools/bearly.ipynb
@@ -28,8 +28,8 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent\n",
- "from langchain.chat_models import ChatOpenAI\n",
- "from langchain.tools import BearlyInterpreterTool"
+ "from langchain.tools import BearlyInterpreterTool\n",
+ "from langchain_community.chat_models import ChatOpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/tools/chatgpt_plugins.ipynb b/docs/docs/integrations/tools/chatgpt_plugins.ipynb
index 4f57a6cc11b..4d4b8ee0deb 100644
--- a/docs/docs/integrations/tools/chatgpt_plugins.ipynb
+++ b/docs/docs/integrations/tools/chatgpt_plugins.ipynb
@@ -22,8 +22,8 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent, load_tools\n",
- "from langchain.chat_models import ChatOpenAI\n",
- "from langchain.tools import AIPluginTool"
+ "from langchain.tools import AIPluginTool\n",
+ "from langchain_community.chat_models import ChatOpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/tools/dalle_image_generator.ipynb b/docs/docs/integrations/tools/dalle_image_generator.ipynb
index 45591470cce..3819f310c19 100644
--- a/docs/docs/integrations/tools/dalle_image_generator.ipynb
+++ b/docs/docs/integrations/tools/dalle_image_generator.ipynb
@@ -32,7 +32,7 @@
 "source": [
 "import os\n",
 "\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "os.environ[\"OPENAI_API_KEY\"] = \"\""
 ]
@@ -52,9 +52,9 @@
 "outputs": [],
 "source": [
 "from langchain.chains import LLMChain\n",
- "from langchain.llms import OpenAI\n",
 "from langchain.prompts import PromptTemplate\n",
 "from langchain.utilities.dalle_image_generator import DallEAPIWrapper\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "llm = OpenAI(temperature=0.9)\n",
 "prompt = PromptTemplate(\n",
diff --git a/docs/docs/integrations/tools/e2b_data_analysis.ipynb b/docs/docs/integrations/tools/e2b_data_analysis.ipynb
index e92e70783e1..a37d93d98fe 100644
--- a/docs/docs/integrations/tools/e2b_data_analysis.ipynb
+++ b/docs/docs/integrations/tools/e2b_data_analysis.ipynb
@@ -58,8 +58,8 @@
 "import os\n",
 "\n",
 "from langchain.agents import AgentType, initialize_agent\n",
- "from langchain.chat_models import ChatOpenAI\n",
 "from langchain.tools import E2BDataAnalysisTool\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
 "\n",
 "os.environ[\"E2B_API_KEY\"] = \"\"\n",
 "os.environ[\"OPENAI_API_KEY\"] = \"\""
diff --git a/docs/docs/integrations/tools/edenai_tools.ipynb b/docs/docs/integrations/tools/edenai_tools.ipynb
index 5d356ed43fe..3c5ae5c99dc 100644
--- a/docs/docs/integrations/tools/edenai_tools.ipynb
+++ b/docs/docs/integrations/tools/edenai_tools.ipynb
@@ -66,7 +66,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent\n",
- "from langchain.llms import EdenAI\n",
+ "from langchain_community.llms import EdenAI\n",
 "\n",
 "llm = EdenAI(\n",
 "    feature=\"text\", provider=\"openai\", params={\"temperature\": 0.2, \"max_tokens\": 250}\n",
diff --git a/docs/docs/integrations/tools/eleven_labs_tts.ipynb b/docs/docs/integrations/tools/eleven_labs_tts.ipynb
index 8d9d55be8f3..adc1af099ba 100644
--- a/docs/docs/integrations/tools/eleven_labs_tts.ipynb
+++ b/docs/docs/integrations/tools/eleven_labs_tts.ipynb
@@ -127,7 +127,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent, load_tools\n",
- "from langchain.llms import OpenAI"
+ "from langchain_community.llms import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/tools/google_drive.ipynb b/docs/docs/integrations/tools/google_drive.ipynb
index cbafe83f605..3808061a3c5 100644
--- a/docs/docs/integrations/tools/google_drive.ipynb
+++ b/docs/docs/integrations/tools/google_drive.ipynb
@@ -172,7 +172,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "llm = OpenAI(temperature=0)\n",
 "agent = initialize_agent(\n",
diff --git a/docs/docs/integrations/tools/google_finance.ipynb b/docs/docs/integrations/tools/google_finance.ipynb
index 1f7be2cda86..b6c039e1c41 100644
--- a/docs/docs/integrations/tools/google_finance.ipynb
+++ b/docs/docs/integrations/tools/google_finance.ipynb
@@ -75,7 +75,7 @@
 "import os\n",
 "\n",
 "from langchain.agents import AgentType, initialize_agent, load_tools\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
 "os.environ[\"SERP_API_KEY\"] = \"\"\n",
diff --git a/docs/docs/integrations/tools/google_jobs.ipynb b/docs/docs/integrations/tools/google_jobs.ipynb
index 2ae9a77944e..72f94ec91ef 100644
--- a/docs/docs/integrations/tools/google_jobs.ipynb
+++ b/docs/docs/integrations/tools/google_jobs.ipynb
@@ -194,7 +194,7 @@
 "import os\n",
 "\n",
 "from langchain.agents import AgentType, initialize_agent, load_tools\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "OpenAI.api_key = os.environ[\"OPENAI_API_KEY\"]\n",
 "llm = OpenAI()\n",
diff --git a/docs/docs/integrations/tools/google_serper.ipynb b/docs/docs/integrations/tools/google_serper.ipynb
index fd28cf90193..f2244504f66 100644
--- a/docs/docs/integrations/tools/google_serper.ipynb
+++ b/docs/docs/integrations/tools/google_serper.ipynb
@@ -159,8 +159,8 @@
 ],
 "source": [
 "from langchain.agents import AgentType, Tool, initialize_agent\n",
- "from langchain.llms.openai import OpenAI\n",
 "from langchain.utilities import GoogleSerperAPIWrapper\n",
+ "from langchain_community.llms.openai import OpenAI\n",
 "\n",
 "llm = OpenAI(temperature=0)\n",
 "search = GoogleSerperAPIWrapper()\n",
diff --git a/docs/docs/integrations/tools/gradio_tools.ipynb b/docs/docs/integrations/tools/gradio_tools.ipynb
index 6337825c09a..26410ca1069 100644
--- a/docs/docs/integrations/tools/gradio_tools.ipynb
+++ b/docs/docs/integrations/tools/gradio_tools.ipynb
@@ -184,8 +184,8 @@
 "    TextToVideoTool,\n",
 ")\n",
 "from langchain.agents import initialize_agent\n",
- "from langchain.llms import OpenAI\n",
 "from langchain.memory import ConversationBufferMemory\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "llm = OpenAI(temperature=0)\n",
 "memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
diff --git a/docs/docs/integrations/tools/graphql.ipynb b/docs/docs/integrations/tools/graphql.ipynb
index c2099fa54a2..09cba1a0227 100644
--- a/docs/docs/integrations/tools/graphql.ipynb
+++ b/docs/docs/integrations/tools/graphql.ipynb
@@ -44,7 +44,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent, load_tools\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "llm = OpenAI(temperature=0)\n",
 "\n",
diff --git a/docs/docs/integrations/tools/human_tools.ipynb b/docs/docs/integrations/tools/human_tools.ipynb
index 4c8bc99c0db..ae69520dc34 100644
--- a/docs/docs/integrations/tools/human_tools.ipynb
+++ b/docs/docs/integrations/tools/human_tools.ipynb
@@ -19,8 +19,8 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent, load_tools\n",
- "from langchain.chat_models import ChatOpenAI\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "llm = ChatOpenAI(temperature=0.0)\n",
 "math_llm = OpenAI(temperature=0.0)\n",
diff --git a/docs/docs/integrations/tools/lemonai.ipynb b/docs/docs/integrations/tools/lemonai.ipynb
index 1a8a38c2f1c..fffdda472ab 100644
--- a/docs/docs/integrations/tools/lemonai.ipynb
+++ b/docs/docs/integrations/tools/lemonai.ipynb
@@ -126,7 +126,7 @@
 "source": [
 "import os\n",
 "\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "from lemonai import execute_workflow"
 ]
 },
diff --git a/docs/docs/integrations/tools/memorize.ipynb b/docs/docs/integrations/tools/memorize.ipynb
index 49e5b2d3a9c..db7ec349854 100644
--- a/docs/docs/integrations/tools/memorize.ipynb
+++ b/docs/docs/integrations/tools/memorize.ipynb
@@ -28,8 +28,8 @@
 "\n",
 "from langchain.agents import AgentExecutor, AgentType, initialize_agent, load_tools\n",
 "from langchain.chains import LLMChain\n",
- "from langchain.llms import GradientLLM\n",
- "from langchain.memory import ConversationBufferMemory"
+ "from langchain.memory import ConversationBufferMemory\n",
+ "from langchain_community.llms import GradientLLM"
 ]
 },
 {
diff --git a/docs/docs/integrations/tools/metaphor_search.ipynb b/docs/docs/integrations/tools/metaphor_search.ipynb
index f52ea221b2d..3da3b28b923 100644
--- a/docs/docs/integrations/tools/metaphor_search.ipynb
+++ b/docs/docs/integrations/tools/metaphor_search.ipynb
@@ -123,8 +123,8 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentExecutor, OpenAIFunctionsAgent\n",
- "from langchain.chat_models import ChatOpenAI\n",
 "from langchain.schema import SystemMessage\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
 "\n",
 "llm = ChatOpenAI(temperature=0)\n",
 "\n",
@@ -310,8 +310,8 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentExecutor, OpenAIFunctionsAgent\n",
- "from langchain.chat_models import ChatOpenAI\n",
 "from langchain.schema import SystemMessage\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
 "\n",
 "llm = ChatOpenAI(temperature=0, model=\"gpt-4\")\n",
 "\n",
diff --git a/docs/docs/integrations/tools/openweathermap.ipynb b/docs/docs/integrations/tools/openweathermap.ipynb
index 65dccc24c09..48a954a6194 100644
--- a/docs/docs/integrations/tools/openweathermap.ipynb
+++ b/docs/docs/integrations/tools/openweathermap.ipynb
@@ -84,7 +84,7 @@
 "import os\n",
 "\n",
 "from langchain.agents import AgentType, initialize_agent, load_tools\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
 "os.environ[\"OPENWEATHERMAP_API_KEY\"] = \"\"\n",
diff --git a/docs/docs/integrations/tools/reddit_search.ipynb b/docs/docs/integrations/tools/reddit_search.ipynb
index 92c35010db7..02fe73a543a 100644
--- a/docs/docs/integrations/tools/reddit_search.ipynb
+++ b/docs/docs/integrations/tools/reddit_search.ipynb
@@ -171,11 +171,11 @@
 "\n",
 "from langchain.agents import AgentExecutor, StructuredChatAgent, Tool\n",
 "from langchain.chains import LLMChain\n",
- "from langchain.chat_models import ChatOpenAI\n",
 "from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n",
 "from langchain.prompts import PromptTemplate\n",
 "from langchain.tools.reddit_search.tool import RedditSearchRun\n",
 "from langchain.utilities.reddit_search import RedditSearchAPIWrapper\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
 "\n",
 "# Provide keys for Reddit\n",
 "client_id = \"\"\n",
diff --git a/docs/docs/integrations/tools/sceneXplain.ipynb b/docs/docs/integrations/tools/sceneXplain.ipynb
index 7b08f8f50a2..ae5e6dc2e92 100644
--- a/docs/docs/integrations/tools/sceneXplain.ipynb
+++ b/docs/docs/integrations/tools/sceneXplain.ipynb
@@ -95,8 +95,8 @@
 ],
 "source": [
 "from langchain.agents import initialize_agent\n",
- "from langchain.llms import OpenAI\n",
 "from langchain.memory import ConversationBufferMemory\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "llm = OpenAI(temperature=0)\n",
 "memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
diff --git a/docs/docs/integrations/tools/search_tools.ipynb b/docs/docs/integrations/tools/search_tools.ipynb
index d762385b416..6e1ad5dfe7a 100644
--- a/docs/docs/integrations/tools/search_tools.ipynb
+++ b/docs/docs/integrations/tools/search_tools.ipynb
@@ -22,7 +22,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent, load_tools\n",
- "from langchain.llms import OpenAI"
+ "from langchain_community.llms import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/tools/searchapi.ipynb b/docs/docs/integrations/tools/searchapi.ipynb
index 978b7dd380a..ecdfdb3f2fc 100644
--- a/docs/docs/integrations/tools/searchapi.ipynb
+++ b/docs/docs/integrations/tools/searchapi.ipynb
@@ -125,8 +125,8 @@
 ],
 "source": [
 "from langchain.agents import AgentType, Tool, initialize_agent\n",
- "from langchain.llms.openai import OpenAI\n",
 "from langchain.utilities import SearchApiAPIWrapper\n",
+ "from langchain_community.llms.openai import OpenAI\n",
 "\n",
 "llm = OpenAI(temperature=0)\n",
 "search = SearchApiAPIWrapper()\n",
diff --git a/docs/docs/integrations/tools/tavily_search.ipynb b/docs/docs/integrations/tools/tavily_search.ipynb
index 196329c59d5..7e4d9d3c389 100644
--- a/docs/docs/integrations/tools/tavily_search.ipynb
+++ b/docs/docs/integrations/tools/tavily_search.ipynb
@@ -82,9 +82,9 @@
 "import os\n",
 "\n",
 "from langchain.agents import AgentType, initialize_agent\n",
- "from langchain.chat_models import ChatOpenAI\n",
 "from langchain.tools.tavily_search import TavilySearchResults\n",
 "from langchain.utilities.tavily_search import TavilySearchAPIWrapper\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
 "\n",
 "# set up API key\n",
 "os.environ[\"TAVILY_API_KEY\"] = \"...\"\n",
diff --git a/docs/docs/integrations/tools/yahoo_finance_news.ipynb b/docs/docs/integrations/tools/yahoo_finance_news.ipynb
index b6c9c8d6596..d6baeaa11ea 100644
--- a/docs/docs/integrations/tools/yahoo_finance_news.ipynb
+++ b/docs/docs/integrations/tools/yahoo_finance_news.ipynb
@@ -53,8 +53,8 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent\n",
- "from langchain.chat_models import ChatOpenAI\n",
 "from langchain.tools.yahoo_finance_news import YahooFinanceNewsTool\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
 "\n",
 "llm = ChatOpenAI(temperature=0.0)\n",
 "tools = [YahooFinanceNewsTool()]\n",
diff --git a/docs/docs/integrations/tools/zapier.ipynb b/docs/docs/integrations/tools/zapier.ipynb
index 2bb7a4902ef..c1bda16d99e 100644
--- a/docs/docs/integrations/tools/zapier.ipynb
+++ b/docs/docs/integrations/tools/zapier.ipynb
@@ -62,8 +62,8 @@
 "source": [
 "from langchain.agents import AgentType, initialize_agent\n",
 "from langchain.agents.agent_toolkits import ZapierToolkit\n",
- "from langchain.llms import OpenAI\n",
- "from langchain.utilities.zapier import ZapierNLAWrapper"
+ "from langchain.utilities.zapier import ZapierNLAWrapper\n",
+ "from langchain_community.llms import OpenAI"
 ]
 },
 {
@@ -161,10 +161,10 @@
 "outputs": [],
 "source": [
 "from langchain.chains import LLMChain, SimpleSequentialChain, TransformChain\n",
- "from langchain.llms import OpenAI\n",
 "from langchain.prompts import PromptTemplate\n",
 "from langchain.tools.zapier.tool import ZapierNLARunAction\n",
- "from langchain.utilities.zapier import ZapierNLAWrapper"
+ "from langchain.utilities.zapier import ZapierNLAWrapper\n",
+ "from langchain_community.llms import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb b/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb
index 2d45c73aec7..933ba4912df 100644
--- a/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb
+++ b/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb
@@ -51,9 +51,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import DeepLake"
+ "from langchain.vectorstores import DeepLake\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings"
 ]
 },
 {
@@ -227,14 +227,14 @@
 "name": "stderr",
 "output_type": "stream",
 "text": [
- "/home/ubuntu/langchain_activeloop/langchain/libs/langchain/langchain/llms/openai.py:786: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain.chat_models import ChatOpenAI`\n",
+ "/home/ubuntu/langchain_activeloop/langchain/libs/langchain/langchain/llms/openai.py:786: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain_community.chat_models import ChatOpenAI`\n",
 "  warnings.warn(\n"
 ]
 }
 ],
 "source": [
 "from langchain.chains import RetrievalQA\n",
- "from langchain.llms import OpenAIChat\n",
+ "from langchain_community.llms import OpenAIChat\n",
 "\n",
 "qa = RetrievalQA.from_chain_type(\n",
 "    llm=OpenAIChat(model=\"gpt-3.5-turbo\"),\n",
diff --git a/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb b/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb
index a06d97e9109..1d059b08b86 100644
--- a/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb
+++ b/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb
@@ -134,12 +134,12 @@
 },
 "outputs": [],
 "source": [
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
 "from langchain.vectorstores import (\n",
 "    AlibabaCloudOpenSearch,\n",
 "    AlibabaCloudOpenSearchSettings,\n",
- ")"
+ ")\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/analyticdb.ipynb b/docs/docs/integrations/vectorstores/analyticdb.ipynb
index 6d33565d20d..b763e3c6b26 100644
--- a/docs/docs/integrations/vectorstores/analyticdb.ipynb
+++ b/docs/docs/integrations/vectorstores/analyticdb.ipynb
@@ -23,9 +23,9 @@
 },
 "outputs": [],
 "source": [
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import AnalyticDB"
+ "from langchain.vectorstores import AnalyticDB\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/annoy.ipynb b/docs/docs/integrations/vectorstores/annoy.ipynb
index f1915aead7d..fe8c20e4b85 100644
--- a/docs/docs/integrations/vectorstores/annoy.ipynb
+++ b/docs/docs/integrations/vectorstores/annoy.ipynb
@@ -52,8 +52,8 @@
 },
 "outputs": [],
 "source": [
- "from langchain.embeddings import HuggingFaceEmbeddings\n",
 "from langchain.vectorstores import Annoy\n",
+ "from langchain_community.embeddings import HuggingFaceEmbeddings\n",
 "\n",
 "embeddings_func = HuggingFaceEmbeddings()"
 ]
diff --git a/docs/docs/integrations/vectorstores/astradb.ipynb b/docs/docs/integrations/vectorstores/astradb.ipynb
index 06a99ffd3e7..dd89dd1d933 100644
--- a/docs/docs/integrations/vectorstores/astradb.ipynb
+++ b/docs/docs/integrations/vectorstores/astradb.ipynb
@@ -60,12 +60,12 @@
 "from datasets import (\n",
 "    load_dataset,\n",
 ")\n",
- "from langchain.chat_models import ChatOpenAI\n",
 "from langchain.document_loaders import PyPDFLoader\n",
- "from langchain.embeddings import OpenAIEmbeddings\n",
 "from langchain.prompts import ChatPromptTemplate\n",
 "from langchain.schema import Document\n",
 "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
+ "from langchain_community.embeddings import OpenAIEmbeddings\n",
 "from langchain_core.output_parsers import StrOutputParser\n",
 "from langchain_core.runnables import RunnablePassthrough"
 ]
diff --git a/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb b/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb
index 16d247ea0d4..a2fd13f3b3e 100644
--- a/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb
+++ b/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb
@@ -131,12 +131,12 @@
 "outputs": [],
 "source": [
 "from langchain.document_loaders import TextLoader\n",
- "from langchain.embeddings import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
 "from langchain.vectorstores.azure_cosmos_db_vector_search import (\n",
 "    AzureCosmosDBVectorSearch,\n",
 "    CosmosDBSimilarityType,\n",
 ")\n",
+ "from langchain_community.embeddings import OpenAIEmbeddings\n",
 "\n",
 "SOURCE_FILE_NAME = \"../../modules/state_of_the_union.txt\"\n",
 "\n",
diff --git a/docs/docs/integrations/vectorstores/azuresearch.ipynb b/docs/docs/integrations/vectorstores/azuresearch.ipynb
index 710e5b3fcb8..2b9389a3ae5 100644
--- a/docs/docs/integrations/vectorstores/azuresearch.ipynb
+++ b/docs/docs/integrations/vectorstores/azuresearch.ipynb
@@ -45,8 +45,8 @@
 "source": [
 "import os\n",
 "\n",
- "from langchain.embeddings import OpenAIEmbeddings\n",
- "from langchain.vectorstores.azuresearch import AzureSearch"
+ "from langchain.vectorstores.azuresearch import AzureSearch\n",
+ "from langchain_community.embeddings import OpenAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb b/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb
index 5785907ae7f..92edb132d92 100644
--- a/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb
+++ b/docs/docs/integrations/vectorstores/baiducloud_vector_search.ipynb
@@ -85,7 +85,7 @@
 "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
 "docs = text_splitter.split_documents(documents)\n",
 "\n",
- "from langchain.embeddings import QianfanEmbeddingsEndpoint\n",
+ "from langchain_community.embeddings import QianfanEmbeddingsEndpoint\n",
 "\n",
 "embeddings = QianfanEmbeddingsEndpoint()"
 ]
diff --git a/docs/docs/integrations/vectorstores/chroma.ipynb b/docs/docs/integrations/vectorstores/chroma.ipynb
index 1d12a3da412..852946f1c1e 100644
--- a/docs/docs/integrations/vectorstores/chroma.ipynb
+++ b/docs/docs/integrations/vectorstores/chroma.ipynb
@@ -74,9 +74,11 @@
 "source": [
 "# import\n",
 "from langchain.document_loaders import TextLoader\n",
- "from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
 "from langchain.vectorstores import Chroma\n",
+ "from langchain_community.embeddings.sentence_transformer import (\n",
+ "    SentenceTransformerEmbeddings,\n",
+ ")\n",
 "\n",
 "# load the document and split it into chunks\n",
 "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n",
@@ -359,7 +361,7 @@
 "\n",
 "from getpass import getpass\n",
 "\n",
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
 "\n",
 "OPENAI_API_KEY = getpass()"
 ]
diff --git a/docs/docs/integrations/vectorstores/clickhouse.ipynb b/docs/docs/integrations/vectorstores/clickhouse.ipynb
index 6f18d02cb38..c74ec7d7a39 100644
--- a/docs/docs/integrations/vectorstores/clickhouse.ipynb
+++ b/docs/docs/integrations/vectorstores/clickhouse.ipynb
@@ -101,9 +101,9 @@
 },
 "outputs": [],
 "source": [
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import Clickhouse, ClickhouseSettings"
+ "from langchain.vectorstores import Clickhouse, ClickhouseSettings\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/dashvector.ipynb b/docs/docs/integrations/vectorstores/dashvector.ipynb
index b141d90a91b..9f29f6f7c78 100644
--- a/docs/docs/integrations/vectorstores/dashvector.ipynb
+++ b/docs/docs/integrations/vectorstores/dashvector.ipynb
@@ -101,9 +101,9 @@
 },
 "outputs": [],
 "source": [
- "from langchain.embeddings.dashscope import DashScopeEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import DashVector"
+ "from langchain.vectorstores import DashVector\n",
+ "from langchain_community.embeddings.dashscope import DashScopeEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/databricks_vector_search.ipynb b/docs/docs/integrations/vectorstores/databricks_vector_search.ipynb
index 4ac079b4afe..a4c0942b608 100644
--- a/docs/docs/integrations/vectorstores/databricks_vector_search.ipynb
+++ b/docs/docs/integrations/vectorstores/databricks_vector_search.ipynb
@@ -60,8 +60,8 @@
 "outputs": [],
 "source": [
 "from langchain.document_loaders import TextLoader\n",
- "from langchain.embeddings import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
+ "from langchain_community.embeddings import OpenAIEmbeddings\n",
 "\n",
 "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n",
 "documents = loader.load()\n",
diff --git a/docs/docs/integrations/vectorstores/dingo.ipynb b/docs/docs/integrations/vectorstores/dingo.ipynb
index bb1222063e2..4b03290be33 100644
--- a/docs/docs/integrations/vectorstores/dingo.ipynb
+++ b/docs/docs/integrations/vectorstores/dingo.ipynb
@@ -69,9 +69,9 @@
 "outputs": [],
 "source": [
 "from langchain.document_loaders import TextLoader\n",
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import Dingo"
+ "from langchain.vectorstores import Dingo\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings"
 ]
 },
 {
@@ -131,9 +131,9 @@
 "outputs": [],
 "source": [
 "from langchain.document_loaders import TextLoader\n",
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import Dingo"
+ "from langchain.vectorstores import Dingo\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb b/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb
index 14a197b3a0a..9848d95af5e 100644
--- a/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb
+++ b/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb
@@ -74,9 +74,9 @@
 "outputs": [],
 "source": [
 "from langchain.document_loaders import TextLoader\n",
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import DocArrayHnswSearch"
+ "from langchain.vectorstores import DocArrayHnswSearch\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb b/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb
index e478b0353e8..f1f3482eaca 100644
--- a/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb
+++ b/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb
@@ -71,9 +71,9 @@
 "outputs": [],
 "source": [
 "from langchain.document_loaders import TextLoader\n",
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import DocArrayInMemorySearch"
+ "from langchain.vectorstores import DocArrayInMemorySearch\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/elasticsearch.ipynb b/docs/docs/integrations/vectorstores/elasticsearch.ipynb
index 033f8143789..e54f3587793 100644
--- a/docs/docs/integrations/vectorstores/elasticsearch.ipynb
+++ b/docs/docs/integrations/vectorstores/elasticsearch.ipynb
@@ -65,7 +65,7 @@
 "Example:\n",
 "```python\n",
 "    from langchain.vectorstores.elasticsearch import ElasticsearchStore\n",
- "    from langchain.embeddings.openai import OpenAIEmbeddings\n",
+ "    from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
 "\n",
 "    embedding = OpenAIEmbeddings()\n",
 "    elastic_vector_search = ElasticsearchStore(\n",
@@ -80,7 +80,7 @@
 "Example:\n",
 "```python\n",
 "    from langchain.vectorstores import ElasticsearchStore\n",
- "    from langchain.embeddings import OpenAIEmbeddings\n",
+ "    from langchain_community.embeddings import OpenAIEmbeddings\n",
 "\n",
 "    embedding = OpenAIEmbeddings()\n",
 "    elastic_vector_search = ElasticsearchStore(\n",
@@ -116,7 +116,7 @@
 "Example:\n",
 "```python\n",
 "    from langchain.vectorstores.elasticsearch import ElasticsearchStore\n",
- "    from langchain.embeddings import OpenAIEmbeddings\n",
+ "    from langchain_community.embeddings import OpenAIEmbeddings\n",
 "\n",
 "    embedding = OpenAIEmbeddings()\n",
 "    elastic_vector_search = ElasticsearchStore(\n",
@@ -180,8 +180,8 @@
 },
 "outputs": [],
 "source": [
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
- "from langchain.vectorstores import ElasticsearchStore"
+ "from langchain.vectorstores import ElasticsearchStore\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/epsilla.ipynb b/docs/docs/integrations/vectorstores/epsilla.ipynb
index 330ac6ddf34..dcd21e8ce51 100644
--- a/docs/docs/integrations/vectorstores/epsilla.ipynb
+++ b/docs/docs/integrations/vectorstores/epsilla.ipynb
@@ -57,8 +57,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings import OpenAIEmbeddings\n",
- "from langchain.vectorstores import Epsilla"
+ "from langchain.vectorstores import Epsilla\n",
+ "from langchain_community.embeddings import OpenAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/faiss.ipynb b/docs/docs/integrations/vectorstores/faiss.ipynb
index 45a54975b2a..2867dd29077 100644
--- a/docs/docs/integrations/vectorstores/faiss.ipynb
+++ b/docs/docs/integrations/vectorstores/faiss.ipynb
@@ -54,9 +54,9 @@
 "# os.environ['FAISS_NO_AVX2'] = '1'\n",
 "\n",
 "from langchain.document_loaders import TextLoader\n",
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
 "from langchain.vectorstores import FAISS\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
 "\n",
 "loader = TextLoader(\"../../../extras/modules/state_of_the_union.txt\")\n",
 "documents = loader.load()\n",
@@ -228,7 +228,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n",
+ "from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings\n",
 "\n",
 "pkl = db.serialize_to_bytes()  # serializes the faiss\n",
 "embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n",
diff --git a/docs/docs/integrations/vectorstores/faiss_async.ipynb b/docs/docs/integrations/vectorstores/faiss_async.ipynb
index a12c1b083b9..876b141202d 100644
--- a/docs/docs/integrations/vectorstores/faiss_async.ipynb
+++ b/docs/docs/integrations/vectorstores/faiss_async.ipynb
@@ -57,9 +57,9 @@
 "# os.environ['FAISS_NO_AVX2'] = '1'\n",
 "\n",
 "from langchain.document_loaders import TextLoader\n",
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
 "from langchain.vectorstores import FAISS\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
 "\n",
 "loader = TextLoader(\"../../../extras/modules/state_of_the_union.txt\")\n",
 "documents = loader.load()\n",
@@ -158,7 +158,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n",
+ "from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings\n",
 "\n",
 "pkl = db.serialize_to_bytes()  # serializes the faiss index\n",
 "embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n",
diff --git a/docs/docs/integrations/vectorstores/hippo.ipynb b/docs/docs/integrations/vectorstores/hippo.ipynb
index 34c637b1934..3a69192723d 100644
--- a/docs/docs/integrations/vectorstores/hippo.ipynb
+++ b/docs/docs/integrations/vectorstores/hippo.ipynb
@@ -97,11 +97,11 @@
 "source": [
 "import os\n",
 "\n",
- "from langchain.chat_models import ChatOpenAI\n",
 "from langchain.document_loaders import TextLoader\n",
- "from langchain.embeddings import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores.hippo import Hippo"
+ "from langchain.vectorstores.hippo import Hippo\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
+ "from langchain_community.embeddings import OpenAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/hologres.ipynb b/docs/docs/integrations/vectorstores/hologres.ipynb
index a3c1da5531a..c1fde040455 100644
--- a/docs/docs/integrations/vectorstores/hologres.ipynb
+++ b/docs/docs/integrations/vectorstores/hologres.ipynb
@@ -33,9 +33,9 @@
 },
 "outputs": [],
 "source": [
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import Hologres"
+ "from langchain.vectorstores import Hologres\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/jaguar.ipynb b/docs/docs/integrations/vectorstores/jaguar.ipynb
index 03797457e73..b1513598620 100644
--- a/docs/docs/integrations/vectorstores/jaguar.ipynb
+++ b/docs/docs/integrations/vectorstores/jaguar.ipynb
@@ -53,12 +53,12 @@
 "outputs": [],
 "source": [
 "from langchain.chains import RetrievalQAWithSourcesChain\n",
- "from langchain.chat_models import ChatOpenAI\n",
 "from langchain.document_loaders import TextLoader\n",
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
- "from langchain.llms import OpenAI\n",
 "from langchain.prompts import ChatPromptTemplate\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
+ "from langchain_community.chat_models import ChatOpenAI\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
+ "from langchain_community.llms import OpenAI\n",
 "from langchain_community.vectorstores.jaguar import Jaguar\n",
 "from langchain_core.output_parsers import StrOutputParser\n",
 "from langchain_core.runnables import RunnablePassthrough\n",
@@ -172,7 +172,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain_community.vectorstores.jaguar import Jaguar\n",
 "\n",
 "# Instantiate a Jaguar vector store object\n",
diff --git a/docs/docs/integrations/vectorstores/lancedb.ipynb b/docs/docs/integrations/vectorstores/lancedb.ipynb
index 3a546a9498e..ccf8091bd6c 100644
--- a/docs/docs/integrations/vectorstores/lancedb.ipynb
+++ b/docs/docs/integrations/vectorstores/lancedb.ipynb
@@ -64,8 +64,8 @@
 },
 "outputs": [],
 "source": [
- "from langchain.embeddings import OpenAIEmbeddings\n",
- "from langchain.vectorstores import LanceDB"
+ "from langchain.vectorstores import LanceDB\n",
+ "from langchain_community.embeddings import OpenAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/marqo.ipynb b/docs/docs/integrations/vectorstores/marqo.ipynb
index 0e8a6ac02f2..64f841913f0 100644
--- a/docs/docs/integrations/vectorstores/marqo.ipynb
+++ b/docs/docs/integrations/vectorstores/marqo.ipynb
@@ -477,7 +477,7 @@
 "import os\n",
 "\n",
 "from langchain.chains import RetrievalQAWithSourcesChain\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
 ]
diff --git a/docs/docs/integrations/vectorstores/meilisearch.ipynb b/docs/docs/integrations/vectorstores/meilisearch.ipynb
index bd9f445c8e0..edcaf4b9d0a 100644
--- a/docs/docs/integrations/vectorstores/meilisearch.ipynb
+++ b/docs/docs/integrations/vectorstores/meilisearch.ipynb
@@ -126,9 +126,9 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
 "from langchain.vectorstores import Meilisearch\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
 "\n",
 "embeddings = OpenAIEmbeddings()"
 ]
diff --git a/docs/docs/integrations/vectorstores/milvus.ipynb b/docs/docs/integrations/vectorstores/milvus.ipynb
index 47d34dd7b41..1ee9d6d8a22 100644
--- a/docs/docs/integrations/vectorstores/milvus.ipynb
+++ b/docs/docs/integrations/vectorstores/milvus.ipynb
@@ -67,9 +67,9 @@
 "outputs": [],
 "source": [
 "from langchain.document_loaders import TextLoader\n",
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import Milvus"
+ "from langchain.vectorstores import Milvus\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/momento_vector_index.ipynb b/docs/docs/integrations/vectorstores/momento_vector_index.ipynb
index bae01cc3e52..5e5e0b0b8f7 100644
--- a/docs/docs/integrations/vectorstores/momento_vector_index.ipynb
+++ b/docs/docs/integrations/vectorstores/momento_vector_index.ipynb
@@ -144,9 +144,9 @@
 "outputs": [],
 "source": [
 "from langchain.document_loaders import TextLoader\n",
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import MomentoVectorIndex"
+ "from langchain.vectorstores import MomentoVectorIndex\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings"
 ]
 },
 {
@@ -376,7 +376,7 @@
 "outputs": [],
 "source": [
 "from langchain.chains import RetrievalQA\n",
- "from langchain.chat_models import ChatOpenAI"
+ "from langchain_community.chat_models import ChatOpenAI"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/mongodb_atlas.ipynb b/docs/docs/integrations/vectorstores/mongodb_atlas.ipynb
index d66c1870bac..23296ac25ea 100644
--- a/docs/docs/integrations/vectorstores/mongodb_atlas.ipynb
+++ b/docs/docs/integrations/vectorstores/mongodb_atlas.ipynb
@@ -194,8 +194,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings import OpenAIEmbeddings\n",
 "from langchain.vectorstores import MongoDBAtlasVectorSearch\n",
+ "from langchain_community.embeddings import OpenAIEmbeddings\n",
 "\n",
 "# insert the documents in MongoDB Atlas with their embedding\n",
 "vector_search = MongoDBAtlasVectorSearch.from_documents(\n",
@@ -243,8 +243,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.embeddings import OpenAIEmbeddings\n",
 "from langchain.vectorstores import MongoDBAtlasVectorSearch\n",
+ "from langchain_community.embeddings import OpenAIEmbeddings\n",
 "\n",
 "vector_search = MongoDBAtlasVectorSearch.from_connection_string(\n",
 "    MONGODB_ATLAS_CLUSTER_URI,\n",
@@ -389,7 +389,7 @@
 "outputs": [],
 "source": [
 "from langchain.chains import RetrievalQA\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.llms import OpenAI\n",
 "\n",
 "qa = RetrievalQA.from_chain_type(\n",
 "    llm=OpenAI(),\n",
diff --git a/docs/docs/integrations/vectorstores/myscale.ipynb b/docs/docs/integrations/vectorstores/myscale.ipynb
index 3e1119066b8..4d027a195ed 100644
--- a/docs/docs/integrations/vectorstores/myscale.ipynb
+++ b/docs/docs/integrations/vectorstores/myscale.ipynb
@@ -99,9 +99,9 @@
 "outputs": [],
 "source": [
 "from langchain.document_loaders import TextLoader\n",
- "from langchain.embeddings.openai import OpenAIEmbeddings\n",
 "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain.vectorstores import MyScale"
+ "from langchain.vectorstores import MyScale\n",
+ "from langchain_community.embeddings.openai import OpenAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/integrations/vectorstores/neo4jvector.ipynb b/docs/docs/integrations/vectorstores/neo4jvector.ipynb
index 5928c407f6b..ffb92297640 100644
--- a/docs/docs/integrations/vectorstores/neo4jvector.ipynb
+++ b/docs/docs/integrations/vectorstores/neo4jvector.ipynb
@@ -74,9 +74,9 @@
"source": [ "from langchain.docstore.document import Document\n", "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Neo4jVector" + "from langchain.vectorstores import Neo4jVector\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { @@ -480,7 +480,7 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQAWithSourcesChain\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/vectorstores/opensearch.ipynb b/docs/docs/integrations/vectorstores/opensearch.ipynb index 51852f237be..f8d015086b8 100644 --- a/docs/docs/integrations/vectorstores/opensearch.ipynb +++ b/docs/docs/integrations/vectorstores/opensearch.ipynb @@ -69,9 +69,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import OpenSearchVectorSearch" + "from langchain.vectorstores import OpenSearchVectorSearch\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/pgembedding.ipynb b/docs/docs/integrations/vectorstores/pgembedding.ipynb index 7ef4f403d93..f9c996b54af 100644 --- a/docs/docs/integrations/vectorstores/pgembedding.ipynb +++ b/docs/docs/integrations/vectorstores/pgembedding.ipynb @@ -83,9 +83,9 @@ "source": [ "from langchain.docstore.document import Document\n", "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import PGEmbedding" + "from langchain.vectorstores import PGEmbedding\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb b/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb index b0be659cc3b..4565fca5243 100644 --- a/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb +++ b/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb @@ -31,9 +31,9 @@ "\n", "from langchain.docstore.document import Document\n", "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores.pgvecto_rs import PGVecto_rs" + "from langchain.vectorstores.pgvecto_rs import PGVecto_rs\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/pgvector.ipynb b/docs/docs/integrations/vectorstores/pgvector.ipynb index f093f36252e..1457bc2ee27 100644 --- a/docs/docs/integrations/vectorstores/pgvector.ipynb +++ b/docs/docs/integrations/vectorstores/pgvector.ipynb @@ -102,9 +102,9 @@ "source": [ "from langchain.docstore.document import Document\n", "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores.pgvector import PGVector" + "from langchain.vectorstores.pgvector import PGVector\n", + "from langchain_community.embeddings.openai import 
OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/pinecone.ipynb b/docs/docs/integrations/vectorstores/pinecone.ipynb index 762b72e6fb9..d3c48312f0b 100644 --- a/docs/docs/integrations/vectorstores/pinecone.ipynb +++ b/docs/docs/integrations/vectorstores/pinecone.ipynb @@ -80,9 +80,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Pinecone" + "from langchain.vectorstores import Pinecone\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/qdrant.ipynb b/docs/docs/integrations/vectorstores/qdrant.ipynb index bfd17251295..384f52313cd 100644 --- a/docs/docs/integrations/vectorstores/qdrant.ipynb +++ b/docs/docs/integrations/vectorstores/qdrant.ipynb @@ -79,9 +79,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Qdrant" + "from langchain.vectorstores import Qdrant\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/redis.ipynb b/docs/docs/integrations/vectorstores/redis.ipynb index 643ded5eb0e..41f51d0b71e 100644 --- a/docs/docs/integrations/vectorstores/redis.ipynb +++ b/docs/docs/integrations/vectorstores/redis.ipynb @@ -163,7 +163,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git a/docs/docs/integrations/vectorstores/rockset.ipynb b/docs/docs/integrations/vectorstores/rockset.ipynb index 8a0406f5659..cd70260f612 100644 --- a/docs/docs/integrations/vectorstores/rockset.ipynb +++ b/docs/docs/integrations/vectorstores/rockset.ipynb @@ -109,9 +109,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import Rockset\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/scann.ipynb b/docs/docs/integrations/vectorstores/scann.ipynb index 4797110993a..4b904508e9a 100644 --- a/docs/docs/integrations/vectorstores/scann.ipynb +++ b/docs/docs/integrations/vectorstores/scann.ipynb @@ -60,9 +60,9 @@ ], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings import HuggingFaceEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import ScaNN\n", + "from langchain_community.embeddings import HuggingFaceEmbeddings\n", "\n", "loader = TextLoader(\"state_of_the_union.txt\")\n", "documents = loader.load()\n", @@ -99,7 +99,7 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQA\n", - "from langchain.chat_models import google_palm\n", + "from langchain_community.chat_models import google_palm\n", "\n", "palm_client = google_palm.ChatGooglePalm(google_api_key=\"YOUR_GOOGLE_PALM_API_KEY\")\n", "\n", 
diff --git a/docs/docs/integrations/vectorstores/semadb.ipynb b/docs/docs/integrations/vectorstores/semadb.ipynb index b36a8e92140..6f1804c725f 100644 --- a/docs/docs/integrations/vectorstores/semadb.ipynb +++ b/docs/docs/integrations/vectorstores/semadb.ipynb @@ -41,7 +41,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import HuggingFaceEmbeddings\n", + "from langchain_community.embeddings import HuggingFaceEmbeddings\n", "\n", "embeddings = HuggingFaceEmbeddings()" ] diff --git a/docs/docs/integrations/vectorstores/singlestoredb.ipynb b/docs/docs/integrations/vectorstores/singlestoredb.ipynb index 8dec60e1ada..ff5b20fbf9d 100644 --- a/docs/docs/integrations/vectorstores/singlestoredb.ipynb +++ b/docs/docs/integrations/vectorstores/singlestoredb.ipynb @@ -47,9 +47,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import SingleStoreDB" + "from langchain.vectorstores import SingleStoreDB\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/sklearn.ipynb b/docs/docs/integrations/vectorstores/sklearn.ipynb index 740ccabcd3e..a6f08e2e6a0 100644 --- a/docs/docs/integrations/vectorstores/sklearn.ipynb +++ b/docs/docs/integrations/vectorstores/sklearn.ipynb @@ -61,9 +61,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import SKLearnVectorStore\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/sqlitevss.ipynb b/docs/docs/integrations/vectorstores/sqlitevss.ipynb index cb604f0da2a..5c80d66484f 100644 --- a/docs/docs/integrations/vectorstores/sqlitevss.ipynb +++ b/docs/docs/integrations/vectorstores/sqlitevss.ipynb @@ -70,9 +70,11 @@ ], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import SQLiteVSS\n", + "from langchain_community.embeddings.sentence_transformer import (\n", + " SentenceTransformerEmbeddings,\n", + ")\n", "\n", "# load the document and split it into chunks\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", @@ -145,9 +147,11 @@ ], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import SQLiteVSS\n", + "from langchain_community.embeddings.sentence_transformer import (\n", + " SentenceTransformerEmbeddings,\n", + ")\n", "\n", "# load the document and split it into chunks\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", diff --git a/docs/docs/integrations/vectorstores/starrocks.ipynb b/docs/docs/integrations/vectorstores/starrocks.ipynb index cad6ee7ced8..690f177d2f6 100644 --- a/docs/docs/integrations/vectorstores/starrocks.ipynb +++ 
b/docs/docs/integrations/vectorstores/starrocks.ipynb @@ -59,11 +59,11 @@ "source": [ "from langchain.chains import RetrievalQA\n", "from langchain.document_loaders import DirectoryLoader, UnstructuredMarkdownLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.llms import OpenAI\n", "from langchain.text_splitter import TokenTextSplitter\n", "from langchain.vectorstores import StarRocks\n", "from langchain.vectorstores.starrocks import StarRocksSettings\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_community.llms import OpenAI\n", "\n", "update_vectordb = False" ] diff --git a/docs/docs/integrations/vectorstores/supabase.ipynb b/docs/docs/integrations/vectorstores/supabase.ipynb index 8416e1073aa..5331106107a 100644 --- a/docs/docs/integrations/vectorstores/supabase.ipynb +++ b/docs/docs/integrations/vectorstores/supabase.ipynb @@ -155,8 +155,8 @@ "source": [ "import os\n", "\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.vectorstores import SupabaseVectorStore\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from supabase.client import Client, create_client\n", "\n", "supabase_url = os.environ.get(\"SUPABASE_URL\")\n", diff --git a/docs/docs/integrations/vectorstores/tair.ipynb b/docs/docs/integrations/vectorstores/tair.ipynb index 361a1da7d7f..d9345634dc6 100644 --- a/docs/docs/integrations/vectorstores/tair.ipynb +++ b/docs/docs/integrations/vectorstores/tair.ipynb @@ -20,9 +20,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings.fake import FakeEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Tair" + "from langchain.vectorstores import Tair\n", + "from langchain_community.embeddings.fake import FakeEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/tencentvectordb.ipynb b/docs/docs/integrations/vectorstores/tencentvectordb.ipynb index ef8e0667b10..bd48d7e3c09 100644 --- a/docs/docs/integrations/vectorstores/tencentvectordb.ipynb +++ b/docs/docs/integrations/vectorstores/tencentvectordb.ipynb @@ -34,10 +34,10 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.fake import FakeEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import TencentVectorDB\n", - "from langchain.vectorstores.tencentvectordb import ConnectionParams" + "from langchain.vectorstores.tencentvectordb import ConnectionParams\n", + "from langchain_community.embeddings.fake import FakeEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/tigris.ipynb b/docs/docs/integrations/vectorstores/tigris.ipynb index 4b89acdb79f..c5c1b90d798 100644 --- a/docs/docs/integrations/vectorstores/tigris.ipynb +++ b/docs/docs/integrations/vectorstores/tigris.ipynb @@ -86,9 +86,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Tigris" + "from langchain.vectorstores import Tigris\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/tiledb.ipynb b/docs/docs/integrations/vectorstores/tiledb.ipynb index 7941705837b..8853ec4970f 100644 --- 
a/docs/docs/integrations/vectorstores/tiledb.ipynb +++ b/docs/docs/integrations/vectorstores/tiledb.ipynb @@ -44,9 +44,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings import HuggingFaceEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import TileDB\n", + "from langchain_community.embeddings import HuggingFaceEmbeddings\n", "\n", "raw_documents = TextLoader(\"../../modules/state_of_the_union.txt\").load()\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", diff --git a/docs/docs/integrations/vectorstores/timescalevector.ipynb b/docs/docs/integrations/vectorstores/timescalevector.ipynb index 20549a424c3..177c2325a47 100644 --- a/docs/docs/integrations/vectorstores/timescalevector.ipynb +++ b/docs/docs/integrations/vectorstores/timescalevector.ipynb @@ -125,9 +125,9 @@ "from langchain.docstore.document import Document\n", "from langchain.document_loaders import TextLoader\n", "from langchain.document_loaders.json_loader import JSONLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores.timescalevector import TimescaleVector" + "from langchain.vectorstores.timescalevector import TimescaleVector\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { @@ -356,7 +356,7 @@ "outputs": [], "source": [ "# Initialize GPT3.5 model\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0.1, model=\"gpt-3.5-turbo-16k\")\n", "\n", @@ -1050,7 +1050,7 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0.1, model=\"gpt-3.5-turbo-16k\")\n", "\n", @@ -1262,8 +1262,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.llms import OpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.llms import OpenAI\n", "\n", "# Give LLM info about the metadata fields\n", "metadata_field_info = [\n", diff --git a/docs/docs/integrations/vectorstores/typesense.ipynb b/docs/docs/integrations/vectorstores/typesense.ipynb index 89a64d3543e..6750e4f6fe6 100644 --- a/docs/docs/integrations/vectorstores/typesense.ipynb +++ b/docs/docs/integrations/vectorstores/typesense.ipynb @@ -85,9 +85,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Typesense" + "from langchain.vectorstores import Typesense\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/usearch.ipynb b/docs/docs/integrations/vectorstores/usearch.ipynb index a8098653c28..3752242d2ef 100644 --- a/docs/docs/integrations/vectorstores/usearch.ipynb +++ b/docs/docs/integrations/vectorstores/usearch.ipynb @@ -56,9 +56,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import USearch" + "from 
langchain.vectorstores import USearch\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/vald.ipynb b/docs/docs/integrations/vectorstores/vald.ipynb index 34401d4e9ce..e7ec96d1b4e 100644 --- a/docs/docs/integrations/vectorstores/vald.ipynb +++ b/docs/docs/integrations/vectorstores/vald.ipynb @@ -43,9 +43,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings import HuggingFaceEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import Vald\n", + "from langchain_community.embeddings import HuggingFaceEmbeddings\n", "\n", "raw_documents = TextLoader(\"state_of_the_union.txt\").load()\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", @@ -190,9 +190,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings import HuggingFaceEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import Vald\n", + "from langchain_community.embeddings import HuggingFaceEmbeddings\n", "\n", "raw_documents = TextLoader(\"state_of_the_union.txt\").load()\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", diff --git a/docs/docs/integrations/vectorstores/vearch.ipynb b/docs/docs/integrations/vectorstores/vearch.ipynb index da623b9a87f..0b655464bc8 100644 --- a/docs/docs/integrations/vectorstores/vearch.ipynb +++ b/docs/docs/integrations/vectorstores/vearch.ipynb @@ -55,9 +55,9 @@ ], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain.vectorstores.vearch import Vearch\n", + "from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings\n", "from transformers import AutoModel, AutoTokenizer\n", "\n", "# replace with your local model path\n", diff --git a/docs/docs/integrations/vectorstores/vectara.ipynb b/docs/docs/integrations/vectorstores/vectara.ipynb index 24fe802191d..943d5f10494 100644 --- a/docs/docs/integrations/vectorstores/vectara.ipynb +++ b/docs/docs/integrations/vectorstores/vectara.ipynb @@ -85,9 +85,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.fake import FakeEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Vectara" + "from langchain.vectorstores import Vectara\n", + "from langchain_community.embeddings.fake import FakeEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/vespa.ipynb b/docs/docs/integrations/vectorstores/vespa.ipynb index 505003fb735..d64d72a8996 100644 --- a/docs/docs/integrations/vectorstores/vespa.ipynb +++ b/docs/docs/integrations/vectorstores/vespa.ipynb @@ -158,7 +158,9 @@ "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", "docs = text_splitter.split_documents(documents)\n", "\n", - "from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings\n", + "from langchain_community.embeddings.sentence_transformer import (\n", + " SentenceTransformerEmbeddings,\n", + ")\n", "\n", "embedding_function = SentenceTransformerEmbeddings(model_name=\"all-MiniLM-L6-v2\")" ] diff --git a/docs/docs/integrations/vectorstores/weaviate.ipynb
b/docs/docs/integrations/vectorstores/weaviate.ipynb index 27cc2942e29..827887adbb5 100644 --- a/docs/docs/integrations/vectorstores/weaviate.ipynb +++ b/docs/docs/integrations/vectorstores/weaviate.ipynb @@ -120,9 +120,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Weaviate" + "from langchain.vectorstores import Weaviate\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { @@ -368,7 +368,7 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n", "llm.predict(\"What did the president say about Justice Breyer\")" @@ -398,7 +398,7 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQAWithSourcesChain\n", - "from langchain.llms import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { @@ -540,7 +540,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)" ] diff --git a/docs/docs/integrations/vectorstores/xata.ipynb b/docs/docs/integrations/vectorstores/xata.ipynb index 94566bd0e93..ac31e515222 100644 --- a/docs/docs/integrations/vectorstores/xata.ipynb +++ b/docs/docs/integrations/vectorstores/xata.ipynb @@ -108,9 +108,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores.xata import XataVectorStore" + "from langchain.vectorstores.xata import XataVectorStore\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/yellowbrick.ipynb b/docs/docs/integrations/vectorstores/yellowbrick.ipynb index 34658b912c8..59ee6b6b1c7 100644 --- a/docs/docs/integrations/vectorstores/yellowbrick.ipynb +++ b/docs/docs/integrations/vectorstores/yellowbrick.ipynb @@ -98,11 +98,11 @@ "import psycopg2\n", "from IPython.display import Markdown, display\n", "from langchain.chains import LLMChain, RetrievalQAWithSourcesChain\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.docstore.document import Document\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain.vectorstores import Yellowbrick\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "# Establish connection parameters to Yellowbrick. 
If you've signed up for Sandbox, fill in the information from your welcome mail here:\n", "yellowbrick_connection_string = (\n", diff --git a/docs/docs/integrations/vectorstores/zilliz.ipynb b/docs/docs/integrations/vectorstores/zilliz.ipynb index fd3f75ca521..6f8be8bd294 100644 --- a/docs/docs/integrations/vectorstores/zilliz.ipynb +++ b/docs/docs/integrations/vectorstores/zilliz.ipynb @@ -77,9 +77,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Milvus" + "from langchain.vectorstores import Milvus\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/langsmith/walkthrough.ipynb b/docs/docs/langsmith/walkthrough.ipynb index eb7df9f805e..458025bbcd4 100644 --- a/docs/docs/langsmith/walkthrough.ipynb +++ b/docs/docs/langsmith/walkthrough.ipynb @@ -142,9 +142,9 @@ "from langchain.agents import AgentExecutor\n", "from langchain.agents.format_scratchpad import format_to_openai_function_messages\n", "from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.tools import DuckDuckGoSearchResults\n", "from langchain.tools.render import format_tool_to_openai_function\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "# Fetches the latest version of this prompt\n", "prompt = hub.pull(\"wfh/langsmith-agent-prompt:latest\")\n", @@ -337,8 +337,8 @@ "from langchain.agents import AgentExecutor, AgentType, initialize_agent, load_tools\n", "from langchain.agents.format_scratchpad import format_to_openai_function_messages\n", "from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.tools.render import format_tool_to_openai_function\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "\n", "# Since chains can be stateful (e.g. 
they can have memory), we provide\n", diff --git a/docs/docs/modules/agents/how_to/agent_iter.ipynb b/docs/docs/modules/agents/how_to/agent_iter.ipynb index 61854853be3..697c044ac73 100644 --- a/docs/docs/modules/agents/how_to/agent_iter.ipynb +++ b/docs/docs/modules/agents/how_to/agent_iter.ipynb @@ -26,7 +26,7 @@ "source": [ "from langchain.agents import AgentType, initialize_agent\n", "from langchain.chains import LLMMathChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.pydantic_v1 import BaseModel, Field\n", "from langchain_core.tools import Tool" ] diff --git a/docs/docs/modules/agents/how_to/agent_structured.ipynb b/docs/docs/modules/agents/how_to/agent_structured.ipynb index e8d79cc54cd..ac54d1888f5 100644 --- a/docs/docs/modules/agents/how_to/agent_structured.ipynb +++ b/docs/docs/modules/agents/how_to/agent_structured.ipynb @@ -54,9 +54,9 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import Chroma" + "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { @@ -231,9 +231,9 @@ "source": [ "from langchain.agents import AgentExecutor\n", "from langchain.agents.format_scratchpad import format_to_openai_function_messages\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", - "from langchain.tools.render import format_tool_to_openai_function" + "from langchain.tools.render import format_tool_to_openai_function\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/modules/agents/how_to/custom_agent.ipynb b/docs/docs/modules/agents/how_to/custom_agent.ipynb index 18b995b7569..4e5ea141c03 100644 --- a/docs/docs/modules/agents/how_to/custom_agent.ipynb +++ b/docs/docs/modules/agents/how_to/custom_agent.ipynb @@ -42,7 +42,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)" ] diff --git a/docs/docs/modules/agents/how_to/streaming.ipynb b/docs/docs/modules/agents/how_to/streaming.ipynb index c2489c7b53a..98c9b908420 100644 --- a/docs/docs/modules/agents/how_to/streaming.ipynb +++ b/docs/docs/modules/agents/how_to/streaming.ipynb @@ -66,7 +66,7 @@ "source": [ "from langchain import hub\n", "from langchain.agents import AgentExecutor, create_openai_functions_agent\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "# Get the prompt to use - you can modify this!\n", "# If you want to see the prompt in full, you can view it at: https://smith.langchain.com/hub/hwchase17/openai-functions-agent\n", diff --git a/docs/docs/modules/agents/quick_start.ipynb b/docs/docs/modules/agents/quick_start.ipynb index ff0bb01c813..a38b5acf57c 100644 --- a/docs/docs/modules/agents/quick_start.ipynb +++ b/docs/docs/modules/agents/quick_start.ipynb @@ -221,7 +221,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)" ] diff --git
a/docs/docs/modules/agents/tools/tools_as_openai_functions.ipynb b/docs/docs/modules/agents/tools/tools_as_openai_functions.ipynb index c928f188e1e..a8c9437b60e 100644 --- a/docs/docs/modules/agents/tools/tools_as_openai_functions.ipynb +++ b/docs/docs/modules/agents/tools/tools_as_openai_functions.ipynb @@ -17,8 +17,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.schema import HumanMessage" + "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/modules/callbacks/async_callbacks.ipynb b/docs/docs/modules/callbacks/async_callbacks.ipynb index 0deb98fcd63..29ae822ad56 100644 --- a/docs/docs/modules/callbacks/async_callbacks.ipynb +++ b/docs/docs/modules/callbacks/async_callbacks.ipynb @@ -62,8 +62,8 @@ "from typing import Any, Dict, List\n", "\n", "from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import HumanMessage, LLMResult\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "\n", "class MyCustomSyncHandler(BaseCallbackHandler):\n", diff --git a/docs/docs/modules/callbacks/custom_callbacks.ipynb b/docs/docs/modules/callbacks/custom_callbacks.ipynb index fb810a25caa..239d163cf25 100644 --- a/docs/docs/modules/callbacks/custom_callbacks.ipynb +++ b/docs/docs/modules/callbacks/custom_callbacks.ipynb @@ -53,8 +53,8 @@ ], "source": [ "from langchain.callbacks.base import BaseCallbackHandler\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import HumanMessage\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "\n", "class MyCustomHandler(BaseCallbackHandler):\n", diff --git a/docs/docs/modules/callbacks/filecallbackhandler.ipynb b/docs/docs/modules/callbacks/filecallbackhandler.ipynb index 0806affde38..faa263efa73 100644 --- a/docs/docs/modules/callbacks/filecallbackhandler.ipynb +++ b/docs/docs/modules/callbacks/filecallbackhandler.ipynb @@ -47,8 +47,8 @@ "source": [ "from langchain.callbacks import FileCallbackHandler\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "from loguru import logger\n", "\n", "logfile = \"output.log\"\n", diff --git a/docs/docs/modules/callbacks/index.mdx b/docs/docs/modules/callbacks/index.mdx index c90951700ef..fe9e4b9e1f6 100644 --- a/docs/docs/modules/callbacks/index.mdx +++ b/docs/docs/modules/callbacks/index.mdx @@ -86,7 +86,7 @@ LangChain provides a few built-in handlers that you can use to get started. 
Thes ```python from langchain.callbacks import StdOutCallbackHandler from langchain.chains import LLMChain -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI from langchain.prompts import PromptTemplate handler = StdOutCallbackHandler() diff --git a/docs/docs/modules/callbacks/multiple_callbacks.ipynb b/docs/docs/modules/callbacks/multiple_callbacks.ipynb index 4a135acdb4d..ad7a90b3df1 100644 --- a/docs/docs/modules/callbacks/multiple_callbacks.ipynb +++ b/docs/docs/modules/callbacks/multiple_callbacks.ipynb @@ -128,8 +128,8 @@ "\n", "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.callbacks.base import BaseCallbackHandler\n", - "from langchain.llms import OpenAI\n", "from langchain.schema import AgentAction\n", + "from langchain_community.llms import OpenAI\n", "\n", "\n", "# First, define custom callback handler implementations\n", diff --git a/docs/docs/modules/callbacks/token_counting.ipynb b/docs/docs/modules/callbacks/token_counting.ipynb index 1d82c1f98c3..26aa8b828e4 100644 --- a/docs/docs/modules/callbacks/token_counting.ipynb +++ b/docs/docs/modules/callbacks/token_counting.ipynb @@ -19,7 +19,7 @@ "import asyncio\n", "\n", "from langchain.callbacks import get_openai_callback\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "with get_openai_callback() as cb:\n", diff --git a/docs/docs/modules/chains/document/map_reduce.ipynb b/docs/docs/modules/chains/document/map_reduce.ipynb index eb909f14195..cd3f65283e9 100644 --- a/docs/docs/modules/chains/document/map_reduce.ipynb +++ b/docs/docs/modules/chains/document/map_reduce.ipynb @@ -32,9 +32,9 @@ "from functools import partial\n", "\n", "from langchain.chains.combine_documents import collapse_docs, split_list_of_docs\n", - "from langchain.chat_models import ChatAnthropic\n", "from langchain.prompts import PromptTemplate\n", "from langchain.schema import StrOutputParser\n", + "from langchain_community.chat_models import ChatAnthropic\n", "from langchain_core.prompts import format_document\n", "from langchain_core.runnables import RunnableParallel, RunnablePassthrough" ] diff --git a/docs/docs/modules/chains/document/map_rerank.ipynb b/docs/docs/modules/chains/document/map_rerank.ipynb index 8d3aac7003c..1cbc55f5912 100644 --- a/docs/docs/modules/chains/document/map_rerank.ipynb +++ b/docs/docs/modules/chains/document/map_rerank.ipynb @@ -29,10 +29,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.output_parsers.openai_functions import PydanticOutputFunctionsParser\n", "from langchain.prompts import PromptTemplate\n", "from langchain.utils.openai_functions import convert_pydantic_to_openai_function\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.prompts import format_document\n", "from langchain_core.pydantic_v1 import BaseModel, Field" ] diff --git a/docs/docs/modules/chains/document/refine.ipynb b/docs/docs/modules/chains/document/refine.ipynb index b011cdf6d2a..87875e04c75 100644 --- a/docs/docs/modules/chains/document/refine.ipynb +++ b/docs/docs/modules/chains/document/refine.ipynb @@ -48,9 +48,9 @@ "from operator import itemgetter\n", "\n", "from langchain.callbacks.manager import trace_as_chain_group\n", - "from langchain.chat_models import ChatAnthropic\n", "from langchain.prompts import PromptTemplate\n", "from langchain.schema import StrOutputParser\n", + "from 
langchain_community.chat_models import ChatAnthropic\n", "from langchain_core.prompts import format_document" ] }, diff --git a/docs/docs/modules/chains/document/stuff.ipynb b/docs/docs/modules/chains/document/stuff.ipynb index 8ea0a8e3ceb..67ef4ef73b1 100644 --- a/docs/docs/modules/chains/document/stuff.ipynb +++ b/docs/docs/modules/chains/document/stuff.ipynb @@ -40,9 +40,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic\n", "from langchain.prompts import PromptTemplate\n", "from langchain.schema import StrOutputParser\n", + "from langchain_community.chat_models import ChatAnthropic\n", "from langchain_core.prompts import format_document" ] }, diff --git a/docs/docs/modules/chains/foundational/llm_chain.ipynb b/docs/docs/modules/chains/foundational/llm_chain.ipynb index 8eeddacff4f..fb61347d417 100644 --- a/docs/docs/modules/chains/foundational/llm_chain.ipynb +++ b/docs/docs/modules/chains/foundational/llm_chain.ipynb @@ -40,9 +40,9 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import PromptTemplate\n", "from langchain.schema import StrOutputParser\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "prompt = PromptTemplate.from_template(\n", " \"What is a good name for a company that makes {product}?\"\n", @@ -98,8 +98,8 @@ ], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "\n", "prompt_template = \"What is a good name for a company that makes {product}?\"\n", "\n", diff --git a/docs/docs/modules/chains/foundational/router.ipynb b/docs/docs/modules/chains/foundational/router.ipynb index a082d2964a2..5de1993a9a4 100644 --- a/docs/docs/modules/chains/foundational/router.ipynb +++ b/docs/docs/modules/chains/foundational/router.ipynb @@ -57,7 +57,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnableBranch" ] @@ -193,7 +193,7 @@ "from langchain.chains import ConversationChain\n", "from langchain.chains.llm import LLMChain\n", "from langchain.chains.router import MultiPromptChain\n", - "from langchain.llms import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { @@ -444,8 +444,8 @@ "outputs": [], "source": [ "from langchain.chains.router.embedding_router import EmbeddingRouterChain\n", - "from langchain.embeddings import CohereEmbeddings\n", - "from langchain.vectorstores import Chroma" + "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import CohereEmbeddings" ] }, { diff --git a/docs/docs/modules/chains/foundational/sequential_chains.ipynb b/docs/docs/modules/chains/foundational/sequential_chains.ipynb index 0bcb928a032..9ff568da446 100644 --- a/docs/docs/modules/chains/foundational/sequential_chains.ipynb +++ b/docs/docs/modules/chains/foundational/sequential_chains.ipynb @@ -67,8 +67,8 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import StrOutputParser\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI()\n", "chain = (\n", @@ -158,8 +158,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from 
langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "\n", "# This is an LLMChain to write a synopsis given a title of a play.\n", "llm = OpenAI(temperature=0.7)\n", diff --git a/docs/docs/modules/chains/foundational/transformation.ipynb b/docs/docs/modules/chains/foundational/transformation.ipynb index 10d36a53d54..1f2156fca72 100644 --- a/docs/docs/modules/chains/foundational/transformation.ipynb +++ b/docs/docs/modules/chains/foundational/transformation.ipynb @@ -69,8 +69,8 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import StrOutputParser\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "runnable = (\n", " {\"output_text\": lambda text: \"\\n\\n\".join(text.split(\"\\n\\n\")[:3])}\n", @@ -105,7 +105,7 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain, SimpleSequentialChain, TransformChain\n", - "from langchain.llms import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/modules/chains/how_to/async_chain.ipynb b/docs/docs/modules/chains/how_to/async_chain.ipynb index 858c132282f..af69ab3d055 100644 --- a/docs/docs/modules/chains/how_to/async_chain.ipynb +++ b/docs/docs/modules/chains/how_to/async_chain.ipynb @@ -72,8 +72,8 @@ "import time\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "\n", "\n", "def generate_serially():\n", diff --git a/docs/docs/modules/chains/how_to/call_methods.ipynb b/docs/docs/modules/chains/how_to/call_methods.ipynb index 1c68de43c84..254b5a70da0 100644 --- a/docs/docs/modules/chains/how_to/call_methods.ipynb +++ b/docs/docs/modules/chains/how_to/call_methods.ipynb @@ -28,7 +28,7 @@ ], "source": [ "from langchain.chains.llm import LLMChain\n", - "from langchain.chat_models.openai import ChatOpenAI\n", + "from langchain_community.chat_models.openai import ChatOpenAI\n", "from langchain_core.prompts import PromptTemplate\n", "\n", "chat = ChatOpenAI(temperature=0)\n", diff --git a/docs/docs/modules/chains/how_to/custom_chain.ipynb b/docs/docs/modules/chains/how_to/custom_chain.ipynb index 692b4749d9d..033fcac6d23 100644 --- a/docs/docs/modules/chains/how_to/custom_chain.ipynb +++ b/docs/docs/modules/chains/how_to/custom_chain.ipynb @@ -152,8 +152,8 @@ ], "source": [ "from langchain.callbacks.stdout import StdOutCallbackHandler\n", - "from langchain.chat_models.openai import ChatOpenAI\n", "from langchain.prompts.prompt import PromptTemplate\n", + "from langchain_community.chat_models.openai import ChatOpenAI\n", "\n", "chain = MyCustomChain(\n", " prompt=PromptTemplate.from_template(\"tell us a joke about {topic}\"),\n", diff --git a/docs/docs/modules/chains/how_to/openai_functions.ipynb b/docs/docs/modules/chains/how_to/openai_functions.ipynb index 65c5cf680db..d07d2849fa4 100644 --- a/docs/docs/modules/chains/how_to/openai_functions.ipynb +++ b/docs/docs/modules/chains/how_to/openai_functions.ipynb @@ -27,8 +27,8 @@ " create_structured_output_chain,\n", " create_structured_output_runnable,\n", ")\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate" + "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/modules/chains/index.ipynb b/docs/docs/modules/chains/index.ipynb index f95f2f13fa1..439bdb6f5c2 100644 
--- a/docs/docs/modules/chains/index.ipynb +++ b/docs/docs/modules/chains/index.ipynb @@ -50,9 +50,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.schema import StrOutputParser\n", + "from langchain_community.chat_models import ChatAnthropic\n", "\n", "model = ChatAnthropic()\n", "prompt = ChatPromptTemplate.from_messages(\n", diff --git a/docs/docs/modules/data_connection/document_loaders/pdf.mdx b/docs/docs/modules/data_connection/document_loaders/pdf.mdx index ef06d127952..33554cad7ad 100644 --- a/docs/docs/modules/data_connection/document_loaders/pdf.mdx +++ b/docs/docs/modules/data_connection/document_loaders/pdf.mdx @@ -61,7 +61,7 @@ os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:') ```python from langchain.vectorstores import FAISS -from langchain.embeddings.openai import OpenAIEmbeddings +from langchain_community.embeddings.openai import OpenAIEmbeddings faiss_index = FAISS.from_documents(pages, OpenAIEmbeddings()) docs = faiss_index.similarity_search("How will the community be engaged?", k=2) diff --git a/docs/docs/modules/data_connection/indexing.ipynb b/docs/docs/modules/data_connection/indexing.ipynb index 5e6e2645d1f..23264d1a84f 100644 --- a/docs/docs/modules/data_connection/indexing.ipynb +++ b/docs/docs/modules/data_connection/indexing.ipynb @@ -90,10 +90,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.indexes import SQLRecordManager, index\n", "from langchain.schema import Document\n", - "from langchain.vectorstores import ElasticsearchStore" + "from langchain.vectorstores import ElasticsearchStore\n", + "from langchain_community.embeddings import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb b/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb index d494c10aa7d..16aa4985696 100644 --- a/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb +++ b/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb @@ -21,9 +21,9 @@ "source": [ "# Build a sample vectorDB\n", "from langchain.document_loaders import WebBaseLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "# Load blog post\n", "loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n", @@ -55,8 +55,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.retrievers.multi_query import MultiQueryRetriever\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "question = \"What are the approaches to Task Decomposition?\"\n", "llm = ChatOpenAI(temperature=0)\n", diff --git a/docs/docs/modules/data_connection/retrievers/contextual_compression.ipynb b/docs/docs/modules/data_connection/retrievers/contextual_compression.ipynb index f9abef1ff3b..fc1b68431ca 100644 --- a/docs/docs/modules/data_connection/retrievers/contextual_compression.ipynb +++ b/docs/docs/modules/data_connection/retrievers/contextual_compression.ipynb @@ -119,9 +119,9 @@ ], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from 
langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "documents = TextLoader(\"../../state_of_the_union.txt\").load()\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", @@ -174,9 +174,9 @@ } ], "source": [ - "from langchain.llms import OpenAI\n", "from langchain.retrievers import ContextualCompressionRetriever\n", "from langchain.retrievers.document_compressors import LLMChainExtractor\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "compressor = LLMChainExtractor.from_llm(llm)\n", @@ -312,8 +312,8 @@ } ], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers.document_compressors import EmbeddingsFilter\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()\n", "embeddings_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)\n", diff --git a/docs/docs/modules/data_connection/retrievers/ensemble.ipynb b/docs/docs/modules/data_connection/retrievers/ensemble.ipynb index 95747105b0c..3edc714fcca 100644 --- a/docs/docs/modules/data_connection/retrievers/ensemble.ipynb +++ b/docs/docs/modules/data_connection/retrievers/ensemble.ipynb @@ -28,9 +28,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers import BM25Retriever, EnsembleRetriever\n", - "from langchain.vectorstores import FAISS" + "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/modules/data_connection/retrievers/index.mdx b/docs/docs/modules/data_connection/retrievers/index.mdx index 56ab6b4759a..e741d00c571 100644 --- a/docs/docs/modules/data_connection/retrievers/index.mdx +++ b/docs/docs/modules/data_connection/retrievers/index.mdx @@ -47,7 +47,7 @@ LangChain also integrates with many third-party retrieval services. 
For a full l Since retrievers are `Runnable`'s, we can easily compose them with other `Runnable` objects: ```python -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI from langchain.prompts import ChatPromptTemplate from langchain.schema import StrOutputParser from langchain_core.runnables import RunnablePassthrough diff --git a/docs/docs/modules/data_connection/retrievers/long_context_reorder.ipynb b/docs/docs/modules/data_connection/retrievers/long_context_reorder.ipynb index 4a157c42b66..6b2d35f4b96 100644 --- a/docs/docs/modules/data_connection/retrievers/long_context_reorder.ipynb +++ b/docs/docs/modules/data_connection/retrievers/long_context_reorder.ipynb @@ -55,10 +55,10 @@ "from langchain.document_transformers import (\n", " LongContextReorder,\n", ")\n", - "from langchain.embeddings import HuggingFaceEmbeddings\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import HuggingFaceEmbeddings\n", + "from langchain_community.llms import OpenAI\n", "\n", "# Get embeddings.\n", "embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n", diff --git a/docs/docs/modules/data_connection/retrievers/multi_vector.ipynb b/docs/docs/modules/data_connection/retrievers/multi_vector.ipynb index 51105104b58..7bbe2778073 100644 --- a/docs/docs/modules/data_connection/retrievers/multi_vector.ipynb +++ b/docs/docs/modules/data_connection/retrievers/multi_vector.ipynb @@ -37,10 +37,10 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.storage import InMemoryByteStore\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import Chroma" + "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import OpenAIEmbeddings" ] }, { @@ -230,8 +230,8 @@ "source": [ "import uuid\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.documents import Document\n", "from langchain_core.output_parsers import StrOutputParser" ] diff --git a/docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb b/docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb index eb694e321ba..5363ff5a59c 100644 --- a/docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb +++ b/docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb @@ -43,10 +43,10 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.storage import InMemoryStore\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import Chroma" + "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/modules/data_connection/retrievers/self_query.ipynb b/docs/docs/modules/data_connection/retrievers/self_query.ipynb index 44acd544533..9cfd8e03205 100644 --- a/docs/docs/modules/data_connection/retrievers/self_query.ipynb +++ b/docs/docs/modules/data_connection/retrievers/self_query.ipynb @@ -40,9 +40,9 @@ "metadata": {}, "outputs": [], "source": [ - 
"from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.schema import Document\n", "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "docs = [\n", " Document(\n", @@ -96,8 +96,8 @@ "outputs": [], "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/modules/data_connection/retrievers/time_weighted_vectorstore.ipynb b/docs/docs/modules/data_connection/retrievers/time_weighted_vectorstore.ipynb index f725cc5338b..33da6ac0cac 100644 --- a/docs/docs/modules/data_connection/retrievers/time_weighted_vectorstore.ipynb +++ b/docs/docs/modules/data_connection/retrievers/time_weighted_vectorstore.ipynb @@ -29,10 +29,10 @@ "\n", "import faiss\n", "from langchain.docstore import InMemoryDocstore\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers import TimeWeightedVectorStoreRetriever\n", "from langchain.schema import Document\n", - "from langchain.vectorstores import FAISS" + "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/modules/data_connection/retrievers/vectorstore.ipynb b/docs/docs/modules/data_connection/retrievers/vectorstore.ipynb index ac45c46dfef..d8f6b8da67b 100644 --- a/docs/docs/modules/data_connection/retrievers/vectorstore.ipynb +++ b/docs/docs/modules/data_connection/retrievers/vectorstore.ipynb @@ -42,9 +42,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "documents = loader.load()\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", diff --git a/docs/docs/modules/data_connection/text_embedding/caching_embeddings.ipynb b/docs/docs/modules/data_connection/text_embedding/caching_embeddings.ipynb index b3c4fd6340e..e99b8d0dda4 100644 --- a/docs/docs/modules/data_connection/text_embedding/caching_embeddings.ipynb +++ b/docs/docs/modules/data_connection/text_embedding/caching_embeddings.ipynb @@ -71,10 +71,10 @@ "outputs": [], "source": [ "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.storage import LocalFileStore\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "underlying_embeddings = OpenAIEmbeddings()\n", "\n", diff --git a/docs/docs/modules/data_connection/text_embedding/index.mdx b/docs/docs/modules/data_connection/text_embedding/index.mdx index 16804b3949e..921f06414fb 100644 --- a/docs/docs/modules/data_connection/text_embedding/index.mdx +++ b/docs/docs/modules/data_connection/text_embedding/index.mdx @@ -32,14 +32,14 @@ export OPENAI_API_KEY="..." 
If you'd prefer not to set an environment variable, you can pass the key in directly via the `openai_api_key` named parameter when instantiating the `OpenAIEmbeddings` class: ```python -from langchain.embeddings import OpenAIEmbeddings +from langchain_community.embeddings import OpenAIEmbeddings embeddings_model = OpenAIEmbeddings(openai_api_key="...") ``` Otherwise you can initialize without any params: ```python -from langchain.embeddings import OpenAIEmbeddings +from langchain_community.embeddings import OpenAIEmbeddings embeddings_model = OpenAIEmbeddings() ``` diff --git a/docs/docs/modules/data_connection/vectorstores/index.mdx b/docs/docs/modules/data_connection/vectorstores/index.mdx index bbfb5f166c1..b7563027855 100644 --- a/docs/docs/modules/data_connection/vectorstores/index.mdx +++ b/docs/docs/modules/data_connection/vectorstores/index.mdx @@ -44,7 +44,7 @@ os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:') ```python from langchain.document_loaders import TextLoader -from langchain.embeddings.openai import OpenAIEmbeddings +from langchain_community.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import CharacterTextSplitter from langchain.vectorstores import Chroma @@ -76,7 +76,7 @@ os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:') ```python from langchain.document_loaders import TextLoader -from langchain.embeddings.openai import OpenAIEmbeddings +from langchain_community.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import CharacterTextSplitter from langchain.vectorstores import FAISS @@ -108,7 +108,7 @@ os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:') ```python from langchain.document_loaders import TextLoader -from langchain.embeddings.openai import OpenAIEmbeddings +from langchain_community.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import CharacterTextSplitter from langchain.vectorstores import LanceDB diff --git a/docs/docs/modules/memory/adding_memory.ipynb b/docs/docs/modules/memory/adding_memory.ipynb index a4a5c89be55..8997ccae214 100644 --- a/docs/docs/modules/memory/adding_memory.ipynb +++ b/docs/docs/modules/memory/adding_memory.ipynb @@ -24,9 +24,9 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI" ] }, { @@ -176,13 +176,13 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import (\n", " ChatPromptTemplate,\n", " HumanMessagePromptTemplate,\n", " MessagesPlaceholder,\n", ")\n", - "from langchain.schema import SystemMessage" + "from langchain.schema import SystemMessage\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb b/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb index 5d4992b256f..e1693612177 100644 --- a/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb +++ b/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb @@ -17,9 +17,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores
import Chroma\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { @@ -77,9 +77,9 @@ "outputs": [], "source": [ "from langchain.chains.question_answering import load_qa_chain\n", - "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/modules/memory/agent_with_memory.ipynb b/docs/docs/modules/memory/agent_with_memory.ipynb index 9e00b2863ce..3e0c5c337f8 100644 --- a/docs/docs/modules/memory/agent_with_memory.ipynb +++ b/docs/docs/modules/memory/agent_with_memory.ipynb @@ -29,9 +29,9 @@ "source": [ "from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain.utilities import GoogleSearchAPIWrapper" + "from langchain.utilities import GoogleSearchAPIWrapper\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/modules/memory/agent_with_memory_in_db.ipynb b/docs/docs/modules/memory/agent_with_memory_in_db.ipynb index 01ab5b81a52..529b2d7e03e 100644 --- a/docs/docs/modules/memory/agent_with_memory_in_db.ipynb +++ b/docs/docs/modules/memory/agent_with_memory_in_db.ipynb @@ -35,10 +35,10 @@ "source": [ "from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferMemory\n", "from langchain.memory.chat_message_histories import RedisChatMessageHistory\n", - "from langchain.utilities import GoogleSearchAPIWrapper" + "from langchain.utilities import GoogleSearchAPIWrapper\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/modules/memory/conversational_customization.ipynb b/docs/docs/modules/memory/conversational_customization.ipynb index 53efd4f3432..247cf8677df 100644 --- a/docs/docs/modules/memory/conversational_customization.ipynb +++ b/docs/docs/modules/memory/conversational_customization.ipynb @@ -18,8 +18,8 @@ "outputs": [], "source": [ "from langchain.chains import ConversationChain\n", - "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferMemory\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)" ] diff --git a/docs/docs/modules/memory/custom_memory.ipynb b/docs/docs/modules/memory/custom_memory.ipynb index 55ac9a591e2..cfd9f8429c5 100644 --- a/docs/docs/modules/memory/custom_memory.ipynb +++ b/docs/docs/modules/memory/custom_memory.ipynb @@ -28,8 +28,8 @@ "from typing import Any, Dict, List\n", "\n", "from langchain.chains import ConversationChain\n", - "from langchain.llms import OpenAI\n", "from langchain.schema import BaseMemory\n", + "from langchain_community.llms import OpenAI\n", "from pydantic import BaseModel" ] }, diff --git a/docs/docs/modules/memory/index.mdx b/docs/docs/modules/memory/index.mdx index 4129282cf55..9cfda9025e6 100644 --- a/docs/docs/modules/memory/index.mdx +++ b/docs/docs/modules/memory/index.mdx @@ -146,7 +146,7 @@ We'll use an `LLMChain`, and show working with both an LLM and a ChatModel. 
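Before wiring memory into a chain, it may help to see what the memory object does on its own. The following is a minimal sketch (the `memory_key` and sample turns are illustrative, not part of the walkthrough):

```python
from langchain.memory import ConversationBufferMemory

# Buffer memory records every conversational turn verbatim.
memory = ConversationBufferMemory(memory_key="chat_history")
memory.save_context({"question": "hi"}, {"output": "whats up"})

# The accumulated transcript is exposed under `memory_key`, ready to be
# injected into a prompt variable of the same name.
print(memory.load_memory_variables({}))
# {'chat_history': 'Human: hi\nAI: whats up'}
```

The chain examples below inject this transcript through a `chat_history` prompt variable.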
```python -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI from langchain.prompts import PromptTemplate from langchain.chains import LLMChain from langchain.memory import ConversationBufferMemory @@ -183,7 +183,7 @@ conversation({"question": "hi"}) ```python -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI from langchain.prompts import ( ChatPromptTemplate, MessagesPlaceholder, diff --git a/docs/docs/modules/memory/multiple_memory.ipynb b/docs/docs/modules/memory/multiple_memory.ipynb index cca3b11eacc..7fa86d6e592 100644 --- a/docs/docs/modules/memory/multiple_memory.ipynb +++ b/docs/docs/modules/memory/multiple_memory.ipynb @@ -18,13 +18,13 @@ "outputs": [], "source": [ "from langchain.chains import ConversationChain\n", - "from langchain.llms import OpenAI\n", "from langchain.memory import (\n", " CombinedMemory,\n", " ConversationBufferMemory,\n", " ConversationSummaryMemory,\n", ")\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "\n", "conv_memory = ConversationBufferMemory(\n", " memory_key=\"chat_history_lines\", input_key=\"input\"\n", diff --git a/docs/docs/modules/memory/types/buffer.mdx b/docs/docs/modules/memory/types/buffer.mdx index f6a9692f0d9..fc5a23695bf 100644 --- a/docs/docs/modules/memory/types/buffer.mdx +++ b/docs/docs/modules/memory/types/buffer.mdx @@ -54,7 +54,7 @@ Finally, let's take a look at using this in a chain (setting `verbose=True` so w ```python -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI from langchain.chains import ConversationChain diff --git a/docs/docs/modules/memory/types/buffer_window.mdx b/docs/docs/modules/memory/types/buffer_window.mdx index 8a9010f6ff0..08ba8b87656 100644 --- a/docs/docs/modules/memory/types/buffer_window.mdx +++ b/docs/docs/modules/memory/types/buffer_window.mdx @@ -56,7 +56,7 @@ Let's walk through an example, again setting `verbose=True` so we can see the pr ```python -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI from langchain.chains import ConversationChain conversation_with_summary = ConversationChain( llm=OpenAI(temperature=0), diff --git a/docs/docs/modules/memory/types/entity_summary_memory.mdx b/docs/docs/modules/memory/types/entity_summary_memory.mdx index f535c140d3b..41bd6a24496 100644 --- a/docs/docs/modules/memory/types/entity_summary_memory.mdx +++ b/docs/docs/modules/memory/types/entity_summary_memory.mdx @@ -5,7 +5,7 @@ Entity memory remembers given facts about specific entities in a conversation. I Let's first walk through using this functionality. 
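As a compact preview of the round trip this walkthrough builds up to, a minimal sketch (the sample utterance is illustrative):

```python
from langchain.memory import ConversationEntityMemory
from langchain_community.llms import OpenAI

llm = OpenAI(temperature=0)

# Entity memory calls the LLM to pick out entities in each turn and to
# keep a running summary of what is known about each one.
memory = ConversationEntityMemory(llm=llm)
_input = {"input": "Deven & Sam are working on a hackathon project"}

# Loading first lets the memory detect entities in the incoming turn;
# saving then updates the per-entity summaries.
memory.load_memory_variables(_input)
memory.save_context(
    _input,
    {"output": "That sounds like a great project! What kind of project are they working on?"},
)

# Inspect the per-entity summaries accumulated so far.
print(memory.entity_store.store)
```

The step-by-step version, starting from the LLM setup, follows.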
```python -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI from langchain.memory import ConversationEntityMemory llm = OpenAI(temperature=0) ``` diff --git a/docs/docs/modules/memory/types/kg.ipynb b/docs/docs/modules/memory/types/kg.ipynb index c5838cd8d32..8cac93ae128 100644 --- a/docs/docs/modules/memory/types/kg.ipynb +++ b/docs/docs/modules/memory/types/kg.ipynb @@ -25,8 +25,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain.memory import ConversationKGMemory" + "from langchain.memory import ConversationKGMemory\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/modules/memory/types/summary.mdx b/docs/docs/modules/memory/types/summary.mdx index 7940bbad725..cb08afbf372 100644 --- a/docs/docs/modules/memory/types/summary.mdx +++ b/docs/docs/modules/memory/types/summary.mdx @@ -6,7 +6,7 @@ Let's first explore the basic functionality of this type of memory. ```python from langchain.memory import ConversationSummaryMemory, ChatMessageHistory -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI ``` @@ -115,7 +115,7 @@ Let's walk through an example of using this in a chain, again setting `verbose=T ```python -from langchain.llms import OpenAI +from langchain_community.llms import OpenAI from langchain.chains import ConversationChain llm = OpenAI(temperature=0) conversation_with_summary = ConversationChain( diff --git a/docs/docs/modules/memory/types/summary_buffer.ipynb b/docs/docs/modules/memory/types/summary_buffer.ipynb index ffcb795eb19..26219422b23 100644 --- a/docs/docs/modules/memory/types/summary_buffer.ipynb +++ b/docs/docs/modules/memory/types/summary_buffer.ipynb @@ -28,8 +28,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationSummaryBufferMemory\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI()" ] diff --git a/docs/docs/modules/memory/types/token_buffer.ipynb b/docs/docs/modules/memory/types/token_buffer.ipynb index 0c117c7f3b6..d902e440fe0 100644 --- a/docs/docs/modules/memory/types/token_buffer.ipynb +++ b/docs/docs/modules/memory/types/token_buffer.ipynb @@ -27,8 +27,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationTokenBufferMemory\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI()" ] diff --git a/docs/docs/modules/memory/types/vectorstore_retriever_memory.mdx b/docs/docs/modules/memory/types/vectorstore_retriever_memory.mdx index 643d814fe1a..a197afb1af4 100644 --- a/docs/docs/modules/memory/types/vectorstore_retriever_memory.mdx +++ b/docs/docs/modules/memory/types/vectorstore_retriever_memory.mdx @@ -8,8 +8,8 @@ In this case, the "docs" are previous conversation snippets. 
This can be useful ```python from datetime import datetime -from langchain.embeddings.openai import OpenAIEmbeddings -from langchain.llms import OpenAI +from langchain_community.embeddings.openai import OpenAIEmbeddings +from langchain_community.llms import OpenAI from langchain.memory import VectorStoreRetrieverMemory from langchain.chains import ConversationChain from langchain.prompts import PromptTemplate diff --git a/docs/docs/modules/model_io/chat/chat_model_caching.ipynb b/docs/docs/modules/model_io/chat/chat_model_caching.ipynb index 4326154d0dd..7c0b6d0202e 100644 --- a/docs/docs/modules/model_io/chat/chat_model_caching.ipynb +++ b/docs/docs/modules/model_io/chat/chat_model_caching.ipynb @@ -19,8 +19,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.globals import set_llm_cache\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI()" ] diff --git a/docs/docs/modules/model_io/chat/quick_start.ipynb b/docs/docs/modules/model_io/chat/quick_start.ipynb index d352b96d3f8..e641bf85211 100644 --- a/docs/docs/modules/model_io/chat/quick_start.ipynb +++ b/docs/docs/modules/model_io/chat/quick_start.ipynb @@ -45,7 +45,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "chat = ChatOpenAI(openai_api_key=\"...\")" ] @@ -65,7 +65,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "chat = ChatOpenAI()" ] diff --git a/docs/docs/modules/model_io/chat/streaming.ipynb b/docs/docs/modules/model_io/chat/streaming.ipynb index 51f3a9c09f4..5526c7f0edd 100644 --- a/docs/docs/modules/model_io/chat/streaming.ipynb +++ b/docs/docs/modules/model_io/chat/streaming.ipynb @@ -21,7 +21,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic" + "from langchain_community.chat_models import ChatAnthropic" ] }, { diff --git a/docs/docs/modules/model_io/chat/token_usage_tracking.ipynb b/docs/docs/modules/model_io/chat/token_usage_tracking.ipynb index 4451a4b6ca4..8966d0daf44 100644 --- a/docs/docs/modules/model_io/chat/token_usage_tracking.ipynb +++ b/docs/docs/modules/model_io/chat/token_usage_tracking.ipynb @@ -20,7 +20,7 @@ "outputs": [], "source": [ "from langchain.callbacks import get_openai_callback\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { @@ -102,7 +102,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n", "agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True)" diff --git a/docs/docs/modules/model_io/llms/llm_caching.ipynb b/docs/docs/modules/model_io/llms/llm_caching.ipynb index 8444b3ae0a6..eaac29f51fa 100644 --- a/docs/docs/modules/model_io/llms/llm_caching.ipynb +++ b/docs/docs/modules/model_io/llms/llm_caching.ipynb @@ -20,7 +20,7 @@ "outputs": [], "source": [ "from langchain.globals import set_llm_cache\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "# To make the caching really obvious, lets use a slower model.\n", "llm = 
OpenAI(model_name=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)" diff --git a/docs/docs/modules/model_io/llms/quick_start.ipynb b/docs/docs/modules/model_io/llms/quick_start.ipynb index 61c13a210d0..4da76c35a40 100644 --- a/docs/docs/modules/model_io/llms/quick_start.ipynb +++ b/docs/docs/modules/model_io/llms/quick_start.ipynb @@ -51,7 +51,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(openai_api_key=\"...\")" ] @@ -73,7 +73,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI()" ] diff --git a/docs/docs/modules/model_io/llms/streaming_llm.ipynb b/docs/docs/modules/model_io/llms/streaming_llm.ipynb index 8ede97f441a..a3857bfc476 100644 --- a/docs/docs/modules/model_io/llms/streaming_llm.ipynb +++ b/docs/docs/modules/model_io/llms/streaming_llm.ipynb @@ -71,7 +71,7 @@ } ], "source": [ - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\", temperature=0, max_tokens=512)\n", "for chunk in llm.stream(\"Write me a song about sparkling water.\"):\n", diff --git a/docs/docs/modules/model_io/llms/token_usage_tracking.ipynb b/docs/docs/modules/model_io/llms/token_usage_tracking.ipynb index f0864108cae..68349794258 100644 --- a/docs/docs/modules/model_io/llms/token_usage_tracking.ipynb +++ b/docs/docs/modules/model_io/llms/token_usage_tracking.ipynb @@ -20,7 +20,7 @@ "outputs": [], "source": [ "from langchain.callbacks import get_openai_callback\n", - "from langchain.llms import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { @@ -102,7 +102,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n", diff --git a/docs/docs/modules/model_io/output_parsers/quick_start.ipynb b/docs/docs/modules/model_io/output_parsers/quick_start.ipynb index 2d5d28c40aa..af3940833ea 100644 --- a/docs/docs/modules/model_io/output_parsers/quick_start.ipynb +++ b/docs/docs/modules/model_io/output_parsers/quick_start.ipynb @@ -50,9 +50,9 @@ } ], "source": [ - "from langchain.llms import OpenAI\n", "from langchain.output_parsers import PydanticOutputParser\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "from langchain_core.pydantic_v1 import BaseModel, Field, validator\n", "\n", "model = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", temperature=0.0)\n", diff --git a/docs/docs/modules/model_io/output_parsers/types/csv.ipynb b/docs/docs/modules/model_io/output_parsers/types/csv.ipynb index 4d2c5c475c7..f74112530fb 100644 --- a/docs/docs/modules/model_io/output_parsers/types/csv.ipynb +++ b/docs/docs/modules/model_io/output_parsers/types/csv.ipynb @@ -17,9 +17,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.output_parsers import CommaSeparatedListOutputParser\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "output_parser = CommaSeparatedListOutputParser()\n", "\n", diff --git a/docs/docs/modules/model_io/output_parsers/types/datetime.ipynb 
b/docs/docs/modules/model_io/output_parsers/types/datetime.ipynb index ccaa6eeb636..71543651142 100644 --- a/docs/docs/modules/model_io/output_parsers/types/datetime.ipynb +++ b/docs/docs/modules/model_io/output_parsers/types/datetime.ipynb @@ -17,9 +17,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", "from langchain.output_parsers import DatetimeOutputParser\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/modules/model_io/output_parsers/types/json.ipynb b/docs/docs/modules/model_io/output_parsers/types/json.ipynb index e0ed5bb7389..83ec4c8e26f 100644 --- a/docs/docs/modules/model_io/output_parsers/types/json.ipynb +++ b/docs/docs/modules/model_io/output_parsers/types/json.ipynb @@ -22,8 +22,8 @@ "source": [ "from typing import List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import JsonOutputParser\n", "from langchain_core.pydantic_v1 import BaseModel, Field" ] diff --git a/docs/docs/modules/model_io/output_parsers/types/output_fixing.ipynb b/docs/docs/modules/model_io/output_parsers/types/output_fixing.ipynb index 0d2a51864c9..3c41470987f 100644 --- a/docs/docs/modules/model_io/output_parsers/types/output_fixing.ipynb +++ b/docs/docs/modules/model_io/output_parsers/types/output_fixing.ipynb @@ -23,8 +23,8 @@ "source": [ "from typing import List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.output_parsers import PydanticOutputParser\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.pydantic_v1 import BaseModel, Field" ] }, diff --git a/docs/docs/modules/model_io/output_parsers/types/pandas_dataframe.ipynb b/docs/docs/modules/model_io/output_parsers/types/pandas_dataframe.ipynb index 176256d3db1..9daf18b7c69 100644 --- a/docs/docs/modules/model_io/output_parsers/types/pandas_dataframe.ipynb +++ b/docs/docs/modules/model_io/output_parsers/types/pandas_dataframe.ipynb @@ -23,9 +23,9 @@ "from typing import Any, Dict\n", "\n", "import pandas as pd\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.output_parsers import PandasDataFrameOutputParser\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/modules/model_io/output_parsers/types/pydantic.ipynb b/docs/docs/modules/model_io/output_parsers/types/pydantic.ipynb index d1fd5d25048..0dbea9e950a 100644 --- a/docs/docs/modules/model_io/output_parsers/types/pydantic.ipynb +++ b/docs/docs/modules/model_io/output_parsers/types/pydantic.ipynb @@ -22,9 +22,9 @@ "source": [ "from typing import List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.output_parsers import PydanticOutputParser\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.pydantic_v1 import BaseModel, Field, validator" ] }, diff --git a/docs/docs/modules/model_io/output_parsers/types/retry.ipynb b/docs/docs/modules/model_io/output_parsers/types/retry.ipynb index d12902212f7..3773c57b9cd 100644 --- a/docs/docs/modules/model_io/output_parsers/types/retry.ipynb +++ 
b/docs/docs/modules/model_io/output_parsers/types/retry.ipynb @@ -17,8 +17,6 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.llms import OpenAI\n", "from langchain.output_parsers import (\n", " OutputFixingParser,\n", " PydanticOutputParser,\n", @@ -26,6 +24,8 @@ "from langchain.prompts import (\n", " PromptTemplate,\n", ")\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.llms import OpenAI\n", "from pydantic import BaseModel, Field" ] }, diff --git a/docs/docs/modules/model_io/output_parsers/types/structured.ipynb b/docs/docs/modules/model_io/output_parsers/types/structured.ipynb index bfcb1f971c1..c78e4f783bd 100644 --- a/docs/docs/modules/model_io/output_parsers/types/structured.ipynb +++ b/docs/docs/modules/model_io/output_parsers/types/structured.ipynb @@ -17,9 +17,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.output_parsers import ResponseSchema, StructuredOutputParser\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/modules/model_io/output_parsers/types/yaml.ipynb b/docs/docs/modules/model_io/output_parsers/types/yaml.ipynb index 023aec8007e..6621713c945 100644 --- a/docs/docs/modules/model_io/output_parsers/types/yaml.ipynb +++ b/docs/docs/modules/model_io/output_parsers/types/yaml.ipynb @@ -22,9 +22,9 @@ "source": [ "from typing import List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.output_parsers import YamlOutputParser\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.pydantic_v1 import BaseModel, Field" ] }, diff --git a/docs/docs/modules/model_io/prompts/composition.ipynb b/docs/docs/modules/model_io/prompts/composition.ipynb index 3d35e5652b6..0e07dc72bac 100644 --- a/docs/docs/modules/model_io/prompts/composition.ipynb +++ b/docs/docs/modules/model_io/prompts/composition.ipynb @@ -102,7 +102,7 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { @@ -259,7 +259,7 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/modules/model_io/prompts/example_selector_types/mmr.ipynb b/docs/docs/modules/model_io/prompts/example_selector_types/mmr.ipynb index 35fe667e87b..b2c6d188c7c 100644 --- a/docs/docs/modules/model_io/prompts/example_selector_types/mmr.ipynb +++ b/docs/docs/modules/model_io/prompts/example_selector_types/mmr.ipynb @@ -17,13 +17,13 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n", "from langchain.prompts.example_selector import (\n", " MaxMarginalRelevanceExampleSelector,\n", " SemanticSimilarityExampleSelector,\n", ")\n", "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "example_prompt = PromptTemplate(\n", " input_variables=[\"input\", \"output\"],\n", diff --git a/docs/docs/modules/model_io/prompts/example_selector_types/similarity.ipynb 
b/docs/docs/modules/model_io/prompts/example_selector_types/similarity.ipynb index 40e5dbbf08f..20b31b1188f 100644 --- a/docs/docs/modules/model_io/prompts/example_selector_types/similarity.ipynb +++ b/docs/docs/modules/model_io/prompts/example_selector_types/similarity.ipynb @@ -17,10 +17,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n", "from langchain.prompts.example_selector import SemanticSimilarityExampleSelector\n", "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "example_prompt = PromptTemplate(\n", " input_variables=[\"input\", \"output\"],\n", diff --git a/docs/docs/modules/model_io/prompts/few_shot_examples.ipynb b/docs/docs/modules/model_io/prompts/few_shot_examples.ipynb index c4a54ac3f09..d542fa648ae 100644 --- a/docs/docs/modules/model_io/prompts/few_shot_examples.ipynb +++ b/docs/docs/modules/model_io/prompts/few_shot_examples.ipynb @@ -243,9 +243,9 @@ } ], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.prompts.example_selector import SemanticSimilarityExampleSelector\n", "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "example_selector = SemanticSimilarityExampleSelector.from_examples(\n", " # This is the list of examples available to select from.\n", diff --git a/docs/docs/modules/model_io/prompts/few_shot_examples_chat.ipynb b/docs/docs/modules/model_io/prompts/few_shot_examples_chat.ipynb index d6965d64595..0fae2134e09 100644 --- a/docs/docs/modules/model_io/prompts/few_shot_examples_chat.ipynb +++ b/docs/docs/modules/model_io/prompts/few_shot_examples_chat.ipynb @@ -160,7 +160,7 @@ } ], "source": [ - "from langchain.chat_models import ChatAnthropic\n", + "from langchain_community.chat_models import ChatAnthropic\n", "\n", "chain = final_prompt | ChatAnthropic(temperature=0.0)\n", "\n", @@ -191,9 +191,9 @@ }, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.prompts import SemanticSimilarityExampleSelector\n", - "from langchain.vectorstores import Chroma" + "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import OpenAIEmbeddings" ] }, { @@ -417,7 +417,7 @@ } ], "source": [ - "from langchain.chat_models import ChatAnthropic\n", + "from langchain_community.chat_models import ChatAnthropic\n", "\n", "chain = final_prompt | ChatAnthropic(temperature=0.0)\n", "\n", diff --git a/docs/docs/modules/model_io/prompts/quick_start.ipynb b/docs/docs/modules/model_io/prompts/quick_start.ipynb index 10fb5fc6bc0..af8bc97cee2 100644 --- a/docs/docs/modules/model_io/prompts/quick_start.ipynb +++ b/docs/docs/modules/model_io/prompts/quick_start.ipynb @@ -160,8 +160,8 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import HumanMessagePromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.messages import SystemMessage\n", "\n", "chat_template = ChatPromptTemplate.from_messages(\n", diff --git a/docs/docs/modules/model_io/quick_start.mdx b/docs/docs/modules/model_io/quick_start.mdx index f57b033e65a..1bee32bff74 100644 --- a/docs/docs/modules/model_io/quick_start.mdx +++ b/docs/docs/modules/model_io/quick_start.mdx @@ -31,8 +31,8 @@ export OPENAI_API_KEY="..." 
We can then initialize the model: ```python -from langchain.chat_models import ChatOpenAI -from langchain.llms import OpenAI +from langchain_community.chat_models import ChatOpenAI +from langchain_community.llms import OpenAI llm = OpenAI() chat_model = ChatOpenAI() @@ -41,7 +41,7 @@ chat_model = ChatOpenAI() If you'd prefer not to set an environment variable, you can pass the key in directly via the `openai_api_key` named parameter when instantiating the model class: ```python -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI llm = ChatOpenAI(openai_api_key="...") ``` @@ -57,8 +57,8 @@ First, follow [these instructions](https://github.com/jmorganca/ollama) to set u Then, make sure the Ollama server is running. After that, you can do: ```python -from langchain.llms import Ollama -from langchain.chat_models import ChatOllama +from langchain_community.llms import Ollama +from langchain_community.chat_models import ChatOllama llm = Ollama(model="llama2") chat_model = ChatOllama() diff --git a/docs/docs/use_cases/apis.ipynb b/docs/docs/use_cases/apis.ipynb index a051fc250f7..dc3207ff895 100644 --- a/docs/docs/use_cases/apis.ipynb +++ b/docs/docs/use_cases/apis.ipynb @@ -236,7 +236,7 @@ "source": [ "from langchain.chains import APIChain\n", "from langchain.chains.api import open_meteo_docs\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "chain = APIChain.from_llm_and_api_docs(\n", @@ -345,7 +345,7 @@ "\n", "from langchain.chains import APIChain\n", "from langchain.chains.api import podcast_docs\n", - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "\n", "listen_api_key = \"xxx\" # Get api key here: https://www.listennotes.com/api/pricing/\n", "llm = OpenAI(temperature=0)\n", @@ -380,8 +380,8 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain, LLMRequestsChain\n", - "from langchain.llms import OpenAI\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/use_cases/chatbots.ipynb b/docs/docs/use_cases/chatbots.ipynb index 105354c9ba0..590e5b53527 100644 --- a/docs/docs/use_cases/chatbots.ipynb +++ b/docs/docs/use_cases/chatbots.ipynb @@ -94,8 +94,8 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import HumanMessage, SystemMessage\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "chat = ChatOpenAI()\n", "chat(\n", @@ -310,8 +310,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationSummaryMemory\n", + "from langchain_community.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "memory = ConversationSummaryMemory(llm=llm)\n", @@ -615,8 +615,8 @@ "text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n", "all_splits = text_splitter.split_documents(data)\n", "\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "\n", "vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbeddings())" ] @@ -649,7 +649,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from
langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI()\n", "retriever = vectorstore.as_retriever()\n", diff --git a/docs/docs/use_cases/code_understanding.ipynb b/docs/docs/use_cases/code_understanding.ipynb index 8ae7542edea..15cd6bebf5e 100644 --- a/docs/docs/use_cases/code_understanding.ipynb +++ b/docs/docs/use_cases/code_understanding.ipynb @@ -200,8 +200,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "db = Chroma.from_documents(texts, OpenAIEmbeddings(disallowed_special=()))\n", "retriever = db.as_retriever(\n", @@ -232,8 +232,8 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.memory import ConversationSummaryMemory\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model_name=\"gpt-4\")\n", "memory = ConversationSummaryMemory(\n", @@ -368,9 +368,9 @@ "from langchain.callbacks.manager import CallbackManager\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", "from langchain.chains import ConversationalRetrievalChain, LLMChain\n", - "from langchain.llms import LlamaCpp\n", "from langchain.memory import ConversationSummaryMemory\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import LlamaCpp" ] }, { diff --git a/docs/docs/use_cases/data_generation.ipynb b/docs/docs/use_cases/data_generation.ipynb index 3e9cd776c16..f1267395189 100644 --- a/docs/docs/use_cases/data_generation.ipynb +++ b/docs/docs/use_cases/data_generation.ipynb @@ -64,8 +64,8 @@ "# import dotenv\n", "# dotenv.load_dotenv()\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.pydantic_v1 import BaseModel\n", "from langchain_experimental.tabular_synthetic_data.openai import (\n", " OPENAI_TEMPLATE,\n", @@ -252,7 +252,7 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_experimental.synthetic_data import (\n", " DatasetGenerator,\n", " create_data_generation_chain,\n", @@ -491,9 +491,9 @@ "from typing import List\n", "\n", "from langchain.chains import create_extraction_chain_pydantic\n", - "from langchain.llms import OpenAI\n", "from langchain.output_parsers import PydanticOutputParser\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import OpenAI\n", "from pydantic import BaseModel, Field" ] }, diff --git a/docs/docs/use_cases/extraction.ipynb b/docs/docs/use_cases/extraction.ipynb index 165670f9b33..9c58dcd13e1 100644 --- a/docs/docs/use_cases/extraction.ipynb +++ b/docs/docs/use_cases/extraction.ipynb @@ -105,7 +105,7 @@ ], "source": [ "from langchain.chains import create_extraction_chain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "# Schema\n", "schema = {\n", @@ -448,11 +448,11 @@ "source": [ "from typing import Optional, Sequence\n", "\n", - "from langchain.llms import OpenAI\n", "from langchain.output_parsers import PydanticOutputParser\n", 
"from langchain.prompts import (\n", " PromptTemplate,\n", ")\n", + "from langchain_community.llms import OpenAI\n", "from pydantic import BaseModel, Field, validator\n", "\n", "\n", @@ -527,11 +527,11 @@ } ], "source": [ - "from langchain.llms import OpenAI\n", "from langchain.output_parsers import PydanticOutputParser\n", "from langchain.prompts import (\n", " PromptTemplate,\n", ")\n", + "from langchain_community.llms import OpenAI\n", "from pydantic import BaseModel, Field, validator\n", "\n", "\n", diff --git a/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb b/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb index 26023a8bbbc..f2d507bd624 100644 --- a/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb +++ b/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb @@ -186,7 +186,7 @@ "outputs": [], "source": [ "from langchain.chains import GraphCypherQAChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "chain = GraphCypherQAChain.from_llm(\n", " cypher_llm=ChatOpenAI(temperature=0, model_name=\"gpt-4\"),\n", diff --git a/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb b/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb index 2e0bf932fc8..a74c6e0173f 100644 --- a/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb @@ -426,7 +426,7 @@ "outputs": [], "source": [ "from langchain.chains import ArangoGraphQAChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "chain = ArangoGraphQAChain.from_llm(\n", " ChatOpenAI(temperature=0), graph=graph, verbose=True\n", diff --git a/docs/docs/use_cases/graph/graph_cypher_qa.ipynb b/docs/docs/use_cases/graph/graph_cypher_qa.ipynb index 56e30288442..c126a827116 100644 --- a/docs/docs/use_cases/graph/graph_cypher_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_cypher_qa.ipynb @@ -39,8 +39,8 @@ "outputs": [], "source": [ "from langchain.chains import GraphCypherQAChain\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.graphs import Neo4jGraph" + "from langchain.graphs import Neo4jGraph\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/graph/graph_falkordb_qa.ipynb b/docs/docs/use_cases/graph/graph_falkordb_qa.ipynb index 6089dadc21c..211e2a927ae 100644 --- a/docs/docs/use_cases/graph/graph_falkordb_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_falkordb_qa.ipynb @@ -29,8 +29,8 @@ "outputs": [], "source": [ "from langchain.chains import FalkorDBQAChain\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.graphs import FalkorDBGraph" + "from langchain.graphs import FalkorDBGraph\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/graph/graph_hugegraph_qa.ipynb b/docs/docs/use_cases/graph/graph_hugegraph_qa.ipynb index 65b5e6dde4f..926bcc4a3ac 100644 --- a/docs/docs/use_cases/graph/graph_hugegraph_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_hugegraph_qa.ipynb @@ -156,8 +156,8 @@ "outputs": [], "source": [ "from langchain.chains import HugeGraphQAChain\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.graphs import HugeGraph" + "from langchain.graphs import HugeGraph\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/graph/graph_kuzu_qa.ipynb b/docs/docs/use_cases/graph/graph_kuzu_qa.ipynb index 
e3c576e7bc6..19f49db5b15 100644 --- a/docs/docs/use_cases/graph/graph_kuzu_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_kuzu_qa.ipynb @@ -131,8 +131,8 @@ "outputs": [], "source": [ "from langchain.chains import KuzuQAChain\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.graphs import KuzuGraph" + "from langchain.graphs import KuzuGraph\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/graph/graph_memgraph_qa.ipynb b/docs/docs/use_cases/graph/graph_memgraph_qa.ipynb index efe84774c11..cc82b20646a 100644 --- a/docs/docs/use_cases/graph/graph_memgraph_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_memgraph_qa.ipynb @@ -68,9 +68,9 @@ "\n", "from gqlalchemy import Memgraph\n", "from langchain.chains import GraphCypherQAChain\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.graphs import MemgraphGraph\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/graph/graph_nebula_qa.ipynb b/docs/docs/use_cases/graph/graph_nebula_qa.ipynb index 9a4dd6a9015..1e9b872d0c6 100644 --- a/docs/docs/use_cases/graph/graph_nebula_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_nebula_qa.ipynb @@ -122,8 +122,8 @@ "outputs": [], "source": [ "from langchain.chains import NebulaGraphQAChain\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.graphs import NebulaGraph" + "from langchain.graphs import NebulaGraph\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/graph/graph_networkx_qa.ipynb b/docs/docs/use_cases/graph/graph_networkx_qa.ipynb index fe89aa9f859..e7e95db8084 100644 --- a/docs/docs/use_cases/graph/graph_networkx_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_networkx_qa.ipynb @@ -50,7 +50,7 @@ "outputs": [], "source": [ "from langchain.indexes import GraphIndexCreator\n", - "from langchain.llms import OpenAI" + "from langchain_community.llms import OpenAI" ] }, { diff --git a/docs/docs/use_cases/graph/graph_sparql_qa.ipynb b/docs/docs/use_cases/graph/graph_sparql_qa.ipynb index 3ea9f86dd6b..34e315cd607 100644 --- a/docs/docs/use_cases/graph/graph_sparql_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_sparql_qa.ipynb @@ -31,8 +31,8 @@ "outputs": [], "source": [ "from langchain.chains import GraphSparqlQAChain\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.graphs import RdfGraph" + "from langchain.graphs import RdfGraph\n", + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/graph/neptune_cypher_qa.ipynb b/docs/docs/use_cases/graph/neptune_cypher_qa.ipynb index e5dc7edd817..492cea4d21a 100644 --- a/docs/docs/use_cases/graph/neptune_cypher_qa.ipynb +++ b/docs/docs/use_cases/graph/neptune_cypher_qa.ipynb @@ -41,7 +41,7 @@ ], "source": [ "from langchain.chains import NeptuneOpenCypherQAChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0, model=\"gpt-4\")\n", "\n", diff --git a/docs/docs/use_cases/qa_structured/sql.ipynb b/docs/docs/use_cases/qa_structured/sql.ipynb index 3f5b0981d05..24c554ed669 100644 --- a/docs/docs/use_cases/qa_structured/sql.ipynb +++ b/docs/docs/use_cases/qa_structured/sql.ipynb @@ -85,8 +85,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", "from 
langchain.utilities import SQLDatabase\n", + "from langchain_community.llms import OpenAI\n", "from langchain_experimental.sql import SQLDatabaseChain\n", "\n", "db = SQLDatabase.from_uri(\"sqlite:///Chinook.db\")\n", @@ -161,7 +161,7 @@ "outputs": [], "source": [ "from langchain.chains import create_sql_query_chain\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { @@ -322,7 +322,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", + "from langchain_community.llms import OpenAI\n", "from langchain_experimental.sql import SQLDatabaseChain\n", "\n", "llm = OpenAI(temperature=0, verbose=True)\n", @@ -782,9 +782,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.schema import Document\n", "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()\n", "\n", @@ -837,8 +837,8 @@ "source": [ "from langchain.agents import AgentType, create_sql_agent\n", "from langchain.agents.agent_toolkits import SQLDatabaseToolkit\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.utilities import SQLDatabase\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "db = SQLDatabase.from_uri(\"sqlite:///Chinook.db\")\n", "llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n", @@ -970,8 +970,8 @@ "outputs": [], "source": [ "from langchain.agents.agent_toolkits import create_retriever_tool\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.vectorstores import FAISS\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "\n", "texts = artists + albums\n", "\n", @@ -996,8 +996,8 @@ "source": [ "from langchain.agents import AgentType, create_sql_agent\n", "from langchain.agents.agent_toolkits import SQLDatabaseToolkit\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.utilities import SQLDatabase\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "# db = SQLDatabase.from_uri(\"sqlite:///Chinook.db\")\n", "llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n", @@ -1160,7 +1160,7 @@ "source": [ "from elasticsearch import Elasticsearch\n", "from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb b/docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb index 5d0fb9f81dc..5fea5317a42 100644 --- a/docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb +++ b/docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb @@ -146,7 +146,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0)" ] diff --git a/docs/docs/use_cases/question_answering/per_user.ipynb b/docs/docs/use_cases/question_answering/per_user.ipynb index 424da86e916..6c63e9b529f 100644 --- a/docs/docs/use_cases/question_answering/per_user.ipynb +++ b/docs/docs/use_cases/question_answering/per_user.ipynb @@ -55,8 +55,8 @@ ], "source": [ "import pinecone\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", 
- "from langchain.vectorstores import Pinecone" + "from langchain.vectorstores import Pinecone\n", + "from langchain_community.embeddings.openai import OpenAIEmbeddings" ] }, { @@ -159,9 +159,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import (\n", " ConfigurableField,\n", diff --git a/docs/docs/use_cases/summarization.ipynb b/docs/docs/use_cases/summarization.ipynb index d025bdd6124..d11d9ffef26 100644 --- a/docs/docs/use_cases/summarization.ipynb +++ b/docs/docs/use_cases/summarization.ipynb @@ -206,8 +206,8 @@ ], "source": [ "from langchain.chains.summarize import load_summarize_chain\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.document_loaders import WebBaseLoader\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n", "docs = loader.load()\n", diff --git a/docs/docs/use_cases/tagging.ipynb b/docs/docs/use_cases/tagging.ipynb index 0e9b1b91aad..758c364ada5 100644 --- a/docs/docs/use_cases/tagging.ipynb +++ b/docs/docs/use_cases/tagging.ipynb @@ -64,7 +64,7 @@ "outputs": [], "source": [ "from langchain.chains import create_tagging_chain, create_tagging_chain_pydantic\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain_community.chat_models import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/web_scraping.ipynb b/docs/docs/use_cases/web_scraping.ipynb index c536e5136f3..d23ba0dea8a 100644 --- a/docs/docs/use_cases/web_scraping.ipynb +++ b/docs/docs/use_cases/web_scraping.ipynb @@ -263,7 +263,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_community.chat_models import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\")" ] @@ -479,11 +479,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models.openai import ChatOpenAI\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers.web_research import WebResearchRetriever\n", "from langchain.utilities import GoogleSearchAPIWrapper\n", - "from langchain.vectorstores import Chroma" + "from langchain.vectorstores import Chroma\n", + "from langchain_community.chat_models.openai import ChatOpenAI\n", + "from langchain_community.embeddings import OpenAIEmbeddings" ] }, { diff --git a/libs/cli/langchain_cli/package_template/package_template/chain.py b/libs/cli/langchain_cli/package_template/package_template/chain.py index ef0a3aecf3c..61d4022a407 100644 --- a/libs/cli/langchain_cli/package_template/package_template/chain.py +++ b/libs/cli/langchain_cli/package_template/package_template/chain.py @@ -1,4 +1,4 @@ -from langchain.chat_models import ChatOpenAI +from langchain_community.chat_models import ChatOpenAI from langchain.prompts import ChatPromptTemplate _prompt = ChatPromptTemplate.from_messages( diff --git a/libs/community/langchain_community/llms/oci_data_science_model_deployment_endpoint.py b/libs/community/langchain_community/llms/oci_data_science_model_deployment_endpoint.py index 630fa228c4d..9092f067799 100644 --- 
a/libs/community/langchain_community/llms/oci_data_science_model_deployment_endpoint.py
+++ b/libs/community/langchain_community/llms/oci_data_science_model_deployment_endpoint.py
@@ -228,7 +228,7 @@ class OCIModelDeploymentTGI(OCIModelDeploymentLLM):
     Example:
         .. code-block:: python

-            from langchain.llms import ModelDeploymentTGI
+            from langchain_community.llms import ModelDeploymentTGI

             oci_md = ModelDeploymentTGI(endpoint="https:///predict")

@@ -294,7 +294,7 @@ class OCIModelDeploymentVLLM(OCIModelDeploymentLLM):
     Example:
         .. code-block:: python

-            from langchain.llms import OCIModelDeploymentVLLM
+            from langchain_community.llms import OCIModelDeploymentVLLM

             oci_md = OCIModelDeploymentVLLM(
                 endpoint="https:///predict",
diff --git a/libs/community/langchain_community/vectorstores/surrealdb.py b/libs/community/langchain_community/vectorstores/surrealdb.py
index 773a00cc576..febc7ea55c7 100644
--- a/libs/community/langchain_community/vectorstores/surrealdb.py
+++ b/libs/community/langchain_community/vectorstores/surrealdb.py
@@ -32,7 +32,7 @@ class SurrealDBStore(VectorStore):
         .. code-block:: python

             from langchain.vectorstores.surrealdb import SurrealDBStore
-            from langchain.embeddings import HuggingFaceEmbeddings
+            from langchain_community.embeddings import HuggingFaceEmbeddings

             embedding_function = HuggingFaceEmbeddings()
             dburl = "ws://localhost:8000/rpc"
diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py
index ac98aecf6b2..d4019d10d8a 100644
--- a/libs/core/langchain_core/runnables/base.py
+++ b/libs/core/langchain_core/runnables/base.py
@@ -3719,7 +3719,7 @@ class RunnableBinding(RunnableBindingBase[Input, Output]):
             # Create a runnable binding that invokes the ChatModel with the
            # additional kwarg `stop=['-']` when running it.
-            from langchain.chat_models import ChatOpenAI
+            from langchain_community.chat_models import ChatOpenAI

             model = ChatOpenAI()
             model.invoke('Say "Parrot-MAGIC"', stop=['-']) # Should return `Parrot`

             # Using it the easy way via `bind` method which returns a new
diff --git a/libs/core/langchain_core/runnables/history.py b/libs/core/langchain_core/runnables/history.py
index 99be9e7e130..ed65c8d1c0a 100644
--- a/libs/core/langchain_core/runnables/history.py
+++ b/libs/core/langchain_core/runnables/history.py
@@ -46,7 +46,7 @@ class RunnableWithMessageHistory(RunnableBindingBase):

         from typing import Optional

-        from langchain.chat_models import ChatAnthropic
+        from langchain_community.chat_models import ChatAnthropic
         from langchain.memory.chat_message_histories import RedisChatMessageHistory
         from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/spark/base.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/spark/base.py
index 726a3a8850a..9639ac0db14 100644
--- a/libs/experimental/langchain_experimental/agents/agent_toolkits/spark/base.py
+++ b/libs/experimental/langchain_experimental/agents/agent_toolkits/spark/base.py
@@ -5,7 +5,7 @@ from langchain.agents.agent import AgentExecutor
 from langchain.agents.mrkl.base import ZeroShotAgent
 from langchain.callbacks.base import BaseCallbackManager
 from langchain.chains.llm import LLMChain
-from langchain.llms.base import BaseLLM
+from langchain_core.language_models import BaseLLM

 from langchain_experimental.agents.agent_toolkits.spark.prompt import PREFIX, SUFFIX
 from langchain_experimental.tools.python.tool import PythonAstREPLTool
diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/xorbits/base.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/xorbits/base.py
index 70f81188692..31bc94c7029 100644
--- a/libs/experimental/langchain_experimental/agents/agent_toolkits/xorbits/base.py
+++ b/libs/experimental/langchain_experimental/agents/agent_toolkits/xorbits/base.py
@@ -5,7 +5,7 @@ from langchain.agents.agent import AgentExecutor
 from langchain.agents.mrkl.base import ZeroShotAgent
 from langchain.callbacks.base import BaseCallbackManager
 from langchain.chains.llm import LLMChain
-from langchain.llms.base import BaseLLM
+from langchain_core.language_models import BaseLLM

 from langchain_experimental.agents.agent_toolkits.xorbits.prompt import (
     NP_PREFIX,
diff --git a/libs/experimental/langchain_experimental/autonomous_agents/autogpt/agent.py b/libs/experimental/langchain_experimental/autonomous_agents/autogpt/agent.py
index 35cbc6c44c6..8a8cfccbfd1 100644
--- a/libs/experimental/langchain_experimental/autonomous_agents/autogpt/agent.py
+++ b/libs/experimental/langchain_experimental/autonomous_agents/autogpt/agent.py
@@ -3,7 +3,6 @@ from __future__ import annotations
 from typing import List, Optional

 from langchain.chains.llm import LLMChain
-from langchain.chat_models.base import BaseChatModel
 from langchain.memory import ChatMessageHistory
 from langchain.schema import (
     BaseChatMessageHistory,
@@ -11,6 +10,7 @@ from langchain.schema import (
 )
 from langchain.tools.base import BaseTool
 from langchain.tools.human.tool import HumanInputRun
+from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
 from langchain_core.vectorstores import VectorStoreRetriever
diff --git a/libs/experimental/langchain_experimental/chat_models/llm_wrapper.py b/libs/experimental/langchain_experimental/chat_models/llm_wrapper.py
index 55eec69ffd4..d739cec69c6 100644
--- a/libs/experimental/langchain_experimental/chat_models/llm_wrapper.py
+++ b/libs/experimental/langchain_experimental/chat_models/llm_wrapper.py
@@ -7,8 +7,6 @@ from langchain.callbacks.manager import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
 )
-from langchain.chat_models.base import BaseChatModel
-from langchain.llms.base import LLM
 from langchain.schema import (
     AIMessage,
     BaseMessage,
@@ -18,6 +16,7 @@ from langchain.schema import (
     LLMResult,
     SystemMessage,
 )
+from langchain_core.language_models import LLM, BaseChatModel

 DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
diff --git a/libs/experimental/langchain_experimental/fallacy_removal/base.py b/libs/experimental/langchain_experimental/fallacy_removal/base.py
index 8979d00135e..fd196b525a4 100644
--- a/libs/experimental/langchain_experimental/fallacy_removal/base.py
+++ b/libs/experimental/langchain_experimental/fallacy_removal/base.py
@@ -25,7 +25,7 @@ class FallacyChain(Chain):
     Example:
         .. code-block:: python

-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI
             from langchain.chains import LLMChain
             from langchain_experimental.fallacy import FallacyChain
             from langchain_experimental.fallacy_removal.models import LogicalFallacy
diff --git a/libs/experimental/langchain_experimental/llm_bash/base.py b/libs/experimental/langchain_experimental/llm_bash/base.py
index f1fa97cbbca..a016304ef9a 100644
--- a/libs/experimental/langchain_experimental/llm_bash/base.py
+++ b/libs/experimental/langchain_experimental/llm_bash/base.py
@@ -25,7 +25,7 @@ class LLMBashChain(Chain):
         .. code-block:: python

             from langchain.chains import LLMBashChain
-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI

             llm_bash = LLMBashChain.from_llm(OpenAI())
     """
diff --git a/libs/experimental/langchain_experimental/llm_symbolic_math/base.py b/libs/experimental/langchain_experimental/llm_symbolic_math/base.py
index 45b05758dba..7feda59ffd0 100644
--- a/libs/experimental/langchain_experimental/llm_symbolic_math/base.py
+++ b/libs/experimental/langchain_experimental/llm_symbolic_math/base.py
@@ -24,7 +24,7 @@ class LLMSymbolicMathChain(Chain):
         .. code-block:: python

             from langchain.chains import LLMSymbolicMathChain
-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI

             llm_symbolic_math = LLMSymbolicMathChain.from_llm(OpenAI())
     """
diff --git a/libs/experimental/langchain_experimental/llms/anthropic_functions.py b/libs/experimental/langchain_experimental/llms/anthropic_functions.py
index 3a18d734601..23f8d5e65ca 100644
--- a/libs/experimental/langchain_experimental/llms/anthropic_functions.py
+++ b/libs/experimental/langchain_experimental/llms/anthropic_functions.py
@@ -6,12 +6,12 @@ from typing import Any, DefaultDict, Dict, List, Optional, cast
 from langchain.callbacks.manager import (
     CallbackManagerForLLMRun,
 )
-from langchain.chat_models.anthropic import ChatAnthropic
-from langchain.chat_models.base import BaseChatModel
 from langchain.schema import (
     ChatGeneration,
     ChatResult,
 )
+from langchain_community.chat_models.anthropic import ChatAnthropic
+from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import (
     AIMessage,
     BaseMessage,
diff --git a/libs/experimental/langchain_experimental/llms/jsonformer_decoder.py b/libs/experimental/langchain_experimental/llms/jsonformer_decoder.py
index 2e3c5d721ca..a562eaf00f1 100644
--- a/libs/experimental/langchain_experimental/llms/jsonformer_decoder.py
+++ b/libs/experimental/langchain_experimental/llms/jsonformer_decoder.py
@@ -5,7 +5,7 @@ import json
 from typing import TYPE_CHECKING, Any, List, Optional, cast

 from langchain.callbacks.manager import CallbackManagerForLLMRun
-from langchain.llms.huggingface_pipeline import HuggingFacePipeline
+from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline

 from langchain_experimental.pydantic_v1 import Field, root_validator
diff --git a/libs/experimental/langchain_experimental/llms/llamaapi.py b/libs/experimental/langchain_experimental/llms/llamaapi.py
index 2981c9e489b..d2abf328625 100644
--- a/libs/experimental/langchain_experimental/llms/llamaapi.py
+++ b/libs/experimental/langchain_experimental/llms/llamaapi.py
@@ -10,11 +10,11 @@ from typing import (
 )

 from langchain.callbacks.manager import CallbackManagerForLLMRun
-from langchain.chat_models.base import BaseChatModel
 from langchain.schema import (
     ChatGeneration,
     ChatResult,
 )
+from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import (
     AIMessage,
     BaseMessage,
diff --git a/libs/experimental/langchain_experimental/llms/lmformatenforcer_decoder.py b/libs/experimental/langchain_experimental/llms/lmformatenforcer_decoder.py
index 8aa9c0a48e1..4a7f9eff8a7 100644
--- a/libs/experimental/langchain_experimental/llms/lmformatenforcer_decoder.py
+++ b/libs/experimental/langchain_experimental/llms/lmformatenforcer_decoder.py
@@ -4,8 +4,8 @@ from __future__ import annotations
 from typing import TYPE_CHECKING, Any, List, Optional

 from langchain.callbacks.manager import CallbackManagerForLLMRun
-from langchain.llms.huggingface_pipeline import HuggingFacePipeline
 from langchain.schema import LLMResult
+from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline

 from langchain_experimental.pydantic_v1 import Field
diff --git a/libs/experimental/langchain_experimental/llms/ollama_functions.py b/libs/experimental/langchain_experimental/llms/ollama_functions.py
index 78ad9a58e85..4b9b743f66c 100644
--- a/libs/experimental/langchain_experimental/llms/ollama_functions.py
+++ b/libs/experimental/langchain_experimental/llms/ollama_functions.py
@@ -1,7 +1,7 @@
 import json
 from typing import Any, Dict, List, Optional

-from langchain.chat_models.ollama import ChatOllama
+from langchain_community.chat_models.ollama import ChatOllama
 from langchain_core.callbacks import CallbackManagerForLLMRun
 from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage, BaseMessage
diff --git a/libs/experimental/langchain_experimental/llms/rellm_decoder.py b/libs/experimental/langchain_experimental/llms/rellm_decoder.py
index a61b9a6c62e..02e66c19346 100644
--- a/libs/experimental/langchain_experimental/llms/rellm_decoder.py
+++ b/libs/experimental/langchain_experimental/llms/rellm_decoder.py
@@ -4,8 +4,8 @@ from __future__ import annotations
 from typing import TYPE_CHECKING, Any, List, Optional, cast

 from langchain.callbacks.manager import CallbackManagerForLLMRun
-from langchain.llms.huggingface_pipeline import HuggingFacePipeline
-from langchain.llms.utils import enforce_stop_tokens
+from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
+from langchain_community.llms.utils import enforce_stop_tokens

 from langchain_experimental.pydantic_v1 import Field, root_validator
diff --git a/libs/experimental/langchain_experimental/sql/base.py b/libs/experimental/langchain_experimental/sql/base.py
index f7b0ac51522..9922f9f8b09 100644
--- a/libs/experimental/langchain_experimental/sql/base.py
+++ b/libs/experimental/langchain_experimental/sql/base.py
@@ -27,7 +27,8 @@ class SQLDatabaseChain(Chain):
         .. code-block:: python

             from langchain_experimental.sql import SQLDatabaseChain
-            from langchain.llms import OpenAI, SQLDatabase
+            from langchain_community.llms import OpenAI
+            from langchain_community.utilities import SQLDatabase

             db = SQLDatabase(...)
             db_chain = SQLDatabaseChain.from_llm(OpenAI(), db)
diff --git a/libs/experimental/langchain_experimental/sql/vector_sql.py b/libs/experimental/langchain_experimental/sql/vector_sql.py
index 396b48bba99..2e479c24b63 100644
--- a/libs/experimental/langchain_experimental/sql/vector_sql.py
+++ b/libs/experimental/langchain_experimental/sql/vector_sql.py
@@ -88,7 +88,9 @@ class VectorSQLDatabaseChain(SQLDatabaseChain):
         .. code-block:: python

             from langchain_experimental.sql import SQLDatabaseChain
-            from langchain.llms import OpenAI, SQLDatabase, OpenAIEmbeddings
+            from langchain_community.embeddings import OpenAIEmbeddings
+            from langchain_community.llms import OpenAI
+            from langchain_community.utilities import SQLDatabase

             db = SQLDatabase(...)
             db_chain = VectorSQLDatabaseChain.from_llm(OpenAI(), db, OpenAIEmbeddings())
diff --git a/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py b/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py
index 6ceaee0ede9..6a13727e357 100644
--- a/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py
+++ b/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py
@@ -1,10 +1,10 @@
 from typing import Any, Dict, Optional, Type, Union

 from langchain.chains.openai_functions import create_structured_output_chain
-from langchain.chat_models import ChatOpenAI
 from langchain.prompts import PromptTemplate
 from langchain.pydantic_v1 import BaseModel
 from langchain.schema import BaseLLMOutputParser, BasePromptTemplate
+from langchain_community.chat_models import ChatOpenAI

 from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator
diff --git a/libs/experimental/tests/integration_tests/chains/test_cpal.py b/libs/experimental/tests/integration_tests/chains/test_cpal.py
index 570f5656d99..398350927cd 100644
--- a/libs/experimental/tests/integration_tests/chains/test_cpal.py
+++ b/libs/experimental/tests/integration_tests/chains/test_cpal.py
@@ -6,9 +6,9 @@ from typing import Type
 from unittest import mock

 import pytest
-from langchain.llms import OpenAI
 from langchain.output_parsers import PydanticOutputParser
 from langchain.prompts.prompt import PromptTemplate
+from langchain_community.llms import OpenAI

 from langchain_experimental import pydantic_v1 as pydantic
 from langchain_experimental.cpal.base import (
diff --git a/libs/experimental/tests/integration_tests/chains/test_pal.py b/libs/experimental/tests/integration_tests/chains/test_pal.py
index 2215a72a8b7..1c623334a1f 100644
--- a/libs/experimental/tests/integration_tests/chains/test_pal.py
+++ b/libs/experimental/tests/integration_tests/chains/test_pal.py
@@ -1,6 +1,6 @@
 """Test PAL chain."""

-from langchain.llms import OpenAI
+from langchain_community.llms import OpenAI

 from langchain_experimental.pal_chain.base import PALChain
diff --git a/libs/experimental/tests/integration_tests/chains/test_sql_database.py b/libs/experimental/tests/integration_tests/chains/test_sql_database.py
index a6ebe2df581..54783d2a638 100644
--- a/libs/experimental/tests/integration_tests/chains/test_sql_database.py
+++ b/libs/experimental/tests/integration_tests/chains/test_sql_database.py
@@ -1,6 +1,6 @@
 """Test SQL Database Chain."""
-from langchain.llms.openai import OpenAI
 from langchain.utilities.sql_database import SQLDatabase
+from langchain_community.llms.openai import OpenAI
 from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine, insert

 from langchain_experimental.sql.base import (
diff --git a/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py
index 3b94e52d08e..90ac8e0d273 100644
--- a/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py
+++ b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py
@@ -1,7 +1,7 @@
 import pytest
-from langchain.chat_models import ChatOpenAI
 from langchain.prompts.few_shot import FewShotPromptTemplate
 from langchain.pydantic_v1 import BaseModel
+from langchain_community.chat_models import ChatOpenAI

 from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator
 from langchain_experimental.tabular_synthetic_data.openai import (
diff --git a/libs/experimental/tests/integration_tests/llms/test_anthropic_functions.py b/libs/experimental/tests/integration_tests/llms/test_anthropic_functions.py
index 008e268a350..2c80fd88cb5 100644
--- a/libs/experimental/tests/integration_tests/llms/test_anthropic_functions.py
+++ b/libs/experimental/tests/integration_tests/llms/test_anthropic_functions.py
@@ -2,8 +2,8 @@

 import unittest

-from langchain.chat_models.anthropic import ChatAnthropic
-from langchain.chat_models.bedrock import BedrockChat
+from langchain_community.chat_models.anthropic import ChatAnthropic
+from langchain_community.chat_models.bedrock import BedrockChat

 from langchain_experimental.llms.anthropic_functions import AnthropicFunctions
diff --git a/libs/experimental/tests/integration_tests/llms/test_ollama_functions.py b/libs/experimental/tests/integration_tests/llms/test_ollama_functions.py
index 348edbc27cb..c1b845bdbd3 100644
--- a/libs/experimental/tests/integration_tests/llms/test_ollama_functions.py
+++ b/libs/experimental/tests/integration_tests/llms/test_ollama_functions.py
@@ -2,7 +2,7 @@

 import unittest

-from langchain.chat_models.ollama import ChatOllama
+from langchain_community.chat_models.ollama import ChatOllama

 from langchain_experimental.llms.ollama_functions import OllamaFunctions
diff --git a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_llama2chat.py b/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_llama2chat.py
index 2ae184b196c..3135b10745f 100644
--- a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_llama2chat.py
+++ b/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_llama2chat.py
@@ -5,8 +5,8 @@ from langchain.callbacks.manager import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
 )
-from langchain.llms.base import LLM
 from langchain.schema import AIMessage, HumanMessage, SystemMessage
+from langchain_core.language_models import LLM

 from langchain_experimental.chat_models import Llama2Chat
 from langchain_experimental.chat_models.llm_wrapper import DEFAULT_SYSTEM_PROMPT
diff --git a/libs/experimental/tests/unit_tests/fake_llm.py b/libs/experimental/tests/unit_tests/fake_llm.py
index a0369418758..6ad771efcfc 100644
--- a/libs/experimental/tests/unit_tests/fake_llm.py
+++ b/libs/experimental/tests/unit_tests/fake_llm.py
@@ -2,7 +2,7 @@
 from typing import Any, Dict, List, Mapping, Optional, cast

 from langchain.callbacks.manager import CallbackManagerForLLMRun
-from langchain.llms.base import LLM
+from langchain_core.language_models import LLM

 from langchain_experimental.pydantic_v1 import validator
diff --git a/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py b/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py
index 6a1bc3fc8ab..fc8cf6aed47 100644
--- a/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py
+++ b/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py
@@ -1,8 +1,8 @@
 from typing import Any, Dict

 import pytest
-from langchain.chat_models import FakeListChatModel
 from langchain.prompts.prompt import PromptTemplate
+from langchain_community.chat_models import FakeListChatModel
 from test_utils import MockEncoder, MockEncoderReturnsList

 import langchain_experimental.rl_chain.base as rl_chain
diff --git a/libs/experimental/tests/unit_tests/test_smartllm.py b/libs/experimental/tests/unit_tests/test_smartllm.py
index b969bbdb5db..0f6d7d13ffb 100644
--- a/libs/experimental/tests/unit_tests/test_smartllm.py
+++ b/libs/experimental/tests/unit_tests/test_smartllm.py
@@ -1,7 +1,7 @@
 """Test SmartLLM."""
-from langchain.chat_models import FakeListChatModel
-from langchain.llms import FakeListLLM
 from langchain.prompts.prompt import PromptTemplate
+from langchain_community.chat_models import FakeListChatModel
+from langchain_community.llms import FakeListLLM

 from langchain_experimental.smart_llm import SmartLLMChain
diff --git a/libs/langchain/langchain/__init__.py b/libs/langchain/langchain/__init__.py
index 3d252ac8a8b..d7eac588920 100644
--- a/libs/langchain/langchain/__init__.py
+++ b/libs/langchain/langchain/__init__.py
@@ -129,103 +129,103 @@ def __getattr__(name: str) -> Any:

         return Wikipedia
     elif name == "Anthropic":
-        from langchain.llms import Anthropic
+        from langchain_community.llms import Anthropic

         _warn_on_import(name, replacement="langchain.llms.Anthropic")

         return Anthropic
     elif name == "Banana":
-        from langchain.llms import Banana
+        from langchain_community.llms import Banana

         _warn_on_import(name, replacement="langchain.llms.Banana")

         return Banana
     elif name == "CerebriumAI":
-        from langchain.llms import CerebriumAI
+        from langchain_community.llms import CerebriumAI

         _warn_on_import(name, replacement="langchain.llms.CerebriumAI")

         return CerebriumAI
     elif name == "Cohere":
-        from langchain.llms import Cohere
+        from langchain_community.llms import Cohere

         _warn_on_import(name, replacement="langchain.llms.Cohere")

         return Cohere
     elif name == "ForefrontAI":
-        from langchain.llms import ForefrontAI
+        from langchain_community.llms import ForefrontAI

         _warn_on_import(name, replacement="langchain.llms.ForefrontAI")

         return ForefrontAI
     elif name == "GooseAI":
-        from langchain.llms import GooseAI
+        from langchain_community.llms import GooseAI

         _warn_on_import(name, replacement="langchain.llms.GooseAI")

         return GooseAI
     elif name == "HuggingFaceHub":
-        from langchain.llms import HuggingFaceHub
+        from langchain_community.llms import HuggingFaceHub

         _warn_on_import(name, replacement="langchain.llms.HuggingFaceHub")

         return HuggingFaceHub
     elif name == "HuggingFaceTextGenInference":
-        from langchain.llms import HuggingFaceTextGenInference
+        from langchain_community.llms import HuggingFaceTextGenInference

         _warn_on_import(name, replacement="langchain.llms.HuggingFaceTextGenInference")

         return HuggingFaceTextGenInference
     elif name == "LlamaCpp":
-        from langchain.llms import LlamaCpp
+        from langchain_community.llms import LlamaCpp

         _warn_on_import(name, replacement="langchain.llms.LlamaCpp")

         return LlamaCpp
     elif name == "Modal":
-        from langchain.llms import Modal
+        from langchain_community.llms import Modal

         _warn_on_import(name, replacement="langchain.llms.Modal")

         return Modal
     elif name == "OpenAI":
-        from langchain.llms import OpenAI
+        from langchain_community.llms import OpenAI

         _warn_on_import(name, replacement="langchain.llms.OpenAI")

         return OpenAI
     elif name == "Petals":
-        from langchain.llms import Petals
+        from langchain_community.llms import Petals

         _warn_on_import(name, replacement="langchain.llms.Petals")

         return Petals
     elif name == "PipelineAI":
-        from langchain.llms import PipelineAI
+        from langchain_community.llms import PipelineAI

         _warn_on_import(name, replacement="langchain.llms.PipelineAI")

         return PipelineAI
     elif name == "SagemakerEndpoint":
-        from langchain.llms import SagemakerEndpoint
+        from langchain_community.llms import SagemakerEndpoint

         _warn_on_import(name, replacement="langchain.llms.SagemakerEndpoint")

         return SagemakerEndpoint
     elif name == "StochasticAI":
-        from langchain.llms import StochasticAI
+        from langchain_community.llms import StochasticAI

         _warn_on_import(name, replacement="langchain.llms.StochasticAI")

         return StochasticAI
     elif name == "Writer":
-        from langchain.llms import Writer
+        from langchain_community.llms import Writer

         _warn_on_import(name, replacement="langchain.llms.Writer")

         return Writer
     elif name == "HuggingFacePipeline":
-        from langchain.llms.huggingface_pipeline import HuggingFacePipeline
+        from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline

         _warn_on_import(
             name, replacement="langchain.llms.huggingface_pipeline.HuggingFacePipeline"
diff --git a/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py b/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py
index 1548c3d58aa..9cdadb06fd6 100644
--- a/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py
+++ b/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py
@@ -1,12 +1,12 @@
 """Toolkit for interacting with a vector store."""
 from typing import List

+from langchain_community.llms.openai import OpenAI
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.vectorstores import VectorStore

 from langchain.agents.agent_toolkits.base import BaseToolkit
-from langchain.llms.openai import OpenAI
 from langchain.tools import BaseTool
 from langchain.tools.vectorstore.tool import (
     VectorStoreQATool,
diff --git a/libs/langchain/langchain/agents/json_chat/base.py b/libs/langchain/langchain/agents/json_chat/base.py
index 46bbc861805..954dd375f25 100644
--- a/libs/langchain/langchain/agents/json_chat/base.py
+++ b/libs/langchain/langchain/agents/json_chat/base.py
@@ -22,7 +22,7 @@ def create_json_chat_agent(
         .. code-block:: python

             from langchain import hub
-            from langchain.chat_models import ChatOpenAI
+            from langchain_community.chat_models import ChatOpenAI
             from langchain.agents import AgentExecutor, create_json_chat_agent

             prompt = hub.pull("hwchase17/react-chat-json")
diff --git a/libs/langchain/langchain/agents/openai_functions_agent/base.py b/libs/langchain/langchain/agents/openai_functions_agent/base.py
index a08cdbabbcb..df500653d76 100644
--- a/libs/langchain/langchain/agents/openai_functions_agent/base.py
+++ b/libs/langchain/langchain/agents/openai_functions_agent/base.py
@@ -240,7 +240,7 @@ def create_openai_functions_agent(
         .. code-block:: python

-            from langchain.chat_models import ChatOpenAI
+            from langchain_community.chat_models import ChatOpenAI
             from langchain.agents import AgentExecutor, create_openai_functions_agent
             from langchain import hub
diff --git a/libs/langchain/langchain/agents/openai_tools/base.py b/libs/langchain/langchain/agents/openai_tools/base.py
index 922285abf28..d0be5742b67 100644
--- a/libs/langchain/langchain/agents/openai_tools/base.py
+++ b/libs/langchain/langchain/agents/openai_tools/base.py
@@ -23,7 +23,7 @@ def create_openai_tools_agent(
         .. code-block:: python

             from langchain import hub
-            from langchain.chat_models import ChatOpenAI
+            from langchain_community.chat_models import ChatOpenAI
             from langchain.agents import AgentExecutor, create_openai_tools_agent

             prompt = hub.pull("hwchase17/openai-tools-agent")
diff --git a/libs/langchain/langchain/agents/react/agent.py b/libs/langchain/langchain/agents/react/agent.py
index 8f5898d44f1..fe27a42b6a5 100644
--- a/libs/langchain/langchain/agents/react/agent.py
+++ b/libs/langchain/langchain/agents/react/agent.py
@@ -22,7 +22,7 @@ def create_react_agent(
         .. code-block:: python

             from langchain import hub
-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI
             from langchain.agents import AgentExecutor, create_react_agent

             prompt = hub.pull("hwchase17/react")
diff --git a/libs/langchain/langchain/agents/self_ask_with_search/base.py b/libs/langchain/langchain/agents/self_ask_with_search/base.py
index f7b5af55d40..34604f5f44a 100644
--- a/libs/langchain/langchain/agents/self_ask_with_search/base.py
+++ b/libs/langchain/langchain/agents/self_ask_with_search/base.py
@@ -94,7 +94,7 @@ def create_self_ask_with_search_agent(
         .. code-block:: python

             from langchain import hub
-            from langchain.chat_models import ChatAnthropic
+            from langchain_community.chat_models import ChatAnthropic
             from langchain.agents import (
                 AgentExecutor, create_self_ask_with_search_agent
             )
diff --git a/libs/langchain/langchain/agents/structured_chat/base.py b/libs/langchain/langchain/agents/structured_chat/base.py
index 6f2879baadb..21448ed6a28 100644
--- a/libs/langchain/langchain/agents/structured_chat/base.py
+++ b/libs/langchain/langchain/agents/structured_chat/base.py
@@ -159,7 +159,7 @@ def create_structured_chat_agent(
         .. code-block:: python

             from langchain import hub
-            from langchain.chat_models import ChatOpenAI
+            from langchain_community.chat_models import ChatOpenAI
             from langchain.agents import AgentExecutor, create_structured_chat_agent

             prompt = hub.pull("hwchase17/structured-chat-agent")
diff --git a/libs/langchain/langchain/agents/xml/base.py b/libs/langchain/langchain/agents/xml/base.py
index 148d6a889bc..b10efc12db2 100644
--- a/libs/langchain/langchain/agents/xml/base.py
+++ b/libs/langchain/langchain/agents/xml/base.py
@@ -116,7 +116,7 @@ def create_xml_agent(
         .. code-block:: python

             from langchain import hub
-            from langchain.chat_models import ChatAnthropic
+            from langchain_community.chat_models import ChatAnthropic
             from langchain.agents import AgentExecutor, create_xml_agent

             prompt = hub.pull("hwchase17/xml-agent-convo")
diff --git a/libs/langchain/langchain/chains/combine_documents/map_reduce.py b/libs/langchain/langchain/chains/combine_documents/map_reduce.py
index cd7bf3f0b7e..ef3a0fb7bf3 100644
--- a/libs/langchain/langchain/chains/combine_documents/map_reduce.py
+++ b/libs/langchain/langchain/chains/combine_documents/map_reduce.py
@@ -33,7 +33,7 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain):
                 MapReduceDocumentsChain,
             )
             from langchain_core.prompts import PromptTemplate
-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI

             # This controls how each document will be formatted. Specifically,
             # it will be passed to `format_document` - see that function for more
diff --git a/libs/langchain/langchain/chains/combine_documents/map_rerank.py b/libs/langchain/langchain/chains/combine_documents/map_rerank.py
index a0f673621ca..d3ac0410942 100644
--- a/libs/langchain/langchain/chains/combine_documents/map_rerank.py
+++ b/libs/langchain/langchain/chains/combine_documents/map_rerank.py
@@ -26,7 +26,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):

             from langchain.chains import StuffDocumentsChain, LLMChain
             from langchain_core.prompts import PromptTemplate
-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI
             from langchain.output_parsers.regex import RegexParser

             document_variable_name = "context"
diff --git a/libs/langchain/langchain/chains/combine_documents/reduce.py b/libs/langchain/langchain/chains/combine_documents/reduce.py
index 775f5c352cf..74e53c1fbd8 100644
--- a/libs/langchain/langchain/chains/combine_documents/reduce.py
+++ b/libs/langchain/langchain/chains/combine_documents/reduce.py
@@ -146,7 +146,7 @@ class ReduceDocumentsChain(BaseCombineDocumentsChain):
                 StuffDocumentsChain, LLMChain, ReduceDocumentsChain
             )
             from langchain_core.prompts import PromptTemplate
-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI

             # This controls how each document will be formatted. Specifically,
             # it will be passed to `format_document` - see that function for more
diff --git a/libs/langchain/langchain/chains/combine_documents/refine.py b/libs/langchain/langchain/chains/combine_documents/refine.py
index fb9da0eb6d1..7a6c495328f 100644
--- a/libs/langchain/langchain/chains/combine_documents/refine.py
+++ b/libs/langchain/langchain/chains/combine_documents/refine.py
@@ -37,7 +37,7 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):

             from langchain.chains import RefineDocumentsChain, LLMChain
             from langchain_core.prompts import PromptTemplate
-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI

             # This controls how each document will be formatted. Specifically,
             # it will be passed to `format_document` - see that function for more
diff --git a/libs/langchain/langchain/chains/combine_documents/stuff.py b/libs/langchain/langchain/chains/combine_documents/stuff.py
index 0251db8801b..73183687e98 100644
--- a/libs/langchain/langchain/chains/combine_documents/stuff.py
+++ b/libs/langchain/langchain/chains/combine_documents/stuff.py
@@ -104,7 +104,7 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):

             from langchain.chains import StuffDocumentsChain, LLMChain
             from langchain_core.prompts import PromptTemplate
-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI

             # This controls how each document will be formatted. Specifically,
             # it will be passed to `format_document` - see that function for more
diff --git a/libs/langchain/langchain/chains/constitutional_ai/base.py b/libs/langchain/langchain/chains/constitutional_ai/base.py
index ba8a551737a..33b3886f40d 100644
--- a/libs/langchain/langchain/chains/constitutional_ai/base.py
+++ b/libs/langchain/langchain/chains/constitutional_ai/base.py
@@ -18,7 +18,7 @@ class ConstitutionalChain(Chain):
     Example:
         .. code-block:: python

-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI
             from langchain.chains import LLMChain, ConstitutionalChain
             from langchain.chains.constitutional_ai.models \
                 import ConstitutionalPrinciple
diff --git a/libs/langchain/langchain/chains/conversation/base.py b/libs/langchain/langchain/chains/conversation/base.py
index f0c42217343..eea29aeb5b6 100644
--- a/libs/langchain/langchain/chains/conversation/base.py
+++ b/libs/langchain/langchain/chains/conversation/base.py
@@ -17,7 +17,7 @@ class ConversationChain(LLMChain):
         .. code-block:: python

             from langchain.chains import ConversationChain
-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI

             conversation = ConversationChain(llm=OpenAI())
     """
diff --git a/libs/langchain/langchain/chains/conversational_retrieval/base.py b/libs/langchain/langchain/chains/conversational_retrieval/base.py
index 304fc95e34c..456186f9778 100644
--- a/libs/langchain/langchain/chains/conversational_retrieval/base.py
+++ b/libs/langchain/langchain/chains/conversational_retrieval/base.py
@@ -260,7 +260,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
                 StuffDocumentsChain, LLMChain, ConversationalRetrievalChain
             )
             from langchain_core.prompts import PromptTemplate
-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI

             combine_docs_chain = StuffDocumentsChain(...)
             vectorstore = ...
diff --git a/libs/langchain/langchain/chains/elasticsearch_database/base.py b/libs/langchain/langchain/chains/elasticsearch_database/base.py
index 3eba20afbc2..46c336808f1 100644
--- a/libs/langchain/langchain/chains/elasticsearch_database/base.py
+++ b/libs/langchain/langchain/chains/elasticsearch_database/base.py
@@ -27,7 +27,7 @@ class ElasticsearchDatabaseChain(Chain):
         .. code-block:: python

             from langchain.chains import ElasticsearchDatabaseChain
-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI
             from elasticsearch import Elasticsearch

             database = Elasticsearch("http://localhost:9200")
diff --git a/libs/langchain/langchain/chains/ernie_functions/base.py b/libs/langchain/langchain/chains/ernie_functions/base.py
index 2f0924e3b3a..afed9d4257d 100644
--- a/libs/langchain/langchain/chains/ernie_functions/base.py
+++ b/libs/langchain/langchain/chains/ernie_functions/base.py
@@ -236,7 +236,7 @@ def create_ernie_fn_runnable(
                 from typing import Optional

                 from langchain.chains.ernie_functions import create_ernie_fn_chain
-                from langchain.chat_models import ErnieBotChat
+                from langchain_community.chat_models import ErnieBotChat
                 from langchain.prompts import ChatPromptTemplate
                 from langchain.pydantic_v1 import BaseModel, Field
@@ -310,7 +310,7 @@ def create_structured_output_runnable(
                 from typing import Optional

                 from langchain.chains.ernie_functions import create_structured_output_chain
-                from langchain.chat_models import ErnieBotChat
+                from langchain_community.chat_models import ErnieBotChat
                 from langchain.prompts import ChatPromptTemplate
                 from langchain.pydantic_v1 import BaseModel, Field
@@ -407,7 +407,7 @@ def create_ernie_fn_chain(
                 from typing import Optional

                 from langchain.chains.ernie_functions import create_ernie_fn_chain
-                from langchain.chat_models import ErnieBotChat
+                from langchain_community.chat_models import ErnieBotChat
                 from langchain.prompts import ChatPromptTemplate
                 from langchain.pydantic_v1 import BaseModel, Field
@@ -494,7 +494,7 @@ def create_structured_output_chain(
                 from typing import Optional

                 from langchain.chains.ernie_functions import create_structured_output_chain
-                from langchain.chat_models import ErnieBotChat
+                from langchain_community.chat_models import ErnieBotChat
                 from langchain.prompts import ChatPromptTemplate
                 from langchain.pydantic_v1 import BaseModel, Field
diff --git a/libs/langchain/langchain/chains/flare/base.py b/libs/langchain/langchain/chains/flare/base.py
index de673f6cb86..06b58ca6d06 100644
--- a/libs/langchain/langchain/chains/flare/base.py
+++ b/libs/langchain/langchain/chains/flare/base.py
@@ -5,6 +5,7 @@ from abc import abstractmethod
 from typing import Any, Dict, List, Optional, Sequence, Tuple

 import numpy as np
+from langchain_community.llms.openai import OpenAI
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.outputs import Generation
 from langchain_core.prompts import BasePromptTemplate
@@ -21,7 +22,6 @@ from langchain.chains.flare.prompts import (
     FinishedOutputParser,
 )
 from langchain.chains.llm import LLMChain
-from langchain.llms.openai import OpenAI


 class _ResponseChain(LLMChain):
diff --git a/libs/langchain/langchain/chains/llm.py b/libs/langchain/langchain/chains/llm.py
index b7023dbc6a1..2d9908643a1 100644
--- a/libs/langchain/langchain/chains/llm.py
+++ b/libs/langchain/langchain/chains/llm.py
@@ -41,7 +41,7 @@ class LLMChain(Chain):
         .. code-block:: python

             from langchain.chains import LLMChain
-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI
             from langchain_core.prompts import PromptTemplate

             prompt_template = "Tell me a {adjective} joke"
             prompt = PromptTemplate(
diff --git a/libs/langchain/langchain/chains/llm_checker/base.py b/libs/langchain/langchain/chains/llm_checker/base.py
index c432f4f4219..4e718c73457 100644
--- a/libs/langchain/langchain/chains/llm_checker/base.py
+++ b/libs/langchain/langchain/chains/llm_checker/base.py
@@ -68,7 +68,7 @@ class LLMCheckerChain(Chain):
     Example:
         .. code-block:: python

-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI
             from langchain.chains import LLMCheckerChain

             llm = OpenAI(temperature=0.7)
             checker_chain = LLMCheckerChain.from_llm(llm)
diff --git a/libs/langchain/langchain/chains/llm_math/base.py b/libs/langchain/langchain/chains/llm_math/base.py
index 311df1b8c73..7e520ef0c4f 100644
--- a/libs/langchain/langchain/chains/llm_math/base.py
+++ b/libs/langchain/langchain/chains/llm_math/base.py
@@ -26,7 +26,7 @@ class LLMMathChain(Chain):
         .. code-block:: python

             from langchain.chains import LLMMathChain
-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI

             llm_math = LLMMathChain.from_llm(OpenAI())
     """
diff --git a/libs/langchain/langchain/chains/llm_summarization_checker/base.py b/libs/langchain/langchain/chains/llm_summarization_checker/base.py
index ab85dc8e167..a3659a53dbe 100644
--- a/libs/langchain/langchain/chains/llm_summarization_checker/base.py
+++ b/libs/langchain/langchain/chains/llm_summarization_checker/base.py
@@ -71,7 +71,7 @@ class LLMSummarizationCheckerChain(Chain):
     Example:
         .. code-block:: python

-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI
             from langchain.chains import LLMSummarizationCheckerChain

             llm = OpenAI(temperature=0.0)
             checker_chain = LLMSummarizationCheckerChain.from_llm(llm)
diff --git a/libs/langchain/langchain/chains/loading.py b/libs/langchain/langchain/chains/loading.py
index 963fb5383bd..64565b61da5 100644
--- a/libs/langchain/langchain/chains/loading.py
+++ b/libs/langchain/langchain/chains/loading.py
@@ -4,6 +4,7 @@ from pathlib import Path
 from typing import Any, Union

 import yaml
+from langchain_community.llms.loading import load_llm, load_llm_from_config
 from langchain_core.prompts.loading import (
     _load_output_parser,
     load_prompt,
@@ -27,7 +28,6 @@ from langchain.chains.qa_with_sources.base import QAWithSourcesChain
 from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
 from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
 from langchain.chains.retrieval_qa.base import RetrievalQA, VectorDBQA
-from langchain.llms.loading import load_llm, load_llm_from_config
 from langchain.utilities.loading import try_load_from_hub

 URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
diff --git a/libs/langchain/langchain/chains/natbot/base.py b/libs/langchain/langchain/chains/natbot/base.py
index 013b281e33d..9e9d521e541 100644
--- a/libs/langchain/langchain/chains/natbot/base.py
+++ b/libs/langchain/langchain/chains/natbot/base.py
@@ -4,6 +4,7 @@ from __future__ import annotations
 import warnings
 from typing import Any, Dict, List, Optional

+from langchain_community.llms.openai import OpenAI
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.pydantic_v1 import Extra, root_validator

@@ -11,7 +12,6 @@ from langchain.callbacks.manager import CallbackManagerForChainRun
 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
 from langchain.chains.natbot.prompt import PROMPT
-from langchain.llms.openai import OpenAI


 class NatBotChain(Chain):
diff --git a/libs/langchain/langchain/chains/openai_functions/base.py b/libs/langchain/langchain/chains/openai_functions/base.py
index 98f5908934b..da3aef2c773 100644
--- a/libs/langchain/langchain/chains/openai_functions/base.py
+++ b/libs/langchain/langchain/chains/openai_functions/base.py
@@ -243,7 +243,7 @@ def create_openai_fn_runnable(
                 from typing import Optional

                 from langchain.chains.openai_functions import create_openai_fn_chain
-                from langchain.chat_models import ChatOpenAI
+                from langchain_community.chat_models import ChatOpenAI
                 from langchain_core.prompts import ChatPromptTemplate
                 from langchain_core.pydantic_v1 import BaseModel, Field
@@ -317,7 +317,7 @@ def create_structured_output_runnable(
                 from typing import Optional

                 from langchain.chains.openai_functions import create_structured_output_chain
-                from langchain.chat_models import ChatOpenAI
+                from langchain_community.chat_models import ChatOpenAI
                 from langchain_core.prompts import ChatPromptTemplate
                 from langchain_core.pydantic_v1 import BaseModel, Field
@@ -418,7 +418,7 @@ def create_openai_fn_chain(
                 from typing import Optional

                 from langchain.chains.openai_functions import create_openai_fn_chain
-                from langchain.chat_models import ChatOpenAI
+                from langchain_community.chat_models import ChatOpenAI
                 from langchain_core.prompts import ChatPromptTemplate
                 from langchain_core.pydantic_v1 import BaseModel, Field
@@ -505,7 +505,7 @@ def create_structured_output_chain(
                 from typing import Optional

                 from langchain.chains.openai_functions import create_structured_output_chain
-                from langchain.chat_models import ChatOpenAI
+                from langchain_community.chat_models import ChatOpenAI
                 from langchain_core.prompts import ChatPromptTemplate
                 from langchain_core.pydantic_v1 import BaseModel, Field
diff --git a/libs/langchain/langchain/chains/openai_functions/openapi.py b/libs/langchain/langchain/chains/openai_functions/openapi.py
index 3e011a7b767..af5c4b32330 100644
--- a/libs/langchain/langchain/chains/openai_functions/openapi.py
+++ b/libs/langchain/langchain/chains/openai_functions/openapi.py
@@ -6,6 +6,7 @@ from collections import defaultdict
 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union

 import requests
+from langchain_community.chat_models import ChatOpenAI
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate
 from langchain_core.utils.input import get_colored_text
@@ -15,7 +16,6 @@ from langchain.callbacks.manager import CallbackManagerForChainRun
 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
 from langchain.chains.sequential import SequentialChain
-from langchain.chat_models import ChatOpenAI
 from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
 from langchain.tools import APIOperation
 from langchain.utilities.openapi import OpenAPISpec
diff --git a/libs/langchain/langchain/chains/retrieval_qa/base.py b/libs/langchain/langchain/chains/retrieval_qa/base.py
index 185900ca32c..b73d0bc648c 100644
--- a/libs/langchain/langchain/chains/retrieval_qa/base.py
+++ b/libs/langchain/langchain/chains/retrieval_qa/base.py
@@ -200,7 +200,7 @@ class RetrievalQA(BaseRetrievalQA):
     Example:
         .. code-block:: python

-            from langchain.llms import OpenAI
+            from langchain_community.llms import OpenAI
             from langchain.chains import RetrievalQA
             from langchain.vectorstores import FAISS
             from langchain_core.vectorstores import VectorStoreRetriever
diff --git a/libs/langchain/langchain/chains/router/multi_retrieval_qa.py b/libs/langchain/langchain/chains/router/multi_retrieval_qa.py
index 17e0a7fe001..d9b0b924edb 100644
--- a/libs/langchain/langchain/chains/router/multi_retrieval_qa.py
+++ b/libs/langchain/langchain/chains/router/multi_retrieval_qa.py
@@ -3,6 +3,7 @@ from __future__ import annotations

 from typing import Any, Dict, List, Mapping, Optional

+from langchain_community.chat_models import ChatOpenAI
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import PromptTemplate
 from langchain_core.retrievers import BaseRetriever
@@ -16,7 +17,6 @@ from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
 from langchain.chains.router.multi_retrieval_prompt import (
     MULTI_RETRIEVAL_ROUTER_TEMPLATE,
 )
-from langchain.chat_models import ChatOpenAI


 class MultiRetrievalQAChain(MultiRouteChain):
diff --git a/libs/langchain/langchain/chat_models/__init__.py b/libs/langchain/langchain/chat_models/__init__.py
index 3f510129b99..7064f7b7585 100644
--- a/libs/langchain/langchain/chat_models/__init__.py
+++ b/libs/langchain/langchain/chat_models/__init__.py
@@ -17,36 +17,36 @@ an interface where "chat messages" are the inputs and outputs.
 AIMessage, BaseMessage, HumanMessage
 """  # noqa: E501

-from langchain.chat_models.anthropic import ChatAnthropic
-from langchain.chat_models.anyscale import ChatAnyscale
-from langchain.chat_models.azure_openai import AzureChatOpenAI
-from langchain.chat_models.baichuan import ChatBaichuan
-from langchain.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
-from langchain.chat_models.bedrock import BedrockChat
-from langchain.chat_models.cohere import ChatCohere
-from langchain.chat_models.databricks import ChatDatabricks
-from langchain.chat_models.ernie import ErnieBotChat
-from langchain.chat_models.everlyai import ChatEverlyAI
-from langchain.chat_models.fake import FakeListChatModel
-from langchain.chat_models.fireworks import ChatFireworks
-from langchain.chat_models.gigachat import GigaChat
-from langchain.chat_models.google_palm import ChatGooglePalm
-from langchain.chat_models.human import HumanInputChatModel
-from langchain.chat_models.hunyuan import ChatHunyuan
-from langchain.chat_models.javelin_ai_gateway import ChatJavelinAIGateway
-from langchain.chat_models.jinachat import JinaChat
-from langchain.chat_models.konko import ChatKonko
-from langchain.chat_models.litellm import ChatLiteLLM
-from langchain.chat_models.minimax import MiniMaxChat
-from langchain.chat_models.mlflow import ChatMlflow
-from langchain.chat_models.mlflow_ai_gateway import ChatMLflowAIGateway
-from langchain.chat_models.ollama import ChatOllama
-from langchain.chat_models.openai import ChatOpenAI
-from langchain.chat_models.pai_eas_endpoint import PaiEasChatEndpoint
-from langchain.chat_models.promptlayer_openai import PromptLayerChatOpenAI
-from langchain.chat_models.vertexai import ChatVertexAI
-from langchain.chat_models.volcengine_maas import VolcEngineMaasChat
-from langchain.chat_models.yandex import ChatYandexGPT
+from langchain_community.chat_models.anthropic import ChatAnthropic
+from langchain_community.chat_models.anyscale import ChatAnyscale
+from langchain_community.chat_models.azure_openai import AzureChatOpenAI
+from langchain_community.chat_models.baichuan import ChatBaichuan
+from langchain_community.chat_models.baidu_qianfan_endpoint import QianfanChatEndpoint
+from langchain_community.chat_models.bedrock import BedrockChat
+from langchain_community.chat_models.cohere import ChatCohere
+from langchain_community.chat_models.databricks import ChatDatabricks
+from langchain_community.chat_models.ernie import ErnieBotChat
+from langchain_community.chat_models.everlyai import ChatEverlyAI
+from langchain_community.chat_models.fake import FakeListChatModel
+from langchain_community.chat_models.fireworks import ChatFireworks
+from langchain_community.chat_models.gigachat import GigaChat
+from langchain_community.chat_models.google_palm import ChatGooglePalm
+from langchain_community.chat_models.human import HumanInputChatModel
+from langchain_community.chat_models.hunyuan import ChatHunyuan
+from langchain_community.chat_models.javelin_ai_gateway import ChatJavelinAIGateway
+from langchain_community.chat_models.jinachat import JinaChat
+from langchain_community.chat_models.konko import ChatKonko
+from langchain_community.chat_models.litellm import ChatLiteLLM
+from langchain_community.chat_models.minimax import MiniMaxChat
+from langchain_community.chat_models.mlflow import ChatMlflow
+from langchain_community.chat_models.mlflow_ai_gateway import ChatMLflowAIGateway
+from langchain_community.chat_models.ollama import ChatOllama
+from langchain_community.chat_models.openai import ChatOpenAI
+from langchain_community.chat_models.pai_eas_endpoint import PaiEasChatEndpoint
+from langchain_community.chat_models.promptlayer_openai import PromptLayerChatOpenAI
+from langchain_community.chat_models.vertexai import ChatVertexAI
+from langchain_community.chat_models.volcengine_maas import VolcEngineMaasChat
+from langchain_community.chat_models.yandex import ChatYandexGPT

 __all__ = [
     "ChatOpenAI",
diff --git a/libs/langchain/langchain/embeddings/__init__.py b/libs/langchain/langchain/embeddings/__init__.py
index 3710a6e1969..92cba712218 100644
--- a/libs/langchain/langchain/embeddings/__init__.py
+++ b/libs/langchain/langchain/embeddings/__init__.py
@@ -14,64 +14,74 @@ from different APIs and services.

 import logging
 from typing import Any

-from langchain.embeddings.aleph_alpha import (
+from langchain_community.embeddings.aleph_alpha import (
     AlephAlphaAsymmetricSemanticEmbedding,
     AlephAlphaSymmetricSemanticEmbedding,
 )
-from langchain.embeddings.awa import AwaEmbeddings
-from langchain.embeddings.azure_openai import AzureOpenAIEmbeddings
-from langchain.embeddings.baidu_qianfan_endpoint import QianfanEmbeddingsEndpoint
-from langchain.embeddings.bedrock import BedrockEmbeddings
-from langchain.embeddings.bookend import BookendEmbeddings
-from langchain.embeddings.cache import CacheBackedEmbeddings
-from langchain.embeddings.clarifai import ClarifaiEmbeddings
-from langchain.embeddings.cohere import CohereEmbeddings
-from langchain.embeddings.dashscope import DashScopeEmbeddings
-from langchain.embeddings.databricks import DatabricksEmbeddings
-from langchain.embeddings.deepinfra import DeepInfraEmbeddings
-from langchain.embeddings.edenai import EdenAiEmbeddings
-from langchain.embeddings.elasticsearch import ElasticsearchEmbeddings
-from langchain.embeddings.embaas import EmbaasEmbeddings
-from langchain.embeddings.ernie import ErnieEmbeddings
-from langchain.embeddings.fake import DeterministicFakeEmbedding, FakeEmbeddings
-from langchain.embeddings.fastembed import FastEmbedEmbeddings
-from langchain.embeddings.google_palm import GooglePalmEmbeddings
-from langchain.embeddings.gpt4all import GPT4AllEmbeddings
-from langchain.embeddings.gradient_ai import GradientEmbeddings
-from langchain.embeddings.huggingface import (
+from langchain_community.embeddings.awa import AwaEmbeddings
+from langchain_community.embeddings.azure_openai import AzureOpenAIEmbeddings
+from langchain_community.embeddings.baidu_qianfan_endpoint import (
+    QianfanEmbeddingsEndpoint,
+)
+from langchain_community.embeddings.bedrock import BedrockEmbeddings
+from langchain_community.embeddings.bookend import BookendEmbeddings
+from langchain_community.embeddings.clarifai import ClarifaiEmbeddings
+from langchain_community.embeddings.cohere import CohereEmbeddings
+from langchain_community.embeddings.dashscope import DashScopeEmbeddings
+from langchain_community.embeddings.databricks import DatabricksEmbeddings
+from langchain_community.embeddings.deepinfra import DeepInfraEmbeddings
+from langchain_community.embeddings.edenai import EdenAiEmbeddings
+from langchain_community.embeddings.elasticsearch import ElasticsearchEmbeddings
+from langchain_community.embeddings.embaas import EmbaasEmbeddings
+from langchain_community.embeddings.ernie import ErnieEmbeddings
+from langchain_community.embeddings.fake import (
+    DeterministicFakeEmbedding,
+    FakeEmbeddings,
+)
+from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
+from langchain_community.embeddings.google_palm import GooglePalmEmbeddings
+from langchain_community.embeddings.gpt4all import GPT4AllEmbeddings
+from langchain_community.embeddings.gradient_ai import GradientEmbeddings
+from langchain_community.embeddings.huggingface import (
     HuggingFaceBgeEmbeddings,
     HuggingFaceEmbeddings,
     HuggingFaceInferenceAPIEmbeddings,
     HuggingFaceInstructEmbeddings,
 )
-from langchain.embeddings.huggingface_hub import HuggingFaceHubEmbeddings
-from langchain.embeddings.infinity import InfinityEmbeddings
-from langchain.embeddings.javelin_ai_gateway import JavelinAIGatewayEmbeddings
-from langchain.embeddings.jina import JinaEmbeddings
-from langchain.embeddings.johnsnowlabs import JohnSnowLabsEmbeddings
-from langchain.embeddings.llamacpp import LlamaCppEmbeddings
-from langchain.embeddings.localai import LocalAIEmbeddings
-from langchain.embeddings.minimax import MiniMaxEmbeddings
-from langchain.embeddings.mlflow import MlflowEmbeddings
-from langchain.embeddings.mlflow_gateway import MlflowAIGatewayEmbeddings
-from langchain.embeddings.modelscope_hub import ModelScopeEmbeddings
-from langchain.embeddings.mosaicml import MosaicMLInstructorEmbeddings
-from langchain.embeddings.nlpcloud import NLPCloudEmbeddings
-from langchain.embeddings.octoai_embeddings import OctoAIEmbeddings
-from langchain.embeddings.ollama import OllamaEmbeddings
-from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.embeddings.sagemaker_endpoint import SagemakerEndpointEmbeddings
-from langchain.embeddings.self_hosted import SelfHostedEmbeddings
-from langchain.embeddings.self_hosted_hugging_face import (
+from langchain_community.embeddings.huggingface_hub import HuggingFaceHubEmbeddings
+from langchain_community.embeddings.infinity import InfinityEmbeddings
+from langchain_community.embeddings.javelin_ai_gateway import JavelinAIGatewayEmbeddings
+from langchain_community.embeddings.jina import JinaEmbeddings
+from langchain_community.embeddings.johnsnowlabs import JohnSnowLabsEmbeddings
+from langchain_community.embeddings.llamacpp import LlamaCppEmbeddings
+from langchain_community.embeddings.localai import LocalAIEmbeddings
+from langchain_community.embeddings.minimax import MiniMaxEmbeddings
+from langchain_community.embeddings.mlflow import MlflowEmbeddings
+from langchain_community.embeddings.mlflow_gateway import MlflowAIGatewayEmbeddings
+from langchain_community.embeddings.modelscope_hub import ModelScopeEmbeddings
+from langchain_community.embeddings.mosaicml import MosaicMLInstructorEmbeddings
+from langchain_community.embeddings.nlpcloud import NLPCloudEmbeddings
+from langchain_community.embeddings.octoai_embeddings import OctoAIEmbeddings
+from langchain_community.embeddings.ollama import OllamaEmbeddings
+from langchain_community.embeddings.openai import OpenAIEmbeddings
+from langchain_community.embeddings.sagemaker_endpoint import (
+    SagemakerEndpointEmbeddings,
+)
+from langchain_community.embeddings.self_hosted import SelfHostedEmbeddings
+from langchain_community.embeddings.self_hosted_hugging_face import (
     SelfHostedHuggingFaceEmbeddings,
     SelfHostedHuggingFaceInstructEmbeddings,
 )
-from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
-from langchain.embeddings.spacy_embeddings import SpacyEmbeddings
-from langchain.embeddings.tensorflow_hub import TensorflowHubEmbeddings
-from langchain.embeddings.vertexai import VertexAIEmbeddings
-from langchain.embeddings.voyageai import VoyageEmbeddings
-from langchain.embeddings.xinference import XinferenceEmbeddings
+from langchain_community.embeddings.sentence_transformer import (
+    SentenceTransformerEmbeddings,
+)
+from langchain_community.embeddings.spacy_embeddings import SpacyEmbeddings
+from langchain_community.embeddings.tensorflow_hub import TensorflowHubEmbeddings
+from langchain_community.embeddings.vertexai import VertexAIEmbeddings
+from langchain_community.embeddings.voyageai import VoyageEmbeddings
+from langchain_community.embeddings.xinference import XinferenceEmbeddings
+
+from langchain.embeddings.cache import CacheBackedEmbeddings

 logger = logging.getLogger(__name__)
diff --git a/libs/langchain/langchain/embeddings/cache.py b/libs/langchain/langchain/embeddings/cache.py
index e5781296181..382827991ff 100644
--- a/libs/langchain/langchain/embeddings/cache.py
+++ b/libs/langchain/langchain/embeddings/cache.py
@@ -62,8 +62,9 @@ class CacheBackedEmbeddings(Embeddings):
         .. code-block: python

-            from langchain.embeddings import CacheBackedEmbeddings, OpenAIEmbeddings
+            from langchain.embeddings import CacheBackedEmbeddings
             from langchain.storage import LocalFileStore
+            from langchain_community.embeddings import OpenAIEmbeddings

             store = LocalFileStore('./my_cache')
diff --git a/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py b/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py
index 18ecdd07d2f..532d67b0a71 100644
--- a/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py
+++ b/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py
@@ -109,7 +109,7 @@ class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain):
         .. code-block:: python

             from langchain.agents import AgentType, initialize_agent
-            from langchain.chat_models import ChatOpenAI
+            from langchain_community.chat_models import ChatOpenAI
             from langchain.evaluation import TrajectoryEvalChain
             from langchain.tools import tool
diff --git a/libs/langchain/langchain/evaluation/comparison/__init__.py b/libs/langchain/langchain/evaluation/comparison/__init__.py
index b98eb40ef94..6a8a62814ab 100644
--- a/libs/langchain/langchain/evaluation/comparison/__init__.py
+++ b/libs/langchain/langchain/evaluation/comparison/__init__.py
@@ -6,7 +6,7 @@ preferences, measuring similarity / semantic equivalence between outputs,
 or any other comparison task.

 Example:
-    >>> from langchain.chat_models import ChatOpenAI
+    >>> from langchain_community.chat_models import ChatOpenAI
     >>> from langchain.evaluation.comparison import PairwiseStringEvalChain
     >>> llm = ChatOpenAI(temperature=0)
     >>> chain = PairwiseStringEvalChain.from_llm(llm=llm)
diff --git a/libs/langchain/langchain/evaluation/comparison/eval_chain.py b/libs/langchain/langchain/evaluation/comparison/eval_chain.py
index 7c3d02ec50e..decde5cc45f 100644
--- a/libs/langchain/langchain/evaluation/comparison/eval_chain.py
+++ b/libs/langchain/langchain/evaluation/comparison/eval_chain.py
@@ -5,6 +5,8 @@ import logging
 import re
 from typing import Any, Dict, List, Optional, Union

+from langchain_community.chat_models.azure_openai import AzureChatOpenAI
+from langchain_community.chat_models.openai import ChatOpenAI
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.prompts.prompt import PromptTemplate
@@ -13,8 +15,6 @@ from langchain_core.pydantic_v1 import Extra, Field
 from langchain.callbacks.manager import Callbacks
 from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
 from langchain.chains.llm import LLMChain
-from langchain.chat_models.azure_openai import AzureChatOpenAI
-from langchain.chat_models.openai import ChatOpenAI
 from langchain.evaluation.comparison.prompt import (
     COMPARISON_TEMPLATE,
     COMPARISON_TEMPLATE_WITH_REFERENCE,
@@ -160,7 +160,7 @@ class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain):
         output_parser (BaseOutputParser): The output parser for the chain.

     Example:
-        >>> from langchain.chat_models import ChatOpenAI
+        >>> from langchain_community.chat_models import ChatOpenAI
         >>> from langchain.evaluation.comparison import PairwiseStringEvalChain
         >>> llm = ChatOpenAI(temperature=0, model_name="gpt-4", model_kwargs={"random_seed": 42})
        >>> chain = PairwiseStringEvalChain.from_llm(llm=llm)
diff --git a/libs/langchain/langchain/evaluation/criteria/__init__.py b/libs/langchain/langchain/evaluation/criteria/__init__.py
index c9cf0277742..6440c8c0eac 100644
--- a/libs/langchain/langchain/evaluation/criteria/__init__.py
+++ b/libs/langchain/langchain/evaluation/criteria/__init__.py
@@ -12,7 +12,7 @@ chain against specified criteria.
 Examples
 --------
 Using a predefined criterion:
->>> from langchain.llms import OpenAI
+>>> from langchain_community.llms import OpenAI
 >>> from langchain.evaluation.criteria import CriteriaEvalChain

 >>> llm = OpenAI()
@@ -26,7 +26,7 @@ Using a predefined criterion:

 Using a custom criterion:

->>> from langchain.llms import OpenAI
+>>> from langchain_community.llms import OpenAI
 >>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain

 >>> llm = OpenAI()
diff --git a/libs/langchain/langchain/evaluation/criteria/eval_chain.py b/libs/langchain/langchain/evaluation/criteria/eval_chain.py
index ab167477c45..066fb540396 100644
--- a/libs/langchain/langchain/evaluation/criteria/eval_chain.py
+++ b/libs/langchain/langchain/evaluation/criteria/eval_chain.py
@@ -193,7 +193,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
     Examples
     --------
-    >>> from langchain.chat_models import ChatAnthropic
+    >>> from langchain_community.chat_models import ChatAnthropic
     >>> from langchain.evaluation.criteria import CriteriaEvalChain
     >>> llm = ChatAnthropic(temperature=0)
     >>> criteria = {"my-custom-criterion": "Is the submission the most amazing ever?"}
@@ -205,7 +205,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
         'score': 0,
     }

-    >>> from langchain.chat_models import ChatOpenAI
+    >>> from langchain_community.chat_models import ChatOpenAI
     >>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
     >>> llm = ChatOpenAI(model="gpt-4", temperature=0)
     >>> criteria = "correctness"
@@ -344,7 +344,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
         Examples
         --------
-        >>> from langchain.llms import OpenAI
+        >>> from langchain_community.llms import OpenAI
         >>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
         >>> llm = OpenAI()
         >>> criteria = {
@@ -432,7 +432,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
         Examples
         --------
-        >>> from langchain.llms import OpenAI
+        >>> from langchain_community.llms import OpenAI
         >>> from langchain.evaluation.criteria import CriteriaEvalChain
         >>> llm = OpenAI()
         >>> criteria = "conciseness"
@@ -487,7 +487,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
         Examples
         --------
-        >>> from langchain.llms import OpenAI
+        >>> from langchain_community.llms import OpenAI
         >>> from langchain.evaluation.criteria import CriteriaEvalChain
         >>> llm = OpenAI()
         >>> criteria = "conciseness"
@@ -568,7 +568,7 @@ class LabeledCriteriaEvalChain(CriteriaEvalChain):
         Examples
         --------
-        >>> from langchain.llms import OpenAI
+        >>> from langchain_community.llms import OpenAI
         >>> from langchain.evaluation.criteria import LabeledCriteriaEvalChain
         >>> llm = OpenAI()
         >>> criteria = {
diff --git a/libs/langchain/langchain/evaluation/embedding_distance/base.py b/libs/langchain/langchain/evaluation/embedding_distance/base.py
index 84340ed1b11..2010d1d5e27 100644
--- a/libs/langchain/langchain/evaluation/embedding_distance/base.py
+++ b/libs/langchain/langchain/evaluation/embedding_distance/base.py
@@ -3,6 +3,7 @@ from enum import Enum
 from typing import Any, Dict, List, Optional

 import numpy as np
+from langchain_community.embeddings.openai import OpenAIEmbeddings
 from langchain_core.embeddings import Embeddings
 from langchain_core.pydantic_v1 import Field, root_validator

@@ -12,7 +13,6 @@ from langchain.callbacks.manager import (
     Callbacks,
 )
 from langchain.chains.base import Chain
-from langchain.embeddings.openai import OpenAIEmbeddings
 from langchain.evaluation.schema import PairwiseStringEvaluator, StringEvaluator
 from langchain.schema import RUN_KEY
 from langchain.utils.math import cosine_similarity
diff --git a/libs/langchain/langchain/evaluation/loading.py b/libs/langchain/langchain/evaluation/loading.py
index 54e041dc19a..4f9c3757d74 100644
--- a/libs/langchain/langchain/evaluation/loading.py
+++ b/libs/langchain/langchain/evaluation/loading.py
@@ -1,10 +1,10 @@
 """Loading datasets and evaluators."""
 from typing import Any, Dict, List, Optional, Sequence, Type, Union

+from langchain_community.chat_models.openai import ChatOpenAI
 from langchain_core.language_models import BaseLanguageModel

 from langchain.chains.base import Chain
-from langchain.chat_models.openai import ChatOpenAI
 from langchain.evaluation.agents.trajectory_eval_chain import TrajectoryEvalChain
 from langchain.evaluation.comparison import PairwiseStringEvalChain
 from langchain.evaluation.comparison.eval_chain import LabeledPairwiseStringEvalChain
diff --git a/libs/langchain/langchain/evaluation/scoring/__init__.py b/libs/langchain/langchain/evaluation/scoring/__init__.py
index 13057e260c1..14de3b1d3e2 100644
--- a/libs/langchain/langchain/evaluation/scoring/__init__.py
+++ b/libs/langchain/langchain/evaluation/scoring/__init__.py
@@ -5,7 +5,7 @@ be they LLMs, Chains, or otherwise. This can be based on a variety of
 criteria and or a reference answer.
Example: - >>> from langchain.chat_models import ChatOpenAI + >>> from langchain_community.chat_models import ChatOpenAI >>> from langchain.evaluation.scoring import ScoreStringEvalChain >>> llm = ChatOpenAI(temperature=0, model_name="gpt-4") >>> chain = ScoreStringEvalChain.from_llm(llm=llm) diff --git a/libs/langchain/langchain/evaluation/scoring/eval_chain.py b/libs/langchain/langchain/evaluation/scoring/eval_chain.py index 0e9b2a85b22..63e9e37c2a4 100644 --- a/libs/langchain/langchain/evaluation/scoring/eval_chain.py +++ b/libs/langchain/langchain/evaluation/scoring/eval_chain.py @@ -5,6 +5,8 @@ import logging import re from typing import Any, Dict, List, Optional, Union +from langchain_community.chat_models.azure_openai import AzureChatOpenAI +from langchain_community.chat_models.openai import ChatOpenAI from langchain_core.language_models import BaseLanguageModel from langchain_core.output_parsers import BaseOutputParser from langchain_core.prompts.prompt import PromptTemplate @@ -13,8 +15,6 @@ from langchain_core.pydantic_v1 import Extra, Field from langchain.callbacks.manager import Callbacks from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple from langchain.chains.llm import LLMChain -from langchain.chat_models.azure_openai import AzureChatOpenAI -from langchain.chat_models.openai import ChatOpenAI from langchain.evaluation.criteria.eval_chain import ( CRITERIA_TYPE, Criteria, @@ -152,7 +152,7 @@ class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain): output_parser (BaseOutputParser): The output parser for the chain. Example: - >>> from langchain.chat_models import ChatOpenAI + >>> from langchain_community.chat_models import ChatOpenAI >>> from langchain.evaluation.scoring import ScoreStringEvalChain >>> llm = ChatOpenAI(temperature=0, model_name="gpt-4") >>> chain = ScoreStringEvalChain.from_llm(llm=llm) diff --git a/libs/langchain/langchain/indexes/vectorstore.py b/libs/langchain/langchain/indexes/vectorstore.py index 4937a44a109..c3e611601f5 100644 --- a/libs/langchain/langchain/indexes/vectorstore.py +++ b/libs/langchain/langchain/indexes/vectorstore.py @@ -1,5 +1,7 @@ from typing import Any, Dict, List, Optional, Type +from langchain_community.embeddings.openai import OpenAIEmbeddings +from langchain_community.llms.openai import OpenAI from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.language_models import BaseLanguageModel @@ -9,8 +11,6 @@ from langchain_core.vectorstores import VectorStore from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain from langchain.chains.retrieval_qa.base import RetrievalQA from langchain.document_loaders.base import BaseLoader -from langchain.embeddings.openai import OpenAIEmbeddings -from langchain.llms.openai import OpenAI from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter from langchain.vectorstores.chroma import Chroma diff --git a/libs/langchain/langchain/llms/__init__.py b/libs/langchain/langchain/llms/__init__.py index 60935d2b796..729198aaa05 100644 --- a/libs/langchain/langchain/llms/__init__.py +++ b/libs/langchain/langchain/llms/__init__.py @@ -23,223 +23,223 @@ from langchain_core.language_models.llms import BaseLLM def _import_ai21() -> Any: - from langchain.llms.ai21 import AI21 + from langchain_community.llms.ai21 import AI21 return AI21 def _import_aleph_alpha() -> Any: - from langchain.llms.aleph_alpha import AlephAlpha + from langchain_community.llms.aleph_alpha 
import AlephAlpha return AlephAlpha def _import_amazon_api_gateway() -> Any: - from langchain.llms.amazon_api_gateway import AmazonAPIGateway + from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway return AmazonAPIGateway def _import_anthropic() -> Any: - from langchain.llms.anthropic import Anthropic + from langchain_community.llms.anthropic import Anthropic return Anthropic def _import_anyscale() -> Any: - from langchain.llms.anyscale import Anyscale + from langchain_community.llms.anyscale import Anyscale return Anyscale def _import_arcee() -> Any: - from langchain.llms.arcee import Arcee + from langchain_community.llms.arcee import Arcee return Arcee def _import_aviary() -> Any: - from langchain.llms.aviary import Aviary + from langchain_community.llms.aviary import Aviary return Aviary def _import_azureml_endpoint() -> Any: - from langchain.llms.azureml_endpoint import AzureMLOnlineEndpoint + from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint return AzureMLOnlineEndpoint def _import_baidu_qianfan_endpoint() -> Any: - from langchain.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint + from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint return QianfanLLMEndpoint def _import_bananadev() -> Any: - from langchain.llms.bananadev import Banana + from langchain_community.llms.bananadev import Banana return Banana def _import_baseten() -> Any: - from langchain.llms.baseten import Baseten + from langchain_community.llms.baseten import Baseten return Baseten def _import_beam() -> Any: - from langchain.llms.beam import Beam + from langchain_community.llms.beam import Beam return Beam def _import_bedrock() -> Any: - from langchain.llms.bedrock import Bedrock + from langchain_community.llms.bedrock import Bedrock return Bedrock def _import_bittensor() -> Any: - from langchain.llms.bittensor import NIBittensorLLM + from langchain_community.llms.bittensor import NIBittensorLLM return NIBittensorLLM def _import_cerebriumai() -> Any: - from langchain.llms.cerebriumai import CerebriumAI + from langchain_community.llms.cerebriumai import CerebriumAI return CerebriumAI def _import_chatglm() -> Any: - from langchain.llms.chatglm import ChatGLM + from langchain_community.llms.chatglm import ChatGLM return ChatGLM def _import_clarifai() -> Any: - from langchain.llms.clarifai import Clarifai + from langchain_community.llms.clarifai import Clarifai return Clarifai def _import_cohere() -> Any: - from langchain.llms.cohere import Cohere + from langchain_community.llms.cohere import Cohere return Cohere def _import_ctransformers() -> Any: - from langchain.llms.ctransformers import CTransformers + from langchain_community.llms.ctransformers import CTransformers return CTransformers def _import_ctranslate2() -> Any: - from langchain.llms.ctranslate2 import CTranslate2 + from langchain_community.llms.ctranslate2 import CTranslate2 return CTranslate2 def _import_databricks() -> Any: - from langchain.llms.databricks import Databricks + from langchain_community.llms.databricks import Databricks return Databricks def _import_databricks_chat() -> Any: - from langchain.chat_models.databricks import ChatDatabricks + from langchain_community.chat_models.databricks import ChatDatabricks return ChatDatabricks def _import_deepinfra() -> Any: - from langchain.llms.deepinfra import DeepInfra + from langchain_community.llms.deepinfra import DeepInfra return DeepInfra def _import_deepsparse() -> Any: - from langchain.llms.deepsparse import DeepSparse + from 
langchain_community.llms.deepsparse import DeepSparse return DeepSparse def _import_edenai() -> Any: - from langchain.llms.edenai import EdenAI + from langchain_community.llms.edenai import EdenAI return EdenAI def _import_fake() -> Any: - from langchain.llms.fake import FakeListLLM + from langchain_community.llms.fake import FakeListLLM return FakeListLLM def _import_fireworks() -> Any: - from langchain.llms.fireworks import Fireworks + from langchain_community.llms.fireworks import Fireworks return Fireworks def _import_forefrontai() -> Any: - from langchain.llms.forefrontai import ForefrontAI + from langchain_community.llms.forefrontai import ForefrontAI return ForefrontAI def _import_gigachat() -> Any: - from langchain.llms.gigachat import GigaChat + from langchain_community.llms.gigachat import GigaChat return GigaChat def _import_google_palm() -> Any: - from langchain.llms.google_palm import GooglePalm + from langchain_community.llms.google_palm import GooglePalm return GooglePalm def _import_gooseai() -> Any: - from langchain.llms.gooseai import GooseAI + from langchain_community.llms.gooseai import GooseAI return GooseAI def _import_gpt4all() -> Any: - from langchain.llms.gpt4all import GPT4All + from langchain_community.llms.gpt4all import GPT4All return GPT4All def _import_gradient_ai() -> Any: - from langchain.llms.gradient_ai import GradientLLM + from langchain_community.llms.gradient_ai import GradientLLM return GradientLLM def _import_huggingface_endpoint() -> Any: - from langchain.llms.huggingface_endpoint import HuggingFaceEndpoint + from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint return HuggingFaceEndpoint def _import_huggingface_hub() -> Any: - from langchain.llms.huggingface_hub import HuggingFaceHub + from langchain_community.llms.huggingface_hub import HuggingFaceHub return HuggingFaceHub def _import_huggingface_pipeline() -> Any: - from langchain.llms.huggingface_pipeline import HuggingFacePipeline + from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline return HuggingFacePipeline def _import_huggingface_text_gen_inference() -> Any: - from langchain.llms.huggingface_text_gen_inference import ( + from langchain_community.llms.huggingface_text_gen_inference import ( HuggingFaceTextGenInference, ) @@ -247,289 +247,291 @@ def _import_huggingface_text_gen_inference() -> Any: def _import_human() -> Any: - from langchain.llms.human import HumanInputLLM + from langchain_community.llms.human import HumanInputLLM return HumanInputLLM def _import_javelin_ai_gateway() -> Any: - from langchain.llms.javelin_ai_gateway import JavelinAIGateway + from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway return JavelinAIGateway def _import_koboldai() -> Any: - from langchain.llms.koboldai import KoboldApiLLM + from langchain_community.llms.koboldai import KoboldApiLLM return KoboldApiLLM def _import_llamacpp() -> Any: - from langchain.llms.llamacpp import LlamaCpp + from langchain_community.llms.llamacpp import LlamaCpp return LlamaCpp def _import_manifest() -> Any: - from langchain.llms.manifest import ManifestWrapper + from langchain_community.llms.manifest import ManifestWrapper return ManifestWrapper def _import_minimax() -> Any: - from langchain.llms.minimax import Minimax + from langchain_community.llms.minimax import Minimax return Minimax def _import_mlflow() -> Any: - from langchain.llms.mlflow import Mlflow + from langchain_community.llms.mlflow import Mlflow return Mlflow def _import_mlflow_chat() -> Any: - 
from langchain.chat_models.mlflow import ChatMlflow + from langchain_community.chat_models.mlflow import ChatMlflow return ChatMlflow def _import_mlflow_ai_gateway() -> Any: - from langchain.llms.mlflow_ai_gateway import MlflowAIGateway + from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway return MlflowAIGateway def _import_modal() -> Any: - from langchain.llms.modal import Modal + from langchain_community.llms.modal import Modal return Modal def _import_mosaicml() -> Any: - from langchain.llms.mosaicml import MosaicML + from langchain_community.llms.mosaicml import MosaicML return MosaicML def _import_nlpcloud() -> Any: - from langchain.llms.nlpcloud import NLPCloud + from langchain_community.llms.nlpcloud import NLPCloud return NLPCloud def _import_octoai_endpoint() -> Any: - from langchain.llms.octoai_endpoint import OctoAIEndpoint + from langchain_community.llms.octoai_endpoint import OctoAIEndpoint return OctoAIEndpoint def _import_ollama() -> Any: - from langchain.llms.ollama import Ollama + from langchain_community.llms.ollama import Ollama return Ollama def _import_opaqueprompts() -> Any: - from langchain.llms.opaqueprompts import OpaquePrompts + from langchain_community.llms.opaqueprompts import OpaquePrompts return OpaquePrompts def _import_azure_openai() -> Any: - from langchain.llms.openai import AzureOpenAI + from langchain_community.llms.openai import AzureOpenAI return AzureOpenAI def _import_openai() -> Any: - from langchain.llms.openai import OpenAI + from langchain_community.llms.openai import OpenAI return OpenAI def _import_openai_chat() -> Any: - from langchain.llms.openai import OpenAIChat + from langchain_community.llms.openai import OpenAIChat return OpenAIChat def _import_openllm() -> Any: - from langchain.llms.openllm import OpenLLM + from langchain_community.llms.openllm import OpenLLM return OpenLLM def _import_openlm() -> Any: - from langchain.llms.openlm import OpenLM + from langchain_community.llms.openlm import OpenLM return OpenLM def _import_pai_eas_endpoint() -> Any: - from langchain.llms.pai_eas_endpoint import PaiEasEndpoint + from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint return PaiEasEndpoint def _import_petals() -> Any: - from langchain.llms.petals import Petals + from langchain_community.llms.petals import Petals return Petals def _import_pipelineai() -> Any: - from langchain.llms.pipelineai import PipelineAI + from langchain_community.llms.pipelineai import PipelineAI return PipelineAI def _import_predibase() -> Any: - from langchain.llms.predibase import Predibase + from langchain_community.llms.predibase import Predibase return Predibase def _import_predictionguard() -> Any: - from langchain.llms.predictionguard import PredictionGuard + from langchain_community.llms.predictionguard import PredictionGuard return PredictionGuard def _import_promptlayer() -> Any: - from langchain.llms.promptlayer_openai import PromptLayerOpenAI + from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI return PromptLayerOpenAI def _import_promptlayer_chat() -> Any: - from langchain.llms.promptlayer_openai import PromptLayerOpenAIChat + from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat return PromptLayerOpenAIChat def _import_replicate() -> Any: - from langchain.llms.replicate import Replicate + from langchain_community.llms.replicate import Replicate return Replicate def _import_rwkv() -> Any: - from langchain.llms.rwkv import RWKV + from langchain_community.llms.rwkv import RWKV 
    return RWKV
def _import_sagemaker_endpoint() -> Any:
-    from langchain.llms.sagemaker_endpoint import SagemakerEndpoint
+    from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint
    return SagemakerEndpoint
def _import_self_hosted() -> Any:
-    from langchain.llms.self_hosted import SelfHostedPipeline
+    from langchain_community.llms.self_hosted import SelfHostedPipeline
    return SelfHostedPipeline
def _import_self_hosted_hugging_face() -> Any:
-    from langchain.llms.self_hosted_hugging_face import SelfHostedHuggingFaceLLM
+    from langchain_community.llms.self_hosted_hugging_face import (
+        SelfHostedHuggingFaceLLM,
+    )
    return SelfHostedHuggingFaceLLM
def _import_stochasticai() -> Any:
-    from langchain.llms.stochasticai import StochasticAI
+    from langchain_community.llms.stochasticai import StochasticAI
    return StochasticAI
def _import_symblai_nebula() -> Any:
-    from langchain.llms.symblai_nebula import Nebula
+    from langchain_community.llms.symblai_nebula import Nebula
    return Nebula
def _import_textgen() -> Any:
-    from langchain.llms.textgen import TextGen
+    from langchain_community.llms.textgen import TextGen
    return TextGen
def _import_titan_takeoff() -> Any:
-    from langchain.llms.titan_takeoff import TitanTakeoff
+    from langchain_community.llms.titan_takeoff import TitanTakeoff
    return TitanTakeoff
def _import_titan_takeoff_pro() -> Any:
-    from langchain.llms.titan_takeoff_pro import TitanTakeoffPro
+    from langchain_community.llms.titan_takeoff_pro import TitanTakeoffPro
    return TitanTakeoffPro
def _import_together() -> Any:
-    from langchain.llms.together import Together
+    from langchain_community.llms.together import Together
    return Together
def _import_tongyi() -> Any:
-    from langchain.llms.tongyi import Tongyi
+    from langchain_community.llms.tongyi import Tongyi
    return Tongyi
def _import_vertex() -> Any:
-    from langchain.llms.vertexai import VertexAI
+    from langchain_community.llms.vertexai import VertexAI
    return VertexAI
def _import_vertex_model_garden() -> Any:
-    from langchain.llms.vertexai import VertexAIModelGarden
+    from langchain_community.llms.vertexai import VertexAIModelGarden
    return VertexAIModelGarden
def _import_vllm() -> Any:
-    from langchain.llms.vllm import VLLM
+    from langchain_community.llms.vllm import VLLM
    return VLLM
def _import_vllm_openai() -> Any:
-    from langchain.llms.vllm import VLLMOpenAI
+    from langchain_community.llms.vllm import VLLMOpenAI
    return VLLMOpenAI
def _import_watsonxllm() -> Any:
-    from langchain.llms.watsonxllm import WatsonxLLM
+    from langchain_community.llms.watsonxllm import WatsonxLLM
    return WatsonxLLM
def _import_writer() -> Any:
-    from langchain.llms.writer import Writer
+    from langchain_community.llms.writer import Writer
    return Writer
def _import_xinference() -> Any:
-    from langchain.llms.xinference import Xinference
+    from langchain_community.llms.xinference import Xinference
    return Xinference
def _import_yandex_gpt() -> Any:
-    from langchain.llms.yandex import YandexGPT
+    from langchain_community.llms.yandex import YandexGPT
    return YandexGPT
def _import_volcengine_maas() -> Any:
-    from langchain.llms.volcengine_maas import VolcEngineMaasLLM
+    from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM
    return VolcEngineMaasLLM
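Note: every _import_* shim above has the same shape. langchain.llms keeps all of its public names importable while the implementations move to langchain_community, resolved lazily on first attribute access. A minimal sketch of the pattern, assuming a dict-based lookup (the real __init__.py wires up all of the names above and its exact dispatch code may differ):

    # Simplified sketch of the lazy re-export pattern; not the full mapping.
    from typing import Any

    def _import_openai() -> Any:
        # Deferred so langchain_community is only imported when actually needed.
        from langchain_community.llms.openai import OpenAI

        return OpenAI

    _lookup = {"OpenAI": _import_openai}

    def __getattr__(name: str) -> Any:
        # PEP 562 module-level __getattr__: `from langchain.llms import OpenAI`
        # keeps working even though the class now lives in langchain_community.
        if name in _lookup:
            return _lookup[name]()
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")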
diff --git a/libs/langchain/langchain/retrievers/multi_query.py b/libs/langchain/langchain/retrievers/multi_query.py
index f30cc0266f8..e3160bb752a 100644
--- a/libs/langchain/langchain/retrievers/multi_query.py
+++ b/libs/langchain/langchain/retrievers/multi_query.py
@@ -3,6 +3,7 @@ import logging
from typing import List, Sequence
from langchain_core.documents import Document
+from langchain_core.language_models import BaseLLM
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.retrievers import BaseRetriever
@@ -12,7 +13,6 @@ from langchain.callbacks.manager import (
    CallbackManagerForRetrieverRun,
)
from langchain.chains.llm import LLMChain
-from langchain.llms.base import BaseLLM
from langchain.output_parsers.pydantic import PydanticOutputParser
logger = logging.getLogger(__name__)
diff --git a/libs/langchain/langchain/retrievers/parent_document_retriever.py b/libs/langchain/langchain/retrievers/parent_document_retriever.py
index d26f4b9685b..86a5f98b2b6 100644
--- a/libs/langchain/langchain/retrievers/parent_document_retriever.py
+++ b/libs/langchain/langchain/retrievers/parent_document_retriever.py
@@ -33,7 +33,7 @@ class ParentDocumentRetriever(MultiVectorRetriever):
    # Imports
    from langchain.vectorstores import Chroma
-    from langchain.embeddings import OpenAIEmbeddings
+    from langchain_community.embeddings import OpenAIEmbeddings
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain.storage import InMemoryStore
diff --git a/libs/langchain/langchain/retrievers/re_phraser.py b/libs/langchain/langchain/retrievers/re_phraser.py
index a986da9be1f..e4611b202f4 100644
--- a/libs/langchain/langchain/retrievers/re_phraser.py
+++ b/libs/langchain/langchain/retrievers/re_phraser.py
@@ -2,6 +2,7 @@ import logging
from typing import List
from langchain_core.documents import Document
+from langchain_core.language_models import BaseLLM
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.retrievers import BaseRetriever
@@ -10,7 +11,6 @@ from langchain.callbacks.manager import (
    CallbackManagerForRetrieverRun,
)
from langchain.chains.llm import LLMChain
-from langchain.llms.base import BaseLLM
logger = logging.getLogger(__name__)
diff --git a/libs/langchain/langchain/retrievers/web_research.py b/libs/langchain/langchain/retrievers/web_research.py
index 74bd7f370c8..9af87e91b6e 100644
--- a/libs/langchain/langchain/retrievers/web_research.py
+++ b/libs/langchain/langchain/retrievers/web_research.py
@@ -2,7 +2,9 @@ import logging
import re
from typing import List, Optional
+from langchain_community.llms import LlamaCpp
from langchain_core.documents import Document
+from langchain_core.language_models import BaseLLM
from langchain_core.prompts import BasePromptTemplate, PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.retrievers import BaseRetriever
@@ -16,8 +18,6 @@ from langchain.chains import LLMChain
from langchain.chains.prompt_selector import ConditionalPromptSelector
from langchain.document_loaders import AsyncHtmlLoader
from langchain.document_transformers import Html2TextTransformer
-from langchain.llms import LlamaCpp
-from langchain.llms.base import BaseLLM
from langchain.output_parsers.pydantic import PydanticOutputParser
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
from langchain.utilities import GoogleSearchAPIWrapper
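The retriever modules above now type against BaseLLM from langchain_core, but calling code is unchanged. A small usage sketch for MultiQueryRetriever with the post-move imports (assumes OpenAI credentials are configured; the corpus is a toy placeholder):

    from langchain.retrievers.multi_query import MultiQueryRetriever
    from langchain.vectorstores import Chroma
    from langchain_community.embeddings import OpenAIEmbeddings
    from langchain_community.llms import OpenAI

    # Tiny in-memory corpus; real use would load and split documents instead.
    vectorstore = Chroma.from_texts(["harrison worked at kensho"], OpenAIEmbeddings())
    retriever = MultiQueryRetriever.from_llm(
        retriever=vectorstore.as_retriever(), llm=OpenAI(temperature=0)
    )
    # docs = retriever.get_relevant_documents("Where did harrison work?")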
diff --git a/libs/langchain/langchain/smith/__init__.py b/libs/langchain/langchain/smith/__init__.py
index 8efa22c6984..c32dfc024e0 100644
--- a/libs/langchain/langchain/smith/__init__.py
+++ b/libs/langchain/langchain/smith/__init__.py
@@ -10,7 +10,7 @@ An example of this is shown below, assuming you've created a LangSmith dataset c
    .. code-block:: python
        from langsmith import Client
-        from langchain.chat_models import ChatOpenAI
+        from langchain_community.chat_models import ChatOpenAI
        from langchain.chains import LLMChain
        from langchain.smith import RunEvalConfig, run_on_dataset
diff --git a/libs/langchain/langchain/smith/evaluation/__init__.py b/libs/langchain/langchain/smith/evaluation/__init__.py
index 3bbfa649bb3..7d2c172dfb9 100644
--- a/libs/langchain/langchain/smith/evaluation/__init__.py
+++ b/libs/langchain/langchain/smith/evaluation/__init__.py
@@ -10,7 +10,7 @@ For more information on the LangSmith API, see the `LangSmith API documentation
    .. code-block:: python
        from langsmith import Client
-        from langchain.chat_models import ChatOpenAI
+        from langchain_community.chat_models import ChatOpenAI
        from langchain.chains import LLMChain
        from langchain.smith import EvaluatorType, RunEvalConfig, run_on_dataset
diff --git a/libs/langchain/langchain/smith/evaluation/runner_utils.py b/libs/langchain/langchain/smith/evaluation/runner_utils.py
index a2cbd535c3d..5665574b1f4 100644
--- a/libs/langchain/langchain/smith/evaluation/runner_utils.py
+++ b/libs/langchain/langchain/smith/evaluation/runner_utils.py
@@ -1302,7 +1302,7 @@ Examples
.. code-block:: python
    from langsmith import Client
-    from langchain.chat_models import ChatOpenAI
+    from langchain_community.chat_models import ChatOpenAI
    from langchain.chains import LLMChain
    from langchain.smith import smith_eval.RunEvalConfig, run_on_dataset
diff --git a/libs/langchain/tests/integration_tests/agent/test_ainetwork_agent.py b/libs/langchain/tests/integration_tests/agent/test_ainetwork_agent.py
index 3ef29af510f..896da5a0a0d 100644
--- a/libs/langchain/tests/integration_tests/agent/test_ainetwork_agent.py
+++ b/libs/langchain/tests/integration_tests/agent/test_ainetwork_agent.py
@@ -8,11 +8,11 @@ from typing import Any
from urllib.error import HTTPError
import pytest
+from langchain_community.chat_models import ChatOpenAI
from langchain_community.tools.ainetwork.utils import authenticate
from langchain.agents import AgentType, initialize_agent
from langchain.agents.agent_toolkits.ainetwork.toolkit import AINetworkToolkit
-from langchain.chat_models import ChatOpenAI
class Match(Enum):
diff --git a/libs/langchain/tests/integration_tests/agent/test_powerbi_agent.py b/libs/langchain/tests/integration_tests/agent/test_powerbi_agent.py
index f59d4273efb..3e9d227cf81 100644
--- a/libs/langchain/tests/integration_tests/agent/test_powerbi_agent.py
+++ b/libs/langchain/tests/integration_tests/agent/test_powerbi_agent.py
@@ -1,7 +1,7 @@
import pytest
+from langchain_community.chat_models import ChatOpenAI
from langchain.agents.agent_toolkits import PowerBIToolkit, create_pbi_agent
-from langchain.chat_models import ChatOpenAI
from langchain.utilities.powerbi import PowerBIDataset
from langchain.utils import get_from_env
diff --git a/libs/langchain/tests/integration_tests/chains/test_dalle_agent.py b/libs/langchain/tests/integration_tests/chains/test_dalle_agent.py
index 246c2c2c6ee..943d8fcbe7e 100644
--- a/libs/langchain/tests/integration_tests/chains/test_dalle_agent.py
+++ b/libs/langchain/tests/integration_tests/chains/test_dalle_agent.py
@@ -1,6 +1,7 @@
"""Integration test for Dall-E image generator agent."""
+from langchain_community.llms import OpenAI
+
from langchain.agents import AgentType, initialize_agent, load_tools
-from langchain.llms import OpenAI
def test_call() -> None:
diff --git a/libs/langchain/tests/integration_tests/chains/test_graph_database.py b/libs/langchain/tests/integration_tests/chains/test_graph_database.py
index ae8ecdd4022..247a47ed995 100644
--- a/libs/langchain/tests/integration_tests/chains/test_graph_database.py
+++ b/libs/langchain/tests/integration_tests/chains/test_graph_database.py
@@ -1,10 +1,11 @@
"""Test Graph Database Chain."""
import os
+from langchain_community.llms.openai import OpenAI
+
from langchain.chains.graph_qa.cypher import GraphCypherQAChain
from langchain.chains.loading import load_chain
from langchain.graphs import Neo4jGraph
-from langchain.llms.openai import OpenAI
def test_connect_neo4j() -> None:
diff --git a/libs/langchain/tests/integration_tests/chains/test_graph_database_arangodb.py b/libs/langchain/tests/integration_tests/chains/test_graph_database_arangodb.py
index d6ce5075671..22fe43f4e08 100644
--- a/libs/langchain/tests/integration_tests/chains/test_graph_database_arangodb.py
+++ b/libs/langchain/tests/integration_tests/chains/test_graph_database_arangodb.py
@@ -1,10 +1,11 @@
"""Test Graph Database Chain."""
from typing import Any
+from langchain_community.llms.openai import OpenAI
+
from langchain.chains.graph_qa.arangodb import ArangoGraphQAChain
from langchain.graphs import ArangoGraph
from langchain.graphs.arangodb_graph import get_arangodb_client
-from langchain.llms.openai import OpenAI
def populate_arangodb_database(db: Any) -> None:
diff --git a/libs/langchain/tests/integration_tests/chains/test_graph_database_sparql.py b/libs/langchain/tests/integration_tests/chains/test_graph_database_sparql.py
index 5ac33ba4918..f6557194202 100644
--- a/libs/langchain/tests/integration_tests/chains/test_graph_database_sparql.py
+++ b/libs/langchain/tests/integration_tests/chains/test_graph_database_sparql.py
@@ -1,9 +1,10 @@
"""Test RDF/ SPARQL Graph Database Chain."""
import os
+from langchain_community.llms.openai import OpenAI
+
from langchain.chains.graph_qa.sparql import GraphSparqlQAChain
from langchain.graphs import RdfGraph
-from langchain.llms.openai import OpenAI
def test_connect_file_rdf() -> None:
diff --git a/libs/langchain/tests/integration_tests/chains/test_react.py b/libs/langchain/tests/integration_tests/chains/test_react.py
index 1415df67b05..1615872bac8 100644
--- a/libs/langchain/tests/integration_tests/chains/test_react.py
+++ b/libs/langchain/tests/integration_tests/chains/test_react.py
@@ -1,8 +1,9 @@
"""Integration test for self ask with search."""
+from langchain_community.llms.openai import OpenAI
+
from langchain.agents.react.base import ReActChain
from langchain.docstore.wikipedia import Wikipedia
-from langchain.llms.openai import OpenAI
def test_react() -> None:
diff --git a/libs/langchain/tests/integration_tests/chains/test_retrieval_qa.py b/libs/langchain/tests/integration_tests/chains/test_retrieval_qa.py
index 35aaffefaa6..23322f523a7 100644
--- a/libs/langchain/tests/integration_tests/chains/test_retrieval_qa.py
+++ b/libs/langchain/tests/integration_tests/chains/test_retrieval_qa.py
@@ -1,11 +1,12 @@
"""Test RetrievalQA functionality."""
from pathlib import Path
+from langchain_community.embeddings.openai import OpenAIEmbeddings
+from langchain_community.llms import OpenAI
+
from langchain.chains import RetrievalQA
from langchain.chains.loading import load_chain
from langchain.document_loaders import TextLoader
-from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
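For reference, the shape of the RetrievalQA flow that test_retrieval_qa.py exercises, with the community imports in place (a sketch; the file path and question are placeholders):

    from langchain.chains import RetrievalQA
    from langchain.document_loaders import TextLoader
    from langchain.text_splitter import CharacterTextSplitter
    from langchain.vectorstores import FAISS
    from langchain_community.embeddings.openai import OpenAIEmbeddings
    from langchain_community.llms import OpenAI

    # Load and split a document, index it, then ask questions over the index.
    docs = TextLoader("state_of_the_union.txt").load()  # placeholder input file
    splits = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(docs)
    qa = RetrievalQA.from_chain_type(
        llm=OpenAI(),
        retriever=FAISS.from_documents(splits, OpenAIEmbeddings()).as_retriever(),
    )
    # qa.run("What did the president say about the economy?")  # placeholder question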
diff --git a/libs/langchain/tests/integration_tests/chains/test_retrieval_qa_with_sources.py b/libs/langchain/tests/integration_tests/chains/test_retrieval_qa_with_sources.py
index 7ace5730ad5..d9f7f4ac4a0 100644
--- a/libs/langchain/tests/integration_tests/chains/test_retrieval_qa_with_sources.py
+++ b/libs/langchain/tests/integration_tests/chains/test_retrieval_qa_with_sources.py
@@ -1,9 +1,10 @@
"""Test RetrievalQA functionality."""
+from langchain_community.embeddings.openai import OpenAIEmbeddings
+from langchain_community.llms import OpenAI
+
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chains.loading import load_chain
from langchain.document_loaders import DirectoryLoader
-from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
diff --git a/libs/langchain/tests/integration_tests/chains/test_self_ask_with_search.py b/libs/langchain/tests/integration_tests/chains/test_self_ask_with_search.py
index f288f23dadf..126e85d8e44 100644
--- a/libs/langchain/tests/integration_tests/chains/test_self_ask_with_search.py
+++ b/libs/langchain/tests/integration_tests/chains/test_self_ask_with_search.py
@@ -1,6 +1,7 @@
"""Integration test for self ask with search."""
+from langchain_community.llms.openai import OpenAI
+
from langchain.agents.self_ask_with_search.base import SelfAskWithSearchChain
-from langchain.llms.openai import OpenAI
from langchain.utilities.searchapi import SearchApiAPIWrapper
diff --git a/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_base.py b/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_base.py
index b251a55ac16..3df24d40761 100644
--- a/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_base.py
+++ b/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_base.py
@@ -1,8 +1,8 @@
"""Integration test for compression pipelines."""
+from langchain_community.embeddings import OpenAIEmbeddings
from langchain_core.documents import Document
from langchain.document_transformers import EmbeddingsRedundantFilter
-from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.document_compressors import (
    DocumentCompressorPipeline,
    EmbeddingsFilter,
diff --git a/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_chain_extract.py b/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_chain_extract.py
index 67c6550109c..eb584162003 100644
--- a/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_chain_extract.py
+++ b/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_chain_extract.py
@@ -1,7 +1,7 @@
"""Integration test for LLMChainExtractor."""
+from langchain_community.chat_models import ChatOpenAI
from langchain_core.documents import Document
-from langchain.chat_models import ChatOpenAI
from langchain.retrievers.document_compressors import LLMChainExtractor
diff --git a/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_chain_filter.py b/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_chain_filter.py
index 8c9649b6b91..55249a879a1 100644
--- a/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_chain_filter.py
+++ b/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_chain_filter.py
@@ -1,7 +1,7 @@
"""Integration test for
llm-based relevant doc filtering.""" +from langchain_community.chat_models import ChatOpenAI from langchain_core.documents import Document -from langchain.chat_models import ChatOpenAI from langchain.retrievers.document_compressors import LLMChainFilter diff --git a/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_embeddings_filter.py b/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_embeddings_filter.py index 69e70e89a74..bf88058d6a2 100644 --- a/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_embeddings_filter.py +++ b/libs/langchain/tests/integration_tests/retrievers/document_compressors/test_embeddings_filter.py @@ -1,11 +1,11 @@ """Integration test for embedding-based relevant doc filtering.""" import numpy as np +from langchain_community.embeddings import OpenAIEmbeddings from langchain_core.documents import Document from langchain.document_transformers.embeddings_redundant_filter import ( _DocumentWithState, ) -from langchain.embeddings import OpenAIEmbeddings from langchain.retrievers.document_compressors import EmbeddingsFilter diff --git a/libs/langchain/tests/integration_tests/retrievers/test_contextual_compression.py b/libs/langchain/tests/integration_tests/retrievers/test_contextual_compression.py index df74e94cac0..769a63f541f 100644 --- a/libs/langchain/tests/integration_tests/retrievers/test_contextual_compression.py +++ b/libs/langchain/tests/integration_tests/retrievers/test_contextual_compression.py @@ -1,4 +1,5 @@ -from langchain.embeddings import OpenAIEmbeddings +from langchain_community.embeddings import OpenAIEmbeddings + from langchain.retrievers.contextual_compression import ContextualCompressionRetriever from langchain.retrievers.document_compressors import EmbeddingsFilter from langchain.vectorstores import FAISS diff --git a/libs/langchain/tests/integration_tests/retrievers/test_merger_retriever.py b/libs/langchain/tests/integration_tests/retrievers/test_merger_retriever.py index 1dab7507cd2..70a18a74a3a 100644 --- a/libs/langchain/tests/integration_tests/retrievers/test_merger_retriever.py +++ b/libs/langchain/tests/integration_tests/retrievers/test_merger_retriever.py @@ -1,4 +1,5 @@ -from langchain.embeddings import OpenAIEmbeddings +from langchain_community.embeddings import OpenAIEmbeddings + from langchain.retrievers.merger_retriever import MergerRetriever from langchain.vectorstores import Chroma diff --git a/libs/langchain/tests/integration_tests/smith/evaluation/test_runner_utils.py b/libs/langchain/tests/integration_tests/smith/evaluation/test_runner_utils.py index 0560d1cbe6a..a238c799dcf 100644 --- a/libs/langchain/tests/integration_tests/smith/evaluation/test_runner_utils.py +++ b/libs/langchain/tests/integration_tests/smith/evaluation/test_runner_utils.py @@ -2,15 +2,15 @@ from typing import Iterator, List from uuid import uuid4 import pytest +from langchain_community.chat_models import ChatOpenAI +from langchain_community.llms.openai import OpenAI from langchain_core.messages import BaseMessage, HumanMessage from langchain_core.prompts.chat import ChatPromptTemplate from langsmith import Client as Client from langsmith.schemas import DataType from langchain.chains.llm import LLMChain -from langchain.chat_models import ChatOpenAI from langchain.evaluation import EvaluatorType -from langchain.llms.openai import OpenAI from langchain.smith import RunEvalConfig, run_on_dataset from langchain.smith.evaluation import InputFormatError from 
langchain.smith.evaluation.runner_utils import arun_on_dataset diff --git a/libs/langchain/tests/integration_tests/test_document_transformers.py b/libs/langchain/tests/integration_tests/test_document_transformers.py index fb13d3c34ec..3b95a7a2219 100644 --- a/libs/langchain/tests/integration_tests/test_document_transformers.py +++ b/libs/langchain/tests/integration_tests/test_document_transformers.py @@ -1,4 +1,5 @@ """Integration test for embedding-based redundant doc filtering.""" +from langchain_community.embeddings import OpenAIEmbeddings from langchain_core.documents import Document from langchain.document_transformers.embeddings_redundant_filter import ( @@ -6,7 +7,6 @@ from langchain.document_transformers.embeddings_redundant_filter import ( EmbeddingsRedundantFilter, _DocumentWithState, ) -from langchain.embeddings import OpenAIEmbeddings def test_embeddings_redundant_filter() -> None: diff --git a/libs/langchain/tests/integration_tests/test_long_context_reorder.py b/libs/langchain/tests/integration_tests/test_long_context_reorder.py index 4a8a26cd0e4..98a02302502 100644 --- a/libs/langchain/tests/integration_tests/test_long_context_reorder.py +++ b/libs/langchain/tests/integration_tests/test_long_context_reorder.py @@ -1,6 +1,7 @@ """Integration test for doc reordering.""" +from langchain_community.embeddings import OpenAIEmbeddings + from langchain.document_transformers.long_context_reorder import LongContextReorder -from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import Chroma diff --git a/libs/langchain/tests/integration_tests/test_pdf_pagesplitter.py b/libs/langchain/tests/integration_tests/test_pdf_pagesplitter.py index f86c2f166a7..4cb6d73e0ec 100644 --- a/libs/langchain/tests/integration_tests/test_pdf_pagesplitter.py +++ b/libs/langchain/tests/integration_tests/test_pdf_pagesplitter.py @@ -1,8 +1,9 @@ """Test splitting with page numbers included.""" import os +from langchain_community.embeddings.openai import OpenAIEmbeddings + from langchain.document_loaders import PyPDFLoader -from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import FAISS diff --git a/libs/langchain/tests/unit_tests/agents/test_agent_iterator.py b/libs/langchain/tests/unit_tests/agents/test_agent_iterator.py index ca603710226..2b59cf8cd49 100644 --- a/libs/langchain/tests/unit_tests/agents/test_agent_iterator.py +++ b/libs/langchain/tests/unit_tests/agents/test_agent_iterator.py @@ -1,6 +1,7 @@ from uuid import UUID import pytest +from langchain_community.llms import FakeListLLM from langchain_core.tools import Tool from langchain.agents import ( @@ -9,7 +10,6 @@ from langchain.agents import ( AgentType, initialize_agent, ) -from langchain.llms import FakeListLLM from langchain.schema import RUN_KEY from tests.unit_tests.agents.test_agent import _get_agent from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler diff --git a/libs/langchain/tests/unit_tests/agents/test_react.py b/libs/langchain/tests/unit_tests/agents/test_react.py index 48ca1202e45..ce2ae69a1c1 100644 --- a/libs/langchain/tests/unit_tests/agents/test_react.py +++ b/libs/langchain/tests/unit_tests/agents/test_react.py @@ -2,6 +2,7 @@ from typing import Union +from langchain_community.llms.fake import FakeListLLM from langchain_core.agents import AgentAction from langchain_core.documents import Document from langchain_core.prompts.prompt import PromptTemplate @@ -9,7 +10,6 @@ from langchain_core.tools import Tool from langchain.agents.react.base import 
ReActChain, ReActDocstoreAgent
from langchain.docstore.base import Docstore
-from langchain.llms.fake import FakeListLLM
_PAGE_CONTENT = """This is a page about LangChain.
diff --git a/libs/langchain/tests/unit_tests/agents/test_serialization.py b/libs/langchain/tests/unit_tests/agents/test_serialization.py
index 5f10dca05b3..edba5475380 100644
--- a/libs/langchain/tests/unit_tests/agents/test_serialization.py
+++ b/libs/langchain/tests/unit_tests/agents/test_serialization.py
@@ -1,11 +1,11 @@
from pathlib import Path
from tempfile import TemporaryDirectory
+from langchain_community.llms.fake import FakeListLLM
from langchain_core.tools import Tool
from langchain.agents.agent_types import AgentType
from langchain.agents.initialize import initialize_agent, load_agent
-from langchain.llms.fake import FakeListLLM
def test_mrkl_serialization() -> None:
diff --git a/libs/langchain/tests/unit_tests/chains/test_conversation_retrieval.py b/libs/langchain/tests/unit_tests/chains/test_conversation_retrieval.py
index fbc61116579..e2e62e006a2 100644
--- a/libs/langchain/tests/unit_tests/chains/test_conversation_retrieval.py
+++ b/libs/langchain/tests/unit_tests/chains/test_conversation_retrieval.py
@@ -1,10 +1,10 @@
"""Test conversation chain and memory."""
+from langchain_community.llms.fake import FakeListLLM
from langchain_core.documents import Document
from langchain.chains.conversational_retrieval.base import (
    ConversationalRetrievalChain,
)
-from langchain.llms.fake import FakeListLLM
from langchain.memory.buffer import ConversationBufferMemory
from tests.unit_tests.retrievers.sequential_retriever import SequentialRetriever
diff --git a/libs/langchain/tests/unit_tests/chains/test_history_aware_retriever.py b/libs/langchain/tests/unit_tests/chains/test_history_aware_retriever.py
index ebb8a3686cb..46dd67e53ba 100644
--- a/libs/langchain/tests/unit_tests/chains/test_history_aware_retriever.py
+++ b/libs/langchain/tests/unit_tests/chains/test_history_aware_retriever.py
@@ -1,8 +1,8 @@
+from langchain_community.llms.fake import FakeListLLM
from langchain_core.documents import Document
from langchain_core.prompts import PromptTemplate
from langchain.chains import create_history_aware_retriever
-from langchain.llms.fake import FakeListLLM
from tests.unit_tests.retrievers.parrot_retriever import FakeParrotRetriever
diff --git a/libs/langchain/tests/unit_tests/chains/test_retrieval.py b/libs/langchain/tests/unit_tests/chains/test_retrieval.py
index 87f6fe40097..cffa439cf5c 100644
--- a/libs/langchain/tests/unit_tests/chains/test_retrieval.py
+++ b/libs/langchain/tests/unit_tests/chains/test_retrieval.py
@@ -1,9 +1,9 @@
"""Test conversation chain and memory."""
+from langchain_community.llms.fake import FakeListLLM
from langchain_core.documents import Document
from langchain_core.prompts.prompt import PromptTemplate
from langchain.chains import create_retrieval_chain
-from langchain.llms.fake import FakeListLLM
from tests.unit_tests.retrievers.parrot_retriever import FakeParrotRetriever
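These unit tests rely on FakeListLLM, which now lives in langchain_community. It simply replays canned responses, which keeps the tests deterministic and offline:

    from langchain_community.llms.fake import FakeListLLM

    llm = FakeListLLM(responses=["final answer"])
    assert llm.invoke("any prompt") == "final answer"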
diff --git a/libs/langchain/tests/unit_tests/evaluation/test_loading.py b/libs/langchain/tests/unit_tests/evaluation/test_loading.py
index 3009766ed89..907580fe0d4 100644
--- a/libs/langchain/tests/unit_tests/evaluation/test_loading.py
+++ b/libs/langchain/tests/unit_tests/evaluation/test_loading.py
@@ -2,8 +2,8 @@ from typing import List
import pytest
+from langchain_community.embeddings.fake import FakeEmbeddings
-from langchain.embeddings.fake import FakeEmbeddings
from langchain.evaluation.loading import EvaluatorType, load_evaluators
from langchain.evaluation.schema import PairwiseStringEvaluator, StringEvaluator
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
diff --git a/libs/langchain/tests/unit_tests/load/test_dump.py b/libs/langchain/tests/unit_tests/load/test_dump.py
index 0553eae7a1e..d428b04c71e 100644
--- a/libs/langchain/tests/unit_tests/load/test_dump.py
+++ b/libs/langchain/tests/unit_tests/load/test_dump.py
@@ -3,6 +3,8 @@ from typing import Any, Dict
import pytest
+from langchain_community.chat_models.openai import ChatOpenAI
+from langchain_community.llms.openai import OpenAI
from langchain_core.load.dump import dumps
from langchain_core.load.serializable import Serializable
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
@@ -10,8 +12,6 @@ from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.tracers.langchain import LangChainTracer
from langchain.chains.llm import LLMChain
-from langchain.chat_models.openai import ChatOpenAI
-from langchain.llms.openai import OpenAI
class Person(Serializable):
diff --git a/libs/langchain/tests/unit_tests/load/test_load.py b/libs/langchain/tests/unit_tests/load/test_load.py
index 34fcbae7443..7fb7eb455b6 100644
--- a/libs/langchain/tests/unit_tests/load/test_load.py
+++ b/libs/langchain/tests/unit_tests/load/test_load.py
@@ -1,12 +1,12 @@
"""Test for Serializable base class"""
import pytest
+from langchain_community.llms.openai import OpenAI
from langchain_core.load.dump import dumpd, dumps
from langchain_core.load.load import load, loads
from langchain_core.prompts.prompt import PromptTemplate
from langchain.chains.llm import LLMChain
-from langchain.llms.openai import OpenAI
class NotSerializable:
diff --git a/libs/langchain/tests/unit_tests/test_cache.py b/libs/langchain/tests/unit_tests/test_cache.py
index 20b024de9d5..88260a6f71c 100644
--- a/libs/langchain/tests/unit_tests/test_cache.py
+++ b/libs/langchain/tests/unit_tests/test_cache.py
@@ -3,6 +3,8 @@ from typing import Dict, Generator, List, Union
import pytest
from _pytest.fixtures import FixtureRequest
+from langchain_community.chat_models import FakeListChatModel
+from langchain_community.llms import FakeListLLM
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models.llms import BaseLLM
from langchain_core.load import dumps
@@ -15,9 +17,7 @@ from langchain.cache import (
    InMemoryCache,
    SQLAlchemyCache,
)
-from langchain.chat_models import FakeListChatModel
from langchain.globals import get_llm_cache, set_llm_cache
-from langchain.llms import FakeListLLM
def get_sqlite_cache() -> SQLAlchemyCache:
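test_cache.py above pairs the community fake models with the cache plumbing that stays in langchain. Roughly the pattern under test (a sketch; assumes the process-wide cache applies to the fake LLM as it does to real ones):

    from langchain.cache import InMemoryCache
    from langchain.globals import set_llm_cache
    from langchain_community.llms import FakeListLLM

    set_llm_cache(InMemoryCache())
    llm = FakeListLLM(responses=["cached answer"])
    first = llm.invoke("prompt")   # computed by the (fake) LLM, then stored
    second = llm.invoke("prompt")  # expected to be served from the cache
    assert first == second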
diff --git a/libs/langchain/tests/unit_tests/test_dependencies.py b/libs/langchain/tests/unit_tests/test_dependencies.py
index 872b01d6213..590faeb3d33 100644
--- a/libs/langchain/tests/unit_tests/test_dependencies.py
+++ b/libs/langchain/tests/unit_tests/test_dependencies.py
@@ -95,15 +95,15 @@ def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) -> None:
def test_imports() -> None:
    """Test that you can import all top level things okay."""
+    from langchain_community.chat_models import ChatOpenAI  # noqa: F401
+    from langchain_community.embeddings import OpenAIEmbeddings  # noqa: F401
+    from langchain_community.llms import OpenAI  # noqa: F401
    from langchain_core.prompts import BasePromptTemplate  # noqa: F401
    from langchain.agents import OpenAIFunctionsAgent  # noqa: F401
    from langchain.callbacks import OpenAICallbackHandler  # noqa: F401
    from langchain.chains import LLMChain  # noqa: F401
-    from langchain.chat_models import ChatOpenAI  # noqa: F401
    from langchain.document_loaders import BSHTMLLoader  # noqa: F401
-    from langchain.embeddings import OpenAIEmbeddings  # noqa: F401
-    from langchain.llms import OpenAI  # noqa: F401
    from langchain.retrievers import VespaRetriever  # noqa: F401
    from langchain.tools import DuckDuckGoSearchResults  # noqa: F401
    from langchain.utilities import (
diff --git a/templates/anthropic-iterative-search/anthropic_iterative_search/chain.py b/templates/anthropic-iterative-search/anthropic_iterative_search/chain.py
index 289abc1f882..2419a1214da 100644
--- a/templates/anthropic-iterative-search/anthropic_iterative_search/chain.py
+++ b/templates/anthropic-iterative-search/anthropic_iterative_search/chain.py
@@ -1,5 +1,5 @@
-from langchain.chat_models import ChatAnthropic
from langchain.prompts import ChatPromptTemplate
+from langchain_community.chat_models import ChatAnthropic
from langchain_core.output_parsers import StrOutputParser
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import ConfigurableField
diff --git a/templates/anthropic-iterative-search/anthropic_iterative_search/retriever_agent.py b/templates/anthropic-iterative-search/anthropic_iterative_search/retriever_agent.py
index 1a84a2024bd..c94ba158e2f 100644
--- a/templates/anthropic-iterative-search/anthropic_iterative_search/retriever_agent.py
+++ b/templates/anthropic-iterative-search/anthropic_iterative_search/retriever_agent.py
@@ -1,6 +1,6 @@
from langchain.agents import AgentExecutor
-from langchain.chat_models import ChatAnthropic
from langchain.prompts import ChatPromptTemplate
+from langchain_community.chat_models import ChatAnthropic
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/basic-critique-revise/basic_critique_revise/chain.py b/templates/basic-critique-revise/basic_critique_revise/chain.py
index ac70c89f919..665c2a8bb74 100644
--- a/templates/basic-critique-revise/basic_critique_revise/chain.py
+++ b/templates/basic-critique-revise/basic_critique_revise/chain.py
@@ -5,8 +5,8 @@ from operator import itemgetter
from typing import Any, Dict, Sequence
from langchain.chains.openai_functions import convert_to_openai_function
-from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
+from langchain_community.chat_models import ChatOpenAI
from langchain_core.pydantic_v1 import BaseModel, Field, ValidationError, conint
from langchain_core.runnables import (
    Runnable,
diff --git a/templates/bedrock-jcvd/bedrock_jcvd/chain.py b/templates/bedrock-jcvd/bedrock_jcvd/chain.py
index def23e55418..60ced40a453 100644
--- a/templates/bedrock-jcvd/bedrock_jcvd/chain.py
+++ b/templates/bedrock-jcvd/bedrock_jcvd/chain.py
@@ -1,7 +1,7 @@
import os
-from langchain.chat_models import BedrockChat
from langchain.prompts import ChatPromptTemplate
+from langchain_community.chat_models import BedrockChat
from langchain_core.runnables import ConfigurableField
# For a description of each inference parameter, see
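The template changes that follow are all the same mechanical move: chat models, LLMs, and embeddings now come from langchain_community, while prompts, output parsers, and runnables keep their existing homes. A representative LCEL chain after the move, including the ConfigurableField pattern bedrock-jcvd uses (a sketch; the model choice and prompt text are illustrative):

    from langchain.prompts import ChatPromptTemplate
    from langchain_community.chat_models import ChatOpenAI
    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.runnables import ConfigurableField

    # Expose temperature as a per-invocation configurable field.
    llm = ChatOpenAI(temperature=0).configurable_fields(
        temperature=ConfigurableField(id="llm_temperature", name="LLM temperature")
    )
    chain = (
        ChatPromptTemplate.from_template("Tell me a short fact about {topic}")
        | llm
        | StrOutputParser()
    )
    # chain.with_config(configurable={"llm_temperature": 0.9}).invoke({"topic": "bears"})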
diff --git a/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py b/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py
index 9e831ee9173..9751ff3705f 100644
--- a/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py
+++ b/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py
@@ -1,10 +1,10 @@
import os
import cassio
-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain.vectorstores import Cassandra
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
diff --git a/templates/cassandra-synonym-caching/cassandra_synonym_caching/__init__.py b/templates/cassandra-synonym-caching/cassandra_synonym_caching/__init__.py
index f38d81d7651..785bb188d02 100644
--- a/templates/cassandra-synonym-caching/cassandra_synonym_caching/__init__.py
+++ b/templates/cassandra-synonym-caching/cassandra_synonym_caching/__init__.py
@@ -3,9 +3,9 @@ import os
import cassio
import langchain
from langchain.cache import CassandraCache
-from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import BaseMessage
+from langchain_community.chat_models import ChatOpenAI
from langchain_core.runnables import RunnableLambda
use_cassandra = int(os.environ.get("USE_CASSANDRA_CLUSTER", "0"))
diff --git a/templates/chain-of-note-wiki/chain_of_note_wiki/chain.py b/templates/chain-of-note-wiki/chain_of_note_wiki/chain.py
index cd5b2477887..092a9433dde 100644
--- a/templates/chain-of-note-wiki/chain_of_note_wiki/chain.py
+++ b/templates/chain-of-note-wiki/chain_of_note_wiki/chain.py
@@ -1,7 +1,7 @@
from langchain import hub
-from langchain.chat_models import ChatAnthropic
from langchain.schema import StrOutputParser
from langchain.utilities import WikipediaAPIWrapper
+from langchain_community.chat_models import ChatAnthropic
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
diff --git a/templates/chat-bot-feedback/chat_bot_feedback/chain.py b/templates/chat-bot-feedback/chat_bot_feedback/chain.py
index 68eb75f4068..9c721864159 100644
--- a/templates/chat-bot-feedback/chat_bot_feedback/chain.py
+++ b/templates/chat-bot-feedback/chat_bot_feedback/chain.py
@@ -5,7 +5,6 @@ from typing import List, Optional
from langchain import hub
from langchain.callbacks.tracers.evaluation import EvaluatorCallbackHandler
from langchain.callbacks.tracers.schemas import Run
-from langchain.chat_models import ChatOpenAI
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.schema import (
@@ -15,6 +14,7 @@ from langchain.schema import (
    StrOutputParser,
    get_buffer_string,
)
+from langchain_community.chat_models import ChatOpenAI
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.runnables import Runnable
from langsmith.evaluation import EvaluationResult, RunEvaluator
diff --git a/templates/cohere-librarian/cohere_librarian/blurb_matcher.py b/templates/cohere-librarian/cohere_librarian/blurb_matcher.py
index f84a2755f07..55388aa86ab 100644
--- a/templates/cohere-librarian/cohere_librarian/blurb_matcher.py
+++ b/templates/cohere-librarian/cohere_librarian/blurb_matcher.py
@@ -1,9 +1,9 @@
import csv
from langchain.chains.question_answering import load_qa_chain
-from langchain.embeddings import CohereEmbeddings
from langchain.prompts import PromptTemplate
from langchain.vectorstores import Chroma
+from langchain_community.embeddings import CohereEmbeddings
from .chat import chat
diff --git
a/templates/cohere-librarian/cohere_librarian/chat.py b/templates/cohere-librarian/cohere_librarian/chat.py index f9460e07b2e..cbd37e2816a 100644 --- a/templates/cohere-librarian/cohere_librarian/chat.py +++ b/templates/cohere-librarian/cohere_librarian/chat.py @@ -1,3 +1,3 @@ -from langchain.llms import Cohere +from langchain_community.llms import Cohere chat = Cohere() diff --git a/templates/cohere-librarian/cohere_librarian/rag.py b/templates/cohere-librarian/cohere_librarian/rag.py index 096588b160c..d45099faf93 100644 --- a/templates/cohere-librarian/cohere_librarian/rag.py +++ b/templates/cohere-librarian/cohere_librarian/rag.py @@ -1,5 +1,5 @@ -from langchain.chat_models import ChatCohere from langchain.retrievers import CohereRagRetriever +from langchain_community.chat_models import ChatCohere rag = CohereRagRetriever(llm=ChatCohere()) diff --git a/templates/csv-agent/csv_agent/agent.py b/templates/csv-agent/csv_agent/agent.py index 4d50c148703..a3c1765a091 100644 --- a/templates/csv-agent/csv_agent/agent.py +++ b/templates/csv-agent/csv_agent/agent.py @@ -2,11 +2,11 @@ from pathlib import Path import pandas as pd from langchain.agents import AgentExecutor, OpenAIFunctionsAgent -from langchain.chat_models import ChatOpenAI -from langchain.embeddings import OpenAIEmbeddings from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain.tools.retriever import create_retriever_tool from langchain.vectorstores import FAISS +from langchain_community.chat_models import ChatOpenAI +from langchain_community.embeddings import OpenAIEmbeddings from langchain_core.pydantic_v1 import BaseModel, Field from langchain_experimental.tools import PythonAstREPLTool diff --git a/templates/elastic-query-generator/elastic_query_generator/chain.py b/templates/elastic-query-generator/elastic_query_generator/chain.py index 911c053e93c..701650bef68 100644 --- a/templates/elastic-query-generator/elastic_query_generator/chain.py +++ b/templates/elastic-query-generator/elastic_query_generator/chain.py @@ -1,6 +1,6 @@ from elasticsearch import Elasticsearch -from langchain.chat_models import ChatOpenAI from langchain.output_parsers.json import SimpleJsonOutputParser +from langchain_community.chat_models import ChatOpenAI from langchain_core.pydantic_v1 import BaseModel from .elastic_index_info import get_indices_infos diff --git a/templates/extraction-openai-functions/extraction_openai_functions/chain.py b/templates/extraction-openai-functions/extraction_openai_functions/chain.py index 9c54d7ef295..94fbd7f9601 100644 --- a/templates/extraction-openai-functions/extraction_openai_functions/chain.py +++ b/templates/extraction-openai-functions/extraction_openai_functions/chain.py @@ -1,9 +1,9 @@ import json from typing import List, Optional -from langchain.chat_models import ChatOpenAI from langchain.prompts import ChatPromptTemplate from langchain.utils.openai_functions import convert_pydantic_to_openai_function +from langchain_community.chat_models import ChatOpenAI from langchain_core.pydantic_v1 import BaseModel template = """A article will be passed to you. Extract from it all papers that are mentioned by this article. 
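The extraction template above builds an OpenAI function from a Pydantic schema and binds it to the chat model. A condensed sketch of that wiring with the new import (the Paper schema and prompt text here are illustrative, not the template's own):

    from langchain.prompts import ChatPromptTemplate
    from langchain.utils.openai_functions import convert_pydantic_to_openai_function
    from langchain_community.chat_models import ChatOpenAI
    from langchain_core.pydantic_v1 import BaseModel

    class Paper(BaseModel):
        """Information about a paper mentioned in an article."""

        title: str

    prompt = ChatPromptTemplate.from_messages(
        [("system", "Extract all papers mentioned in the article."), ("human", "{input}")]
    )
    # Force the model to call the extraction function on every run.
    model = ChatOpenAI(temperature=0).bind(
        functions=[convert_pydantic_to_openai_function(Paper)],
        function_call={"name": "Paper"},
    )
    chain = prompt | model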
diff --git a/templates/guardrails-output-parser/guardrails_output_parser/chain.py b/templates/guardrails-output-parser/guardrails_output_parser/chain.py
index e6c850eb2fd..9aa29ce7f39 100644
--- a/templates/guardrails-output-parser/guardrails_output_parser/chain.py
+++ b/templates/guardrails-output-parser/guardrails_output_parser/chain.py
@@ -1,6 +1,6 @@
-from langchain.llms import OpenAI
 from langchain.output_parsers import GuardrailsOutputParser
 from langchain.prompts import PromptTemplate
+from langchain_community.llms import OpenAI

 # Define rail string
diff --git a/templates/hybrid-search-weaviate/hybrid_search_weaviate/chain.py b/templates/hybrid-search-weaviate/hybrid_search_weaviate/chain.py
index 9c6480d37de..01a11d03d87 100644
--- a/templates/hybrid-search-weaviate/hybrid_search_weaviate/chain.py
+++ b/templates/hybrid-search-weaviate/hybrid_search_weaviate/chain.py
@@ -1,9 +1,9 @@
 import os

 import weaviate
-from langchain.chat_models import ChatOpenAI
 from langchain.prompts import ChatPromptTemplate
 from langchain.retrievers.weaviate_hybrid_search import WeaviateHybridSearchRetriever
+from langchain_community.chat_models import ChatOpenAI
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/hyde/hyde/chain.py b/templates/hyde/hyde/chain.py
index 24e7d37e38d..82123b84684 100644
--- a/templates/hyde/hyde/chain.py
+++ b/templates/hyde/hyde/chain.py
@@ -1,7 +1,7 @@
-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.vectorstores import Chroma
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel
diff --git a/templates/llama2-functions/llama2_functions/chain.py b/templates/llama2-functions/llama2_functions/chain.py
index b7ebc31bd0d..5550616b8fe 100644
--- a/templates/llama2-functions/llama2_functions/chain.py
+++ b/templates/llama2-functions/llama2_functions/chain.py
@@ -1,5 +1,5 @@
-from langchain.llms import Replicate
 from langchain.prompts import ChatPromptTemplate
+from langchain_community.llms import Replicate

 # LLM
 replicate_id = "andreasjansson/llama-2-13b-chat-gguf:60ec5dda9ff9ee0b6f786c9d1157842e6ab3cc931139ad98fe99e08a35c5d4d4"  # noqa: E501
diff --git a/templates/mongo-parent-document-retrieval/ingest.py b/templates/mongo-parent-document-retrieval/ingest.py
index 3a6c546aea6..43d21b06167 100644
--- a/templates/mongo-parent-document-retrieval/ingest.py
+++ b/templates/mongo-parent-document-retrieval/ingest.py
@@ -2,9 +2,9 @@ import os
 import uuid

 from langchain.document_loaders import PyPDFLoader
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores import MongoDBAtlasVectorSearch
+from langchain_community.embeddings import OpenAIEmbeddings
 from pymongo import MongoClient

 PARENT_DOC_ID_KEY = "parent_doc_id"
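The `llama2-functions` template is a reminder that hosted-model wrappers such as `Replicate` moved as well. A minimal sketch using the model id pinned above (requires `REPLICATE_API_TOKEN` in the environment):

```python
from langchain_community.llms import Replicate

# Same pinned model id as in llama2_functions/chain.py.
llm = Replicate(
    model="andreasjansson/llama-2-13b-chat-gguf:60ec5dda9ff9ee0b6f786c9d1157842e6ab3cc931139ad98fe99e08a35c5d4d4"
)
print(llm.invoke("Return a JSON object with a 'greeting' key."))
```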
diff --git a/templates/mongo-parent-document-retrieval/mongo_parent_document_retrieval/chain.py b/templates/mongo-parent-document-retrieval/mongo_parent_document_retrieval/chain.py
index 5e182144f44..44745743e20 100644
--- a/templates/mongo-parent-document-retrieval/mongo_parent_document_retrieval/chain.py
+++ b/templates/mongo-parent-document-retrieval/mongo_parent_document_retrieval/chain.py
@@ -1,9 +1,9 @@
 import os

-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.vectorstores import MongoDBAtlasVectorSearch
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.documents import Document
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
diff --git a/templates/neo4j-advanced-rag/ingest.py b/templates/neo4j-advanced-rag/ingest.py
index d19767acc24..83e1ece324d 100644
--- a/templates/neo4j-advanced-rag/ingest.py
+++ b/templates/neo4j-advanced-rag/ingest.py
@@ -2,12 +2,12 @@ from pathlib import Path
 from typing import List

 from langchain.chains.openai_functions import create_structured_output_chain
-from langchain.chat_models import ChatOpenAI
 from langchain.document_loaders import TextLoader
-from langchain.embeddings.openai import OpenAIEmbeddings
 from langchain.graphs import Neo4jGraph
 from langchain.prompts import ChatPromptTemplate
 from langchain.text_splitter import TokenTextSplitter
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings.openai import OpenAIEmbeddings
 from langchain_core.pydantic_v1 import BaseModel, Field
 from neo4j.exceptions import ClientError
diff --git a/templates/neo4j-advanced-rag/neo4j_advanced_rag/chain.py b/templates/neo4j-advanced-rag/neo4j_advanced_rag/chain.py
index 5293920f916..70da21af2b9 100644
--- a/templates/neo4j-advanced-rag/neo4j_advanced_rag/chain.py
+++ b/templates/neo4j-advanced-rag/neo4j_advanced_rag/chain.py
@@ -1,7 +1,7 @@
 from operator import itemgetter

-from langchain.chat_models import ChatOpenAI
 from langchain.prompts import ChatPromptTemplate
+from langchain_community.chat_models import ChatOpenAI
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import ConfigurableField, RunnableParallel
diff --git a/templates/neo4j-advanced-rag/neo4j_advanced_rag/retrievers.py b/templates/neo4j-advanced-rag/neo4j_advanced_rag/retrievers.py
index dc2e303a7c7..eccb33e159d 100644
--- a/templates/neo4j-advanced-rag/neo4j_advanced_rag/retrievers.py
+++ b/templates/neo4j-advanced-rag/neo4j_advanced_rag/retrievers.py
@@ -1,5 +1,5 @@
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.vectorstores import Neo4jVector
+from langchain_community.embeddings import OpenAIEmbeddings

 # Typical RAG retriever
diff --git a/templates/neo4j-cypher-ft/neo4j_cypher_ft/chain.py b/templates/neo4j-cypher-ft/neo4j_cypher_ft/chain.py
index 79c64b4ffd5..49c6ed278c6 100644
--- a/templates/neo4j-cypher-ft/neo4j_cypher_ft/chain.py
+++ b/templates/neo4j-cypher-ft/neo4j_cypher_ft/chain.py
@@ -2,9 +2,9 @@ from typing import List, Optional

 from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema
 from langchain.chains.openai_functions import create_structured_output_chain
-from langchain.chat_models import ChatOpenAI
 from langchain.graphs import Neo4jGraph
 from langchain.prompts import ChatPromptTemplate
+from langchain_community.chat_models import ChatOpenAI
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.runnables import RunnablePassthrough
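The Neo4j templates pair graph and vector stores that stay in `langchain` with embeddings that now come from `langchain_community`. A sketch of the retriever wiring in the style of `neo4j_advanced_rag/retrievers.py` (assumes a running Neo4j instance with `NEO4J_URI`, `NEO4J_USERNAME`, and `NEO4J_PASSWORD` set, plus an index already populated by `ingest.py`):

```python
from langchain.vectorstores import Neo4jVector
from langchain_community.embeddings import OpenAIEmbeddings

# Connect to an existing index rather than re-ingesting.
typical_rag = Neo4jVector.from_existing_index(
    OpenAIEmbeddings(), index_name="typical_rag"
)
retriever = typical_rag.as_retriever()
```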
diff --git a/templates/neo4j-cypher-memory/neo4j_cypher_memory/chain.py b/templates/neo4j-cypher-memory/neo4j_cypher_memory/chain.py
index a8f3fff0adc..dbfb54b78cc 100644
--- a/templates/neo4j-cypher-memory/neo4j_cypher_memory/chain.py
+++ b/templates/neo4j-cypher-memory/neo4j_cypher_memory/chain.py
@@ -1,10 +1,10 @@
 from typing import Any, Dict, List

 from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema
-from langchain.chat_models import ChatOpenAI
 from langchain.graphs import Neo4jGraph
 from langchain.memory import ChatMessageHistory
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
+from langchain_community.chat_models import ChatOpenAI
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnablePassthrough
diff --git a/templates/neo4j-cypher/neo4j_cypher/chain.py b/templates/neo4j-cypher/neo4j_cypher/chain.py
index f8e4d8d9047..a08942320a9 100644
--- a/templates/neo4j-cypher/neo4j_cypher/chain.py
+++ b/templates/neo4j-cypher/neo4j_cypher/chain.py
@@ -1,7 +1,7 @@
 from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema
-from langchain.chat_models import ChatOpenAI
 from langchain.graphs import Neo4jGraph
 from langchain.prompts import ChatPromptTemplate
+from langchain_community.chat_models import ChatOpenAI
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnablePassthrough
diff --git a/templates/neo4j-generation/neo4j_generation/chain.py b/templates/neo4j-generation/neo4j_generation/chain.py
index f980d252446..8c6459343aa 100644
--- a/templates/neo4j-generation/neo4j_generation/chain.py
+++ b/templates/neo4j-generation/neo4j_generation/chain.py
@@ -3,11 +3,11 @@ from typing import List, Optional
 from langchain.chains.openai_functions import (
     create_structured_output_chain,
 )
-from langchain.chat_models import ChatOpenAI
 from langchain.graphs import Neo4jGraph
 from langchain.graphs.graph_document import GraphDocument
 from langchain.prompts import ChatPromptTemplate
 from langchain.schema import Document
+from langchain_community.chat_models import ChatOpenAI

 from neo4j_generation.utils import (
     KnowledgeGraph,
diff --git a/templates/neo4j-parent/ingest.py b/templates/neo4j-parent/ingest.py
index e1b4d05ac63..8a5d82f4df3 100644
--- a/templates/neo4j-parent/ingest.py
+++ b/templates/neo4j-parent/ingest.py
@@ -1,10 +1,10 @@
 from pathlib import Path

 from langchain.document_loaders import TextLoader
-from langchain.embeddings.openai import OpenAIEmbeddings
 from langchain.graphs import Neo4jGraph
 from langchain.text_splitter import TokenTextSplitter
 from langchain.vectorstores import Neo4jVector
+from langchain_community.embeddings.openai import OpenAIEmbeddings

 txt_path = Path(__file__).parent / "dune.txt"
diff --git a/templates/neo4j-parent/neo4j_parent/chain.py b/templates/neo4j-parent/neo4j_parent/chain.py
index 01950baba99..736b64dea2b 100644
--- a/templates/neo4j-parent/neo4j_parent/chain.py
+++ b/templates/neo4j-parent/neo4j_parent/chain.py
@@ -1,7 +1,7 @@
-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.vectorstores import Neo4jVector
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/neo4j-vector-memory/ingest.py b/templates/neo4j-vector-memory/ingest.py
index 698a94d2e4b..3c24b66c673 100644
--- a/templates/neo4j-vector-memory/ingest.py
+++ b/templates/neo4j-vector-memory/ingest.py
@@ -1,9 +1,9 @@
 from pathlib import Path

 from langchain.document_loaders import TextLoader
-from langchain.embeddings.openai import OpenAIEmbeddings
 from langchain.text_splitter import TokenTextSplitter
 from langchain.vectorstores import Neo4jVector
+from langchain_community.embeddings.openai import OpenAIEmbeddings

 txt_path = Path(__file__).parent / "dune.txt"
diff --git a/templates/neo4j-vector-memory/neo4j_vector_memory/chain.py b/templates/neo4j-vector-memory/neo4j_vector_memory/chain.py
index 63b9f4fef01..fd0a6299304 100644
--- a/templates/neo4j-vector-memory/neo4j_vector_memory/chain.py
+++ b/templates/neo4j-vector-memory/neo4j_vector_memory/chain.py
@@ -1,9 +1,9 @@
 from operator import itemgetter

-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate
 from langchain.vectorstores import Neo4jVector
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnablePassthrough
diff --git a/templates/openai-functions-agent/openai_functions_agent/agent.py b/templates/openai-functions-agent/openai_functions_agent/agent.py
index 80986fd4d0a..9f0bcad4d3b 100644
--- a/templates/openai-functions-agent/openai_functions_agent/agent.py
+++ b/templates/openai-functions-agent/openai_functions_agent/agent.py
@@ -3,11 +3,11 @@ from typing import List, Tuple
 from langchain.agents import AgentExecutor
 from langchain.agents.format_scratchpad import format_to_openai_function_messages
 from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
-from langchain.chat_models import ChatOpenAI
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain.tools.render import format_tool_to_openai_function
 from langchain.tools.tavily_search import TavilySearchResults
 from langchain.utilities.tavily_search import TavilySearchAPIWrapper
+from langchain_community.chat_models import ChatOpenAI
 from langchain_core.messages import AIMessage, HumanMessage
 from langchain_core.pydantic_v1 import BaseModel, Field
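The `openai-functions-agent` change is import-only; the agent assembly itself is untouched. For orientation, a condensed sketch of how those imports fit together, mirroring the template's structure with the prompt abbreviated (needs `OPENAI_API_KEY` and `TAVILY_API_KEY`):

```python
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.tools.render import format_tool_to_openai_function
from langchain.tools.tavily_search import TavilySearchResults
from langchain.utilities.tavily_search import TavilySearchAPIWrapper
from langchain_community.chat_models import ChatOpenAI

tools = [TavilySearchResults(api_wrapper=TavilySearchAPIWrapper())]
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        ("human", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)
llm = ChatOpenAI(temperature=0).bind(
    functions=[format_tool_to_openai_function(t) for t in tools]
)
agent = (
    {
        "input": lambda x: x["input"],
        "agent_scratchpad": lambda x: format_to_openai_function_messages(
            x["intermediate_steps"]
        ),
    }
    | prompt
    | llm
    | OpenAIFunctionsAgentOutputParser()
)
agent_executor = AgentExecutor(agent=agent, tools=tools)
```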
diff --git a/templates/openai-functions-tool-retrieval-agent/openai_functions_tool_retrieval_agent/agent.py b/templates/openai-functions-tool-retrieval-agent/openai_functions_tool_retrieval_agent/agent.py
index 983208026e9..ef2aa3a8d02 100644
--- a/templates/openai-functions-tool-retrieval-agent/openai_functions_tool_retrieval_agent/agent.py
+++ b/templates/openai-functions-tool-retrieval-agent/openai_functions_tool_retrieval_agent/agent.py
@@ -6,8 +6,6 @@ from langchain.agents import (
 )
 from langchain.agents.format_scratchpad import format_to_openai_functions
 from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import (
     ChatPromptTemplate,
     MessagesPlaceholder,
@@ -17,6 +15,8 @@ from langchain.tools.render import format_tool_to_openai_function
 from langchain.tools.tavily_search import TavilySearchResults
 from langchain.utilities.tavily_search import TavilySearchAPIWrapper
 from langchain.vectorstores import FAISS
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.messages import AIMessage, HumanMessage
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.runnables import Runnable, RunnableLambda, RunnableParallel
diff --git a/templates/pii-protected-chatbot/pii_protected_chatbot/chain.py b/templates/pii-protected-chatbot/pii_protected_chatbot/chain.py
index d36c8719f9b..97fe60b73e8 100644
--- a/templates/pii-protected-chatbot/pii_protected_chatbot/chain.py
+++ b/templates/pii-protected-chatbot/pii_protected_chatbot/chain.py
@@ -1,7 +1,7 @@
 from typing import List, Tuple

-from langchain.chat_models import ChatOpenAI
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
+from langchain_community.chat_models import ChatOpenAI
 from langchain_core.messages import AIMessage, HumanMessage
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
diff --git a/templates/pirate-speak-configurable/pirate_speak_configurable/chain.py b/templates/pirate-speak-configurable/pirate_speak_configurable/chain.py
index 241397ed725..4fd3781089d 100644
--- a/templates/pirate-speak-configurable/pirate_speak_configurable/chain.py
+++ b/templates/pirate-speak-configurable/pirate_speak_configurable/chain.py
@@ -1,5 +1,5 @@
-from langchain.chat_models import ChatAnthropic, ChatCohere, ChatOpenAI
 from langchain.prompts import ChatPromptTemplate
+from langchain_community.chat_models import ChatAnthropic, ChatCohere, ChatOpenAI
 from langchain_core.runnables import ConfigurableField

 _prompt = ChatPromptTemplate.from_messages(
diff --git a/templates/pirate-speak/pirate_speak/chain.py b/templates/pirate-speak/pirate_speak/chain.py
index 38777dee722..07503bde5ab 100644
--- a/templates/pirate-speak/pirate_speak/chain.py
+++ b/templates/pirate-speak/pirate_speak/chain.py
@@ -1,5 +1,5 @@
-from langchain.chat_models import ChatOpenAI
 from langchain.prompts import ChatPromptTemplate
+from langchain_community.chat_models import ChatOpenAI

 _prompt = ChatPromptTemplate.from_messages(
     [
diff --git a/templates/plate-chain/plate_chain/chain.py b/templates/plate-chain/plate_chain/chain.py
index a369d23be2a..7c03efe36c7 100644
--- a/templates/plate-chain/plate_chain/chain.py
+++ b/templates/plate-chain/plate_chain/chain.py
@@ -1,8 +1,8 @@
 import base64
 import json

-from langchain.chat_models import ChatOpenAI
 from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate
+from langchain_community.chat_models import ChatOpenAI
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import Field
 from langserve import CustomUserType
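`pirate-speak-configurable` is the one template here that imports three chat models at once, because it exposes them through a single configurable runnable. A sketch of that pattern with the migrated imports (the `llm_provider` id is illustrative; each provider needs its own API key):

```python
from langchain_community.chat_models import ChatAnthropic, ChatCohere, ChatOpenAI
from langchain_core.runnables import ConfigurableField

_model = ChatOpenAI().configurable_alternatives(
    ConfigurableField(id="llm_provider"),
    default_key="openai",
    anthropic=ChatAnthropic(),
    cohere=ChatCohere(),
)

# Pick an alternative at invocation time:
# _model.with_config(configurable={"llm_provider": "anthropic"}).invoke("Ahoy!")
```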
diff --git a/templates/propositional-retrieval/propositional_retrieval/storage.py b/templates/propositional-retrieval/propositional_retrieval/storage.py
index c8f96922ddb..4c8aadb45b0 100644
--- a/templates/propositional-retrieval/propositional_retrieval/storage.py
+++ b/templates/propositional-retrieval/propositional_retrieval/storage.py
@@ -1,9 +1,9 @@
 import logging
 from pathlib import Path

-from langchain.embeddings import OpenAIEmbeddings
 from langchain.retrievers.multi_vector import MultiVectorRetriever
 from langchain.storage import LocalFileStore
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_community.vectorstores import Chroma

 logging.basicConfig(level=logging.INFO)
diff --git a/templates/python-lint/python_lint/agent_executor.py b/templates/python-lint/python_lint/agent_executor.py
index 6a27385727c..d9902007be5 100644
--- a/templates/python-lint/python_lint/agent_executor.py
+++ b/templates/python-lint/python_lint/agent_executor.py
@@ -5,10 +5,10 @@ import tempfile

 from langchain.agents import AgentType, initialize_agent
 from langchain.agents.tools import Tool
-from langchain.chat_models import ChatOpenAI
-from langchain.llms.base import BaseLLM
 from langchain.prompts import ChatPromptTemplate
 from langchain.pydantic_v1 import BaseModel, Field, ValidationError, validator
+from langchain_community.chat_models import ChatOpenAI
+from langchain_core.language_models import BaseLLM
 from langchain_core.runnables import ConfigurableField, Runnable
diff --git a/templates/rag-astradb/astradb_entomology_rag/__init__.py b/templates/rag-astradb/astradb_entomology_rag/__init__.py
index 2f1c325a0e1..ff99f159dee 100644
--- a/templates/rag-astradb/astradb_entomology_rag/__init__.py
+++ b/templates/rag-astradb/astradb_entomology_rag/__init__.py
@@ -1,9 +1,9 @@
 import os

-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.vectorstores import AstraDB
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.runnables import RunnablePassthrough
diff --git a/templates/rag-aws-bedrock/rag_aws_bedrock/chain.py b/templates/rag-aws-bedrock/rag_aws_bedrock/chain.py
index 744b96545a6..09dd7784792 100644
--- a/templates/rag-aws-bedrock/rag_aws_bedrock/chain.py
+++ b/templates/rag-aws-bedrock/rag_aws_bedrock/chain.py
@@ -1,9 +1,9 @@
 import os

-from langchain.embeddings import BedrockEmbeddings
-from langchain.llms.bedrock import Bedrock
 from langchain.prompts import ChatPromptTemplate
 from langchain.vectorstores import FAISS
+from langchain_community.embeddings import BedrockEmbeddings
+from langchain_community.llms.bedrock import Bedrock
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-aws-kendra/rag_aws_kendra/chain.py b/templates/rag-aws-kendra/rag_aws_kendra/chain.py
index df2e5635573..2b2554fc219 100644
--- a/templates/rag-aws-kendra/rag_aws_kendra/chain.py
+++ b/templates/rag-aws-kendra/rag_aws_kendra/chain.py
@@ -1,8 +1,8 @@
 import os

-from langchain.llms.bedrock import Bedrock
 from langchain.prompts import ChatPromptTemplate
 from langchain.retrievers import AmazonKendraRetriever
+from langchain_community.llms.bedrock import Bedrock
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
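Both AWS templates now source `Bedrock` from `langchain_community.llms.bedrock`. Note the contrast with `python-lint` above, where `BaseLLM` moves to `langchain_core.language_models` rather than to community: it is an abstraction, not an integration. A minimal Bedrock sketch (assumes AWS credentials and region are configured in the environment; the model id is illustrative):

```python
from langchain_community.llms.bedrock import Bedrock

llm = Bedrock(model_id="anthropic.claude-v2")  # any Bedrock model id you have access to
print(llm.invoke("Summarize retrieval-augmented generation in one sentence."))
```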
diff --git a/templates/rag-chroma-multi-modal-multi-vector/ingest.py b/templates/rag-chroma-multi-modal-multi-vector/ingest.py
index ef8d9819721..5700676e14e 100644
--- a/templates/rag-chroma-multi-modal-multi-vector/ingest.py
+++ b/templates/rag-chroma-multi-modal-multi-vector/ingest.py
@@ -6,11 +6,11 @@ from io import BytesIO
 from pathlib import Path

 import pypdfium2 as pdfium
-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.retrievers.multi_vector import MultiVectorRetriever
 from langchain.storage import LocalFileStore, UpstashRedisByteStore
 from langchain.vectorstores import Chroma
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.documents import Document
 from langchain_core.messages import HumanMessage
 from PIL import Image
diff --git a/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/chain.py b/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/chain.py
index f3cb84c1996..6432c78903e 100644
--- a/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/chain.py
+++ b/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/chain.py
@@ -3,12 +3,12 @@ import io
 import os
 from pathlib import Path

-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.pydantic_v1 import BaseModel
 from langchain.retrievers.multi_vector import MultiVectorRetriever
 from langchain.storage import LocalFileStore, UpstashRedisByteStore
 from langchain.vectorstores import Chroma
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.documents import Document
 from langchain_core.messages import HumanMessage
 from langchain_core.output_parsers import StrOutputParser
diff --git a/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/chain.py b/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/chain.py
index c8c674365d5..a45b3e1fe03 100644
--- a/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/chain.py
+++ b/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/chain.py
@@ -2,8 +2,8 @@ import base64
 import io
 from pathlib import Path

-from langchain.chat_models import ChatOpenAI
 from langchain.vectorstores import Chroma
+from langchain_community.chat_models import ChatOpenAI
 from langchain_core.documents import Document
 from langchain_core.messages import HumanMessage
 from langchain_core.output_parsers import StrOutputParser
diff --git a/templates/rag-chroma-private/rag_chroma_private/chain.py b/templates/rag-chroma-private/rag_chroma_private/chain.py
index 05e5bca894e..874a40efe12 100644
--- a/templates/rag-chroma-private/rag_chroma_private/chain.py
+++ b/templates/rag-chroma-private/rag_chroma_private/chain.py
@@ -1,10 +1,10 @@
 # Load
-from langchain.chat_models import ChatOllama
 from langchain.document_loaders import WebBaseLoader
-from langchain.embeddings import GPT4AllEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores import Chroma
+from langchain_community.chat_models import ChatOllama
+from langchain_community.embeddings import GPT4AllEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
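`rag-chroma-private` keeps the whole stack local, so the migrated imports are a local chat model plus local embeddings. A rough sketch of that combination (assumes an Ollama server with the model pulled, and the `gpt4all` package installed):

```python
from langchain.vectorstores import Chroma
from langchain_community.chat_models import ChatOllama
from langchain_community.embeddings import GPT4AllEmbeddings

llm = ChatOllama(model="llama2:13b-chat")
vectorstore = Chroma.from_texts(
    ["LangChain moved its integrations into langchain_community."],
    GPT4AllEmbeddings(),
)
docs = vectorstore.as_retriever().get_relevant_documents("What moved where?")
```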
diff --git a/templates/rag-chroma/rag_chroma/chain.py b/templates/rag-chroma/rag_chroma/chain.py
index ea929e974cb..799a1e00795 100644
--- a/templates/rag-chroma/rag_chroma/chain.py
+++ b/templates/rag-chroma/rag_chroma/chain.py
@@ -1,7 +1,7 @@
-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.vectorstores import Chroma
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-codellama-fireworks/rag_codellama_fireworks/chain.py b/templates/rag-codellama-fireworks/rag_codellama_fireworks/chain.py
index c1f4cefebb4..65b442ed16b 100644
--- a/templates/rag-codellama-fireworks/rag_codellama_fireworks/chain.py
+++ b/templates/rag-codellama-fireworks/rag_codellama_fireworks/chain.py
@@ -3,11 +3,11 @@ import os
 from git import Repo
 from langchain.document_loaders.generic import GenericLoader
 from langchain.document_loaders.parsers import LanguageParser
-from langchain.embeddings import GPT4AllEmbeddings
-from langchain.llms.fireworks import Fireworks
 from langchain.prompts import ChatPromptTemplate
 from langchain.text_splitter import Language, RecursiveCharacterTextSplitter
 from langchain.vectorstores import Chroma
+from langchain_community.embeddings import GPT4AllEmbeddings
+from langchain_community.llms.fireworks import Fireworks
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-conversation-zep/ingest.py b/templates/rag-conversation-zep/ingest.py
index 9dddc6a9172..d9caaa52287 100644
--- a/templates/rag-conversation-zep/ingest.py
+++ b/templates/rag-conversation-zep/ingest.py
@@ -2,9 +2,9 @@ import os

 from langchain.document_loaders import WebBaseLoader
-from langchain.embeddings import FakeEmbeddings
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores.zep import CollectionConfig, ZepVectorStore
+from langchain_community.embeddings import FakeEmbeddings

 ZEP_API_URL = os.environ.get("ZEP_API_URL", "http://localhost:8000")
 ZEP_API_KEY = os.environ.get("ZEP_API_KEY", None)
diff --git a/templates/rag-conversation-zep/rag_conversation_zep/chain.py b/templates/rag-conversation-zep/rag_conversation_zep/chain.py
index f46e66a4bd0..860e5641ef7 100644
--- a/templates/rag-conversation-zep/rag_conversation_zep/chain.py
+++ b/templates/rag-conversation-zep/rag_conversation_zep/chain.py
@@ -2,11 +2,11 @@ import os
 from operator import itemgetter
 from typing import List, Tuple

-from langchain.chat_models import ChatOpenAI
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain.prompts.prompt import PromptTemplate
 from langchain.schema import AIMessage, HumanMessage, format_document
 from langchain.vectorstores.zep import CollectionConfig, ZepVectorStore
+from langchain_community.chat_models import ChatOpenAI
 from langchain_core.documents import Document
 from langchain_core.messages import BaseMessage
 from langchain_core.output_parsers import StrOutputParser
diff --git a/templates/rag-conversation/rag_conversation/chain.py b/templates/rag-conversation/rag_conversation/chain.py
index 2e5e5f28748..1e84ccc2433 100644
--- a/templates/rag-conversation/rag_conversation/chain.py
+++ b/templates/rag-conversation/rag_conversation/chain.py
@@ -2,12 +2,12 @@ import os
 from operator import itemgetter
 from typing import List, Tuple

-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain.prompts.prompt import PromptTemplate
 from langchain.schema import AIMessage, HumanMessage, format_document
 from langchain.vectorstores import Pinecone
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.runnables import (
diff --git a/templates/rag-elasticsearch/ingest.py b/templates/rag-elasticsearch/ingest.py
index 5b6db39d09e..a799d1a97ed 100644
--- a/templates/rag-elasticsearch/ingest.py
+++ b/templates/rag-elasticsearch/ingest.py
@@ -1,9 +1,9 @@
 import os

 from langchain.document_loaders import JSONLoader
-from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores.elasticsearch import ElasticsearchStore
+from langchain_community.embeddings import HuggingFaceEmbeddings

 ELASTIC_CLOUD_ID = os.getenv("ELASTIC_CLOUD_ID")
 ELASTIC_USERNAME = os.getenv("ELASTIC_USERNAME", "elastic")
diff --git a/templates/rag-elasticsearch/rag_elasticsearch/chain.py b/templates/rag-elasticsearch/rag_elasticsearch/chain.py
index 2d33153d9b6..b40b5ac819f 100644
--- a/templates/rag-elasticsearch/rag_elasticsearch/chain.py
+++ b/templates/rag-elasticsearch/rag_elasticsearch/chain.py
@@ -1,10 +1,10 @@
 from operator import itemgetter
 from typing import List, Optional, Tuple

-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.schema import BaseMessage, format_document
 from langchain.vectorstores.elasticsearch import ElasticsearchStore
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-fusion/ingest.py b/templates/rag-fusion/ingest.py
index 769d2465577..f93faee19c6 100644
--- a/templates/rag-fusion/ingest.py
+++ b/templates/rag-fusion/ingest.py
@@ -1,5 +1,5 @@
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.vectorstores import Pinecone
+from langchain_community.embeddings import OpenAIEmbeddings

 all_documents = {
     "doc1": "Climate change and economic impact.",
diff --git a/templates/rag-fusion/rag_fusion/chain.py b/templates/rag-fusion/rag_fusion/chain.py
index 60d11bd49b9..1ca7bbd4c99 100644
--- a/templates/rag-fusion/rag_fusion/chain.py
+++ b/templates/rag-fusion/rag_fusion/chain.py
@@ -1,8 +1,8 @@
 from langchain import hub
-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.load import dumps, loads
 from langchain.vectorstores import Pinecone
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
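The Elasticsearch templates lean on `HuggingFaceEmbeddings`, which runs locally and needs no API key; only its import path changes. A quick sketch (assumes `sentence-transformers` is installed):

```python
from langchain_community.embeddings import HuggingFaceEmbeddings

embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
vector = embeddings.embed_query("hybrid search")  # a 384-dim list of floats
```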
diff --git a/templates/rag-google-cloud-sensitive-data-protection/rag_google_cloud_sensitive_data_protection/chain.py b/templates/rag-google-cloud-sensitive-data-protection/rag_google_cloud_sensitive_data_protection/chain.py
index 0a33e7c58ae..40c6f4bdbc7 100644
--- a/templates/rag-google-cloud-sensitive-data-protection/rag_google_cloud_sensitive_data_protection/chain.py
+++ b/templates/rag-google-cloud-sensitive-data-protection/rag_google_cloud_sensitive_data_protection/chain.py
@@ -2,8 +2,8 @@ import os
 from typing import List, Tuple

 from google.cloud import dlp_v2
-from langchain.chat_models import ChatVertexAI
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
+from langchain_community.chat_models import ChatVertexAI
 from langchain_core.messages import AIMessage, HumanMessage
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel, Field
diff --git a/templates/rag-google-cloud-vertexai-search/rag_google_cloud_vertexai_search/chain.py b/templates/rag-google-cloud-vertexai-search/rag_google_cloud_vertexai_search/chain.py
index 46eafc5ccc9..8a89a877a54 100644
--- a/templates/rag-google-cloud-vertexai-search/rag_google_cloud_vertexai_search/chain.py
+++ b/templates/rag-google-cloud-vertexai-search/rag_google_cloud_vertexai_search/chain.py
@@ -1,8 +1,8 @@
 import os

-from langchain.chat_models import ChatVertexAI
 from langchain.prompts import ChatPromptTemplate
 from langchain.retrievers import GoogleVertexAISearchRetriever
+from langchain_community.chat_models import ChatVertexAI
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-gpt-crawler/rag_gpt_crawler/chain.py b/templates/rag-gpt-crawler/rag_gpt_crawler/chain.py
index a217de65ca1..663a4041038 100644
--- a/templates/rag-gpt-crawler/rag_gpt_crawler/chain.py
+++ b/templates/rag-gpt-crawler/rag_gpt_crawler/chain.py
@@ -1,12 +1,12 @@
 import json
 from pathlib import Path

-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.schema import Document
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores import Chroma
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-matching-engine/rag_matching_engine/chain.py b/templates/rag-matching-engine/rag_matching_engine/chain.py
index 943eec823ef..83ed057920a 100644
--- a/templates/rag-matching-engine/rag_matching_engine/chain.py
+++ b/templates/rag-matching-engine/rag_matching_engine/chain.py
@@ -1,9 +1,9 @@
 import os

-from langchain.embeddings import VertexAIEmbeddings
-from langchain.llms import VertexAI
 from langchain.prompts import PromptTemplate
 from langchain.vectorstores import MatchingEngine
+from langchain_community.embeddings import VertexAIEmbeddings
+from langchain_community.llms import VertexAI
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
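The Google Cloud templates swap both the chat and completion wrappers. A minimal sketch (assumes application-default credentials and a project with Vertex AI enabled; the model name is illustrative):

```python
from langchain_community.chat_models import ChatVertexAI
from langchain_community.llms import VertexAI

chat = ChatVertexAI()                    # defaults to a chat-bison-family model
llm = VertexAI(model_name="text-bison")
print(chat.invoke("What changed in this migration?").content)
```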
diff --git a/templates/rag-momento-vector-index/rag_momento_vector_index/chain.py b/templates/rag-momento-vector-index/rag_momento_vector_index/chain.py
index 78291f68e75..8d99565456e 100644
--- a/templates/rag-momento-vector-index/rag_momento_vector_index/chain.py
+++ b/templates/rag-momento-vector-index/rag_momento_vector_index/chain.py
@@ -1,9 +1,9 @@
 import os

-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.vectorstores import MomentoVectorIndex
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnablePassthrough
diff --git a/templates/rag-momento-vector-index/rag_momento_vector_index/ingest.py b/templates/rag-momento-vector-index/rag_momento_vector_index/ingest.py
index 660f10ce27c..f15d8564930 100644
--- a/templates/rag-momento-vector-index/rag_momento_vector_index/ingest.py
+++ b/templates/rag-momento-vector-index/rag_momento_vector_index/ingest.py
@@ -2,9 +2,9 @@ import os

 from langchain.document_loaders import WebBaseLoader
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores import MomentoVectorIndex
+from langchain_community.embeddings import OpenAIEmbeddings
 from momento import (
     CredentialProvider,
     PreviewVectorIndexClient,
diff --git a/templates/rag-mongo/ingest.py b/templates/rag-mongo/ingest.py
index a69b7d7681e..07ce77ebe56 100644
--- a/templates/rag-mongo/ingest.py
+++ b/templates/rag-mongo/ingest.py
@@ -1,9 +1,9 @@
 import os

 from langchain.document_loaders import PyPDFLoader
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores import MongoDBAtlasVectorSearch
+from langchain_community.embeddings import OpenAIEmbeddings
 from pymongo import MongoClient

 MONGO_URI = os.environ["MONGO_URI"]
diff --git a/templates/rag-mongo/rag_mongo/chain.py b/templates/rag-mongo/rag_mongo/chain.py
index 62613f2e076..e6eafb85460 100644
--- a/templates/rag-mongo/rag_mongo/chain.py
+++ b/templates/rag-mongo/rag_mongo/chain.py
@@ -1,11 +1,11 @@
 import os

-from langchain.chat_models import ChatOpenAI
 from langchain.document_loaders import PyPDFLoader
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores import MongoDBAtlasVectorSearch
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import (
diff --git a/templates/rag-multi-index-fusion/rag_multi_index_fusion/chain.py b/templates/rag-multi-index-fusion/rag_multi_index_fusion/chain.py
index 03799162355..f0bfca79103 100644
--- a/templates/rag-multi-index-fusion/rag_multi_index_fusion/chain.py
+++ b/templates/rag-multi-index-fusion/rag_multi_index_fusion/chain.py
@@ -1,8 +1,6 @@
 from operator import itemgetter

 import numpy as np
-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.retrievers import (
     ArxivRetriever,
@@ -12,6 +10,8 @@ from langchain.retrievers import (
 )
 from langchain.schema import StrOutputParser
 from langchain.utils.math import cosine_similarity
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import (
     RunnableParallel,
diff --git a/templates/rag-multi-index-router/rag_multi_index_router/chain.py b/templates/rag-multi-index-router/rag_multi_index_router/chain.py
index 625f2b4e80e..d1f3b38324b 100644
--- a/templates/rag-multi-index-router/rag_multi_index_router/chain.py
+++ b/templates/rag-multi-index-router/rag_multi_index_router/chain.py
@@ -1,7 +1,6 @@
 from operator import itemgetter
 from typing import Literal

-from langchain.chat_models import ChatOpenAI
 from langchain.output_parsers.openai_functions import PydanticAttrOutputFunctionsParser
 from langchain.prompts import ChatPromptTemplate
 from langchain.retrievers import (
@@ -12,6 +11,7 @@ from langchain.retrievers import (
 )
 from langchain.schema import StrOutputParser
 from langchain.utils.openai_functions import convert_pydantic_to_openai_function
+from langchain_community.chat_models import ChatOpenAI
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.runnables import (
     RouterRunnable,
diff --git a/templates/rag-multi-modal-local/rag_multi_modal_local/chain.py b/templates/rag-multi-modal-local/rag_multi_modal_local/chain.py
index b45e2e83b3d..d84370b6aa0 100644
--- a/templates/rag-multi-modal-local/rag_multi_modal_local/chain.py
+++ b/templates/rag-multi-modal-local/rag_multi_modal_local/chain.py
@@ -2,8 +2,8 @@ import base64
 import io
 from pathlib import Path

-from langchain.chat_models import ChatOllama
 from langchain.vectorstores import Chroma
+from langchain_community.chat_models import ChatOllama
 from langchain_core.documents import Document
 from langchain_core.messages import HumanMessage
 from langchain_core.output_parsers import StrOutputParser
diff --git a/templates/rag-multi-modal-mv-local/ingest.py b/templates/rag-multi-modal-mv-local/ingest.py
index 6b186c7b3b9..896e9ae949b 100644
--- a/templates/rag-multi-modal-mv-local/ingest.py
+++ b/templates/rag-multi-modal-mv-local/ingest.py
@@ -5,11 +5,11 @@ import uuid
 from io import BytesIO
 from pathlib import Path

-from langchain.chat_models import ChatOllama
-from langchain.embeddings import OllamaEmbeddings
 from langchain.retrievers.multi_vector import MultiVectorRetriever
 from langchain.storage import LocalFileStore
 from langchain.vectorstores import Chroma
+from langchain_community.chat_models import ChatOllama
+from langchain_community.embeddings import OllamaEmbeddings
 from langchain_core.documents import Document
 from langchain_core.messages import HumanMessage
 from PIL import Image
diff --git a/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/chain.py b/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/chain.py
index e66064b2782..079b385adda 100644
--- a/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/chain.py
+++ b/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/chain.py
@@ -2,12 +2,12 @@ import base64
 import io
 from pathlib import Path

-from langchain.chat_models import ChatOllama
-from langchain.embeddings import OllamaEmbeddings
 from langchain.pydantic_v1 import BaseModel
 from langchain.retrievers.multi_vector import MultiVectorRetriever
 from langchain.storage import LocalFileStore
 from langchain.vectorstores import Chroma
+from langchain_community.chat_models import ChatOllama
+from langchain_community.embeddings import OllamaEmbeddings
 from langchain_core.documents import Document
 from langchain_core.messages import HumanMessage
 from langchain_core.output_parsers import StrOutputParser
diff --git a/templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py b/templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py
index c65675e0be1..1e1fb1c3ac9 100644
--- a/templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py
+++ b/templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py
@@ -1,14 +1,14 @@
 from typing import List

 from langchain.chains import LLMChain
-from langchain.chat_models import ChatOllama, ChatOpenAI
 from langchain.document_loaders import WebBaseLoader
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.output_parsers import PydanticOutputParser
 from langchain.prompts import ChatPromptTemplate, PromptTemplate
 from langchain.retrievers.multi_query import MultiQueryRetriever
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores import Chroma
+from langchain_community.chat_models import ChatOllama, ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-opensearch/rag_opensearch/chain.py b/templates/rag-opensearch/rag_opensearch/chain.py
index 75f9b0fb841..39b7c2f1048 100644
--- a/templates/rag-opensearch/rag_opensearch/chain.py
+++ b/templates/rag-opensearch/rag_opensearch/chain.py
@@ -1,9 +1,9 @@
 import os

-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.vectorstores.opensearch_vector_search import OpenSearchVectorSearch
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-pinecone-multi-query/rag_pinecone_multi_query/chain.py b/templates/rag-pinecone-multi-query/rag_pinecone_multi_query/chain.py
index ace96bda068..2d13373ab73 100644
--- a/templates/rag-pinecone-multi-query/rag_pinecone_multi_query/chain.py
+++ b/templates/rag-pinecone-multi-query/rag_pinecone_multi_query/chain.py
@@ -1,10 +1,10 @@
 import os

-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.retrievers.multi_query import MultiQueryRetriever
 from langchain.vectorstores import Pinecone
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
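The multi-query templates only change where the models and embeddings come from; `MultiQueryRetriever` itself stays in `langchain`. A sketch of the core hookup (model names are illustrative; the Ollama variant needs a local server):

```python
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain.vectorstores import Chroma
from langchain_community.chat_models import ChatOllama
from langchain_community.embeddings import OpenAIEmbeddings

vectorstore = Chroma.from_texts(
    ["doc about task decomposition", "doc about agents"], OpenAIEmbeddings()
)
retriever = MultiQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(), llm=ChatOllama(model="zephyr")
)
docs = retriever.get_relevant_documents("How do agents decompose tasks?")
```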
diff --git a/templates/rag-pinecone-rerank/rag_pinecone_rerank/chain.py b/templates/rag-pinecone-rerank/rag_pinecone_rerank/chain.py
index dad45ede9f2..57b11a98b77 100644
--- a/templates/rag-pinecone-rerank/rag_pinecone_rerank/chain.py
+++ b/templates/rag-pinecone-rerank/rag_pinecone_rerank/chain.py
@@ -1,11 +1,11 @@
 import os

-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.retrievers import ContextualCompressionRetriever
 from langchain.retrievers.document_compressors import CohereRerank
 from langchain.vectorstores import Pinecone
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-pinecone/rag_pinecone/chain.py b/templates/rag-pinecone/rag_pinecone/chain.py
index 8f1d03d2abe..c78c4542385 100644
--- a/templates/rag-pinecone/rag_pinecone/chain.py
+++ b/templates/rag-pinecone/rag_pinecone/chain.py
@@ -1,9 +1,9 @@
 import os

-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.vectorstores import Pinecone
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-redis/ingest.py b/templates/rag-redis/ingest.py
index 6e056c10fa2..3d65a17c155 100644
--- a/templates/rag-redis/ingest.py
+++ b/templates/rag-redis/ingest.py
@@ -1,9 +1,9 @@
 import os

 from langchain.document_loaders import UnstructuredFileLoader
-from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores import Redis
+from langchain_community.embeddings import HuggingFaceEmbeddings

 from rag_redis.config import EMBED_MODEL, INDEX_NAME, INDEX_SCHEMA, REDIS_URL
diff --git a/templates/rag-redis/rag_redis/chain.py b/templates/rag-redis/rag_redis/chain.py
index 0e451f36362..13219c2180a 100644
--- a/templates/rag-redis/rag_redis/chain.py
+++ b/templates/rag-redis/rag_redis/chain.py
@@ -1,7 +1,7 @@
-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.vectorstores import Redis
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-self-query/ingest.py b/templates/rag-self-query/ingest.py
index ad3c0bea164..02d0ed63e61 100644
--- a/templates/rag-self-query/ingest.py
+++ b/templates/rag-self-query/ingest.py
@@ -1,9 +1,9 @@
 import os

 from langchain.document_loaders import JSONLoader
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores import ElasticsearchStore
+from langchain_community.embeddings import OpenAIEmbeddings

 ELASTIC_CLOUD_ID = os.getenv("ELASTIC_CLOUD_ID")
 ELASTIC_USERNAME = os.getenv("ELASTIC_USERNAME", "elastic")
diff --git a/templates/rag-self-query/rag_self_query/chain.py b/templates/rag-self-query/rag_self_query/chain.py
index f4135728f19..f1dd70f6ee5 100644
--- a/templates/rag-self-query/rag_self_query/chain.py
+++ b/templates/rag-self-query/rag_self_query/chain.py
@@ -2,11 +2,11 @@ import os
 from operator import itemgetter
 from typing import List, Tuple

-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.retrievers import SelfQueryRetriever
 from langchain.schema import format_document
 from langchain.vectorstores.elasticsearch import ElasticsearchStore
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-semi-structured/rag_semi_structured/chain.py b/templates/rag-semi-structured/rag_semi_structured/chain.py
index 8c74db3d075..3467d536bbd 100644
--- a/templates/rag-semi-structured/rag_semi_structured/chain.py
+++ b/templates/rag-semi-structured/rag_semi_structured/chain.py
@@ -1,12 +1,12 @@
 # Load
 import uuid

-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.retrievers.multi_vector import MultiVectorRetriever
 from langchain.storage import InMemoryStore
 from langchain.vectorstores import Chroma
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.documents import Document
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
diff --git a/templates/rag-singlestoredb/rag_singlestoredb/chain.py b/templates/rag-singlestoredb/rag_singlestoredb/chain.py
index f609266566a..5f691aaee93 100644
--- a/templates/rag-singlestoredb/rag_singlestoredb/chain.py
+++ b/templates/rag-singlestoredb/rag_singlestoredb/chain.py
@@ -1,9 +1,9 @@
 import os

-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.vectorstores import SingleStoreDB
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-supabase/rag_supabase/chain.py b/templates/rag-supabase/rag_supabase/chain.py
index 6a24aa7fb7e..20de7d165fd 100644
--- a/templates/rag-supabase/rag_supabase/chain.py
+++ b/templates/rag-supabase/rag_supabase/chain.py
@@ -1,9 +1,9 @@
 import os

-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.vectorstores.supabase import SupabaseVectorStore
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
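The self-query templates (here, and `self-query-qdrant` / `self-query-supabase` below) keep `SelfQueryRetriever` in `langchain` while the LLM and embeddings move to community. A sketch of the construction with a hypothetical metadata schema (requires the `lark` package for the structured-query parser):

```python
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.vectorstores import Chroma
from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_core.documents import Document

vectorstore = Chroma.from_documents(
    [Document(page_content="Quarterly report", metadata={"team": "finance"})],
    OpenAIEmbeddings(),
)
retriever = SelfQueryRetriever.from_llm(
    llm=ChatOpenAI(temperature=0),
    vectorstore=vectorstore,
    document_contents="Internal workplace documents",
    metadata_field_info=[
        AttributeInfo(name="team", type="string", description="Owning team")
    ],
)
```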
diff --git a/templates/rag-timescale-conversation/rag_timescale_conversation/chain.py b/templates/rag-timescale-conversation/rag_timescale_conversation/chain.py
index e69f255e476..531d7e1ce4a 100644
--- a/templates/rag-timescale-conversation/rag_timescale_conversation/chain.py
+++ b/templates/rag-timescale-conversation/rag_timescale_conversation/chain.py
@@ -4,12 +4,12 @@ from operator import itemgetter
 from typing import List, Optional, Tuple

 from dotenv import find_dotenv, load_dotenv
-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain.prompts.prompt import PromptTemplate
 from langchain.schema import AIMessage, HumanMessage, format_document
 from langchain.vectorstores.timescalevector import TimescaleVector
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.runnables import (
diff --git a/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py b/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py
index 7b200b12581..cee2e04ffd0 100644
--- a/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py
+++ b/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py
@@ -4,9 +4,9 @@ from datetime import datetime, timedelta

 import requests
 from langchain.document_loaders import JSONLoader
-from langchain.embeddings.openai import OpenAIEmbeddings
 from langchain.text_splitter import CharacterTextSplitter
 from langchain.vectorstores.timescalevector import TimescaleVector
+from langchain_community.embeddings.openai import OpenAIEmbeddings
 from timescale_vector import client
diff --git a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/chain.py b/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/chain.py
index a0a38eafc4c..d14206ad5bf 100644
--- a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/chain.py
+++ b/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/chain.py
@@ -4,12 +4,12 @@ import os
 from datetime import timedelta

 from langchain.chains.query_constructor.base import AttributeInfo
-from langchain.chat_models import ChatOpenAI
-from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.llms import OpenAI
 from langchain.prompts import ChatPromptTemplate
 from langchain.retrievers.self_query.base import SelfQueryRetriever
 from langchain.vectorstores.timescalevector import TimescaleVector
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings.openai import OpenAIEmbeddings
+from langchain_community.llms import OpenAI
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py b/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py
index 7b200b12581..cee2e04ffd0 100644
--- a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py
+++ b/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py
@@ -4,9 +4,9 @@ from datetime import datetime, timedelta

 import requests
 from langchain.document_loaders import JSONLoader
-from langchain.embeddings.openai import OpenAIEmbeddings
 from langchain.text_splitter import CharacterTextSplitter
 from langchain.vectorstores.timescalevector import TimescaleVector
+from langchain_community.embeddings.openai import OpenAIEmbeddings
 from timescale_vector import client
diff --git a/templates/rag-vectara-multiquery/rag_vectara_multiquery/chain.py b/templates/rag-vectara-multiquery/rag_vectara_multiquery/chain.py
index 1c1c9b19922..eb863b60640 100644
--- a/templates/rag-vectara-multiquery/rag_vectara_multiquery/chain.py
+++ b/templates/rag-vectara-multiquery/rag_vectara_multiquery/chain.py
@@ -1,8 +1,8 @@
 import os

-from langchain.chat_models import ChatOpenAI
 from langchain.retrievers.multi_query import MultiQueryRetriever
 from langchain.vectorstores import Vectara
+from langchain_community.chat_models import ChatOpenAI
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/rag-weaviate/rag_weaviate/chain.py b/templates/rag-weaviate/rag_weaviate/chain.py
index 0bb5044d57e..2d31bd6fabc 100644
--- a/templates/rag-weaviate/rag_weaviate/chain.py
+++ b/templates/rag-weaviate/rag_weaviate/chain.py
@@ -1,11 +1,11 @@
 import os

-from langchain.chat_models import ChatOpenAI
 from langchain.document_loaders import WebBaseLoader
-from langchain.embeddings import OpenAIEmbeddings
 from langchain.prompts import ChatPromptTemplate
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores import Weaviate
+from langchain_community.chat_models import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import RunnableParallel, RunnablePassthrough
diff --git a/templates/research-assistant/research_assistant/search/web.py b/templates/research-assistant/research_assistant/search/web.py
index 16db649104b..abeacbc2f89 100644
--- a/templates/research-assistant/research_assistant/search/web.py
+++ b/templates/research-assistant/research_assistant/search/web.py
@@ -3,10 +3,10 @@ from typing import Any

 import requests
 from bs4 import BeautifulSoup
-from langchain.chat_models import ChatOpenAI
 from langchain.prompts import ChatPromptTemplate
 from langchain.retrievers.tavily_search_api import TavilySearchAPIRetriever
 from langchain.utilities import DuckDuckGoSearchAPIWrapper
+from langchain_community.chat_models import ChatOpenAI
 from langchain_core.messages import SystemMessage
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.runnables import (
diff --git a/templates/research-assistant/research_assistant/writer.py b/templates/research-assistant/research_assistant/writer.py
index 34a167a70b6..d07b2eb5da0 100644
--- a/templates/research-assistant/research_assistant/writer.py
+++ b/templates/research-assistant/research_assistant/writer.py
@@ -1,5 +1,5 @@
-from langchain.chat_models import ChatOpenAI
 from langchain.prompts import ChatPromptTemplate
+from langchain_community.chat_models import ChatOpenAI
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.runnables import ConfigurableField
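`research-assistant`'s web search helper only re-imports `ChatOpenAI`; its `DuckDuckGoSearchAPIWrapper` stays in `langchain` and is handy for a key-free smoke test of the surrounding chain:

```python
from langchain.utilities import DuckDuckGoSearchAPIWrapper

search = DuckDuckGoSearchAPIWrapper()
for result in search.results("langchain community package split", max_results=3):
    print(result["title"], result["link"])
```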
AzureChatOpenAI from langchain_core.messages import AIMessage, HumanMessage from langchain_core.pydantic_v1 import BaseModel, Field diff --git a/templates/rewrite-retrieve-read/rewrite_retrieve_read/chain.py b/templates/rewrite-retrieve-read/rewrite_retrieve_read/chain.py index 996b9b1347c..d66bd04e187 100644 --- a/templates/rewrite-retrieve-read/rewrite_retrieve_read/chain.py +++ b/templates/rewrite-retrieve-read/rewrite_retrieve_read/chain.py @@ -1,6 +1,6 @@ -from langchain.chat_models import ChatOpenAI from langchain.prompts import ChatPromptTemplate from langchain.utilities import DuckDuckGoSearchAPIWrapper +from langchain_community.chat_models import ChatOpenAI from langchain_core.output_parsers import StrOutputParser from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnablePassthrough diff --git a/templates/self-query-qdrant/README.md b/templates/self-query-qdrant/README.md index fac93ad0c60..c32af05dac6 100644 --- a/templates/self-query-qdrant/README.md +++ b/templates/self-query-qdrant/README.md @@ -87,8 +87,8 @@ If you want to customize the template, you can do it by passing the parameters t in the `app/server.py` file: ```python -from langchain.llms import Cohere -from langchain.embeddings import HuggingFaceEmbeddings +from langchain_community.llms import Cohere +from langchain_community.embeddings import HuggingFaceEmbeddings from langchain.chains.query_constructor.schema import AttributeInfo from self_query_qdrant.chain import create_chain @@ -109,7 +109,7 @@ The same goes for the `initialize` function that creates a Qdrant collection and ```python from langchain.schema import Document -from langchain.embeddings import HuggingFaceEmbeddings +from langchain_community.embeddings import HuggingFaceEmbeddings from self_query_qdrant.chain import initialize diff --git a/templates/self-query-qdrant/self_query_qdrant/chain.py b/templates/self-query-qdrant/self_query_qdrant/chain.py index 73e46b525aa..d6b00bee57f 100644 --- a/templates/self-query-qdrant/self_query_qdrant/chain.py +++ b/templates/self-query-qdrant/self_query_qdrant/chain.py @@ -2,12 +2,12 @@ import os from typing import List, Optional from langchain.chains.query_constructor.schema import AttributeInfo -from langchain.embeddings import OpenAIEmbeddings -from langchain.llms import BaseLLM -from langchain.llms.openai import OpenAI from langchain.retrievers import SelfQueryRetriever from langchain.schema import Document, StrOutputParser from langchain.vectorstores.qdrant import Qdrant +from langchain_community.embeddings import OpenAIEmbeddings +from langchain_community.llms import BaseLLM +from langchain_community.llms.openai import OpenAI from langchain_core.embeddings import Embeddings from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnableParallel, RunnablePassthrough diff --git a/templates/self-query-supabase/self_query_supabase/chain.py b/templates/self-query-supabase/self_query_supabase/chain.py index 1bd87d8da65..c7cec49a27a 100644 --- a/templates/self-query-supabase/self_query_supabase/chain.py +++ b/templates/self-query-supabase/self_query_supabase/chain.py @@ -1,10 +1,10 @@ import os from langchain.chains.query_constructor.base import AttributeInfo -from langchain.embeddings import OpenAIEmbeddings -from langchain.llms.openai import OpenAI from langchain.retrievers.self_query.base import SelfQueryRetriever from langchain.vectorstores.supabase import SupabaseVectorStore +from langchain_community.embeddings import OpenAIEmbeddings +from 
langchain_community.llms.openai import OpenAI from langchain_core.runnables import RunnableParallel, RunnablePassthrough from supabase.client import create_client diff --git a/templates/skeleton-of-thought/skeleton_of_thought/chain.py b/templates/skeleton-of-thought/skeleton_of_thought/chain.py index d0184e215d2..9196cc58b61 100644 --- a/templates/skeleton-of-thought/skeleton_of_thought/chain.py +++ b/templates/skeleton-of-thought/skeleton_of_thought/chain.py @@ -1,5 +1,5 @@ -from langchain.chat_models import ChatOpenAI from langchain.prompts import ChatPromptTemplate +from langchain_community.chat_models import ChatOpenAI from langchain_core.output_parsers import StrOutputParser from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnablePassthrough diff --git a/templates/solo-performance-prompting-agent/solo_performance_prompting_agent/agent.py b/templates/solo-performance-prompting-agent/solo_performance_prompting_agent/agent.py index ac59eb3f1f1..671d589df1e 100644 --- a/templates/solo-performance-prompting-agent/solo_performance_prompting_agent/agent.py +++ b/templates/solo-performance-prompting-agent/solo_performance_prompting_agent/agent.py @@ -1,8 +1,8 @@ from langchain.agents import AgentExecutor from langchain.agents.format_scratchpad import format_xml -from langchain.llms import OpenAI from langchain.tools import DuckDuckGoSearchRun from langchain.tools.render import render_text_description +from langchain_community.llms import OpenAI from langchain_core.pydantic_v1 import BaseModel from solo_performance_prompting_agent.parser import parse_output diff --git a/templates/sql-llama2/sql_llama2/chain.py b/templates/sql-llama2/sql_llama2/chain.py index bfec9397344..b47725e8710 100644 --- a/templates/sql-llama2/sql_llama2/chain.py +++ b/templates/sql-llama2/sql_llama2/chain.py @@ -1,8 +1,8 @@ from pathlib import Path -from langchain.llms import Replicate from langchain.prompts import ChatPromptTemplate from langchain.utilities import SQLDatabase +from langchain_community.llms import Replicate from langchain_core.output_parsers import StrOutputParser from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnablePassthrough diff --git a/templates/sql-llamacpp/sql_llamacpp/chain.py b/templates/sql-llamacpp/sql_llamacpp/chain.py index 6ca92567e07..6ca5232b890 100644 --- a/templates/sql-llamacpp/sql_llamacpp/chain.py +++ b/templates/sql-llamacpp/sql_llamacpp/chain.py @@ -3,10 +3,10 @@ import os from pathlib import Path import requests -from langchain.llms import LlamaCpp from langchain.memory import ConversationBufferMemory from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain.utilities import SQLDatabase +from langchain_community.llms import LlamaCpp from langchain_core.output_parsers import StrOutputParser from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnableLambda, RunnablePassthrough diff --git a/templates/sql-ollama/sql_ollama/chain.py b/templates/sql-ollama/sql_ollama/chain.py index 2a32d954f06..84a909cf2b3 100644 --- a/templates/sql-ollama/sql_ollama/chain.py +++ b/templates/sql-ollama/sql_ollama/chain.py @@ -1,9 +1,9 @@ from pathlib import Path -from langchain.chat_models import ChatOllama from langchain.memory import ConversationBufferMemory from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain.utilities import SQLDatabase +from langchain_community.chat_models import ChatOllama from langchain_core.output_parsers 
import StrOutputParser from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnableLambda, RunnablePassthrough diff --git a/templates/sql-pgvector/sql_pgvector/chain.py b/templates/sql-pgvector/sql_pgvector/chain.py index 542a6578124..044d3b071f6 100644 --- a/templates/sql-pgvector/sql_pgvector/chain.py +++ b/templates/sql-pgvector/sql_pgvector/chain.py @@ -1,10 +1,10 @@ import os import re -from langchain.chat_models import ChatOpenAI -from langchain.embeddings import OpenAIEmbeddings from langchain.prompts import ChatPromptTemplate from langchain.sql_database import SQLDatabase +from langchain_community.chat_models import ChatOpenAI +from langchain_community.embeddings import OpenAIEmbeddings from langchain_core.output_parsers import StrOutputParser from langchain_core.pydantic_v1 import BaseModel from langchain_core.runnables import RunnableLambda, RunnablePassthrough diff --git a/templates/sql-research-assistant/sql_research_assistant/search/sql.py b/templates/sql-research-assistant/sql_research_assistant/search/sql.py index 25a68da050d..d12ee51499a 100644 --- a/templates/sql-research-assistant/sql_research_assistant/search/sql.py +++ b/templates/sql-research-assistant/sql_research_assistant/search/sql.py @@ -1,10 +1,10 @@ from pathlib import Path -from langchain.chat_models import ChatOllama, ChatOpenAI from langchain.memory import ConversationBufferMemory from langchain.prompts import ChatPromptTemplate from langchain.pydantic_v1 import BaseModel from langchain.utilities import SQLDatabase +from langchain_community.chat_models import ChatOllama, ChatOpenAI from langchain_core.output_parsers import StrOutputParser from langchain_core.runnables import RunnablePassthrough diff --git a/templates/sql-research-assistant/sql_research_assistant/search/web.py b/templates/sql-research-assistant/sql_research_assistant/search/web.py index 929d4362d7a..c8618315416 100644 --- a/templates/sql-research-assistant/sql_research_assistant/search/web.py +++ b/templates/sql-research-assistant/sql_research_assistant/search/web.py @@ -3,9 +3,9 @@ from typing import Any import requests from bs4 import BeautifulSoup -from langchain.chat_models import ChatOpenAI from langchain.prompts import ChatPromptTemplate from langchain.utilities import DuckDuckGoSearchAPIWrapper +from langchain_community.chat_models import ChatOpenAI from langchain_core.messages import SystemMessage from langchain_core.output_parsers import StrOutputParser from langchain_core.runnables import ( diff --git a/templates/sql-research-assistant/sql_research_assistant/writer.py b/templates/sql-research-assistant/sql_research_assistant/writer.py index 34a167a70b6..d07b2eb5da0 100644 --- a/templates/sql-research-assistant/sql_research_assistant/writer.py +++ b/templates/sql-research-assistant/sql_research_assistant/writer.py @@ -1,5 +1,5 @@ -from langchain.chat_models import ChatOpenAI from langchain.prompts import ChatPromptTemplate +from langchain_community.chat_models import ChatOpenAI from langchain_core.output_parsers import StrOutputParser from langchain_core.runnables import ConfigurableField diff --git a/templates/stepback-qa-prompting/stepback_qa_prompting/chain.py b/templates/stepback-qa-prompting/stepback_qa_prompting/chain.py index eb3c6c87b5b..ef68c405eac 100644 --- a/templates/stepback-qa-prompting/stepback_qa_prompting/chain.py +++ b/templates/stepback-qa-prompting/stepback_qa_prompting/chain.py @@ -1,6 +1,6 @@ -from langchain.chat_models import ChatOpenAI from langchain.prompts import 
ChatPromptTemplate, FewShotChatMessagePromptTemplate from langchain.utilities import DuckDuckGoSearchAPIWrapper +from langchain_community.chat_models import ChatOpenAI from langchain_core.output_parsers import StrOutputParser from langchain_core.runnables import RunnableLambda diff --git a/templates/summarize-anthropic/summarize_anthropic.ipynb b/templates/summarize-anthropic/summarize_anthropic.ipynb index 0c38fd03274..c8a093cc475 100644 --- a/templates/summarize-anthropic/summarize_anthropic.ipynb +++ b/templates/summarize-anthropic/summarize_anthropic.ipynb @@ -36,7 +36,7 @@ "outputs": [], "source": [ "import arxiv\n", - "from langchain.chat_models import ChatAnthropic\n", + "from langchain_community.chat_models import ChatAnthropic\n", "from langchain.document_loaders import ArxivLoader, UnstructuredPDFLoader\n", "\n", "# Load a paper to use\n", diff --git a/templates/summarize-anthropic/summarize_anthropic/chain.py b/templates/summarize-anthropic/summarize_anthropic/chain.py index 36c8a3e2b05..70eb4c59762 100644 --- a/templates/summarize-anthropic/summarize_anthropic/chain.py +++ b/templates/summarize-anthropic/summarize_anthropic/chain.py @@ -1,5 +1,5 @@ from langchain import hub -from langchain.chat_models import ChatAnthropic +from langchain_community.chat_models import ChatAnthropic from langchain_core.output_parsers import StrOutputParser # Create chain diff --git a/templates/vertexai-chuck-norris/vertexai_chuck_norris/chain.py b/templates/vertexai-chuck-norris/vertexai_chuck_norris/chain.py index b09d99c820a..794bd634a9a 100644 --- a/templates/vertexai-chuck-norris/vertexai_chuck_norris/chain.py +++ b/templates/vertexai-chuck-norris/vertexai_chuck_norris/chain.py @@ -1,5 +1,5 @@ -from langchain.chat_models import ChatVertexAI from langchain.prompts import ChatPromptTemplate +from langchain_community.chat_models import ChatVertexAI _prompt = ChatPromptTemplate.from_template( "Tell me a joke about Chuck Norris and {text}" diff --git a/templates/xml-agent/xml_agent/agent.py b/templates/xml-agent/xml_agent/agent.py index dad89a1cf59..6759fa91985 100644 --- a/templates/xml-agent/xml_agent/agent.py +++ b/templates/xml-agent/xml_agent/agent.py @@ -2,10 +2,10 @@ from typing import List, Tuple from langchain.agents import AgentExecutor from langchain.agents.format_scratchpad import format_xml -from langchain.chat_models import ChatAnthropic from langchain.schema import AIMessage, HumanMessage from langchain.tools import DuckDuckGoSearchRun from langchain.tools.render import render_text_description +from langchain_community.chat_models import ChatAnthropic from langchain_core.pydantic_v1 import BaseModel, Field from xml_agent.prompts import conversational_prompt, parse_output