diff --git a/cookbook/Multi_modal_RAG.ipynb b/cookbook/Multi_modal_RAG.ipynb index 87863475f76..ef3d2256d7d 100644 --- a/cookbook/Multi_modal_RAG.ipynb +++ b/cookbook/Multi_modal_RAG.ipynb @@ -64,7 +64,7 @@ "metadata": {}, "outputs": [], "source": [ - "! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)" + "! pip install -U langchain openai langchain-chroma langchain-experimental # (newest versions required for multi-modal)" ] }, { @@ -355,7 +355,7 @@ "\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", - "from langchain_community.vectorstores import Chroma\n", + "from langchain_chroma import Chroma\n", "from langchain_core.documents import Document\n", "from langchain_openai import OpenAIEmbeddings\n", "\n", diff --git a/cookbook/Multi_modal_RAG_google.ipynb b/cookbook/Multi_modal_RAG_google.ipynb index de4489afddd..9f4d6615b68 100644 --- a/cookbook/Multi_modal_RAG_google.ipynb +++ b/cookbook/Multi_modal_RAG_google.ipynb @@ -37,7 +37,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install -U --quiet langchain langchain_community openai chromadb langchain-experimental\n", + "%pip install -U --quiet langchain langchain-chroma langchain-community openai langchain-experimental\n", "%pip install --quiet \"unstructured[all-docs]\" pypdf pillow pydantic lxml pillow matplotlib chromadb tiktoken" ] }, @@ -344,8 +344,8 @@ "\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", + "from langchain_chroma import Chroma\n", "from langchain_community.embeddings import VertexAIEmbeddings\n", - "from langchain_community.vectorstores import Chroma\n", "from langchain_core.documents import Document\n", "\n", "\n", diff --git a/cookbook/RAPTOR.ipynb b/cookbook/RAPTOR.ipynb index a10fd2df6b6..0c2b165d3d6 100644 --- a/cookbook/RAPTOR.ipynb +++ b/cookbook/RAPTOR.ipynb @@ -7,7 
+7,7 @@ "metadata": {}, "outputs": [], "source": [ - "pip install -U langchain umap-learn scikit-learn langchain_community tiktoken langchain-openai langchainhub chromadb langchain-anthropic" + "pip install -U langchain umap-learn scikit-learn langchain_community tiktoken langchain-openai langchainhub langchain-chroma langchain-anthropic" ] }, { @@ -645,7 +645,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.vectorstores import Chroma\n", + "from langchain_chroma import Chroma\n", "\n", "# Initialize all_texts with leaf_texts\n", "all_texts = leaf_texts.copy()\n", diff --git a/cookbook/Semi_Structured_RAG.ipynb b/cookbook/Semi_Structured_RAG.ipynb index a8ac19167ad..2dcf8e57a31 100644 --- a/cookbook/Semi_Structured_RAG.ipynb +++ b/cookbook/Semi_Structured_RAG.ipynb @@ -39,7 +39,7 @@ "metadata": {}, "outputs": [], "source": [ - "! pip install langchain unstructured[all-docs] pydantic lxml langchainhub" + "! pip install langchain langchain-chroma unstructured[all-docs] pydantic lxml langchainhub" ] }, { @@ -320,7 +320,7 @@ "\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", - "from langchain_community.vectorstores import Chroma\n", + "from langchain_chroma import Chroma\n", "from langchain_core.documents import Document\n", "from langchain_openai import OpenAIEmbeddings\n", "\n", diff --git a/cookbook/Semi_structured_and_multi_modal_RAG.ipynb b/cookbook/Semi_structured_and_multi_modal_RAG.ipynb index e797dfea86b..b440826509a 100644 --- a/cookbook/Semi_structured_and_multi_modal_RAG.ipynb +++ b/cookbook/Semi_structured_and_multi_modal_RAG.ipynb @@ -59,7 +59,7 @@ "metadata": {}, "outputs": [], "source": [ - "! pip install langchain unstructured[all-docs] pydantic lxml" + "! 
pip install langchain langchain-chroma unstructured[all-docs] pydantic lxml" ] }, { @@ -375,7 +375,7 @@ "\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", - "from langchain_community.vectorstores import Chroma\n", + "from langchain_chroma import Chroma\n", "from langchain_core.documents import Document\n", "from langchain_openai import OpenAIEmbeddings\n", "\n", diff --git a/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb b/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb index 28316da0a3f..a82c0b4dc89 100644 --- a/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb +++ b/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb @@ -59,7 +59,7 @@ "metadata": {}, "outputs": [], "source": [ - "! pip install langchain unstructured[all-docs] pydantic lxml" + "! pip install langchain langchain-chroma unstructured[all-docs] pydantic lxml" ] }, { @@ -378,8 +378,8 @@ "\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", + "from langchain_chroma import Chroma\n", "from langchain_community.embeddings import GPT4AllEmbeddings\n", - "from langchain_community.vectorstores import Chroma\n", "from langchain_core.documents import Document\n", "\n", "# The vectorstore to use to index the child chunks\n", diff --git a/cookbook/advanced_rag_eval.ipynb b/cookbook/advanced_rag_eval.ipynb index deac4673ca6..3971999956c 100644 --- a/cookbook/advanced_rag_eval.ipynb +++ b/cookbook/advanced_rag_eval.ipynb @@ -19,7 +19,7 @@ "metadata": {}, "outputs": [], "source": [ - "! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)" + "! 
pip install -U langchain openai langchain-chroma langchain-experimental # (newest versions required for multi-modal)" ] }, { @@ -132,7 +132,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.vectorstores import Chroma\n", + "from langchain_chroma import Chroma\n", "from langchain_openai import OpenAIEmbeddings\n", "\n", "baseline = Chroma.from_texts(\n", diff --git a/cookbook/agent_vectorstore.ipynb b/cookbook/agent_vectorstore.ipynb index a997bd0aee2..8e6e37697f0 100644 --- a/cookbook/agent_vectorstore.ipynb +++ b/cookbook/agent_vectorstore.ipynb @@ -28,7 +28,7 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQA\n", - "from langchain_community.vectorstores import Chroma\n", + "from langchain_chroma import Chroma\n", "from langchain_openai import OpenAI, OpenAIEmbeddings\n", "from langchain_text_splitters import CharacterTextSplitter\n", "\n", diff --git a/cookbook/airbyte_github.ipynb b/cookbook/airbyte_github.ipynb index 349c235be69..306ea742687 100644 --- a/cookbook/airbyte_github.ipynb +++ b/cookbook/airbyte_github.ipynb @@ -14,7 +14,7 @@ } ], "source": [ - "%pip install -qU langchain-airbyte" + "%pip install -qU langchain-airbyte langchain-chroma" ] }, { @@ -123,7 +123,7 @@ "outputs": [], "source": [ "import tiktoken\n", - "from langchain_community.vectorstores import Chroma\n", + "from langchain_chroma import Chroma\n", "from langchain_openai import OpenAIEmbeddings\n", "\n", "enc = tiktoken.get_encoding(\"cl100k_base\")\n", diff --git a/cookbook/docugami_xml_kg_rag.ipynb b/cookbook/docugami_xml_kg_rag.ipynb index a9c8607935e..89d66c12fbe 100644 --- a/cookbook/docugami_xml_kg_rag.ipynb +++ b/cookbook/docugami_xml_kg_rag.ipynb @@ -39,7 +39,7 @@ "metadata": {}, "outputs": [], "source": [ - "! pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub chromadb hnswlib --upgrade --quiet" + "! 
pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub langchain-chroma hnswlib --upgrade --quiet" ] }, { @@ -547,7 +547,7 @@ "\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", - "from langchain_community.vectorstores.chroma import Chroma\n", + "from langchain_chroma import Chroma\n", "from langchain_core.documents import Document\n", "from langchain_openai import OpenAIEmbeddings\n", "\n", diff --git a/cookbook/fireworks_rag.ipynb b/cookbook/fireworks_rag.ipynb index 563532c99d5..1e1e826f058 100644 --- a/cookbook/fireworks_rag.ipynb +++ b/cookbook/fireworks_rag.ipynb @@ -84,7 +84,7 @@ } ], "source": [ - "%pip install --quiet pypdf chromadb tiktoken openai \n", + "%pip install --quiet pypdf langchain-chroma tiktoken openai \n", "%pip uninstall -y langchain-fireworks\n", "%pip install --editable /mnt/disks/data/langchain/libs/partners/fireworks" ] @@ -138,7 +138,7 @@ "all_splits = text_splitter.split_documents(data)\n", "\n", "# Add to vectorDB\n", - "from langchain_community.vectorstores import Chroma\n", + "from langchain_chroma import Chroma\n", "from langchain_fireworks.embeddings import FireworksEmbeddings\n", "\n", "vectorstore = Chroma.from_documents(\n", diff --git a/cookbook/hypothetical_document_embeddings.ipynb b/cookbook/hypothetical_document_embeddings.ipynb index d421b6eaf59..3a52c64340c 100644 --- a/cookbook/hypothetical_document_embeddings.ipynb +++ b/cookbook/hypothetical_document_embeddings.ipynb @@ -170,7 +170,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.vectorstores import Chroma\n", + "from langchain_chroma import Chroma\n", "from langchain_text_splitters import CharacterTextSplitter\n", "\n", "with open(\"../../state_of_the_union.txt\") as f:\n", diff --git a/cookbook/langgraph_agentic_rag.ipynb b/cookbook/langgraph_agentic_rag.ipynb index ecd65ba907f..948158611dc 100644 --- a/cookbook/langgraph_agentic_rag.ipynb +++ 
b/cookbook/langgraph_agentic_rag.ipynb @@ -7,7 +7,7 @@ "metadata": {}, "outputs": [], "source": [ - "! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph" + "! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph" ] }, { @@ -30,8 +30,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "from langchain_chroma import Chroma\n", "from langchain_community.document_loaders import WebBaseLoader\n", - "from langchain_community.vectorstores import Chroma\n", "from langchain_openai import OpenAIEmbeddings\n", "\n", "urls = [\n", diff --git a/cookbook/langgraph_crag.ipynb b/cookbook/langgraph_crag.ipynb index 03845311160..607241c1a04 100644 --- a/cookbook/langgraph_crag.ipynb +++ b/cookbook/langgraph_crag.ipynb @@ -7,7 +7,7 @@ "metadata": {}, "outputs": [], "source": [ - "! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph tavily-python" + "! 
pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph tavily-python" ] }, { @@ -77,8 +77,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "from langchain_chroma import Chroma\n", "from langchain_community.document_loaders import WebBaseLoader\n", - "from langchain_community.vectorstores import Chroma\n", "from langchain_openai import OpenAIEmbeddings\n", "\n", "urls = [\n", @@ -180,8 +180,8 @@ "from langchain.output_parsers.openai_tools import PydanticToolsParser\n", "from langchain.prompts import PromptTemplate\n", "from langchain.schema import Document\n", + "from langchain_chroma import Chroma\n", "from langchain_community.tools.tavily_search import TavilySearchResults\n", - "from langchain_community.vectorstores import Chroma\n", "from langchain_core.messages import BaseMessage, FunctionMessage\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.pydantic_v1 import BaseModel, Field\n", diff --git a/cookbook/langgraph_self_rag.ipynb b/cookbook/langgraph_self_rag.ipynb index 4790a5850fd..c6b1e754066 100644 --- a/cookbook/langgraph_self_rag.ipynb +++ b/cookbook/langgraph_self_rag.ipynb @@ -7,7 +7,7 @@ "metadata": {}, "outputs": [], "source": [ - "! pip install langchain_community tiktoken langchain-openai langchainhub chromadb langchain langgraph" + "! 
pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph" ] }, { @@ -86,8 +86,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "from langchain_chroma import Chroma\n", "from langchain_community.document_loaders import WebBaseLoader\n", - "from langchain_community.vectorstores import Chroma\n", "from langchain_openai import OpenAIEmbeddings\n", "\n", "urls = [\n", @@ -188,7 +188,7 @@ "from langchain.output_parsers import PydanticOutputParser\n", "from langchain.output_parsers.openai_tools import PydanticToolsParser\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.vectorstores import Chroma\n", + "from langchain_chroma import Chroma\n", "from langchain_core.messages import BaseMessage, FunctionMessage\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.pydantic_v1 import BaseModel, Field\n", diff --git a/cookbook/multi_modal_RAG_chroma.ipynb b/cookbook/multi_modal_RAG_chroma.ipynb index 1f2d2685029..b2460ffb06f 100644 --- a/cookbook/multi_modal_RAG_chroma.ipynb +++ b/cookbook/multi_modal_RAG_chroma.ipynb @@ -58,7 +58,7 @@ "metadata": {}, "outputs": [], "source": [ - "! pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)" + "! 
pip install -U langchain openai langchain-chroma langchain-experimental # (newest versions required for multi-modal)" ] }, { @@ -187,7 +187,7 @@ "\n", "import chromadb\n", "import numpy as np\n", - "from langchain_community.vectorstores import Chroma\n", + "from langchain_chroma import Chroma\n", "from langchain_experimental.open_clip import OpenCLIPEmbeddings\n", "from PIL import Image as _PILImage\n", "\n", diff --git a/cookbook/nomic_embedding_rag.ipynb b/cookbook/nomic_embedding_rag.ipynb index 8a01fec8db8..473a2737d75 100644 --- a/cookbook/nomic_embedding_rag.ipynb +++ b/cookbook/nomic_embedding_rag.ipynb @@ -58,7 +58,7 @@ "metadata": {}, "outputs": [], "source": [ - "! pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain" + "! pip install -U langchain-nomic langchain-chroma langchain-community tiktoken langchain-openai langchain" ] }, { @@ -167,7 +167,7 @@ "source": [ "import os\n", "\n", - "from langchain_community.vectorstores import Chroma\n", + "from langchain_chroma import Chroma\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", "from langchain_nomic import NomicEmbeddings\n", diff --git a/cookbook/nomic_multimodal_rag.ipynb b/cookbook/nomic_multimodal_rag.ipynb index ba8a77ace29..bd273e55520 100644 --- a/cookbook/nomic_multimodal_rag.ipynb +++ b/cookbook/nomic_multimodal_rag.ipynb @@ -56,7 +56,7 @@ }, "outputs": [], "source": [ - "! pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain # (newest versions required for multi-modal)" + "! 
pip install -U langchain-nomic langchain-chroma langchain-community tiktoken langchain-openai langchain # (newest versions required for multi-modal)" ] }, { @@ -194,7 +194,7 @@ "\n", "import chromadb\n", "import numpy as np\n", - "from langchain_community.vectorstores import Chroma\n", + "from langchain_chroma import Chroma\n", "from langchain_nomic import NomicEmbeddings\n", "from PIL import Image as _PILImage\n", "\n", diff --git a/cookbook/openai_functions_retrieval_qa.ipynb b/cookbook/openai_functions_retrieval_qa.ipynb index 3d3b4d5b018..91c0359375f 100644 --- a/cookbook/openai_functions_retrieval_qa.ipynb +++ b/cookbook/openai_functions_retrieval_qa.ipynb @@ -20,8 +20,8 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQA\n", + "from langchain_chroma import Chroma\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.vectorstores import Chroma\n", "from langchain_openai import OpenAIEmbeddings\n", "from langchain_text_splitters import CharacterTextSplitter" ] diff --git a/cookbook/optimization.ipynb b/cookbook/optimization.ipynb index 2c3faaabc09..2a039cf4eb7 100644 --- a/cookbook/optimization.ipynb +++ b/cookbook/optimization.ipynb @@ -80,7 +80,7 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.vectorstores import Chroma\n", + "from langchain_chroma import Chroma\n", "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" diff --git a/cookbook/rag_with_quantized_embeddings.ipynb b/cookbook/rag_with_quantized_embeddings.ipynb index 001f2b47278..8fe825484df 100644 --- a/cookbook/rag_with_quantized_embeddings.ipynb +++ b/cookbook/rag_with_quantized_embeddings.ipynb @@ -36,10 +36,10 @@ "from bs4 import BeautifulSoup as Soup\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryByteStore, LocalFileStore\n", + "from langchain_chroma import Chroma\n", "from 
langchain_community.document_loaders.recursive_url_loader import (\n", " RecursiveUrlLoader,\n", ")\n", - "from langchain_community.vectorstores import Chroma\n", "\n", "# For our example, we'll load docs from the web\n", "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", diff --git a/cookbook/sql_db_qa.mdx b/cookbook/sql_db_qa.mdx index f71e6515fc5..8b61c42e942 100644 --- a/cookbook/sql_db_qa.mdx +++ b/cookbook/sql_db_qa.mdx @@ -740,7 +740,7 @@ Even this relatively large model will most likely fail to generate more complica ```bash -poetry run pip install pyyaml chromadb +poetry run pip install pyyaml langchain-chroma import yaml ``` @@ -994,7 +994,7 @@ from langchain.prompts import FewShotPromptTemplate, PromptTemplate from langchain.chains.sql_database.prompt import _sqlite_prompt, PROMPT_SUFFIX from langchain_huggingface import HuggingFaceEmbeddings from langchain.prompts.example_selector.semantic_similarity import SemanticSimilarityExampleSelector -from langchain_community.vectorstores import Chroma +from langchain_chroma import Chroma example_prompt = PromptTemplate( input_variables=["table_info", "input", "sql_cmd", "sql_result", "answer"], diff --git a/cookbook/together_ai.ipynb b/cookbook/together_ai.ipynb index 346f349eb45..1bb60448573 100644 --- a/cookbook/together_ai.ipynb +++ b/cookbook/together_ai.ipynb @@ -22,7 +22,7 @@ "metadata": {}, "outputs": [], "source": [ - "! pip install --quiet pypdf chromadb tiktoken openai langchain-together" + "! 
pip install --quiet pypdf tiktoken openai langchain-chroma langchain-together" ] }, { @@ -45,8 +45,8 @@ "all_splits = text_splitter.split_documents(data)\n", "\n", "# Add to vectorDB\n", + "from langchain_chroma import Chroma\n", "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Chroma\n", "\n", "\"\"\"\n", "from langchain_together.embeddings import TogetherEmbeddings\n", diff --git a/docs/docs/integrations/document_loaders/docugami.ipynb b/docs/docs/integrations/document_loaders/docugami.ipynb index e543aa2bb04..de337bdf72b 100644 --- a/docs/docs/integrations/document_loaders/docugami.ipynb +++ b/docs/docs/integrations/document_loaders/docugami.ipynb @@ -162,7 +162,7 @@ "metadata": {}, "outputs": [], "source": [ - "!poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib" + "!poetry run pip install --upgrade langchain-openai tiktoken langchain-chroma hnswlib" ] }, { @@ -211,7 +211,7 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQA\n", - "from langchain_community.vectorstores.chroma import Chroma\n", + "from langchain_chroma import Chroma\n", "from langchain_openai import OpenAI, OpenAIEmbeddings\n", "\n", "embedding = OpenAIEmbeddings()\n", @@ -365,7 +365,7 @@ "source": [ "from langchain.chains.query_constructor.schema import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.vectorstores.chroma import Chroma\n", + "from langchain_chroma import Chroma\n", "\n", "EXCLUDE_KEYS = [\"id\", \"xpath\", \"structure\"]\n", "metadata_field_info = [\n", @@ -540,7 +540,7 @@ "source": [ "from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType\n", "from langchain.storage import InMemoryStore\n", - "from langchain_community.vectorstores.chroma import Chroma\n", + "from langchain_chroma import Chroma\n", "from langchain_openai import OpenAIEmbeddings\n", "\n", "# The vectorstore to use 
to index the child chunks\n", diff --git a/libs/community/langchain_community/vectorstores/chroma.py b/libs/community/langchain_community/vectorstores/chroma.py index 503e5dcc5c6..71a30c98788 100644 --- a/libs/community/langchain_community/vectorstores/chroma.py +++ b/libs/community/langchain_community/vectorstores/chroma.py @@ -50,6 +50,7 @@ def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]: ] +@deprecated(since="0.2.9", removal="0.4", alternative_import="langchain_chroma.Chroma") class Chroma(VectorStore): """`ChromaDB` vector store. diff --git a/libs/community/tests/integration_tests/embeddings/test_sentence_transformer.py b/libs/community/tests/integration_tests/embeddings/test_sentence_transformer.py index 3890b2fd3c8..ce9092d2cfc 100644 --- a/libs/community/tests/integration_tests/embeddings/test_sentence_transformer.py +++ b/libs/community/tests/integration_tests/embeddings/test_sentence_transformer.py @@ -1,10 +1,11 @@ # flake8: noqa """Test sentence_transformer embeddings.""" +from langchain_core.vectorstores import InMemoryVectorStore + from langchain_community.embeddings.sentence_transformer import ( SentenceTransformerEmbeddings, ) -from langchain_community.vectorstores import Chroma def test_sentence_transformer_embedding_documents() -> None: @@ -34,7 +35,7 @@ def test_sentence_transformer_db_query() -> None: query = "what the foo is a bar?" 
query_vector = embedding.embed_query(query) assert len(query_vector) == 384 - db = Chroma(embedding_function=embedding) + db = InMemoryVectorStore(embedding=embedding) db.add_texts(texts) docs = db.similarity_search_by_vector(query_vector, k=2) assert docs[0].page_content == "we will foo your bar until you can't foo any more" diff --git a/libs/community/tests/integration_tests/retrievers/test_merger_retriever.py b/libs/community/tests/integration_tests/retrievers/test_merger_retriever.py index b5b575e7c3c..d3d1c6d1d24 100644 --- a/libs/community/tests/integration_tests/retrievers/test_merger_retriever.py +++ b/libs/community/tests/integration_tests/retrievers/test_merger_retriever.py @@ -1,7 +1,7 @@ from langchain.retrievers.merger_retriever import MergerRetriever +from langchain_core.vectorstores import InMemoryVectorStore from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import Chroma def test_merger_retriever_get_relevant_docs() -> None: @@ -17,12 +17,12 @@ def test_merger_retriever_get_relevant_docs() -> None: "Real stupidity beats artificial intelligence every time. TP", ] embeddings = OpenAIEmbeddings() - retriever_a = Chroma.from_texts(texts_group_a, embedding=embeddings).as_retriever( - search_kwargs={"k": 1} - ) - retriever_b = Chroma.from_texts(texts_group_b, embedding=embeddings).as_retriever( - search_kwargs={"k": 1} - ) + retriever_a = InMemoryVectorStore.from_texts( + texts_group_a, embedding=embeddings + ).as_retriever(search_kwargs={"k": 1}) + retriever_b = InMemoryVectorStore.from_texts( + texts_group_b, embedding=embeddings + ).as_retriever(search_kwargs={"k": 1}) # The Lord of the Retrievers. 
lotr = MergerRetriever(retrievers=[retriever_a, retriever_b]) diff --git a/libs/community/tests/integration_tests/test_long_context_reorder.py b/libs/community/tests/integration_tests/test_long_context_reorder.py index b27de304ced..22bc59a5cc5 100644 --- a/libs/community/tests/integration_tests/test_long_context_reorder.py +++ b/libs/community/tests/integration_tests/test_long_context_reorder.py @@ -1,10 +1,11 @@ """Integration test for doc reordering.""" +from langchain_core.vectorstores import InMemoryVectorStore + from langchain_community.document_transformers.long_context_reorder import ( LongContextReorder, ) from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import Chroma def test_long_context_reorder() -> None: @@ -22,9 +23,9 @@ def test_long_context_reorder() -> None: "Larry Bird was an iconic NBA player.", ] embeddings = OpenAIEmbeddings() - retriever = Chroma.from_texts(texts, embedding=embeddings).as_retriever( - search_kwargs={"k": 10} - ) + retriever = InMemoryVectorStore.from_texts( + texts, embedding=embeddings + ).as_retriever(search_kwargs={"k": 10}) reordering = LongContextReorder() docs = retriever.invoke("Tell me about the Celtics") actual = reordering.transform_documents(docs) diff --git a/libs/community/tests/unit_tests/chains/test_pebblo_retrieval.py b/libs/community/tests/unit_tests/chains/test_pebblo_retrieval.py index 155b1e85dce..3b49a0d5174 100644 --- a/libs/community/tests/unit_tests/chains/test_pebblo_retrieval.py +++ b/libs/community/tests/unit_tests/chains/test_pebblo_retrieval.py @@ -11,7 +11,11 @@ from langchain_core.callbacks import ( CallbackManagerForRetrieverRun, ) from langchain_core.documents import Document -from langchain_core.vectorstores import VectorStore, VectorStoreRetriever +from langchain_core.vectorstores import ( + InMemoryVectorStore, + VectorStore, + VectorStoreRetriever, +) from langchain_community.chains import PebbloRetrievalQA from 
langchain_community.chains.pebblo_retrieval.models import ( @@ -19,7 +23,6 @@ from langchain_community.chains.pebblo_retrieval.models import ( ChainInput, SemanticContext, ) -from langchain_community.vectorstores.chroma import Chroma from langchain_community.vectorstores.pinecone import Pinecone from tests.unit_tests.llms.fake_llm import FakeLLM @@ -49,8 +52,8 @@ def unsupported_retriever() -> FakeRetriever: """ retriever = FakeRetriever() retriever.search_kwargs = {} - # Set the class of vectorstore to Chroma - retriever.vectorstore.__class__ = Chroma + # Set the class of vectorstore + retriever.vectorstore.__class__ = InMemoryVectorStore return retriever diff --git a/libs/langchain/langchain/memory/vectorstore_token_buffer_memory.py b/libs/langchain/langchain/memory/vectorstore_token_buffer_memory.py index d4676a7965e..f611c04903d 100644 --- a/libs/langchain/langchain/memory/vectorstore_token_buffer_memory.py +++ b/libs/langchain/langchain/memory/vectorstore_token_buffer_memory.py @@ -67,7 +67,7 @@ class ConversationVectorStoreTokenBufferMemory(ConversationTokenBufferMemory): from langchain.memory.token_buffer_vectorstore_memory import ( ConversationVectorStoreTokenBufferMemory ) - from langchain_community.vectorstores import Chroma + from langchain_chroma import Chroma from langchain_community.embeddings import HuggingFaceInstructEmbeddings from langchain_openai import OpenAI diff --git a/libs/langchain/langchain/retrievers/parent_document_retriever.py b/libs/langchain/langchain/retrievers/parent_document_retriever.py index 1e1fb2df250..c7ede031f5d 100644 --- a/libs/langchain/langchain/retrievers/parent_document_retriever.py +++ b/libs/langchain/langchain/retrievers/parent_document_retriever.py @@ -31,8 +31,8 @@ class ParentDocumentRetriever(MultiVectorRetriever): .. 
code-block:: python + from langchain_chroma import Chroma from langchain_community.embeddings import OpenAIEmbeddings - from langchain_community.vectorstores import Chroma from langchain_text_splitters import RecursiveCharacterTextSplitter from langchain.storage import InMemoryStore diff --git a/templates/cohere-librarian/cohere_librarian/blurb_matcher.py b/templates/cohere-librarian/cohere_librarian/blurb_matcher.py index 50e63827a05..9a446801c86 100644 --- a/templates/cohere-librarian/cohere_librarian/blurb_matcher.py +++ b/templates/cohere-librarian/cohere_librarian/blurb_matcher.py @@ -1,8 +1,8 @@ import csv from langchain.chains.question_answering import load_qa_chain +from langchain_chroma import Chroma from langchain_community.embeddings import CohereEmbeddings -from langchain_community.vectorstores import Chroma from langchain_core.prompts import PromptTemplate from .chat import chat diff --git a/templates/cohere-librarian/pyproject.toml b/templates/cohere-librarian/pyproject.toml index bd393ff6cc4..5b8485f1ca3 100644 --- a/templates/cohere-librarian/pyproject.toml +++ b/templates/cohere-librarian/pyproject.toml @@ -9,7 +9,7 @@ readme = "README.md" python = ">=3.8.1,<4.0" langchain = "^0.1" cohere = "^4.37" -chromadb = "^0.4.18" +langchain-chroma = "^0.1.2" [tool.poetry.group.dev.dependencies] langchain-cli = ">=0.0.21" diff --git a/templates/hyde/hyde/chain.py b/templates/hyde/hyde/chain.py index 537a4d4501f..1f24323fe4b 100644 --- a/templates/hyde/hyde/chain.py +++ b/templates/hyde/hyde/chain.py @@ -1,6 +1,6 @@ +from langchain_chroma import Chroma from langchain_community.chat_models import ChatOpenAI from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import Chroma from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel diff --git a/templates/hyde/pyproject.toml b/templates/hyde/pyproject.toml index 
d49f46b10bb..fe57dd4f7c9 100644 --- a/templates/hyde/pyproject.toml +++ b/templates/hyde/pyproject.toml @@ -9,7 +9,7 @@ readme = "README.md" python = ">=3.8.1,<4.0" langchain = "^0.1" openai = "<2" -chromadb = "^0.4.15" +langchain-chroma = "^0.1.2" tiktoken = "^0.5.1" langchain-text-splitters = ">=0.0.1,<0.1" diff --git a/templates/intel-rag-xeon/ingest.py b/templates/intel-rag-xeon/ingest.py index a3f6860fa0d..1d293630957 100644 --- a/templates/intel-rag-xeon/ingest.py +++ b/templates/intel-rag-xeon/ingest.py @@ -1,9 +1,9 @@ import os from langchain.text_splitter import RecursiveCharacterTextSplitter +from langchain_chroma import Chroma from langchain_community.document_loaders import UnstructuredFileLoader from langchain_community.embeddings import HuggingFaceEmbeddings -from langchain_community.vectorstores import Chroma from langchain_core.documents import Document diff --git a/templates/intel-rag-xeon/intel_rag_xeon/chain.py b/templates/intel-rag-xeon/intel_rag_xeon/chain.py index f968abaa285..0312003136f 100644 --- a/templates/intel-rag-xeon/intel_rag_xeon/chain.py +++ b/templates/intel-rag-xeon/intel_rag_xeon/chain.py @@ -1,7 +1,7 @@ from langchain.callbacks import streaming_stdout +from langchain_chroma import Chroma from langchain_community.embeddings import HuggingFaceEmbeddings from langchain_community.llms import HuggingFaceEndpoint -from langchain_community.vectorstores import Chroma from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel diff --git a/templates/intel-rag-xeon/pyproject.toml b/templates/intel-rag-xeon/pyproject.toml index b07059d473a..a753ed375ae 100644 --- a/templates/intel-rag-xeon/pyproject.toml +++ b/templates/intel-rag-xeon/pyproject.toml @@ -14,7 +14,7 @@ fastapi = "^0.104.0" sse-starlette = "^1.6.5" sentence-transformers = "2.2.2" tiktoken = ">=0.5.1" -chromadb = ">=0.4.14" +langchain-chroma = "^0.1.2" beautifulsoup4 = 
">=4.12.2" [tool.poetry.dependencies.unstructured] diff --git a/templates/propositional-retrieval/propositional_retrieval/storage.py b/templates/propositional-retrieval/propositional_retrieval/storage.py index 4c8aadb45b0..bed32dba5d0 100644 --- a/templates/propositional-retrieval/propositional_retrieval/storage.py +++ b/templates/propositional-retrieval/propositional_retrieval/storage.py @@ -3,8 +3,8 @@ from pathlib import Path from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import LocalFileStore +from langchain_chroma import Chroma from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import Chroma logging.basicConfig(level=logging.INFO) diff --git a/templates/propositional-retrieval/pyproject.toml b/templates/propositional-retrieval/pyproject.toml index 615f3317d1c..f691bf1260d 100644 --- a/templates/propositional-retrieval/pyproject.toml +++ b/templates/propositional-retrieval/pyproject.toml @@ -12,7 +12,7 @@ python = ">=3.8.1,<4.0" langchain = "^0.1" openai = "<2" tiktoken = ">=0.5.1" -chromadb = ">=0.4.14" +langchain-chroma = "^0.1.2" bs4 = "^0.0.1" langchain-text-splitters = ">=0.0.1,<0.1" diff --git a/templates/rag-chroma-multi-modal-multi-vector/ingest.py b/templates/rag-chroma-multi-modal-multi-vector/ingest.py index 9447ca478ec..01a81d95b18 100644 --- a/templates/rag-chroma-multi-modal-multi-vector/ingest.py +++ b/templates/rag-chroma-multi-modal-multi-vector/ingest.py @@ -8,9 +8,9 @@ from pathlib import Path import pypdfium2 as pdfium from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import LocalFileStore, UpstashRedisByteStore +from langchain_chroma import Chroma from langchain_community.chat_models import ChatOpenAI from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_core.messages import HumanMessage from 
PIL import Image diff --git a/templates/rag-chroma-multi-modal-multi-vector/pyproject.toml b/templates/rag-chroma-multi-modal-multi-vector/pyproject.toml index e75877f6e72..62b233674e8 100644 --- a/templates/rag-chroma-multi-modal-multi-vector/pyproject.toml +++ b/templates/rag-chroma-multi-modal-multi-vector/pyproject.toml @@ -12,7 +12,7 @@ python = ">=3.8.1,<4.0" langchain = ">=0.0.353,<0.2" openai = "<2" tiktoken = ">=0.5.1" -chromadb = ">=0.4.14" +langchain-chroma = "^0.1.2" pypdfium2 = ">=4.20.0" langchain-experimental = ">=0.0.43" upstash-redis = ">=1.0.0" diff --git a/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/chain.py b/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/chain.py index 7650b337dce..387fc670033 100644 --- a/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/chain.py +++ b/templates/rag-chroma-multi-modal-multi-vector/rag_chroma_multi_modal_multi_vector/chain.py @@ -6,9 +6,9 @@ from pathlib import Path from langchain.pydantic_v1 import BaseModel from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import LocalFileStore, UpstashRedisByteStore +from langchain_chroma import Chroma from langchain_community.chat_models import ChatOpenAI from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_core.messages import HumanMessage from langchain_core.output_parsers import StrOutputParser diff --git a/templates/rag-chroma-multi-modal/ingest.py b/templates/rag-chroma-multi-modal/ingest.py index 67c5f070c5b..60fb8369bac 100644 --- a/templates/rag-chroma-multi-modal/ingest.py +++ b/templates/rag-chroma-multi-modal/ingest.py @@ -2,7 +2,7 @@ import os from pathlib import Path import pypdfium2 as pdfium -from langchain_community.vectorstores import Chroma +from langchain_chroma import Chroma from 
langchain_experimental.open_clip import OpenCLIPEmbeddings diff --git a/templates/rag-chroma-multi-modal/pyproject.toml b/templates/rag-chroma-multi-modal/pyproject.toml index e95e8f80cc6..202584eb410 100644 --- a/templates/rag-chroma-multi-modal/pyproject.toml +++ b/templates/rag-chroma-multi-modal/pyproject.toml @@ -12,7 +12,7 @@ python = ">=3.8.1,<4.0" langchain = ">=0.0.353,<0.2" openai = "<2" tiktoken = ">=0.5.1" -chromadb = ">=0.4.14" +langchain-chroma = "^0.1.2" open-clip-torch = ">=2.23.0" torch = ">=2.1.0" pypdfium2 = ">=4.20.0" diff --git a/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/chain.py b/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/chain.py index 4773494af00..c227fbac9c7 100644 --- a/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/chain.py +++ b/templates/rag-chroma-multi-modal/rag_chroma_multi_modal/chain.py @@ -2,8 +2,8 @@ import base64 import io from pathlib import Path +from langchain_chroma import Chroma from langchain_community.chat_models import ChatOpenAI -from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_core.messages import HumanMessage from langchain_core.output_parsers import StrOutputParser diff --git a/templates/rag-chroma-private/pyproject.toml b/templates/rag-chroma-private/pyproject.toml index 9fd7c4d3785..36b9a270d65 100644 --- a/templates/rag-chroma-private/pyproject.toml +++ b/templates/rag-chroma-private/pyproject.toml @@ -11,7 +11,7 @@ readme = "README.md" python = ">=3.8.1,<4.0" langchain = "^0.1" tiktoken = ">=0.5.1" -chromadb = ">=0.4.14" +langchain-chroma = "^0.1.2" gpt4all = ">=1.0.8" beautifulsoup4 = ">=4.12.2" langchain-text-splitters = ">=0.0.1,<0.1" diff --git a/templates/rag-chroma-private/rag_chroma_private/chain.py b/templates/rag-chroma-private/rag_chroma_private/chain.py index 378c312686c..3fd02b26922 100644 --- a/templates/rag-chroma-private/rag_chroma_private/chain.py +++ 
b/templates/rag-chroma-private/rag_chroma_private/chain.py @@ -1,8 +1,8 @@ # Load +from langchain_chroma import Chroma from langchain_community.chat_models import ChatOllama from langchain_community.document_loaders import WebBaseLoader from langchain_community.embeddings import GPT4AllEmbeddings -from langchain_community.vectorstores import Chroma from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel diff --git a/templates/rag-chroma/pyproject.toml b/templates/rag-chroma/pyproject.toml index 863b9176ce7..012bbabace0 100644 --- a/templates/rag-chroma/pyproject.toml +++ b/templates/rag-chroma/pyproject.toml @@ -12,7 +12,7 @@ python = ">=3.8.1,<4.0" langchain = "^0.1" openai = "<2" tiktoken = ">=0.5.1" -chromadb = ">=0.4.14" +langchain-chroma = "^0.1.2" langchain-text-splitters = ">=0.0.1,<0.1" [tool.poetry.group.dev.dependencies] diff --git a/templates/rag-chroma/rag_chroma/chain.py b/templates/rag-chroma/rag_chroma/chain.py index c920363b5a5..acce539954c 100644 --- a/templates/rag-chroma/rag_chroma/chain.py +++ b/templates/rag-chroma/rag_chroma/chain.py @@ -1,6 +1,6 @@ +from langchain_chroma import Chroma from langchain_community.chat_models import ChatOpenAI from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import Chroma from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel diff --git a/templates/rag-codellama-fireworks/pyproject.toml b/templates/rag-codellama-fireworks/pyproject.toml index 362ffd56712..99bd6d84562 100644 --- a/templates/rag-codellama-fireworks/pyproject.toml +++ b/templates/rag-codellama-fireworks/pyproject.toml @@ -12,7 +12,7 @@ python = ">=3.9,<4.0" langchain = "^0.1" gpt4all = ">=1.0.8" tiktoken = ">=0.5.1" -chromadb = ">=0.4.14" +langchain-chroma = "^0.1.2" fireworks-ai = ">=0.6.0" 
langchain-text-splitters = ">=0.0.1,<0.1" diff --git a/templates/rag-codellama-fireworks/rag_codellama_fireworks/chain.py b/templates/rag-codellama-fireworks/rag_codellama_fireworks/chain.py index 4e0d7d5af59..61c2c7aad5d 100644 --- a/templates/rag-codellama-fireworks/rag_codellama_fireworks/chain.py +++ b/templates/rag-codellama-fireworks/rag_codellama_fireworks/chain.py @@ -1,11 +1,11 @@ import os from git import Repo +from langchain_chroma import Chroma from langchain_community.document_loaders.generic import GenericLoader from langchain_community.document_loaders.parsers import LanguageParser from langchain_community.embeddings import GPT4AllEmbeddings from langchain_community.llms.fireworks import Fireworks -from langchain_community.vectorstores import Chroma from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel diff --git a/templates/rag-gemini-multi-modal/ingest.py b/templates/rag-gemini-multi-modal/ingest.py index 67c5f070c5b..60fb8369bac 100644 --- a/templates/rag-gemini-multi-modal/ingest.py +++ b/templates/rag-gemini-multi-modal/ingest.py @@ -2,7 +2,7 @@ import os from pathlib import Path import pypdfium2 as pdfium -from langchain_community.vectorstores import Chroma +from langchain_chroma import Chroma from langchain_experimental.open_clip import OpenCLIPEmbeddings diff --git a/templates/rag-gemini-multi-modal/pyproject.toml b/templates/rag-gemini-multi-modal/pyproject.toml index bc14ef99fbc..96275806d39 100644 --- a/templates/rag-gemini-multi-modal/pyproject.toml +++ b/templates/rag-gemini-multi-modal/pyproject.toml @@ -12,7 +12,7 @@ python = ">=3.9,<4.0" langchain = ">=0.0.353,<0.2" openai = "<2" tiktoken = ">=0.5.1" -chromadb = ">=0.4.14" +langchain-chroma = "^0.1.2" open-clip-torch = ">=2.23.0" torch = ">=2.1.0" pypdfium2 = ">=4.20.0" diff --git a/templates/rag-gemini-multi-modal/rag_gemini_multi_modal/chain.py 
b/templates/rag-gemini-multi-modal/rag_gemini_multi_modal/chain.py index 51991d066af..cf731429578 100644 --- a/templates/rag-gemini-multi-modal/rag_gemini_multi_modal/chain.py +++ b/templates/rag-gemini-multi-modal/rag_gemini_multi_modal/chain.py @@ -2,7 +2,7 @@ import base64 import io from pathlib import Path -from langchain_community.vectorstores import Chroma +from langchain_chroma import Chroma from langchain_core.documents import Document from langchain_core.messages import HumanMessage from langchain_core.output_parsers import StrOutputParser diff --git a/templates/rag-gpt-crawler/pyproject.toml b/templates/rag-gpt-crawler/pyproject.toml index 71dadccddc4..4c64bc03d4f 100644 --- a/templates/rag-gpt-crawler/pyproject.toml +++ b/templates/rag-gpt-crawler/pyproject.toml @@ -12,7 +12,7 @@ python = ">=3.8.1,<4.0" langchain = "^0.1" openai = "<2" tiktoken = ">=0.5.1" -chromadb = ">=0.4.14" +langchain-chroma = "^0.1.2" langchain-text-splitters = ">=0.0.1,<0.1" [tool.poetry.group.dev.dependencies] diff --git a/templates/rag-gpt-crawler/rag_gpt_crawler/chain.py b/templates/rag-gpt-crawler/rag_gpt_crawler/chain.py index 0c312c31780..b5934cfb522 100644 --- a/templates/rag-gpt-crawler/rag_gpt_crawler/chain.py +++ b/templates/rag-gpt-crawler/rag_gpt_crawler/chain.py @@ -1,9 +1,9 @@ import json from pathlib import Path +from langchain_chroma import Chroma from langchain_community.chat_models import ChatOpenAI from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate diff --git a/templates/rag-multi-modal-local/ingest.py b/templates/rag-multi-modal-local/ingest.py index 1b2faa622f7..c1e6ba88589 100644 --- a/templates/rag-multi-modal-local/ingest.py +++ b/templates/rag-multi-modal-local/ingest.py @@ -1,7 +1,7 @@ import os from pathlib import Path -from 
langchain_community.vectorstores import Chroma +from langchain_chroma import Chroma from langchain_nomic import NomicMultimodalEmbeddings # Load images diff --git a/templates/rag-multi-modal-local/pyproject.toml b/templates/rag-multi-modal-local/pyproject.toml index 4117a731b20..a9f214d210a 100644 --- a/templates/rag-multi-modal-local/pyproject.toml +++ b/templates/rag-multi-modal-local/pyproject.toml @@ -12,7 +12,7 @@ python = ">=3.8.1,<4.0" langchain = ">=0.0.353,<0.2" openai = "<2" tiktoken = ">=0.5.1" -chromadb = ">=0.4.14" +langchain-chroma = "^0.1.2" open-clip-torch = ">=2.23.0" torch = ">=2.1.0" langchain-experimental = ">=0.0.43" diff --git a/templates/rag-multi-modal-local/rag_multi_modal_local/chain.py b/templates/rag-multi-modal-local/rag_multi_modal_local/chain.py index c4df1253d93..215a85cdd7f 100644 --- a/templates/rag-multi-modal-local/rag_multi_modal_local/chain.py +++ b/templates/rag-multi-modal-local/rag_multi_modal_local/chain.py @@ -2,8 +2,8 @@ import base64 import io from pathlib import Path +from langchain_chroma import Chroma from langchain_community.chat_models import ChatOllama -from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_core.messages import HumanMessage from langchain_core.output_parsers import StrOutputParser diff --git a/templates/rag-multi-modal-mv-local/ingest.py b/templates/rag-multi-modal-mv-local/ingest.py index 4e3f711bd2e..a8e6fe87880 100644 --- a/templates/rag-multi-modal-mv-local/ingest.py +++ b/templates/rag-multi-modal-mv-local/ingest.py @@ -7,9 +7,9 @@ from pathlib import Path from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import LocalFileStore +from langchain_chroma import Chroma from langchain_community.chat_models import ChatOllama from langchain_community.embeddings import OllamaEmbeddings -from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from 
langchain_core.messages import HumanMessage from PIL import Image diff --git a/templates/rag-multi-modal-mv-local/pyproject.toml b/templates/rag-multi-modal-mv-local/pyproject.toml index 236e8fda57c..737316d4227 100644 --- a/templates/rag-multi-modal-mv-local/pyproject.toml +++ b/templates/rag-multi-modal-mv-local/pyproject.toml @@ -12,7 +12,7 @@ python = ">=3.8.1,<4.0" langchain = ">=0.0.353,<0.2" openai = "<2" tiktoken = ">=0.5.1" -chromadb = ">=0.4.14" +langchain-chroma = "^0.1.2" pypdfium2 = ">=4.20.0" langchain-experimental = ">=0.0.43" pillow = ">=10.1.0" diff --git a/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/chain.py b/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/chain.py index c91f7598e8d..5e027b712bd 100644 --- a/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/chain.py +++ b/templates/rag-multi-modal-mv-local/rag_multi_modal_mv_local/chain.py @@ -5,9 +5,9 @@ from pathlib import Path from langchain.pydantic_v1 import BaseModel from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import LocalFileStore +from langchain_chroma import Chroma from langchain_community.chat_models import ChatOllama from langchain_community.embeddings import OllamaEmbeddings -from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_core.messages import HumanMessage from langchain_core.output_parsers import StrOutputParser diff --git a/templates/rag-ollama-multi-query/pyproject.toml b/templates/rag-ollama-multi-query/pyproject.toml index a617fc9cdba..f7e9a07d718 100644 --- a/templates/rag-ollama-multi-query/pyproject.toml +++ b/templates/rag-ollama-multi-query/pyproject.toml @@ -12,7 +12,7 @@ python = ">=3.8.1,<4.0" langchain = "^0.1" openai = "<2" tiktoken = ">=0.5.1" -chromadb = ">=0.4.14" +langchain-chroma = "^0.1.2" langchain-text-splitters = ">=0.0.1,<0.1" [tool.poetry.group.dev.dependencies] diff --git 
a/templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py b/templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py index 840a8080426..b06596c2653 100644 --- a/templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py +++ b/templates/rag-ollama-multi-query/rag_ollama_multi_query/chain.py @@ -1,8 +1,8 @@ from langchain.retrievers.multi_query import MultiQueryRetriever +from langchain_chroma import Chroma from langchain_community.chat_models import ChatOllama, ChatOpenAI from langchain_community.document_loaders import WebBaseLoader from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import Chroma from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate, PromptTemplate from langchain_core.pydantic_v1 import BaseModel diff --git a/templates/rag-semi-structured/pyproject.toml b/templates/rag-semi-structured/pyproject.toml index 9705f67c7e7..c2d21a5f1f0 100644 --- a/templates/rag-semi-structured/pyproject.toml +++ b/templates/rag-semi-structured/pyproject.toml @@ -11,7 +11,7 @@ readme = "README.md" python = ">=3.9,<4" langchain = "^0.1" tiktoken = ">=0.5.1" -chromadb = ">=0.4.14" +langchain-chroma = "^0.1.2" openai = "<2" unstructured = ">=0.10.19" pdf2image = ">=1.16.3" diff --git a/templates/rag-semi-structured/rag_semi_structured/chain.py b/templates/rag-semi-structured/rag_semi_structured/chain.py index 37df06f73f4..e5e630c4871 100644 --- a/templates/rag-semi-structured/rag_semi_structured/chain.py +++ b/templates/rag-semi-structured/rag_semi_structured/chain.py @@ -3,9 +3,9 @@ import uuid from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import InMemoryStore +from langchain_chroma import Chroma from langchain_community.chat_models import ChatOpenAI from langchain_community.embeddings import OpenAIEmbeddings -from langchain_community.vectorstores import Chroma from langchain_core.documents import 
Document from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate