community[patch]: deprecate langchain_community Chroma in favor of langchain_chroma (#24474)

ccurme
2024-07-22 11:00:13 -04:00
committed by GitHub
parent 0f7569ddbc
commit dcba7df2fe
67 changed files with 108 additions and 102 deletions

View File

@@ -50,6 +50,7 @@ def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]:
     ]
+@deprecated(since="0.2.9", removal="0.4", alternative_import="langchain_chroma.Chroma")
 class Chroma(VectorStore):
     """`ChromaDB` vector store.

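The decorator above is the heart of the change: constructing the community `Chroma` class now emits a `LangChainDeprecationWarning` that points users at the `langchain-chroma` partner package. A minimal sketch of the replacement, assuming `langchain-chroma` and `chromadb` are installed and using a fake embedding model in place of a real one:

    # Sketch only: assumes the langchain-chroma and chromadb packages are
    # installed; DeterministicFakeEmbedding stands in for a real embedding model.
    from langchain_chroma import Chroma  # instead of langchain_community.vectorstores
    from langchain_core.embeddings import DeterministicFakeEmbedding

    store = Chroma.from_texts(
        ["hello world", "goodbye world"],
        embedding=DeterministicFakeEmbedding(size=32),
        collection_name="demo",
    )
    print(store.similarity_search("hello", k=1)[0].page_content)
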
View File

@@ -1,10 +1,11 @@
 # flake8: noqa
 """Test sentence_transformer embeddings."""
+from langchain_core.vectorstores import InMemoryVectorStore
 from langchain_community.embeddings.sentence_transformer import (
     SentenceTransformerEmbeddings,
 )
-from langchain_community.vectorstores import Chroma
 def test_sentence_transformer_embedding_documents() -> None:
@@ -34,7 +35,7 @@ def test_sentence_transformer_db_query() -> None:
     query = "what the foo is a bar?"
     query_vector = embedding.embed_query(query)
     assert len(query_vector) == 384
-    db = Chroma(embedding_function=embedding)
+    db = InMemoryVectorStore(embedding=embedding)
     db.add_texts(texts)
     docs = db.similarity_search_by_vector(query_vector, k=2)
     assert docs[0].page_content == "we will foo your bar until you can't foo any more"

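The test above now builds on `langchain_core`'s `InMemoryVectorStore`, which keeps vectors in process and needs no Chroma client or server. A small sketch of the same query-by-vector flow, with `DeterministicFakeEmbedding` standing in for the SentenceTransformer model so the example has no extra dependencies:

    # Sketch only: DeterministicFakeEmbedding is a stand-in for a real
    # SentenceTransformer model.
    from langchain_core.embeddings import DeterministicFakeEmbedding
    from langchain_core.vectorstores import InMemoryVectorStore

    # 384 matches the dimensionality asserted in the test above.
    embedding = DeterministicFakeEmbedding(size=384)
    db = InMemoryVectorStore(embedding=embedding)
    db.add_texts(["we will foo your bar", "completely unrelated text"])

    query_vector = embedding.embed_query("what the foo is a bar?")
    docs = db.similarity_search_by_vector(query_vector, k=1)
    print(docs[0].page_content)
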
View File

@@ -1,7 +1,7 @@
 from langchain.retrievers.merger_retriever import MergerRetriever
+from langchain_core.vectorstores import InMemoryVectorStore
 from langchain_community.embeddings import OpenAIEmbeddings
-from langchain_community.vectorstores import Chroma
 def test_merger_retriever_get_relevant_docs() -> None:
@@ -17,12 +17,12 @@ def test_merger_retriever_get_relevant_docs() -> None:
         "Real stupidity beats artificial intelligence every time. TP",
     ]
     embeddings = OpenAIEmbeddings()
-    retriever_a = Chroma.from_texts(texts_group_a, embedding=embeddings).as_retriever(
-        search_kwargs={"k": 1}
-    )
-    retriever_b = Chroma.from_texts(texts_group_b, embedding=embeddings).as_retriever(
-        search_kwargs={"k": 1}
-    )
+    retriever_a = InMemoryVectorStore.from_texts(
+        texts_group_a, embedding=embeddings
+    ).as_retriever(search_kwargs={"k": 1})
+    retriever_b = InMemoryVectorStore.from_texts(
+        texts_group_b, embedding=embeddings
+    ).as_retriever(search_kwargs={"k": 1})
     # The Lord of the Retrievers.
     lotr = MergerRetriever(retrievers=[retriever_a, retriever_b])

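For context, `MergerRetriever` interleaves the documents returned by each of its child retrievers ("the Lord of the Retrievers" in the test's joke). A runnable sketch of the pattern the updated test uses, with fake embeddings in place of `OpenAIEmbeddings` so no API key is needed:

    # Sketch only: DeterministicFakeEmbedding replaces OpenAIEmbeddings
    # purely so the example runs offline.
    from langchain.retrievers.merger_retriever import MergerRetriever
    from langchain_core.embeddings import DeterministicFakeEmbedding
    from langchain_core.vectorstores import InMemoryVectorStore

    embeddings = DeterministicFakeEmbedding(size=64)
    retriever_a = InMemoryVectorStore.from_texts(
        ["Practically perfect in every way."], embedding=embeddings
    ).as_retriever(search_kwargs={"k": 1})
    retriever_b = InMemoryVectorStore.from_texts(
        ["Real stupidity beats artificial intelligence every time."],
        embedding=embeddings,
    ).as_retriever(search_kwargs={"k": 1})

    # MergerRetriever interleaves the documents from each child retriever.
    lotr = MergerRetriever(retrievers=[retriever_a, retriever_b])
    merged = lotr.invoke("What does Terry Pratchett say about intelligence?")
    assert len(merged) == 2
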
View File

@@ -1,10 +1,11 @@
 """Integration test for doc reordering."""
+from langchain_core.vectorstores import InMemoryVectorStore
 from langchain_community.document_transformers.long_context_reorder import (
     LongContextReorder,
 )
 from langchain_community.embeddings import OpenAIEmbeddings
-from langchain_community.vectorstores import Chroma
 def test_long_context_reorder() -> None:
@@ -22,9 +23,9 @@ def test_long_context_reorder() -> None:
         "Larry Bird was an iconic NBA player.",
     ]
     embeddings = OpenAIEmbeddings()
-    retriever = Chroma.from_texts(texts, embedding=embeddings).as_retriever(
-        search_kwargs={"k": 10}
-    )
+    retriever = InMemoryVectorStore.from_texts(
+        texts, embedding=embeddings
+    ).as_retriever(search_kwargs={"k": 10})
     reordering = LongContextReorder()
     docs = retriever.invoke("Tell me about the Celtics")
     actual = reordering.transform_documents(docs)

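`LongContextReorder` reorders retrieved documents so the most relevant ones sit at the beginning and end of the list and the least relevant in the middle, a mitigation for the "lost in the middle" effect. A small sketch of the transformer on its own, independent of any vector store:

    # Sketch only: five dummy documents stand in for real retrieval results.
    from langchain_community.document_transformers.long_context_reorder import (
        LongContextReorder,
    )
    from langchain_core.documents import Document

    # Documents are assumed to arrive sorted by descending relevance,
    # as a retriever would return them.
    docs = [Document(page_content=f"doc {i}") for i in range(1, 6)]

    reordered = LongContextReorder().transform_documents(docs)
    # The least relevant documents end up in the middle of the list.
    print([d.page_content for d in reordered])
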
View File

@@ -11,7 +11,11 @@ from langchain_core.callbacks import (
     CallbackManagerForRetrieverRun,
 )
 from langchain_core.documents import Document
-from langchain_core.vectorstores import VectorStore, VectorStoreRetriever
+from langchain_core.vectorstores import (
+    InMemoryVectorStore,
+    VectorStore,
+    VectorStoreRetriever,
+)
 from langchain_community.chains import PebbloRetrievalQA
 from langchain_community.chains.pebblo_retrieval.models import (
@@ -19,7 +23,6 @@ from langchain_community.chains.pebblo_retrieval.models import (
     ChainInput,
     SemanticContext,
 )
-from langchain_community.vectorstores.chroma import Chroma
 from langchain_community.vectorstores.pinecone import Pinecone
 from tests.unit_tests.llms.fake_llm import FakeLLM
@@ -49,8 +52,8 @@ def unsupported_retriever() -> FakeRetriever:
     """
     retriever = FakeRetriever()
     retriever.search_kwargs = {}
-    # Set the class of vectorstore to Chroma
-    retriever.vectorstore.__class__ = Chroma
+    # Set the class of vectorstore
+    retriever.vectorstore.__class__ = InMemoryVectorStore
     return retriever

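The fixture relies on Python's `__class__` reassignment to control which vector-store type `PebbloRetrievalQA` sees on the retriever. A small sketch of that trick in isolation; `FakeVectorStore` here is a hypothetical stand-in, not part of the test suite:

    # Sketch only: FakeVectorStore is hypothetical and has no real search
    # behaviour; the point is that reassigning __class__ changes what
    # type()/isinstance() report for the instance.
    from langchain_core.vectorstores import InMemoryVectorStore

    class FakeVectorStore:
        """Hypothetical stand-in object with no real vector-store behaviour."""

    fake = FakeVectorStore()
    fake.__class__ = InMemoryVectorStore
    print(isinstance(fake, InMemoryVectorStore))  # True
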
View File

@@ -67,7 +67,7 @@ class ConversationVectorStoreTokenBufferMemory(ConversationTokenBufferMemory):
         from langchain.memory.token_buffer_vectorstore_memory import (
             ConversationVectorStoreTokenBufferMemory
         )
-        from langchain_community.vectorstores import Chroma
+        from langchain_chroma import Chroma
         from langchain_community.embeddings import HuggingFaceInstructEmbeddings
         from langchain_openai import OpenAI

View File

@@ -31,8 +31,8 @@ class ParentDocumentRetriever(MultiVectorRetriever):
         .. code-block:: python
+            from langchain_chroma import Chroma
             from langchain_community.embeddings import OpenAIEmbeddings
-            from langchain_community.vectorstores import Chroma
             from langchain_text_splitters import RecursiveCharacterTextSplitter
             from langchain.storage import InMemoryStore
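
The docstring hunk cuts off after the imports; the sketch below shows one plausible way the pieces fit together under the new `langchain_chroma` import. It assumes `langchain-chroma` and `chromadb` are installed and swaps `OpenAIEmbeddings` for a fake embedding model, so it is an illustration rather than the docstring's exact continuation:

    # Sketch only: assumes langchain-chroma and chromadb are installed;
    # DeterministicFakeEmbedding replaces OpenAIEmbeddings so no API key is needed.
    from langchain.retrievers import ParentDocumentRetriever
    from langchain.storage import InMemoryStore
    from langchain_chroma import Chroma
    from langchain_core.documents import Document
    from langchain_core.embeddings import DeterministicFakeEmbedding
    from langchain_text_splitters import RecursiveCharacterTextSplitter

    # Small chunks go into the vector store; full documents go into the docstore.
    child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
    vectorstore = Chroma(
        collection_name="full_documents",
        embedding_function=DeterministicFakeEmbedding(size=64),
    )
    store = InMemoryStore()

    retriever = ParentDocumentRetriever(
        vectorstore=vectorstore,
        docstore=store,
        child_splitter=child_splitter,
    )
    retriever.add_documents([Document(page_content="A long parent document " * 50)])
    results = retriever.invoke("parent document")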