From efe6cfafe22a8bb3f084d9f12509748f67d47e58 Mon Sep 17 00:00:00 2001
From: Varik Matevosyan
Date: Sat, 13 Jan 2024 00:00:16 +0400
Subject: [PATCH] community: Added Lantern as VectorStore (#12951)

Support [Lantern](https://github.com/lanterndata/lantern) as a new VectorStore type.

- Added Lantern as a VectorStore. It supports three distance functions
  (`l2 squared`, `cosine`, and `hamming`) and uses an `HNSW` index.
- Added tests
- Added example notebook
---
 docs/docs/integrations/providers/lantern.mdx  |   25 +
 .../integrations/vectorstores/lantern.ipynb   |  659 +++++++++++
 .../vectorstores/__init__.py                  |    9 +
 .../vectorstores/lantern.py                   | 1018 +++++++++++++++++
 .../vectorstores/test_lantern.py              |  319 ++++++
 .../vectorstores/test_public_api.py           |    1 +
 .../tests/unit_tests/indexes/test_indexing.py |    1 +
 7 files changed, 2032 insertions(+)
 create mode 100644 docs/docs/integrations/providers/lantern.mdx
 create mode 100644 docs/docs/integrations/vectorstores/lantern.ipynb
 create mode 100644 libs/community/langchain_community/vectorstores/lantern.py
 create mode 100644 libs/community/tests/integration_tests/vectorstores/test_lantern.py

diff --git a/docs/docs/integrations/providers/lantern.mdx b/docs/docs/integrations/providers/lantern.mdx
new file mode 100644
index 00000000000..9b4a537acfa
--- /dev/null
+++ b/docs/docs/integrations/providers/lantern.mdx
@@ -0,0 +1,25 @@
+# Lantern
+
+This page covers how to use [Lantern](https://github.com/lanterndata/lantern) within LangChain.
+It is broken into two parts: setup, and then references to specific Lantern wrappers.
+
+## Setup
+1. The first step is to create a database with the `lantern` extension installed.
+
+   Follow the steps at [Lantern Installation Guide](https://github.com/lanterndata/lantern#-quick-install) to install the database and the extension. The Docker image is the easiest way to get started.
+
+## Wrappers
+
+### VectorStore
+
+There exists a wrapper around Lantern-enabled Postgres databases, allowing you to use them as a vectorstore,
+whether for semantic search or example selection.
+
+To import this vectorstore:
+```python
+from langchain_community.vectorstores import Lantern
+```
+
+### Usage
+
+For a more detailed walkthrough of the Lantern wrapper, see [this notebook](/docs/integrations/vectorstores/lantern).
diff --git a/docs/docs/integrations/vectorstores/lantern.ipynb b/docs/docs/integrations/vectorstores/lantern.ipynb
new file mode 100644
index 00000000000..a9121bb7319
--- /dev/null
+++ b/docs/docs/integrations/vectorstores/lantern.ipynb
@@ -0,0 +1,659 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Lantern\n",
+    "\n",
+    ">[Lantern](https://github.com/lanterndata/lantern) is an open-source vector similarity search for `Postgres`.\n",
+    "\n",
+    "It supports:\n",
+    "- Exact and approximate nearest neighbor search\n",
+    "- L2 squared distance, hamming distance, and cosine distance\n",
+    "\n",
+    "This notebook shows how to use the Postgres vector database (`Lantern`)."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "See the [installation instructions](https://github.com/lanterndata/lantern#-quick-install)."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We want to use `OpenAIEmbeddings`, so we have to get the OpenAI API key."
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Pip install necessary package\n", + "!pip install openai\n", + "!pip install psycopg2-binary\n", + "!pip install tiktoken" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "ExecuteTime": { + "end_time": "2023-09-09T08:02:16.802456Z", + "start_time": "2023-09-09T08:02:07.065604Z" + } + }, + "outputs": [ + { + "name": "stdin", + "output_type": "stream", + "text": [ + "OpenAI API Key: ········\n" + ] + } + ], + "source": [ + "import getpass\n", + "import os\n", + "\n", + "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "ExecuteTime": { + "end_time": "2023-09-09T08:02:19.742896Z", + "start_time": "2023-09-09T08:02:19.732527Z" + }, + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "False" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "## Loading Environment Variables\n", + "from typing import List, Tuple\n", + "\n", + "from dotenv import load_dotenv\n", + "\n", + "load_dotenv()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "ExecuteTime": { + "end_time": "2023-09-09T08:02:23.144824Z", + "start_time": "2023-09-09T08:02:22.047801Z" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_community.vectorstores import Lantern\n", + "from langchain_core.documents import Document" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "ExecuteTime": { + "end_time": "2023-09-09T08:02:25.452472Z", + "start_time": "2023-09-09T08:02:25.441563Z" + } + }, + "outputs": [], + "source": [ + "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", + "documents = loader.load()\n", + "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", + "docs = text_splitter.split_documents(documents)\n", + "\n", + "embeddings = OpenAIEmbeddings()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "ExecuteTime": { + "end_time": "2023-09-09T08:02:28.174088Z", + "start_time": "2023-09-09T08:02:28.162698Z" + } + }, + "outputs": [ + { + "name": "stdin", + "output_type": "stream", + "text": [ + "DB Connection String: ········\n" + ] + } + ], + "source": [ + "# Lantern needs the connection string to the database.\n", + "# Example postgresql://postgres:postgres@localhost:5432/postgres\n", + "CONNECTION_STRING = getpass.getpass(\"DB Connection String:\")\n", + "\n", + "# # Alternatively, you can create it from environment variables.\n", + "# import os\n", + "\n", + "# CONNECTION_STRING = Lantern.connection_string_from_db_params(\n", + "# driver=os.environ.get(\"LANTERN_DRIVER\", \"psycopg2\"),\n", + "# host=os.environ.get(\"LANTERN_HOST\", \"localhost\"),\n", + "# port=int(os.environ.get(\"LANTERN_PORT\", \"5432\")),\n", + "# database=os.environ.get(\"LANTERN_DATABASE\", \"postgres\"),\n", + "# user=os.environ.get(\"LANTERN_USER\", \"postgres\"),\n", + "# password=os.environ.get(\"LANTERN_PASSWORD\", \"postgres\"),\n", + "# )\n", + "\n", + "# or you can pass it via `LANTERN_CONNECTION_STRING` env variable" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + 
} + }, + "source": [ + "## Similarity Search with Cosine Distance (Default)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "ExecuteTime": { + "end_time": "2023-09-09T08:04:16.696625Z", + "start_time": "2023-09-09T08:02:31.817790Z" + } + }, + "outputs": [], + "source": [ + "# The Lantern Module will try to create a table with the name of the collection.\n", + "# So, make sure that the collection name is unique and the user has the permission to create a table.\n", + "\n", + "COLLECTION_NAME = \"state_of_the_union_test\"\n", + "\n", + "db = Lantern.from_documents(\n", + " embedding=embeddings,\n", + " documents=docs,\n", + " collection_name=COLLECTION_NAME,\n", + " connection_string=CONNECTION_STRING,\n", + " pre_delete_collection=True,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "ExecuteTime": { + "end_time": "2023-09-09T08:05:11.104135Z", + "start_time": "2023-09-09T08:05:10.548998Z" + } + }, + "outputs": [], + "source": [ + "query = \"What did the president say about Ketanji Brown Jackson\"\n", + "docs_with_score = db.similarity_search_with_score(query)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "ExecuteTime": { + "end_time": "2023-09-09T08:05:13.532334Z", + "start_time": "2023-09-09T08:05:13.523191Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--------------------------------------------------------------------------------\n", + "Score: 0.18440479\n", + "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", + "\n", + "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", + "\n", + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n", + "--------------------------------------------------------------------------------\n", + "--------------------------------------------------------------------------------\n", + "Score: 0.21727282\n", + "A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n", + "\n", + "And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \n", + "\n", + "We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \n", + "\n", + "We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \n", + "\n", + "We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. 
\n", + "\n", + "We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.\n", + "--------------------------------------------------------------------------------\n", + "--------------------------------------------------------------------------------\n", + "Score: 0.22621095\n", + "And for our LGBTQ+ Americans, let’s finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. \n", + "\n", + "As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \n", + "\n", + "While it often appears that we never agree, that isn’t true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. \n", + "\n", + "And soon, we’ll strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. \n", + "\n", + "So tonight I’m offering a Unity Agenda for the Nation. Four big things we can do together. \n", + "\n", + "First, beat the opioid epidemic.\n", + "--------------------------------------------------------------------------------\n", + "--------------------------------------------------------------------------------\n", + "Score: 0.22654456\n", + "Tonight, I’m announcing a crackdown on these companies overcharging American businesses and consumers. \n", + "\n", + "And as Wall Street firms take over more nursing homes, quality in those homes has gone down and costs have gone up. \n", + "\n", + "That ends on my watch. \n", + "\n", + "Medicare is going to set higher standards for nursing homes and make sure your loved ones get the care they deserve and expect. \n", + "\n", + "We’ll also cut costs and keep the economy going strong by giving workers a fair shot, provide more training and apprenticeships, hire them based on their skills not degrees. \n", + "\n", + "Let’s pass the Paycheck Fairness Act and paid leave. \n", + "\n", + "Raise the minimum wage to $15 an hour and extend the Child Tax Credit, so no one has to raise a family in poverty. \n", + "\n", + "Let’s increase Pell Grants and increase our historic support of HBCUs, and invest in what Jill—our First Lady who teaches full-time—calls America’s best-kept secret: community colleges.\n", + "--------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "for doc, score in docs_with_score:\n", + " print(\"-\" * 80)\n", + " print(\"Score: \", score)\n", + " print(doc.page_content)\n", + " print(\"-\" * 80)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "source": [ + "## Maximal Marginal Relevance Search (MMR)\n", + "Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "ExecuteTime": { + "end_time": "2023-09-09T08:05:23.276819Z", + "start_time": "2023-09-09T08:05:21.972256Z" + }, + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "docs_with_score = db.max_marginal_relevance_search_with_score(query)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "ExecuteTime": { + "end_time": "2023-09-09T08:05:27.478580Z", + "start_time": "2023-09-09T08:05:27.470138Z" + }, + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--------------------------------------------------------------------------------\n", + "Score: 0.18440479\n", + "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", + "\n", + "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", + "\n", + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n", + "--------------------------------------------------------------------------------\n", + "--------------------------------------------------------------------------------\n", + "Score: 0.23515457\n", + "We can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n", + "\n", + "I recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n", + "\n", + "They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n", + "\n", + "Officer Mora was 27 years old. \n", + "\n", + "Officer Rivera was 22. \n", + "\n", + "Both Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n", + "\n", + "I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \n", + "\n", + "I’ve worked on these issues a long time. \n", + "\n", + "I know what works: Investing in crime prevention and community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety.\n", + "--------------------------------------------------------------------------------\n", + "--------------------------------------------------------------------------------\n", + "Score: 0.24478757\n", + "One was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. \n", + "\n", + "When they came home, many of the world’s fittest and best trained warriors were never the same. \n", + "\n", + "Headaches. Numbness. Dizziness. \n", + "\n", + "A cancer that would put them in a flag-draped coffin. \n", + "\n", + "I know. 
\n", + "\n", + "One of those soldiers was my son Major Beau Biden. \n", + "\n", + "We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. \n", + "\n", + "But I’m committed to finding out everything we can. \n", + "\n", + "Committed to military families like Danielle Robinson from Ohio. \n", + "\n", + "The widow of Sergeant First Class Heath Robinson. \n", + "\n", + "He was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. \n", + "\n", + "Stationed near Baghdad, just yards from burn pits the size of football fields. \n", + "\n", + "Heath’s widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter.\n", + "--------------------------------------------------------------------------------\n", + "--------------------------------------------------------------------------------\n", + "Score: 0.25137997\n", + "And I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n", + "\n", + "Tonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n", + "\n", + "America will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n", + "\n", + "These steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n", + "\n", + "But I want you to know that we are going to be okay. \n", + "\n", + "When the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger. \n", + "\n", + "While it shouldn’t have taken something so terrible for people around the world to see what’s at stake now everyone sees it clearly.\n", + "--------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "for doc, score in docs_with_score:\n", + " print(\"-\" * 80)\n", + " print(\"Score: \", score)\n", + " print(doc.page_content)\n", + " print(\"-\" * 80)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Working with vectorstore\n", + "\n", + "Above, we created a vectorstore from scratch. However, often times we want to work with an existing vectorstore.\n", + "In order to do that, we can initialize it directly." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "store = Lantern(\n", + " collection_name=COLLECTION_NAME,\n", + " connection_string=CONNECTION_STRING,\n", + " embedding_function=embeddings,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Add documents\n", + "We can add documents to the existing vectorstore." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['f8164598-aa28-11ee-a037-acde48001122']" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "store.add_documents([Document(page_content=\"foo\")])" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "docs_with_score = db.similarity_search_with_score(\"foo\")" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(Document(page_content='foo'), -1.1920929e-07)" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "docs_with_score[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(Document(page_content='And let’s pass the PRO Act when a majority of workers want to form a union—they shouldn’t be stopped. \\n\\nWhen we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America. \\n\\nFor more than two years, COVID-19 has impacted every decision in our lives and the life of the nation. \\n\\nAnd I know you’re tired, frustrated, and exhausted. \\n\\nBut I also know this. \\n\\nBecause of the progress we’ve made, because of your resilience and the tools we have, tonight I can say \\nwe are moving forward safely, back to more normal routines. \\n\\nWe’ve reached a new moment in the fight against COVID-19, with severe cases down to a level not seen since last July. \\n\\nJust a few days ago, the Centers for Disease Control and Prevention—the CDC—issued new mask guidelines. \\n\\nUnder these new guidelines, most Americans in most of the country can now be mask free.', metadata={'source': '../../modules/state_of_the_union.txt'}),\n", + " 0.24038416)" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "docs_with_score[1]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Overriding a vectorstore\n", + "\n", + "If you have an existing collection, you override it by doing `from_documents` and setting `pre_delete_collection` = True \n", + "This will delete the collection before re-populating it" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "db = Lantern.from_documents(\n", + " documents=docs,\n", + " embedding=embeddings,\n", + " collection_name=COLLECTION_NAME,\n", + " connection_string=CONNECTION_STRING,\n", + " pre_delete_collection=True,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "docs_with_score = db.similarity_search_with_score(\"foo\")" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(Document(page_content='And let’s pass the PRO Act when a majority of workers want to form a union—they shouldn’t be stopped. \\n\\nWhen we invest in our workers, when we build the economy from the bottom up and the middle out together, we can do something we haven’t done in a long time: build a better America. \\n\\nFor more than two years, COVID-19 has impacted every decision in our lives and the life of the nation. 
\\n\\nAnd I know you’re tired, frustrated, and exhausted. \\n\\nBut I also know this. \\n\\nBecause of the progress we’ve made, because of your resilience and the tools we have, tonight I can say \\nwe are moving forward safely, back to more normal routines. \\n\\nWe’ve reached a new moment in the fight against COVID-19, with severe cases down to a level not seen since last July. \\n\\nJust a few days ago, the Centers for Disease Control and Prevention—the CDC—issued new mask guidelines. \\n\\nUnder these new guidelines, most Americans in most of the country can now be mask free.', metadata={'source': '../../modules/state_of_the_union.txt'}),\n", + " 0.2403456)" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "docs_with_score[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Using a VectorStore as a Retriever" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "retriever = store.as_retriever()" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tags=['Lantern', 'OpenAIEmbeddings'] vectorstore=\n" + ] + } + ], + "source": [ + "print(retriever)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/libs/community/langchain_community/vectorstores/__init__.py b/libs/community/langchain_community/vectorstores/__init__.py index 0ddaafed857..1cb42b8fc51 100644 --- a/libs/community/langchain_community/vectorstores/__init__.py +++ b/libs/community/langchain_community/vectorstores/__init__.py @@ -458,6 +458,12 @@ def _import_zilliz() -> Any: return Zilliz +def _import_lantern() -> Any: + from langchain_community.vectorstores.lantern import Lantern + + return Lantern + + def __getattr__(name: str) -> Any: if name == "AnalyticDB": return _import_analyticdb() @@ -599,6 +605,8 @@ def __getattr__(name: str) -> Any: return _import_zilliz() elif name == "VespaStore": return _import_vespa() + elif name == "Lantern": + return _import_lantern() else: raise AttributeError(f"Could not find: {name}") @@ -673,4 +681,5 @@ __all__ = [ "TencentVectorDB", "AzureCosmosDBVectorSearch", "VectorStore", + "Lantern", ] diff --git a/libs/community/langchain_community/vectorstores/lantern.py b/libs/community/langchain_community/vectorstores/lantern.py new file mode 100644 index 00000000000..9b16af9ce98 --- /dev/null +++ b/libs/community/langchain_community/vectorstores/lantern.py @@ -0,0 +1,1018 @@ +from __future__ import annotations + +import contextlib +import enum +import logging +import uuid +from typing import ( + Any, + Callable, + Dict, + Generator, + Iterable, + List, + Optional, + Tuple, + Type, + Union, +) + +import numpy as np +import sqlalchemy +from sqlalchemy import delete, func +from sqlalchemy.dialects.postgresql import JSON, UUID +from sqlalchemy.exc import ProgrammingError +from sqlalchemy.orm import Session +from sqlalchemy.sql import quoted_name 
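+# NOTE: `quoted_name` is used below to safely quote the index and table
+# identifiers that are interpolated into raw SQL statements.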
+
+from langchain_community.vectorstores.utils import maximal_marginal_relevance
+
+# SQLAlchemy >= 1.4 exposes `declarative_base` from `sqlalchemy.orm`;
+# fall back to the legacy location for SQLAlchemy 1.3.
+try:
+    from sqlalchemy.orm import declarative_base
+except ImportError:
+    from sqlalchemy.ext.declarative import declarative_base
+
+from langchain_core.documents import Document
+from langchain_core.embeddings import Embeddings
+from langchain_core.utils import get_from_dict_or_env
+from langchain_core.vectorstores import VectorStore
+
+ADA_TOKEN_COUNT = 1536
+_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
+
+
+def _results_to_docs(docs_and_scores: Any) -> List[Document]:
+    """Return docs from docs and scores."""
+    return [doc for doc, _ in docs_and_scores]
+
+
+class BaseEmbeddingStore:
+    """Embedding store."""
+
+
+def get_embedding_store(
+    distance_strategy: DistanceStrategy, collection_name: str
+) -> Any:
+    """Build a declarative `EmbeddingStore` model bound to the collection table."""
+    # Hamming distance operates on integer (bit) vectors;
+    # the other strategies use real-valued vectors.
+    if distance_strategy == DistanceStrategy.HAMMING:
+        embedding_type = sqlalchemy.INTEGER  # type: ignore
+    else:
+        embedding_type = sqlalchemy.REAL  # type: ignore
+
+    DynamicBase = declarative_base(class_registry=dict())  # type: Any
+
+    class EmbeddingStore(DynamicBase, BaseEmbeddingStore):
+        __tablename__ = collection_name
+        uuid = sqlalchemy.Column(
+            UUID(as_uuid=True), primary_key=True, default=uuid.uuid4
+        )
+        __table_args__ = {"extend_existing": True}
+        document = sqlalchemy.Column(sqlalchemy.String, nullable=True)
+        cmetadata = sqlalchemy.Column(JSON, nullable=True)
+        # custom_id: any user-defined id
+        custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True)
+        embedding = sqlalchemy.Column(sqlalchemy.ARRAY(embedding_type))  # type: ignore
+
+    return EmbeddingStore
+
+
+class QueryResult:
+    """Result from a query."""
+
+    EmbeddingStore: BaseEmbeddingStore
+    distance: float
+
+
+class DistanceStrategy(str, enum.Enum):
+    """Enumerator of the Distance strategies."""
+
+    EUCLIDEAN = "l2sq"
+    COSINE = "cosine"
+    HAMMING = "hamming"
+
+
+DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE
+
+
+class Lantern(VectorStore):
+    """`Postgres` with the `lantern` extension as a vector store.
+
+    Lantern uses sequential scan by default, but you can create an HNSW index
+    using the `create_hnsw_index` method.
+    - `connection_string` is a postgres connection string.
+    - `embedding_function` is any embedding function implementing the
+        `langchain.embeddings.base.Embeddings` interface.
+    - `collection_name` is the name of the collection to use. (default: langchain)
+        - NOTE: This is the name of the table in which embedding data will be stored
+            The table will be created when initializing the store (if it does not exist).
+            So, make sure the user has the right permissions to create tables.
+    - `distance_strategy` is the distance strategy to use. (default: COSINE)
+        - `EUCLIDEAN` is the euclidean distance.
+        - `COSINE` is the cosine distance.
+        - `HAMMING` is the hamming distance.
+    - `pre_delete_collection` if True, will delete the collection if it exists.
+        (default: False)
+        - Useful for testing.
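+
+    Example (illustrative sketch; the connection string, collection name,
+    and embedding model below are placeholders, not defaults):
+
+    .. code-block:: python
+
+        from langchain_community.embeddings import OpenAIEmbeddings
+        from langchain_community.vectorstores import Lantern
+
+        CONNECTION_STRING = "postgresql://postgres:postgres@localhost:5432/postgres"
+        store = Lantern(
+            connection_string=CONNECTION_STRING,
+            embedding_function=OpenAIEmbeddings(),
+            collection_name="my_collection",
+        )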
+ """ + + def __init__( + self, + connection_string: str, + embedding_function: Embeddings, + distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, + collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, + collection_metadata: Optional[dict] = None, + pre_delete_collection: bool = False, + logger: Optional[logging.Logger] = None, + relevance_score_fn: Optional[Callable[[float], float]] = None, + ) -> None: + self.connection_string = connection_string + self.embedding_function = embedding_function + self.collection_name = collection_name + self.collection_metadata = collection_metadata + self._distance_strategy = distance_strategy + self.pre_delete_collection = pre_delete_collection + self.logger = logger or logging.getLogger(__name__) + self.override_relevance_score_fn = relevance_score_fn + self.EmbeddingStore = get_embedding_store( + self.distance_strategy, collection_name + ) + self.__post_init__() + + def __post_init__( + self, + ) -> None: + self._conn = self.connect() + self.create_hnsw_extension() + self.create_collection() + + @property + def distance_strategy(self) -> DistanceStrategy: + if isinstance(self._distance_strategy, DistanceStrategy): + return self._distance_strategy + + if self._distance_strategy == DistanceStrategy.EUCLIDEAN.value: + return DistanceStrategy.EUCLIDEAN + elif self._distance_strategy == DistanceStrategy.COSINE.value: + return DistanceStrategy.COSINE + elif self._distance_strategy == DistanceStrategy.HAMMING.value: + return DistanceStrategy.HAMMING + else: + raise ValueError( + f"Got unexpected value for distance: {self._distance_strategy}. " + f"Should be one of {', '.join([ds.value for ds in DistanceStrategy])}." + ) + + @property + def embeddings(self) -> Embeddings: + return self.embedding_function + + @classmethod + def connection_string_from_db_params( + cls, + driver: str, + host: str, + port: int, + database: str, + user: str, + password: str, + ) -> str: + """Return connection string from database parameters.""" + return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}" + + def connect(self) -> sqlalchemy.engine.Connection: + engine = sqlalchemy.create_engine(self.connection_string) + conn = engine.connect() + return conn + + @property + def distance_function(self) -> Any: + if self.distance_strategy == DistanceStrategy.EUCLIDEAN: + return "l2sq_dist" + elif self.distance_strategy == DistanceStrategy.COSINE: + return "cos_dist" + elif self.distance_strategy == DistanceStrategy.HAMMING: + return "hamming_dist" + + def create_hnsw_extension(self) -> None: + try: + with Session(self._conn) as session: + statement = sqlalchemy.text("CREATE EXTENSION IF NOT EXISTS lantern") + session.execute(statement) + session.commit() + except Exception as e: + self.logger.exception(e) + + def create_tables_if_not_exists(self) -> None: + try: + self.create_collection() + except ProgrammingError: + pass + + def drop_table(self) -> None: + try: + self.EmbeddingStore.__table__.drop(self._conn.engine) + except ProgrammingError: + pass + + def drop_tables(self) -> None: + self.drop_table() + + def _hamming_relevance_score_fn(self, distance: float) -> float: + return distance + + def _select_relevance_score_fn(self) -> Callable[[float], float]: + """ + The 'correct' relevance function + may differ depending on a few things, including: + - the distance / similarity metric used by the VectorStore + - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) + - embedding dimensionality + - etc. 
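+
+        For Lantern specifically: cosine and euclidean distances fall back to
+        the standard LangChain relevance conversions, while hamming distance
+        is returned unchanged (see `_hamming_relevance_score_fn` above).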
+        """
+        if self.override_relevance_score_fn is not None:
+            return self.override_relevance_score_fn
+
+        # Default strategy is to rely on the distance strategy provided
+        # in the vectorstore constructor
+        if self.distance_strategy == DistanceStrategy.COSINE:
+            return self._cosine_relevance_score_fn
+        elif self.distance_strategy == DistanceStrategy.EUCLIDEAN:
+            return self._euclidean_relevance_score_fn
+        elif self.distance_strategy == DistanceStrategy.HAMMING:
+            return self._hamming_relevance_score_fn
+        else:
+            raise ValueError(
+                "No supported normalization function"
+                f" for distance_strategy of {self._distance_strategy}."
+                " Consider providing relevance_score_fn to Lantern constructor."
+            )
+
+    def _get_op_class(self) -> str:
+        if self.distance_strategy == DistanceStrategy.COSINE:
+            return "dist_cos_ops"
+        elif self.distance_strategy == DistanceStrategy.EUCLIDEAN:
+            return "dist_l2sq_ops"
+        elif self.distance_strategy == DistanceStrategy.HAMMING:
+            return "dist_hamming_ops"
+        else:
+            raise ValueError(
+                "No supported operator class"
+                f" for distance_strategy of {self._distance_strategy}."
+            )
+
+    def _get_operator(self) -> str:
+        if self.distance_strategy == DistanceStrategy.COSINE:
+            return "<=>"
+        elif self.distance_strategy == DistanceStrategy.EUCLIDEAN:
+            return "<->"
+        elif self.distance_strategy == DistanceStrategy.HAMMING:
+            return "<+>"
+        else:
+            raise ValueError(
+                "No supported operator"
+                f" for distance_strategy of {self._distance_strategy}."
+            )
+
+    def _typed_arg_for_distance(
+        self, embedding: List[Union[float, int]]
+    ) -> List[Union[float, int]]:
+        # Hamming distance operates on integer vectors, so cast each component.
+        if self.distance_strategy == DistanceStrategy.HAMMING:
+            return [int(value) for value in embedding]
+        return embedding
+
+    @property
+    def _index_name(self) -> str:
+        return f"langchain_{self.collection_name}_idx"
+
+    def create_hnsw_index(
+        self,
+        dims: int = ADA_TOKEN_COUNT,
+        m: int = 16,
+        ef_construction: int = 64,
+        ef_search: int = 64,
+        **_kwargs: Any,
+    ) -> None:
+        """Create HNSW index on collection.
+
+        Args:
+            m: Number of bidirectional links created for each new element. Large
+                impact on memory consumption. Between 2 and 100; default: 16
+
+            ef_construction: Size of the dynamic list used during k-NN graph
+                creation. Higher values lead to a more accurate graph but slower
+                indexing speed; default: 64
+
+            ef_search: Size of the dynamic list used during k-NN searches. Higher
+                values lead to more accurate but slower searches; default: 64
+
+            dims: Dimensions of the vectors in collection.
default: 1536 + """ + create_index_query = sqlalchemy.text( + "CREATE INDEX IF NOT EXISTS {} " + "ON {} USING hnsw (embedding {}) " + "WITH (" + "dim = :dim, " + "m = :m, " + "ef_construction = :ef_construction, " + "ef = :ef" + ");".format( + quoted_name(self._index_name, True), + quoted_name(self.collection_name, True), + self._get_op_class(), + ) + ) + + with Session(self._conn) as session: + # Create the HNSW index + session.execute( + create_index_query, + { + "dim": dims, + "m": m, + "ef_construction": ef_construction, + "ef": ef_search, + }, + ) + session.commit() + self.logger.info("HNSW extension and index created successfully.") + + def drop_index(self) -> None: + with Session(self._conn) as session: + # Drop the HNSW index + session.execute( + sqlalchemy.text( + "DROP INDEX IF EXISTS {}".format( + quoted_name(self._index_name, True) + ) + ) + ) + session.commit() + + def create_collection(self) -> None: + if self.pre_delete_collection: + self.delete_collection() + self.drop_table() + + with self._conn.begin(): + try: + self.EmbeddingStore.__table__.create(self._conn.engine) + except ProgrammingError as e: + # Duplicate table + if e.code == "f405": + pass + else: + raise e + + def delete_collection(self) -> None: + self.logger.debug("Trying to delete collection") + self.drop_table() + + @contextlib.contextmanager + def _make_session(self) -> Generator[Session, None, None]: + """Create a context manager for the session, bind to _conn string.""" + yield Session(self._conn) + + def delete( + self, + ids: Optional[List[str]] = None, + **kwargs: Any, + ) -> None: + """Delete vectors by ids or uuids. + + Args: + ids: List of ids to delete. + """ + with Session(self._conn) as session: + if ids is not None: + self.logger.debug( + "Trying to delete vectors by ids (represented by the model " + "using the custom ids field)" + ) + stmt = delete(self.EmbeddingStore).where( + self.EmbeddingStore.custom_id.in_(ids) + ) + session.execute(stmt) + session.commit() + + @classmethod + def _initialize_from_embeddings( + cls, + texts: List[str], + embeddings: List[List[float]], + embedding: Embeddings, + metadatas: Optional[List[dict]] = None, + ids: Optional[List[str]] = None, + collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, + distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, + pre_delete_collection: bool = False, + **kwargs: Any, + ) -> Lantern: + """ + Order of elements for lists `ids`, `embeddings`, `texts`, `metadatas` + should match, so each row will be associated with correct values. + + Postgres connection string is required + "Either pass it as `connection_string` parameter + or set the LANTERN_CONNECTION_STRING environment variable. + + - `texts` texts to insert into collection. + - `embeddings` an Embeddings to insert into collection + - `embedding` is :class:`Embeddings` that will be used for + embedding the text sent. If none is sent, then the + multilingual Tensorflow Universal Sentence Encoder will be used. + - `metadatas` row metadata to insert into collection. + - `ids` row ids to insert into collection. + - `collection_name` is the name of the collection to use. (default: langchain) + - NOTE: This is the name of the table in which embedding data will be stored + The table will be created when initializing the store (if not exists) + So, make sure the user has the right permissions to create tables. + - `distance_strategy` is the distance strategy to use. (default: EUCLIDEAN) + - `EUCLIDEAN` is the euclidean distance. + - `COSINE` is the cosine distance. 
+ - `HAMMING` is the hamming distance. + - `pre_delete_collection` if True, will delete the collection if it exists. + (default: False) + - Useful for testing. + """ + if ids is None: + ids = [str(uuid.uuid1()) for _ in texts] + + if not metadatas: + metadatas = [{} for _ in texts] + + connection_string = cls.__get_connection_string(kwargs) + + store = cls( + connection_string=connection_string, + collection_name=collection_name, + embedding_function=embedding, + pre_delete_collection=pre_delete_collection, + distance_strategy=distance_strategy, + ) + + store.add_embeddings( + texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs + ) + + store.create_hnsw_index(**kwargs) + + return store + + def add_embeddings( + self, + texts: List[str], + embeddings: List[List[float]], + metadatas: List[dict], + ids: List[str], + **kwargs: Any, + ) -> None: + with Session(self._conn) as session: + for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids): + embedding_store = self.EmbeddingStore( + embedding=embedding, + document=text, + cmetadata=metadata, + custom_id=id, + ) + session.add(embedding_store) + session.commit() + + def add_texts( + self, + texts: Iterable[str], + metadatas: Optional[List[dict]] = None, + ids: Optional[List[str]] = None, + **kwargs: Any, + ) -> List[str]: + if ids is None: + ids = [str(uuid.uuid4()) for _ in texts] + + embeddings = self.embedding_function.embed_documents(list(texts)) + + if not metadatas: + metadatas = [{} for _ in texts] + + with Session(self._conn) as session: + for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids): + embedding_store = self.EmbeddingStore( + embedding=embedding, + document=text, + cmetadata=metadata, + custom_id=id, + ) + session.add(embedding_store) + session.commit() + + return ids + + def _results_to_docs_and_scores(self, results: Any) -> List[Tuple[Document, float]]: + """Return docs and scores from results.""" + docs = [ + ( + Document( + page_content=result.EmbeddingStore.document, + metadata=result.EmbeddingStore.cmetadata, + ), + result.distance if self.embedding_function is not None else None, + ) + for result in results + ] + return docs + + def similarity_search( + self, + query: str, + k: int = 4, + filter: Optional[dict] = None, + **kwargs: Any, + ) -> List[Document]: + embedding = self.embedding_function.embed_query(text=query) + return self.similarity_search_by_vector( + embedding=embedding, + k=k, + filter=filter, + ) + + def similarity_search_with_score( + self, + query: str, + k: int = 4, + filter: Optional[dict] = None, + ) -> List[Tuple[Document, float]]: + embedding = self.embedding_function.embed_query(query) + docs = self.similarity_search_with_score_by_vector( + embedding=embedding, k=k, filter=filter + ) + return docs + + def similarity_search_with_score_by_vector( + self, + embedding: List[float], + k: int = 4, + filter: Optional[dict] = None, + ) -> List[Tuple[Document, float]]: + results = self.__query_collection(embedding=embedding, k=k, filter=filter) + + return self._results_to_docs_and_scores(results) + + def __query_collection( + self, + embedding: List[float], + k: int = 4, + filter: Optional[dict] = None, + ) -> List[Any]: + with Session(self._conn) as session: + set_enable_seqscan_stmt = sqlalchemy.text("SET enable_seqscan = off") + set_init_k = sqlalchemy.text("SET hnsw.init_k = :k") + session.execute(set_enable_seqscan_stmt) + session.execute(set_init_k, {"k": k}) + + filter_by = None + if filter is not None: + filter_clauses = [] + for key, 
value in filter.items():
+                    # A {"in": [...]} dict value builds an IN clause;
+                    # the "in" key is matched case-insensitively.
+                    IN = "in"
+                    if isinstance(value, dict) and IN in map(str.lower, value):
+                        value_case_insensitive = {
+                            k.lower(): v for k, v in value.items()
+                        }
+                        filter_by_metadata = self.EmbeddingStore.cmetadata[
+                            key
+                        ].astext.in_(value_case_insensitive[IN])
+                        filter_clauses.append(filter_by_metadata)
+                    else:
+                        filter_by_metadata = self.EmbeddingStore.cmetadata[
+                            key
+                        ].astext == str(value)
+                        filter_clauses.append(filter_by_metadata)
+
+                filter_by = sqlalchemy.and_(*filter_clauses)
+
+            embedding = self._typed_arg_for_distance(embedding)
+            query = session.query(
+                self.EmbeddingStore,
+                getattr(func, self.distance_function)(
+                    self.EmbeddingStore.embedding, embedding
+                ).label("distance"),
+            )  # Specify the columns you need here, e.g., EmbeddingStore.embedding
+
+            if filter_by is not None:
+                query = query.filter(filter_by)
+
+            results: List[QueryResult] = (
+                query.order_by(
+                    self.EmbeddingStore.embedding.op(self._get_operator())(embedding)
+                )  # Using PostgreSQL specific operator with the correct column name
+                .limit(k)
+                .all()
+            )
+
+            return results
+
+    def similarity_search_by_vector(
+        self,
+        embedding: List[float],
+        k: int = 4,
+        filter: Optional[dict] = None,
+        **kwargs: Any,
+    ) -> List[Document]:
+        docs_and_scores = self.similarity_search_with_score_by_vector(
+            embedding=embedding, k=k, filter=filter
+        )
+        return _results_to_docs(docs_and_scores)
+
+    @classmethod
+    def from_texts(
+        cls: Type[Lantern],
+        texts: List[str],
+        embedding: Embeddings,
+        metadatas: Optional[List[dict]] = None,
+        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
+        distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
+        ids: Optional[List[str]] = None,
+        pre_delete_collection: bool = False,
+        **kwargs: Any,
+    ) -> Lantern:
+        """
+        Initialize Lantern vectorstore from a list of texts.
+        The embeddings will be generated using the `embedding` class provided.
+
+        Order of elements for lists `ids`, `texts`, `metadatas` should match,
+        so each row will be associated with correct values.
+
+        Postgres connection string is required.
+        Either pass it as the `connection_string` parameter
+        or set the LANTERN_CONNECTION_STRING environment variable.
+
+        - `connection_string` is a fully populated connection string for the
+            postgres database.
+        - `texts` texts to insert into collection.
+        - `embedding` is the :class:`Embeddings` instance that will be used to
+            embed the texts.
+        - `metadatas` row metadata to insert into collection.
+        - `collection_name` is the name of the collection to use. (default: langchain)
+            - NOTE: This is the name of the table in which embedding data will be stored
+                The table will be created when initializing the store (if it does not exist).
+                So, make sure the user has the right permissions to create tables.
+        - `distance_strategy` is the distance strategy to use. (default: COSINE)
+            - `EUCLIDEAN` is the euclidean distance.
+            - `COSINE` is the cosine distance.
+            - `HAMMING` is the hamming distance.
+        - `ids` row ids to insert into collection.
+        - `pre_delete_collection` if True, will delete the collection if it exists.
+            (default: False)
+            - Useful for testing.
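+
+        Example (illustrative sketch; `embeddings` is any `Embeddings`
+        implementation and the connection string is a placeholder):
+
+        .. code-block:: python
+
+            store = Lantern.from_texts(
+                texts=["foo", "bar", "baz"],
+                embedding=embeddings,
+                collection_name="my_collection",
+                connection_string="postgresql://postgres:postgres@localhost:5432/postgres",
+            )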
+ """ + embeddings = embedding.embed_documents(list(texts)) + + return cls._initialize_from_embeddings( + texts, + embeddings, + embedding, + metadatas=metadatas, + ids=ids, + collection_name=collection_name, + pre_delete_collection=pre_delete_collection, + distance_strategy=distance_strategy, + **kwargs, + ) + + @classmethod + def from_embeddings( + cls, + text_embeddings: List[Tuple[str, List[float]]], + embedding: Embeddings, + metadatas: Optional[List[dict]] = None, + collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, + ids: Optional[List[str]] = None, + pre_delete_collection: bool = False, + distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, + **kwargs: Any, + ) -> Lantern: + """Construct Lantern wrapper from raw documents and pre- + generated embeddings. + + Postgres connection string is required + "Either pass it as `connection_string` parameter + or set the LANTERN_CONNECTION_STRING environment variable. + + Order of elements for lists `ids`, `text_embeddings`, `metadatas` should match, + so each row will be associated with correct values. + + - `connection_string` is fully populated connection string for postgres database + - `text_embeddings` is array with tuples (text, embedding) + to insert into collection. + - `embedding` is :class:`Embeddings` that will be used for + embedding the text sent. If none is sent, then the + multilingual Tensorflow Universal Sentence Encoder will be used. + - `metadatas` row metadata to insert into collection. + - `collection_name` is the name of the collection to use. (default: langchain) + - NOTE: This is the name of the table in which embedding data will be stored + The table will be created when initializing the store (if not exists) + So, make sure the user has the right permissions to create tables. + - `ids` row ids to insert into collection. + - `pre_delete_collection` if True, will delete the collection if it exists. + (default: False) + - Useful for testing. + - `distance_strategy` is the distance strategy to use. (default: EUCLIDEAN) + - `EUCLIDEAN` is the euclidean distance. + - `COSINE` is the cosine distance. + - `HAMMING` is the hamming distance. + """ + texts = [t[0] for t in text_embeddings] + embeddings = [t[1] for t in text_embeddings] + + return cls._initialize_from_embeddings( + texts, + embeddings, + embedding, + metadatas=metadatas, + ids=ids, + collection_name=collection_name, + pre_delete_collection=pre_delete_collection, + distance_strategy=distance_strategy, + **kwargs, + ) + + @classmethod + def from_existing_index( + cls: Type[Lantern], + embedding: Embeddings, + collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, + pre_delete_collection: bool = False, + distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, + **kwargs: Any, + ) -> Lantern: + """ + Get instance of an existing Lantern store.This method will + return the instance of the store without inserting any new + embeddings + + Postgres connection string is required + "Either pass it as `connection_string` parameter + or set the LANTERN_CONNECTION_STRING environment variable. + + - `connection_string` is a postgres connection string. + - `embedding` is :class:`Embeddings` that will be used for + embedding the text sent. If none is sent, then the + multilingual Tensorflow Universal Sentence Encoder will be used. + - `collection_name` is the name of the collection to use. 
(default: langchain)
+            - NOTE: This is the name of the table in which embedding data will be stored
+                The table will be created when initializing the store (if it does not exist).
+                So, make sure the user has the right permissions to create tables.
+        - `pre_delete_collection` if True, will delete the collection if it exists.
+            (default: False)
+            - Useful for testing.
+        - `distance_strategy` is the distance strategy to use. (default: COSINE)
+            - `EUCLIDEAN` is the euclidean distance.
+            - `COSINE` is the cosine distance.
+            - `HAMMING` is the hamming distance.
+        """
+        connection_string = cls.__get_connection_string(kwargs)
+
+        store = cls(
+            connection_string=connection_string,
+            collection_name=collection_name,
+            embedding_function=embedding,
+            pre_delete_collection=pre_delete_collection,
+            distance_strategy=distance_strategy,
+        )
+
+        return store
+
+    @classmethod
+    def __get_connection_string(cls, kwargs: Dict[str, Any]) -> str:
+        connection_string: str = get_from_dict_or_env(
+            data=kwargs,
+            key="connection_string",
+            env_key="LANTERN_CONNECTION_STRING",
+        )
+
+        if not connection_string:
+            raise ValueError(
+                "Postgres connection string is required. "
+                "Either pass it as the `connection_string` parameter "
+                "or set the LANTERN_CONNECTION_STRING environment variable."
+            )
+
+        return connection_string
+
+    @classmethod
+    def from_documents(
+        cls: Type[Lantern],
+        documents: List[Document],
+        embedding: Embeddings,
+        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
+        distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
+        ids: Optional[List[str]] = None,
+        pre_delete_collection: bool = False,
+        **kwargs: Any,
+    ) -> Lantern:
+        """
+        Initialize a vector store with a set of documents.
+
+        Postgres connection string is required.
+        Either pass it as the `connection_string` parameter
+        or set the LANTERN_CONNECTION_STRING environment variable.
+
+        - `connection_string` is a postgres connection string.
+        - `documents` is the list of :class:`Document` objects to initialize the
+            vector store with.
+        - `embedding` is the :class:`Embeddings` instance that will be used to
+            embed the documents.
+        - `collection_name` is the name of the collection to use. (default: langchain)
+            - NOTE: This is the name of the table in which embedding data will be stored
+                The table will be created when initializing the store (if it does not exist).
+                So, make sure the user has the right permissions to create tables.
+        - `distance_strategy` is the distance strategy to use. (default: COSINE)
+            - `EUCLIDEAN` is the euclidean distance.
+            - `COSINE` is the cosine distance.
+            - `HAMMING` is the hamming distance.
+        - `ids` row ids to insert into collection.
+        - `pre_delete_collection` if True, will delete the collection if it exists.
+            (default: False)
+            - Useful for testing.
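+
+        Example (illustrative sketch; `docs`, `embeddings`, and
+        `CONNECTION_STRING` come from the caller's context):
+
+        .. code-block:: python
+
+            db = Lantern.from_documents(
+                documents=docs,
+                embedding=embeddings,
+                collection_name="state_of_the_union",
+                connection_string=CONNECTION_STRING,
+                pre_delete_collection=True,
+            )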
+ """ + texts = [d.page_content for d in documents] + metadatas = [d.metadata for d in documents] + connection_string = cls.__get_connection_string(kwargs) + + kwargs["connection_string"] = connection_string + + return cls.from_texts( + texts=texts, + pre_delete_collection=pre_delete_collection, + embedding=embedding, + metadatas=metadatas, + ids=ids, + collection_name=collection_name, + distance_strategy=distance_strategy, + **kwargs, + ) + + def max_marginal_relevance_search_with_score_by_vector( + self, + embedding: List[float], + k: int = 4, + fetch_k: int = 20, + lambda_mult: float = 0.5, + filter: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> List[Tuple[Document, float]]: + """Return docs selected using the maximal marginal relevance with score + to embedding vector. + + Maximal marginal relevance optimizes for similarity to query AND diversity + among selected documents. + + Args: + embedding: Embedding to look up documents similar to. + k (int): Number of Documents to return. Defaults to 4. + fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. + Defaults to 20. + lambda_mult (float): Number between 0 and 1 that determines the degree + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + + Returns: + List[Tuple[Document, float]]: List of Documents selected by maximal marginal + relevance to the query and score for each. + """ + results = self.__query_collection(embedding=embedding, k=fetch_k, filter=filter) + embedding_list = [result.EmbeddingStore.embedding for result in results] + + mmr_selected = maximal_marginal_relevance( + np.array(embedding, dtype=np.float32), + embedding_list, + k=k, + lambda_mult=lambda_mult, + ) + + candidates = self._results_to_docs_and_scores(results) + + return [r for i, r in enumerate(candidates) if i in mmr_selected] + + def max_marginal_relevance_search( + self, + query: str, + k: int = 4, + fetch_k: int = 20, + lambda_mult: float = 0.5, + filter: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> List[Document]: + """Return docs selected using the maximal marginal relevance. + + Maximal marginal relevance optimizes for similarity to query AND diversity + among selected documents. + + Args: + query (str): Text to look up documents similar to. + k (int): Number of Documents to return. Defaults to 4. + fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. + Defaults to 20. + lambda_mult (float): Number between 0 and 1 that determines the degree + of diversity among the results with 0 corresponding + to maximum diversity and 1 to minimum diversity. + Defaults to 0.5. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + + Returns: + List[Document]: List of Documents selected by maximal marginal relevance. + """ + embedding = self.embedding_function.embed_query(query) + return self.max_marginal_relevance_search_by_vector( + embedding, + k=k, + fetch_k=fetch_k, + lambda_mult=lambda_mult, + filter=filter, + **kwargs, + ) + + def max_marginal_relevance_search_with_score( + self, + query: str, + k: int = 4, + fetch_k: int = 20, + lambda_mult: float = 0.5, + filter: Optional[dict] = None, + **kwargs: Any, + ) -> List[Tuple[Document, float]]: + """Return docs selected using the maximal marginal relevance with score. + + Maximal marginal relevance optimizes for similarity to query AND diversity + among selected documents. 
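+
+        Note: returned scores are raw distances under the collection's distance
+        function (lower means more similar), not normalized relevance scores.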
+
+        Args:
+            query (str): Text to look up documents similar to.
+            k (int): Number of Documents to return. Defaults to 4.
+            fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
+                Defaults to 20.
+            lambda_mult (float): Number between 0 and 1 that determines the degree
+                of diversity among the results with 0 corresponding
+                to maximum diversity and 1 to minimum diversity.
+                Defaults to 0.5.
+            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
+
+        Returns:
+            List[Tuple[Document, float]]: List of Documents selected by maximal marginal
+                relevance to the query and score for each.
+        """
+        embedding = self.embedding_function.embed_query(query)
+        docs = self.max_marginal_relevance_search_with_score_by_vector(
+            embedding=embedding,
+            k=k,
+            fetch_k=fetch_k,
+            lambda_mult=lambda_mult,
+            filter=filter,
+            **kwargs,
+        )
+        return docs
+
+    def max_marginal_relevance_search_by_vector(
+        self,
+        embedding: List[float],
+        k: int = 4,
+        fetch_k: int = 20,
+        lambda_mult: float = 0.5,
+        filter: Optional[Dict[str, str]] = None,
+        **kwargs: Any,
+    ) -> List[Document]:
+        """Return docs selected using the maximal marginal relevance
+        to embedding vector.
+
+        Maximal marginal relevance optimizes for similarity to query AND diversity
+        among selected documents.
+
+        Args:
+            embedding (List[float]): Embedding vector to look up documents
+                similar to.
+            k (int): Number of Documents to return. Defaults to 4.
+            fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
+                Defaults to 20.
+            lambda_mult (float): Number between 0 and 1 that determines the degree
+                of diversity among the results with 0 corresponding
+                to maximum diversity and 1 to minimum diversity.
+                Defaults to 0.5.
+            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
+
+        Returns:
+            List[Document]: List of Documents selected by maximal marginal relevance.
+        """
+        docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector(
+            embedding,
+            k=k,
+            fetch_k=fetch_k,
+            lambda_mult=lambda_mult,
+            filter=filter,
+            **kwargs,
+        )
+
+        return _results_to_docs(docs_and_scores)
diff --git a/libs/community/tests/integration_tests/vectorstores/test_lantern.py b/libs/community/tests/integration_tests/vectorstores/test_lantern.py
new file mode 100644
index 00000000000..8de7d803644
--- /dev/null
+++ b/libs/community/tests/integration_tests/vectorstores/test_lantern.py
@@ -0,0 +1,319 @@
+"""Test Lantern functionality."""
+import os
+from typing import List, Tuple
+
+from langchain_core.documents import Document
+
+from langchain_community.embeddings import FakeEmbeddings
+from langchain_community.vectorstores import Lantern
+
+CONNECTION_STRING = Lantern.connection_string_from_db_params(
+    driver=os.environ.get("TEST_LANTERN_DRIVER", "psycopg2"),
+    host=os.environ.get("TEST_LANTERN_HOST", "localhost"),
+    port=int(os.environ.get("TEST_LANTERN_PORT", "5432")),
+    database=os.environ.get("TEST_LANTERN_DATABASE", "postgres"),
+    user=os.environ.get("TEST_LANTERN_USER", "postgres"),
+    password=os.environ.get("TEST_LANTERN_PASSWORD", "postgres"),
+)
+
+
+ADA_TOKEN_COUNT = 1536
+
+
+def fix_distance_precision(
+    results: List[Tuple[Document, float]], precision: int = 2
+) -> List[Tuple[Document, float]]:
+    return list(
+        map(lambda x: (x[0], float(f"{{:.{precision}f}}".format(x[1]))), results)
+    )
+
+
+class FakeEmbeddingsWithAdaDimension(FakeEmbeddings):
+    """Fake embeddings functionality for testing."""
+
+    def __init__(self) -> None:
+        super().__init__(size=ADA_TOKEN_COUNT)
+
+    def embed_documents(self, texts: List[str]) -> List[List[float]]:
+        """Return simple embeddings."""
+        return [
+            [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)] for i in range(len(texts))
+        ]
+
+    def embed_query(self, text: str) -> List[float]:
+        """Return simple embeddings."""
+        return [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(0.0)]
+
+
+def test_lantern() -> None:
+    """Test end to end construction and search."""
+    texts = ["foo", "bar", "baz"]
+    docsearch = Lantern.from_texts(
+        texts=texts,
+        collection_name="test_collection",
+        embedding=FakeEmbeddingsWithAdaDimension(),
+        connection_string=CONNECTION_STRING,
+        pre_delete_collection=True,
+    )
+    output = docsearch.similarity_search("foo", k=1)
+    assert output == [Document(page_content="foo")]
+
+
+def test_lantern_embeddings() -> None:
+    """Test end to end construction with embeddings and search."""
+    texts = ["foo", "bar", "baz"]
+    text_embeddings = FakeEmbeddingsWithAdaDimension().embed_documents(texts)
+    text_embedding_pairs = list(zip(texts, text_embeddings))
+    docsearch = Lantern.from_embeddings(
+        text_embeddings=text_embedding_pairs,
+        collection_name="test_collection",
+        embedding=FakeEmbeddingsWithAdaDimension(),
+        connection_string=CONNECTION_STRING,
+        pre_delete_collection=True,
+    )
+    output = docsearch.similarity_search("foo", k=1)
+    assert output == [Document(page_content="foo")]
+
+
+def test_lantern_embeddings_distance_strategy() -> None:
+    """Test end to end construction and search with a custom distance strategy."""
+    texts = ["foo", "bar", "baz"]
+    text_embeddings = FakeEmbeddingsWithAdaDimension().embed_documents(texts)
+    text_embedding_pairs = list(zip(texts, text_embeddings))
+    docsearch = Lantern.from_embeddings(
+        text_embeddings=text_embedding_pairs,
+        collection_name="test_collection",
+        embedding=FakeEmbeddingsWithAdaDimension(),
+        connection_string=CONNECTION_STRING,
+        
distance_strategy="hamming", + pre_delete_collection=True, + ) + output = docsearch.similarity_search("foo", k=1) + assert output == [Document(page_content="foo")] + + +def test_lantern_with_metadatas() -> None: + """Test end to end construction and search.""" + texts = ["foo", "bar", "baz"] + metadatas = [{"page": str(i)} for i in range(len(texts))] + docsearch = Lantern.from_texts( + texts=texts, + collection_name="test_collection", + embedding=FakeEmbeddingsWithAdaDimension(), + metadatas=metadatas, + connection_string=CONNECTION_STRING, + pre_delete_collection=True, + ) + output = docsearch.similarity_search("foo", k=1) + assert output == [Document(page_content="foo", metadata={"page": "0"})] + + +def test_lantern_with_metadatas_with_scores() -> None: + """Test end to end construction and search.""" + texts = ["foo", "bar", "baz"] + metadatas = [{"page": str(i)} for i in range(len(texts))] + docsearch = Lantern.from_texts( + texts=texts, + collection_name="test_collection", + embedding=FakeEmbeddingsWithAdaDimension(), + metadatas=metadatas, + connection_string=CONNECTION_STRING, + pre_delete_collection=True, + ) + output = fix_distance_precision(docsearch.similarity_search_with_score("foo", k=1)) + assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)] + + +def test_lantern_with_filter_match() -> None: + """Test end to end construction and search.""" + texts = ["foo", "bar", "baz"] + metadatas = [{"page": str(i)} for i in range(len(texts))] + docsearch = Lantern.from_texts( + texts=texts, + collection_name="test_collection_filter", + embedding=FakeEmbeddingsWithAdaDimension(), + metadatas=metadatas, + connection_string=CONNECTION_STRING, + pre_delete_collection=True, + ) + output = fix_distance_precision( + docsearch.similarity_search_with_score("foo", k=1, filter={"page": "0"}) + ) + assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)] + + +def test_lantern_with_filter_distant_match() -> None: + """Test end to end construction and search.""" + texts = ["foo", "bar", "baz"] + metadatas = [{"page": str(i)} for i in range(len(texts))] + docsearch = Lantern.from_texts( + texts=texts, + collection_name="test_collection_filter", + embedding=FakeEmbeddingsWithAdaDimension(), + metadatas=metadatas, + connection_string=CONNECTION_STRING, + pre_delete_collection=True, + ) + output = fix_distance_precision( + docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"}) + ) + assert output == [(Document(page_content="baz", metadata={"page": "2"}), 0.0)] + + +def test_lantern_with_filter_no_match() -> None: + """Test end to end construction and search.""" + texts = ["foo", "bar", "baz"] + metadatas = [{"page": str(i)} for i in range(len(texts))] + docsearch = Lantern.from_texts( + texts=texts, + collection_name="test_collection_filter", + embedding=FakeEmbeddingsWithAdaDimension(), + metadatas=metadatas, + connection_string=CONNECTION_STRING, + pre_delete_collection=True, + ) + output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "5"}) + assert output == [] + + +def test_lantern_with_filter_in_set() -> None: + """Test end to end construction and search.""" + texts = ["foo", "bar", "baz"] + metadatas = [{"page": str(i)} for i in range(len(texts))] + docsearch = Lantern.from_texts( + texts=texts, + collection_name="test_collection_filter", + embedding=FakeEmbeddingsWithAdaDimension(), + metadatas=metadatas, + connection_string=CONNECTION_STRING, + pre_delete_collection=True, + ) + output = fix_distance_precision( + 
docsearch.similarity_search_with_score(
+            "foo", k=2, filter={"page": {"IN": ["0", "2"]}}
+        ),
+        4,
+    )
+    assert output == [
+        (Document(page_content="foo", metadata={"page": "0"}), 0.0),
+        (Document(page_content="baz", metadata={"page": "2"}), 0.0013),
+    ]
+
+
+def test_lantern_delete_docs() -> None:
+    """Add and delete documents."""
+    texts = ["foo", "bar", "baz"]
+    metadatas = [{"page": str(i)} for i in range(len(texts))]
+    docsearch = Lantern.from_texts(
+        texts=texts,
+        collection_name="test_collection_filter",
+        embedding=FakeEmbeddingsWithAdaDimension(),
+        metadatas=metadatas,
+        ids=["1", "2", "3"],
+        connection_string=CONNECTION_STRING,
+        pre_delete_collection=True,
+    )
+    docsearch.delete(["1", "2", "3"])
+    output = docsearch.similarity_search("foo", k=3)
+    assert output == []
+
+
+def test_lantern_relevance_score() -> None:
+    """Test to make sure the relevance score is scaled to 0-1."""
+    texts = ["foo", "bar", "baz"]
+    metadatas = [{"page": str(i)} for i in range(len(texts))]
+    docsearch = Lantern.from_texts(
+        texts=texts,
+        collection_name="test_collection",
+        embedding=FakeEmbeddingsWithAdaDimension(),
+        metadatas=metadatas,
+        connection_string=CONNECTION_STRING,
+        pre_delete_collection=True,
+    )
+
+    output = fix_distance_precision(
+        docsearch.similarity_search_with_relevance_scores("foo", k=3), 4
+    )
+    assert output == [
+        (Document(page_content="foo", metadata={"page": "0"}), 1.0),
+        (Document(page_content="bar", metadata={"page": "1"}), 0.9997),
+        (Document(page_content="baz", metadata={"page": "2"}), 0.9987),
+    ]
+
+
+def test_lantern_retriever_search_threshold() -> None:
+    """Test using retriever for searching with threshold."""
+    texts = ["foo", "bar", "baz"]
+    metadatas = [{"page": str(i)} for i in range(len(texts))]
+    docsearch = Lantern.from_texts(
+        texts=texts,
+        collection_name="test_collection",
+        embedding=FakeEmbeddingsWithAdaDimension(),
+        metadatas=metadatas,
+        connection_string=CONNECTION_STRING,
+        pre_delete_collection=True,
+    )
+
+    retriever = docsearch.as_retriever(
+        search_type="similarity_score_threshold",
+        search_kwargs={"k": 3, "score_threshold": 0.999},
+    )
+    output = retriever.get_relevant_documents("summer")
+    assert output == [
+        Document(page_content="foo", metadata={"page": "0"}),
+        Document(page_content="bar", metadata={"page": "1"}),
+    ]
+
+
+def test_lantern_retriever_search_threshold_custom_normalization_fn() -> None:
+    """Test searching with threshold and custom normalization function."""
+    texts = ["foo", "bar", "baz"]
+    metadatas = [{"page": str(i)} for i in range(len(texts))]
+    docsearch = Lantern.from_texts(
+        texts=texts,
+        collection_name="test_collection",
+        embedding=FakeEmbeddingsWithAdaDimension(),
+        metadatas=metadatas,
+        connection_string=CONNECTION_STRING,
+        relevance_score_fn=lambda d: d * 0,
+        pre_delete_collection=True,
+    )
+
+    retriever = docsearch.as_retriever(
+        search_type="similarity_score_threshold",
+        search_kwargs={"k": 3, "score_threshold": 0.9999},
+    )
+    output = retriever.get_relevant_documents("foo")
+    assert output == [
+        Document(page_content="foo", metadata={"page": "0"}),
+    ]
+
+
+def test_lantern_max_marginal_relevance_search() -> None:
+    """Test max marginal relevance search."""
+    texts = ["foo", "bar", "baz"]
+    docsearch = Lantern.from_texts(
+        texts=texts,
+        collection_name="test_collection",
+        embedding=FakeEmbeddingsWithAdaDimension(),
+        connection_string=CONNECTION_STRING,
+        pre_delete_collection=True,
+    )
+    output = docsearch.max_marginal_relevance_search("foo", k=1, fetch_k=3)
+    assert output ==
[Document(page_content="foo")]
+
+
+def test_lantern_max_marginal_relevance_search_with_score() -> None:
+    """Test max marginal relevance search with distance scores."""
+    texts = ["foo", "bar", "baz"]
+    docsearch = Lantern.from_texts(
+        texts=texts,
+        collection_name="test_collection",
+        embedding=FakeEmbeddingsWithAdaDimension(),
+        connection_string=CONNECTION_STRING,
+        pre_delete_collection=True,
+    )
+    output = fix_distance_precision(
+        docsearch.max_marginal_relevance_search_with_score("foo", k=1, fetch_k=3)
+    )
+    assert output == [(Document(page_content="foo"), 0.0)]
diff --git a/libs/community/tests/unit_tests/vectorstores/test_public_api.py b/libs/community/tests/unit_tests/vectorstores/test_public_api.py
index e994afe0666..25db0bf3ac1 100644
--- a/libs/community/tests/unit_tests/vectorstores/test_public_api.py
+++ b/libs/community/tests/unit_tests/vectorstores/test_public_api.py
@@ -29,6 +29,7 @@ _EXPECTED = [
     "FAISS",
     "Hologres",
     "LanceDB",
+    "Lantern",
     "LLMRails",
     "Marqo",
     "MatchingEngine",
diff --git a/libs/langchain/tests/unit_tests/indexes/test_indexing.py b/libs/langchain/tests/unit_tests/indexes/test_indexing.py
index 352800e04dd..fda61008dcd 100644
--- a/libs/langchain/tests/unit_tests/indexes/test_indexing.py
+++ b/libs/langchain/tests/unit_tests/indexes/test_indexing.py
@@ -1250,5 +1250,6 @@ def test_compatible_vectorstore_documentation() -> None:
         "VespaStore",
         "Weaviate",
         "ZepVectorStore",
+        "Lantern",
     }
    assert compatible == documented
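---

As a quick illustration of the MMR surface added in this patch, the sketch below builds a throwaway store and exercises both MMR entry points. It is a minimal sketch, not part of the patch itself: the connection string, the `mmr_demo` collection name, and the use of `FakeEmbeddings` are placeholder assumptions; any embedding model and any Postgres database with the `lantern` extension installed would work the same way.

```python
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import Lantern

# Placeholder connection string; point it at any Postgres instance
# that has the lantern extension installed.
CONNECTION_STRING = "postgresql://postgres:postgres@localhost:5432/postgres"

store = Lantern.from_texts(
    texts=["foo", "bar", "baz"],
    embedding=FakeEmbeddings(size=1536),  # stand-in for a real embedding model
    collection_name="mmr_demo",  # hypothetical collection name
    connection_string=CONNECTION_STRING,
    pre_delete_collection=True,
)

# fetch_k candidates come back from the HNSW index, then
# maximal_marginal_relevance re-ranks them so the final k results balance
# similarity to the query against diversity among themselves
# (lambda_mult=1 favors similarity, lambda_mult=0 favors diversity).
docs = store.max_marginal_relevance_search("foo", k=2, fetch_k=3, lambda_mult=0.5)

# The *_with_score variant returns (Document, distance) pairs instead.
docs_and_scores = store.max_marginal_relevance_search_with_score(
    "foo", k=2, fetch_k=3
)
```

Since `FakeEmbeddings` produces random vectors, the documents selected here are arbitrary; with a real model, raising `fetch_k` relative to `k` widens the candidate pool the diversity re-ranking can draw from.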