Add template for Pinecone + Multi-Query (#12353)
commit bc6f6e968e
parent c6a733802b
21 templates/rag-pinecone-multi-query/LICENSE Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 LangChain, Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
16 templates/rag-pinecone-multi-query/README.md Normal file
@@ -0,0 +1,16 @@
# RAG Pinecone multi query

This template performs RAG using Pinecone and OpenAI with the [multi-query retriever](https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever).

It uses an LLM to generate multiple queries from different perspectives for a given user input query.

For each query, it retrieves a set of relevant documents and takes the unique union across all queries for answer synthesis.

## Pinecone

This template uses Pinecone as a vectorstore and requires that `PINECONE_API_KEY`, `PINECONE_ENVIRONMENT`, and `PINECONE_INDEX` are set.

## LLM

Be sure that `OPENAI_API_KEY` is set in order to use the OpenAI models.
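For a sense of what the multi-query step does, here is a minimal sketch built from the same pieces the template uses; the index name `langchain-test` is the template's default, and the question is illustrative:

```python
# Minimal sketch of the multi-query retrieval step. Assumes PINECONE_API_KEY,
# PINECONE_ENVIRONMENT, and OPENAI_API_KEY are set and the index is populated.
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain.vectorstores import Pinecone

vectorstore = Pinecone.from_existing_index("langchain-test", OpenAIEmbeddings())
retriever = MultiQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(), llm=ChatOpenAI(temperature=0)
)

# The retriever asks the LLM for several rephrasings of the question, runs a
# similarity search for each, and returns the unique union of documents.
docs = retriever.get_relevant_documents("What are the types of agent memory?")
```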
2296 templates/rag-pinecone-multi-query/poetry.lock (generated) Normal file
File diff suppressed because it is too large
21 templates/rag-pinecone-multi-query/pyproject.toml Normal file
@@ -0,0 +1,21 @@
[tool.poetry]
name = "rag-pinecone-multi-query"
version = "0.1.0"
description = ""
authors = ["Lance Martin <lance@langchain.dev>"]
readme = "README.md"

[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
langchain = ">=0.0.313, <0.1"
openai = ">=0.28.1"
tiktoken = ">=0.5.1"
pinecone-client = ">=2.2.4"

[tool.langserve]
export_module = "rag_pinecone_multi_query"
export_attr = "chain"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
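The `[tool.langserve]` table is what the LangServe tooling reads to find the exported runnable. As a rough sketch (the FastAPI app and path here are assumptions, not part of the template), the same chain could be mounted by hand:

```python
# Hypothetical manual mount of the exported chain; `langchain serve` does the
# equivalent automatically using export_module/export_attr from pyproject.toml.
from fastapi import FastAPI
from langserve import add_routes

from rag_pinecone_multi_query import chain

app = FastAPI()
add_routes(app, chain, path="/rag-pinecone-multi-query")

# Run with: uvicorn app:app --port 8000
```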
46 templates/rag-pinecone-multi-query/rag_pinecone.ipynb Normal file
@@ -0,0 +1,46 @@
{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "681a5d1e",
   "metadata": {},
   "source": [
    "## Connect to template"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d774be2a",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langserve.client import RemoteRunnable\n",
    "rag_app_pinecone = RemoteRunnable('http://localhost:8000/rag-pinecone-multi-query')\n",
    "rag_app_pinecone.invoke(\"What are the different types of agent memory\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
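Because `RemoteRunnable` implements the standard runnable interface, the same endpoint should also support streaming; a small sketch, assuming the server from the README setup is running:

```python
# Sketch: stream the answer chunk by chunk instead of waiting for invoke().
from langserve.client import RemoteRunnable

rag_app_pinecone = RemoteRunnable("http://localhost:8000/rag-pinecone-multi-query")
for chunk in rag_app_pinecone.stream("What are the different types of agent memory"):
    print(chunk, end="", flush=True)
```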
3 templates/rag-pinecone-multi-query/rag_pinecone_multi_query/__init__.py Normal file
@@ -0,0 +1,3 @@
from rag_pinecone_multi_query.chain import chain

__all__ = ["chain"]
58 templates/rag-pinecone-multi-query/rag_pinecone_multi_query/chain.py Normal file
@@ -0,0 +1,58 @@
import os
import pinecone
from operator import itemgetter
from langchain.vectorstores import Pinecone
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema.output_parser import StrOutputParser
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain.schema.runnable import RunnablePassthrough, RunnableParallel

if os.environ.get("PINECONE_API_KEY", None) is None:
    raise Exception("Missing `PINECONE_API_KEY` environment variable.")

if os.environ.get("PINECONE_ENVIRONMENT", None) is None:
    raise Exception("Missing `PINECONE_ENVIRONMENT` environment variable.")

PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test")

### Ingest code - you may need to run this the first time
# Load
# from langchain.document_loaders import WebBaseLoader
# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
# data = loader.load()

# # Split
# from langchain.text_splitter import RecursiveCharacterTextSplitter
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
# all_splits = text_splitter.split_documents(data)

# # Add to vectorDB
# vectorstore = Pinecone.from_documents(
#     documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME
# )
# retriever = vectorstore.as_retriever()

# Set up index with multi query retriever
vectorstore = Pinecone.from_existing_index(PINECONE_INDEX_NAME, OpenAIEmbeddings())
model = ChatOpenAI(temperature=0)
retriever = MultiQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(), llm=model
)

# RAG prompt
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# RAG
model = ChatOpenAI()
chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | model
    | StrOutputParser()
)
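Since the exported `chain` is an ordinary runnable, it can also be exercised directly, without a LangServe server. A minimal sketch, assuming the Pinecone variables and `OPENAI_API_KEY` are set and the index already holds documents:

```python
# Direct local invocation of the template's chain (no FastAPI/LangServe needed).
from rag_pinecone_multi_query.chain import chain

answer = chain.invoke("What are the different types of agent memory")
print(answer)
```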
@@ -1,84 +0,0 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "681a5d1e",
   "metadata": {},
   "source": [
    "## Connect to template\n",
    "\n",
    "`Context`\n",
    " \n",
    "* LangServe apps give you access to templates.\n",
    "* Templates are LLM pipelines (runnables or chains) exposed as endpoints via FastAPI.\n",
    "* The environment for these templates is managed by Poetry.\n",
    "\n",
    "`Create app`\n",
    "\n",
    "* Install LangServe and create an app.\n",
    "* This will create a new Poetry environment.\n",
    "```\n",
    "pip install < to add > \n",
    "langchain serve new my-app\n",
    "cd my-app\n",
    "```\n",
    "\n",
    "`Add templates`\n",
    "\n",
    "* When we add a template, we update the Poetry config file with the necessary dependencies.\n",
    "* It also automatically installs these template dependencies in your Poetry environment.\n",
    "```\n",
    "langchain serve add rag-pinecone\n",
    "```\n",
    "\n",
    "`Start FastAPI server`\n",
    "\n",
    "```\n",
    "langchain start\n",
    "```\n",
    "\n",
    "Note, we can now look at the endpoints:\n",
    "\n",
    "http://127.0.0.1:8000/docs#\n",
    "\n",
    "And look specifically at our loaded template:\n",
    "\n",
    "http://127.0.0.1:8000/docs#/default/invoke_rag_pinecone_invoke_post\n",
    " \n",
    "We can also use the remote runnable to call it:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d774be2a",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langserve.client import RemoteRunnable\n",
    "rag_app_pinecone = RemoteRunnable('http://localhost:8000/rag-pinecone')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
45 templates/rag-pinecone/rag_pinecone.ipynb Normal file
@@ -0,0 +1,45 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "681a5d1e",
   "metadata": {},
   "source": [
    "## Connect to template"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d774be2a",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langserve.client import RemoteRunnable\n",
    "rag_app_pinecone = RemoteRunnable('http://localhost:8000/rag-pinecone')\n",
    "rag_app_pinecone.invoke(\"How does agent memory work?\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
templates/rag-pinecone/rag_pinecone/chain.py
@@ -8,25 +8,32 @@ from langchain.embeddings import OpenAIEmbeddings
 from langchain.schema.output_parser import StrOutputParser
 from langchain.schema.runnable import RunnablePassthrough, RunnableParallel
 
-# Pinecone init
-# Find API key in console at app.pinecone.io
-YOUR_API_KEY = os.getenv('PINECONE_API_KEY') or 'PINECONE_API_KEY'
-# Find ENV (cloud region) next to API key in console
-YOUR_ENV = os.getenv('PINECONE_ENVIRONMENT') or 'PINECONE_ENV'
-# Init
-pinecone.init(
-    api_key=YOUR_API_KEY,
-    environment=YOUR_ENV
-)
-
-# Get vectorstore
-text_field = "text"
-index_name = "langchain-multi-query-demo"
-index = pinecone.Index(index_name)
-vectorstore = Pinecone(index,
-                       OpenAIEmbeddings(),
-                       text_field)
-
+if os.environ.get("PINECONE_API_KEY", None) is None:
+    raise Exception("Missing `PINECONE_API_KEY` environment variable.")
+
+if os.environ.get("PINECONE_ENVIRONMENT", None) is None:
+    raise Exception("Missing `PINECONE_ENVIRONMENT` environment variable.")
+
+PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test")
+
+### Ingest code - you may need to run this the first time
+# Load
+# from langchain.document_loaders import WebBaseLoader
+# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
+# data = loader.load()
+
+# # Split
+# from langchain.text_splitter import RecursiveCharacterTextSplitter
+# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
+# all_splits = text_splitter.split_documents(data)
+
+# # Add to vectorDB
+# vectorstore = Pinecone.from_documents(
+#     documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME
+# )
+# retriever = vectorstore.as_retriever()
+
+vectorstore = Pinecone.from_existing_index(PINECONE_INDEX_NAME, OpenAIEmbeddings())
 retriever = vectorstore.as_retriever()
 
 # RAG prompt
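One consequence of the new guard clauses is that the module raises at import time if the variables are missing. A minimal sketch of setting them up first (the values are placeholders, and the module path follows the template convention rather than being confirmed by this diff):

```python
# Sketch: configure the environment before importing the chain module,
# because the "Missing ... environment variable" checks run at import time.
import os

os.environ.setdefault("PINECONE_API_KEY", "<your-api-key>")          # placeholder
os.environ.setdefault("PINECONE_ENVIRONMENT", "<your-environment>")  # placeholder
os.environ.setdefault("PINECONE_INDEX", "langchain-test")  # default used above

from rag_pinecone.chain import chain  # assumed package path for this template

print(chain.invoke("How does agent memory work?"))
```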