Add template for Pinecone + Multi-Query (#12353)
@@ -0,0 +1,58 @@
import os

import pinecone
from operator import itemgetter
from langchain.vectorstores import Pinecone
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema.output_parser import StrOutputParser
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain.schema.runnable import RunnablePassthrough, RunnableParallel

if os.environ.get("PINECONE_API_KEY", None) is None:
    raise Exception("Missing `PINECONE_API_KEY` environment variable.")

if os.environ.get("PINECONE_ENVIRONMENT", None) is None:
    raise Exception("Missing `PINECONE_ENVIRONMENT` environment variable.")

PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test")
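# Depending on the pinecone-client version in use, the client may need to be
# initialized explicitly before the index can be reached. A minimal sketch,
# assuming the v2 client (later releases replaced `pinecone.init` with a
# `Pinecone` client class):
# pinecone.init(
#     api_key=os.environ["PINECONE_API_KEY"],
#     environment=os.environ["PINECONE_ENVIRONMENT"],
# )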

### Ingest code - you may need to run this the first time
# Load
# from langchain.document_loaders import WebBaseLoader
# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
# data = loader.load()

# # Split
# from langchain.text_splitter import RecursiveCharacterTextSplitter
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
# all_splits = text_splitter.split_documents(data)

# # Add to vectorDB
# vectorstore = Pinecone.from_documents(
#     documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME
# )
# retriever = vectorstore.as_retriever()
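# Optional sanity check: confirm the index created by the ingest step actually
# exists before querying it. A sketch assuming the v2 `pinecone.list_indexes()`
# API, which returns the index names for the configured environment:
# if PINECONE_INDEX_NAME not in pinecone.list_indexes():
#     raise Exception(
#         f"Index `{PINECONE_INDEX_NAME}` not found; run the ingest code above first."
#     )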

# Set up index with multi query retriever
vectorstore = Pinecone.from_existing_index(PINECONE_INDEX_NAME, OpenAIEmbeddings())
model = ChatOpenAI(temperature=0)
retriever = MultiQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(), llm=model
)
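# MultiQueryRetriever uses the LLM (temperature=0 for deterministic rewrites)
# to generate several rephrasings of the input question, runs each against the
# vector store, and returns the de-duplicated union of the retrieved documents.
# To inspect what it pulls back (the question below is illustrative only):
# docs = retriever.get_relevant_documents("What are the approaches to task decomposition?")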

# RAG prompt
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
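# The prompt expects the retrieved documents under `context` and the user's
# question under `question`. An illustrative render of the final messages:
# prompt.format_messages(context="<retrieved documents>", question="<user question>")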

# RAG
model = ChatOpenAI()
chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | model
    | StrOutputParser()
)
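# Usage: `question` is wired through RunnablePassthrough, so the chain takes
# the raw question string as input, while the retriever's documents are
# formatted into `context`. A minimal invocation sketch (question is
# illustrative only):
# chain.invoke("What are the approaches to task decomposition?")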