mirror of
				https://github.com/hwchase17/langchain.git
				synced 2025-10-30 23:29:54 +00:00 
			
		
		
		
	ran:
	```bash
	g grep -l "langchain.vectorstores" | xargs -L 1 sed -i '' "s/langchain\.vectorstores/langchain_community.vectorstores/g"
	g grep -l "langchain.document_loaders" | xargs -L 1 sed -i '' "s/langchain\.document_loaders/langchain_community.document_loaders/g"
	g grep -l "langchain.chat_loaders" | xargs -L 1 sed -i '' "s/langchain\.chat_loaders/langchain_community.chat_loaders/g"
	g grep -l "langchain.document_transformers" | xargs -L 1 sed -i '' "s/langchain\.document_transformers/langchain_community.document_transformers/g"
	g grep -l "langchain\.graphs" | xargs -L 1 sed -i '' "s/langchain\.graphs/langchain_community.graphs/g"
	g grep -l "langchain\.memory\.chat_message_histories" | xargs -L 1 sed -i '' "s/langchain\.memory\.chat_message_histories/langchain_community.chat_message_histories/g"
	gco master libs/langchain/tests/unit_tests/*/test_imports.py
	gco master libs/langchain/tests/unit_tests/**/test_public_api.py
	```
		
			
				
	
	
		
			67 lines
		
	
	
		
			2.1 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
			
		
		
	
	
			67 lines
		
	
	
		
			2.1 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
| import os
 | |
| 
 | |
| from langchain.prompts import ChatPromptTemplate
 | |
| from langchain.retrievers.multi_query import MultiQueryRetriever
 | |
| from langchain_community.chat_models import ChatOpenAI
 | |
| from langchain_community.embeddings import OpenAIEmbeddings
 | |
| from langchain_community.vectorstores import Pinecone
 | |
| from langchain_core.output_parsers import StrOutputParser
 | |
| from langchain_core.pydantic_v1 import BaseModel
 | |
| from langchain_core.runnables import RunnableParallel, RunnablePassthrough
 | |
| 
 | |
# Fail fast at import time if the Pinecone credentials are not configured.
# Idiom fix: `os.environ.get(X, None) is None` duplicated per variable is
# replaced by a membership test in a single loop; the raised exception type
# and message text are unchanged for backward compatibility.
for _required_env_var in ("PINECONE_API_KEY", "PINECONE_ENVIRONMENT"):
    if _required_env_var not in os.environ:
        raise Exception(f"Missing `{_required_env_var}` environment variable.")

# Index name is overridable via PINECONE_INDEX; defaults to the test index.
PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test")
 | |
| 
 | |
### Ingest code - you may need to run this the first time
# Load
# from langchain_community.document_loaders import WebBaseLoader
# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
# data = loader.load()

# # Split
# from langchain.text_splitter import RecursiveCharacterTextSplitter
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
# all_splits = text_splitter.split_documents(data)

# # Add to vectorDB
# vectorstore = Pinecone.from_documents(
#     documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME
# )
# retriever = vectorstore.as_retriever()

# Connect to the existing Pinecone index and wrap its retriever in a
# MultiQueryRetriever, which uses the LLM to rephrase the user question
# into several variants before searching.
embeddings = OpenAIEmbeddings()
vectorstore = Pinecone.from_existing_index(PINECONE_INDEX_NAME, embeddings)
model = ChatOpenAI(temperature=0)
base_retriever = vectorstore.as_retriever()
retriever = MultiQueryRetriever.from_llm(retriever=base_retriever, llm=model)
 | |
| 
 | |
# RAG prompt: instructs the model to answer only from the retrieved context.
prompt = ChatPromptTemplate.from_template(
    """Answer the question based only on the following context:
{context}
Question: {question}
"""
)
 | |
| 
 | |
# RAG chain: fan out the input into {context, question}, fill the prompt,
# call the chat model, and parse the reply to a plain string.
model = ChatOpenAI()
inputs = RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
chain = inputs | prompt | model | StrOutputParser()
 | |
| 
 | |
| 
 | |
# Add typing for input
class Question(BaseModel):
    # Pydantic v1 "custom root type": the chain's input schema is a bare
    # string (the user's question) rather than a JSON object.
    __root__: str


# Attach the input schema so LangServe can generate typed endpoints/docs.
chain = chain.with_types(input_type=Question)
 |