Mirror of https://github.com/hwchase17/langchain.git, synced 2025-07-19 19:11:33 +00:00
Updated the Bedrock rag template (#12462)
Updates the Bedrock rag template:

- Removes Pinecone and replaces it with FAISS as the vector store
- Fixes the environment variables, setting defaults
- Adds a `main.py` test file for quick sanity testing
- Updates README.md with correct instructions
This commit is contained in:
parent: 5c2243ee91
commit: 5545de0466
templates/rag-aws-bedrock/README.md

````diff
@@ -4,26 +4,24 @@ AWS Bedrock is a managed service that offers a set of foundation models.
 
 Here we will use `Anthropic Claude` for text generation and `Amazon Titan` for text embedding.
 
-We will use Pinecone as our vectorstore.
+We will use FAISS as our vectorstore.
 
 (See [this notebook](https://github.com/aws-samples/amazon-bedrock-workshop/blob/main/03_QuestionAnswering/01_qa_w_rag_claude.ipynb) for additional context on the RAG pipeline.)
 
-(See [this notebook](https://github.com/aws-samples/amazon-bedrock-workshop/blob/58f238a183e7e629c9ae11dd970393af4e64ec44/00_Intro/bedrock_boto3_setup.ipynb#Prerequisites) for additional context on setup.)
+Code here uses the `boto3` library to connect with the Bedrock service. See [this page](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html#configuration) for setting up and configuring boto3 to work with an AWS account.
 
-## Pinecone
+## FAISS
 
-This connects to a hosted Pinecone vectorstore.
+You need to install the `faiss-cpu` package to work with the FAISS vector store.
 
-Be sure that you have set a few env variables in `chain.py`:
+```bash
+pip install faiss-cpu
+```
 
-* `PINECONE_API_KEY`
-* `PINECONE_ENV`
-* `index_name`
 
 ## LLM and Embeddings
 
-Be sure to set AWS environment variables:
+The code assumes that you are working with the `default` AWS profile and `us-east-1` region. If not, specify these environment variables to reflect the correct region and AWS profile.
 
 * `AWS_DEFAULT_REGION`
 * `AWS_PROFILE`
-* `BEDROCK_ASSUME_ROLE`
````
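The new template seeds FAISS with a single toy sentence (see `chain.py` below). As a hedged sketch that is not part of this commit, here is how one might ingest real documents into FAISS instead, adapting the commented-out ingest snippet the old `chain.py` carried; the loader URL and splitter settings are taken from that snippet, everything else from this template:

```python
# Sketch only: build the FAISS index from real documents rather than the
# toy text. Uses the same langchain (<0.1) APIs as the rest of the template.
from langchain.document_loaders import WebBaseLoader
from langchain.embeddings import BedrockEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS

# Load a page to index (URL from the old ingest snippet)
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()

# Split into retrieval-sized chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)

# Embed with Titan and build an in-memory FAISS index
bedrock_embeddings = BedrockEmbeddings(model_id="amazon.titan-embed-text-v1")
vectorstore = FAISS.from_documents(all_splits, embedding=bedrock_embeddings)
retriever = vectorstore.as_retriever()
```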
templates/rag-aws-bedrock/main.py (new file, 6 lines)

````diff
@@ -0,0 +1,6 @@
+from rag_aws_bedrock.chain import chain
+
+if __name__ == "__main__":
+    query = "What is this data about?"
+
+    print(chain.invoke(query))
````
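`chain` is an LCEL runnable, so the sanity check is not limited to `invoke`; a small usage sketch (assuming the template package is importable, as in `main.py` above):

```python
# Sketch: other standard runnable entry points besides chain.invoke().
from rag_aws_bedrock.chain import chain

# Stream the answer chunk by chunk as Bedrock produces it
for chunk in chain.stream("What is this data about?"):
    print(chunk, end="", flush=True)

# Answer several queries in one call
print(chain.batch(["What is this data about?", "Where did harrison work?"]))
```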
templates/rag-aws-bedrock/poetry.lock (generated, 1227 lines): file diff suppressed because it is too large.
templates/rag-aws-bedrock/pyproject.toml

````diff
@@ -10,10 +10,9 @@ python = ">=3.8.1,<4.0"
 langchain = ">=0.0.313, <0.1"
 openai = ">=0.28.1"
 tiktoken = ">=0.5.1"
-pinecone-client = ">=2.2.4"
+faiss-cpu = ">=1.7.4"
 boto3 = ">=1.28.57"
 awscli = ">=1.29.57"
-botocore = ">=1.31.57"
 
 [tool.langserve]
 export_module = "rag_aws_bedrock"
````
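A quick way to confirm that the new `faiss-cpu` dependency resolves and imports correctly, independent of this template (a sanity check, not part of the commit):

```python
# Sketch: verify faiss-cpu is installed by building a tiny exact-L2 index.
import numpy as np
import faiss

index = faiss.IndexFlatL2(4)                  # 4-dimensional flat index
index.add(np.zeros((1, 4), dtype="float32"))  # add one dummy vector
print(index.ntotal)                           # -> 1
```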
templates/rag-aws-bedrock/rag_aws_bedrock/chain.py

````diff
@@ -5,56 +5,31 @@ from langchain.llms.bedrock import Bedrock
 from langchain.prompts import ChatPromptTemplate
 from langchain.schema.output_parser import StrOutputParser
 from langchain.schema.runnable import RunnableParallel, RunnablePassthrough
-from langchain.vectorstores import Pinecone
-from utils import bedrock
+from langchain.vectorstores import FAISS
 
-if os.environ.get("PINECONE_API_KEY", None) is None:
-    raise Exception("Missing `PINECONE_API_KEY` environment variable.")
-
-if os.environ.get("PINECONE_ENVIRONMENT", None) is None:
-    raise Exception("Missing `PINECONE_ENVIRONMENT` environment variable.")
-
-if os.environ.get("AWS_DEFAULT_REGION", None) is None:
-    raise Exception("Missing `AWS_DEFAULT_REGION` environment variable.")
-
-if os.environ.get("AWS_PROFILE", None) is None:
-    raise Exception("Missing `AWS_PROFILE` environment variable.")
-
-if os.environ.get("BEDROCK_ASSUME_ROLE", None) is None:
-    raise Exception("Missing `BEDROCK_ASSUME_ROLE` environment variable.")
-
-PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test")
-
-### Ingest code - you may need to run this the first time
-# Load
-# from langchain.document_loaders import WebBaseLoader
-# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
-# data = loader.load()
-
-# # Split
-# from langchain.text_splitter import RecursiveCharacterTextSplitter
-# text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
-# all_splits = text_splitter.split_documents(data)
-
-# # Add to vectorDB
-# vectorstore = Pinecone.from_documents(
-#     documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME
-# )
-# retriever = vectorstore.as_retriever()
+# Get region and profile from env
+region = os.environ.get("AWS_DEFAULT_REGION", "us-east-1")
+profile = os.environ.get("AWS_PROFILE", "default")
 
 # Set LLM and embeddings
-boto3_bedrock = bedrock.get_bedrock_client(
-    assumed_role=os.environ.get("BEDROCK_ASSUME_ROLE", None),
-    region=os.environ.get("AWS_DEFAULT_REGION", None)
-)
-model = Bedrock(model_id="anthropic.claude-v2",
-                client=boto3_bedrock,
-                model_kwargs={'max_tokens_to_sample':200})
-bedrock_embeddings = BedrockEmbeddings(model_id="amazon.titan-embed-text-v1",
-                                       client=boto3_bedrock)
+model = Bedrock(
+    model_id="anthropic.claude-v2",
+    region_name=region,
+    credentials_profile_name=profile,
+    model_kwargs={'max_tokens_to_sample':200}
+)
+bedrock_embeddings = BedrockEmbeddings(
+    model_id="amazon.titan-embed-text-v1"
+)
 
-# Set vectorstore
-vectorstore = Pinecone.from_existing_index(PINECONE_INDEX_NAME, bedrock_embeddings)
+# Add to vectorDB
+vectorstore = FAISS.from_texts(
+    ["harrison worked at kensho"],
+    embedding=bedrock_embeddings
+)
+retriever = vectorstore.as_retriever()
+
+# Get retriever from vectorstore
 retriever = vectorstore.as_retriever()
 
 # RAG prompt
````
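Because `chain.py` now reads `AWS_DEFAULT_REGION` and `AWS_PROFILE` at import time with defaults, pointing the template at a different account setup is just a matter of setting the environment before the module is imported. A sketch (the profile name is a hypothetical placeholder):

```python
# Sketch: override the default profile/region before chain.py is imported;
# "my-bedrock-profile" is a placeholder, not a name from this commit.
import os

os.environ["AWS_PROFILE"] = "my-bedrock-profile"
os.environ["AWS_DEFAULT_REGION"] = "us-west-2"

from rag_aws_bedrock.chain import chain  # picks up the env vars on import

print(chain.invoke("Where did harrison work?"))
```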
````diff
@@ -64,6 +39,7 @@ Question: {question}
 """
 prompt = ChatPromptTemplate.from_template(template)
 
 # RAG
 chain = (
     RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
````
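The diff is cut off here, so the tail of the chain definition is not shown. Based on the file's imports and the objects defined above, the chain presumably pipes the retrieved context and question through the prompt, the Claude model, and the string parser; a hedged reconstruction, not the literal remainder of the commit:

```python
# Hedged reconstruction of the truncated chain definition; mirrors the usual
# LCEL RAG pattern rather than quoting the diff, which ends above.
chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt              # fill the RAG template with context + question
    | model               # Anthropic Claude via Bedrock
    | StrOutputParser()   # return a plain string
)
```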