Mirror of https://github.com/hwchase17/langchain.git, synced 2025-07-04 12:18:24 +00:00
Vectara RAG template (#12975)
- **Description:** RAG template using Vectara
- **Twitter handle:** @ofermend
parent 0c81cd923e
commit ce21308f29
21 templates/rag-vectara/LICENSE Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 LangChain, Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
72 templates/rag-vectara/README.md Normal file
@@ -0,0 +1,72 @@
# rag-vectara

This template performs RAG with Vectara.

## Environment Setup

Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.

Also, ensure the following environment variables are set (see the example exports after this list):
* `VECTARA_CUSTOMER_ID`
* `VECTARA_CORPUS_ID`
* `VECTARA_API_KEY`
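
For example (placeholder values; substitute your own Vectara credentials):

```shell
export VECTARA_CUSTOMER_ID=<your-customer-id>
export VECTARA_CORPUS_ID=<your-corpus-id>
export VECTARA_API_KEY=<your-api-key>
export OPENAI_API_KEY=<your-openai-api-key>
```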

## Usage

To use this package, you should first have the LangChain CLI installed:

```shell
pip install -U langchain-cli
```

To create a new LangChain project and install this as the only package, you can do:

```shell
langchain app new my-app --package rag-vectara
```

If you want to add this to an existing project, you can just run:

```shell
langchain app add rag-vectara
```

And add the following code to your `server.py` file:
```python
from rag_vectara import chain as rag_vectara_chain

add_routes(app, rag_vectara_chain, path="/rag-vectara")
```
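
Here `add_routes` and `app` are assumed to come from the stock LangServe scaffold (`langserve.add_routes` mounted on a FastAPI app); a minimal `server.py` sketch under that assumption:

```python
# Minimal server.py sketch (assumes the stock LangServe app scaffold).
from fastapi import FastAPI
from langserve import add_routes

from rag_vectara import chain as rag_vectara_chain

app = FastAPI()
add_routes(app, rag_vectara_chain, path="/rag-vectara")
```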

(Optional) Let's now configure LangSmith.
LangSmith will help us trace, monitor, and debug LangChain applications.
LangSmith is currently in private beta; you can sign up [here](https://smith.langchain.com/).
If you don't have access, you can skip this section.

```shell
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_API_KEY=<your-api-key>
export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "vectara-demo"
```

If you are inside this directory, then you can spin up a LangServe instance directly by:

```shell
langchain serve
```

This will start the FastAPI app with a server running locally at
[http://localhost:8000](http://localhost:8000)

We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs)
We can access the playground at [http://127.0.0.1:8000/rag-vectara/playground](http://127.0.0.1:8000/rag-vectara/playground)

We can access the template from code with:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-vectara")
```
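
Once the runnable is in hand, it can be invoked like any other; the question below mirrors the one in the template's notebook:

```python
# Example invocation (question taken from the template's notebook).
runnable.invoke("How does agent memory work?")
```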
1744 templates/rag-vectara/poetry.lock generated Normal file
File diff suppressed because it is too large
32 templates/rag-vectara/pyproject.toml Normal file
@@ -0,0 +1,32 @@
[tool.poetry]
name = "rag-vectara"
version = "0.1.0"
description = ""
authors = [
    "Ofer Mendelevitch <ofer@vectara.com>",
]
readme = "README.md"

[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
langchain = ">=0.0.325"
openai = "^0.28.1"
tiktoken = "^0.5.1"

[tool.poetry.group.dev.dependencies]
langchain-cli = ">=0.0.15"

[tool.poetry.group.dev.dependencies.python-dotenv]
extras = [
    "cli",
]
version = "^1.0.0"

[tool.langserve]
export_module = "rag_vectara"
export_attr = "chain"

[build-system]
requires = [
    "poetry-core",
]
build-backend = "poetry.core.masonry.api"
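
(The `[tool.langserve]` table is what lets the LangChain CLI discover the chain: `export_module` names the package and `export_attr` the attribute to mount when the template is added to an app.)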
57 templates/rag-vectara/rag_vectara.ipynb Normal file
@@ -0,0 +1,57 @@
{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "8692a430",
   "metadata": {},
   "source": [
    "# Run Template\n",
    "\n",
    "In `server.py`, set -\n",
    "```\n",
    "add_routes(app, chain_ext, path=\"/rag-vectara\")\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "41db5e30",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langserve.client import RemoteRunnable\n",
    "\n",
    "rag_app_vectara = RemoteRunnable(\"http://localhost:8000/rag-vectara\")\n",
    "rag_app_vectara.invoke(\"How does agent memory work?\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.11.6 64-bit",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.6"
  },
  "vscode": {
   "interpreter": {
    "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
3 templates/rag-vectara/rag_vectara/__init__.py Normal file
@@ -0,0 +1,3 @@
from rag_vectara.chain import chain

__all__ = ["chain"]
49 templates/rag-vectara/rag_vectara/chain.py Normal file
@@ -0,0 +1,49 @@
import os

from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableParallel, RunnablePassthrough
from langchain.vectorstores import Vectara

if os.environ.get("VECTARA_CUSTOMER_ID", None) is None:
    raise Exception("Missing `VECTARA_CUSTOMER_ID` environment variable.")
if os.environ.get("VECTARA_CORPUS_ID", None) is None:
    raise Exception("Missing `VECTARA_CORPUS_ID` environment variable.")
if os.environ.get("VECTARA_API_KEY", None) is None:
    raise Exception("Missing `VECTARA_API_KEY` environment variable.")

# If you want to ingest data, use this code.
# Note that no document chunking is needed, as this is
# done efficiently in the Vectara backend.
# from langchain.document_loaders import WebBaseLoader
# loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
# docs = loader.load()
# vec_store = Vectara.from_documents(docs, embedding=None)
# retriever = vec_store.as_retriever()
# Otherwise, if data is already loaded into Vectara, use this code:
retriever = Vectara().as_retriever()

# RAG prompt
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# RAG chain: retrieve context in parallel with passing the question through,
# then format the prompt, call the model, and parse the string output.
model = ChatOpenAI()
chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | model
    | StrOutputParser()
)


# Add typing for input
class Question(BaseModel):
    __root__: str


chain = chain.with_types(input_type=Question)
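
For a quick local smoke test outside LangServe (a sketch, not part of the template; it assumes the Vectara and OpenAI environment variables above are set, and the question is illustrative):

```python
# Sketch: invoke the chain directly; requires valid Vectara and OpenAI credentials.
from rag_vectara import chain

print(chain.invoke("How does agent memory work?"))
```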
0 templates/rag-vectara/tests/__init__.py Normal file