Mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-20 13:54:48 +00:00)
fix elastic rag template in playground (#12682)
- a few instructions in the README (`load_documents` -> `ingest.py`)
- added a `docker run` command for local Elasticsearch
- added an input type definition so the playground renders properly
This commit is contained in:
parent f0eba1ac63 · commit b825dddf95
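For context, the playground mentioned in the third bullet is the form LangServe serves alongside each mounted template. A minimal serving sketch, assuming the standard LangServe API and the template's exported `chain` attribute (app, path, and port here are illustrative):

```python
# Minimal sketch of serving the template so the LangServe playground is reachable.
# Assumes the template package exports a `chain` runnable; path and port are illustrative.
from fastapi import FastAPI
from langserve import add_routes

from rag_elasticsearch import chain  # exported via `tool.langserve.export_attr`

app = FastAPI()
add_routes(app, chain, path="/rag-elasticsearch")

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
```

With an explicit input type on the chain, the playground at `/rag-elasticsearch/playground` can render proper fields for `chat_history` and `question`, which is what this fix targets.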
@@ -165,7 +165,6 @@ def format_chat_history(chain_input: dict) -> dict:
 # with the new `tool.langserve.export_attr`
 chain = (
     (format_chat_history | _prompt | _model | StrOutputParser())
-    .with_types(input_type=ChainInput)
     # This is to add the evaluators as "listeners"
     # and to customize the name of the chain.
     # Any chain that accepts a compatible input type works here.
@@ -180,3 +179,5 @@ chain = (
         ],
     )
 )
+
+chain = chain.with_types(input_type=ChainInput)
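The hunk above is the core of the playground fix for this file: `.with_types(...)` is taken out of the middle of the composition and applied once to the fully assembled chain, so the declared schema is the one the playground introspects. A small sketch of the pattern, assuming a `ChainInput` model like the one added below (the pipeline body here is only a stand-in):

```python
# Sketch: declare the input type on the final runnable rather than on an inner stage.
# `ChainInput` mirrors the template's model; the pipeline body is a stand-in.
from typing import List, Optional

from langchain.schema import BaseMessage
from langchain.schema.runnable import RunnableLambda
from pydantic import BaseModel, Field


class ChainInput(BaseModel):
    chat_history: Optional[List[BaseMessage]] = Field(description="Previous chat messages.")
    question: str = Field(..., description="The question to answer.")


chain = RunnableLambda(lambda x: x["question"].upper())  # placeholder pipeline
chain = chain.with_types(input_type=ChainInput)

# The playground builds its input form from this schema.
print(chain.input_schema.schema())
```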
@@ -1,7 +1,7 @@
 
 # rag-elasticsearch
 
-This template performs RAG using ElasticSearch.
+This template performs RAG using [ElasticSearch](https://python.langchain.com/docs/integrations/vectorstores/elasticsearch).
 
 It relies on sentence transformer `MiniLM-L6-v2` for embedding passages and questions.
 
@@ -19,7 +19,12 @@ export ELASTIC_PASSWORD = <ClOUD_PASSWORD>
 For local development with Docker, use:
 
 ```bash
-export ES_URL = "http://localhost:9200"
+export ES_URL="http://localhost:9200"
 ```
+
+And run an Elasticsearch instance in Docker with
+```bash
+docker run -p 9200:9200 -e "discovery.type=single-node" -e "xpack.security.enabled=false" -e "xpack.security.http.ssl.enabled=false" docker.elastic.co/elasticsearch/elasticsearch:8.9.0
+```
 
 ## Usage
@@ -83,7 +88,7 @@ runnable = RemoteRunnable("http://localhost:8000/rag-elasticsearch")
 For loading the fictional workplace documents, run the following command from the root of this repository:
 
 ```bash
-python ./data/load_documents.py
+python ingest.py
 ```
 
 However, you can choose from a large number of document loaders [here](https://python.langchain.com/docs/integrations/document_loaders).
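The README additions start a single-node, security-disabled Elasticsearch on port 9200 and point `ES_URL` at it. As a rough sketch of how those settings end up being consumed (the index name is illustrative, and the template's own connection helper may differ in detail):

```python
# Rough sketch: build the template's vector store against the local Docker Elasticsearch.
# Index name is illustrative; the template's connection helper handles ES_URL vs. cloud settings.
import os

from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores.elasticsearch import ElasticsearchStore

embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

vectorstore = ElasticsearchStore(
    es_url=os.environ.get("ES_URL", "http://localhost:9200"),
    index_name="workplace-search-example",
    embedding=embeddings,
)
retriever = vectorstore.as_retriever()
```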
@@ -1,12 +1,13 @@
 from operator import itemgetter
-from typing import List, Tuple
+from typing import List, Optional, Tuple
 
 from langchain.chat_models import ChatOpenAI
 from langchain.embeddings import HuggingFaceEmbeddings
-from langchain.schema import format_document
+from langchain.schema import BaseMessage, format_document
 from langchain.schema.output_parser import StrOutputParser
 from langchain.schema.runnable import RunnableMap, RunnablePassthrough
 from langchain.vectorstores.elasticsearch import ElasticsearchStore
+from pydantic import BaseModel, Field
 
 from .connection import es_connection_details
 from .prompts import CONDENSE_QUESTION_PROMPT, DOCUMENT_PROMPT, LLM_CONTEXT_PROMPT
@@ -41,6 +42,13 @@ def _format_chat_history(chat_history: List[Tuple]) -> str:
     return buffer
 
 
+class ChainInput(BaseModel):
+    chat_history: Optional[List[BaseMessage]] = Field(
+        description="Previous chat messages."
+    )
+    question: str = Field(..., description="The question to answer.")
+
+
 _inputs = RunnableMap(
     standalone_question=RunnablePassthrough.assign(
         chat_history=lambda x: _format_chat_history(x["chat_history"])
@@ -56,3 +64,5 @@ _context = {
 }
 
 chain = _inputs | _context | LLM_CONTEXT_PROMPT | llm | StrOutputParser()
+
+chain = chain.with_types(input_type=ChainInput)
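With `ChainInput` attached, callers (including the playground and `RemoteRunnable` clients) pass a dict of that shape. A hedged usage sketch, assuming the template is served at the path shown in its README; the URL and messages are illustrative, and the template's internal history formatter may also accept `(human, ai)` tuples:

```python
# Usage sketch: invoke the typed chain through LangServe (requires `langchain serve` running).
# URL and message contents are illustrative.
from langchain.schema import AIMessage, HumanMessage
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/rag-elasticsearch")

answer = runnable.invoke(
    {
        "chat_history": [
            HumanMessage(content="What benefits do we offer?"),
            AIMessage(content="The handbook lists health insurance and a pension plan."),
        ],
        "question": "How many vacation days are included?",
    }
)
print(answer)
```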