Mirror of https://github.com/hwchase17/langchain.git
Synced 2025-09-09 15:03:21 +00:00
Format Templates (#12396)
@@ -1,15 +1,21 @@
 import os
-from typing import Tuple, List
-from pydantic import BaseModel
 from operator import itemgetter
-from langchain.vectorstores import Pinecone
+from typing import List, Tuple
+
 from langchain.chat_models import ChatOpenAI
-from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain.embeddings import OpenAIEmbeddings
-from langchain.schema import format_document, AIMessage, HumanMessage
+from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain.prompts.prompt import PromptTemplate
+from langchain.schema import AIMessage, HumanMessage, format_document
 from langchain.schema.output_parser import StrOutputParser
-from langchain.schema.runnable import RunnablePassthrough, RunnableBranch, RunnableLambda, RunnableMap
+from langchain.schema.runnable import (
+    RunnableBranch,
+    RunnableLambda,
+    RunnableMap,
+    RunnablePassthrough,
+)
+from langchain.vectorstores import Pinecone
+from pydantic import BaseModel
 
 if os.environ.get("PINECONE_API_KEY", None) is None:
     raise Exception("Missing `PINECONE_API_KEY` environment variable.")
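The hunks below reference a `retriever` that is built in the part of the template this diff does not touch. Purely as orientation, here is a minimal sketch of how a Pinecone retriever is typically wired up from the imports above; the index name and embedding model are assumptions, not taken from this commit:

# Orientation sketch only -- index name and embeddings are assumed, not from this diff.
import os

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Pinecone

PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test-index")
vectorstore = Pinecone.from_existing_index(PINECONE_INDEX_NAME, OpenAIEmbeddings())
retriever = vectorstore.as_retriever()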
@@ -44,7 +50,7 @@ _template = """Given the following conversation and a follow up question, rephra
 Chat History:
 {chat_history}
 Follow Up Input: {question}
-Standalone question:"""
+Standalone question:"""  # noqa: E501
 CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
 
 # RAG answer synthesis prompt
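For illustration only (the history and question values below are invented), this is roughly how the condensing prompt gets filled in before being sent to the model; the LLM is then expected to reply with a single standalone question:

# Illustration with invented values, not part of the commit.
condensed_input = CONDENSE_QUESTION_PROMPT.format(
    chat_history="Human: What is Pinecone?\nAssistant: A managed vector database.",
    question="How do I create an index?",
)
# The model is expected to answer with something like:
# "How do I create an index in Pinecone?"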
@@ -52,18 +58,25 @@ template = """Answer the question based only on the following context:
 <context>
 {context}
 </context>"""
-ANSWER_PROMPT = ChatPromptTemplate.from_messages([
-    ("system",template),
-    MessagesPlaceholder(variable_name="chat_history"),
-    ("user", "{question}")
-])
+ANSWER_PROMPT = ChatPromptTemplate.from_messages(
+    [
+        ("system", template),
+        MessagesPlaceholder(variable_name="chat_history"),
+        ("user", "{question}"),
+    ]
+)
 
 # Conversational Retrieval Chain
 DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
-def _combine_documents(docs, document_prompt = DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"):
+
+
+def _combine_documents(
+    docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"
+):
     doc_strings = [format_document(doc, document_prompt) for doc in docs]
     return document_separator.join(doc_strings)
 
+
 def _format_chat_history(chat_history: List[Tuple[str, str]]) -> List:
     buffer = []
     for human, ai in chat_history:
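A quick sketch of what the two helpers reformatted above do at runtime (the Document import and the sample values are illustrative, not from the commit):

# Illustrative usage with invented data.
from langchain.schema import Document

messages = _format_chat_history([("Hi there", "Hello! How can I help?")])
# -> [HumanMessage(content="Hi there"), AIMessage(content="Hello! How can I help?")]

context = _combine_documents(
    [Document(page_content="Doc one."), Document(page_content="Doc two.")]
)
# -> "Doc one.\n\nDoc two."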
@@ -71,6 +84,7 @@ def _format_chat_history(chat_history: List[Tuple[str, str]]) -> List:
         buffer.append(AIMessage(content=ai))
     return buffer
 
+
 # User input
 class ChatHistory(BaseModel):
     chat_history: List[Tuple[str, str]]
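ChatHistory is what `.with_types(input_type=ChatHistory)` further down advertises as the chain's input schema: chat history as a list of (human, ai) string pairs. A tiny example of a payload that validates against the field shown in this hunk (values invented; any other fields of the model are outside this diff):

# Invented example for the field shown in this hunk.
ChatHistory(chat_history=[("What is LangChain?", "A framework for building LLM apps.")])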
@@ -78,24 +92,28 @@ class ChatHistory(BaseModel):
 
 
 _search_query = RunnableBranch(
-    # If input includes chat_history, we condense it with the follow-up question
-    (
-        RunnableLambda(lambda x: bool(x.get("chat_history"))).with_config(
-            run_name="HasChatHistoryCheck"
-        ),  # Condense follow-up question and chat into a standalone_question
-        RunnablePassthrough.assign(
-            chat_history=lambda x: _format_chat_history(x['chat_history'])
-        ) | CONDENSE_QUESTION_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser(),
-    ),
-    # Else, we have no chat history, so just pass through the question
-    RunnableLambda(itemgetter("question"))
-)
+    # If input includes chat_history, we condense it with the follow-up question
+    (
+        RunnableLambda(lambda x: bool(x.get("chat_history"))).with_config(
+            run_name="HasChatHistoryCheck"
+        ),  # Condense follow-up question and chat into a standalone_question
+        RunnablePassthrough.assign(
+            chat_history=lambda x: _format_chat_history(x["chat_history"])
+        )
+        | CONDENSE_QUESTION_PROMPT
+        | ChatOpenAI(temperature=0)
+        | StrOutputParser(),
+    ),
+    # Else, we have no chat history, so just pass through the question
+    RunnableLambda(itemgetter("question")),
+)
 
-_inputs = RunnableMap({
-    "question": lambda x: x["question"],
-    "chat_history": lambda x: _format_chat_history(x['chat_history']),
-    "context": _search_query | retriever | _combine_documents
-}).with_types(input_type=ChatHistory)
+_inputs = RunnableMap(
+    {
+        "question": lambda x: x["question"],
+        "chat_history": lambda x: _format_chat_history(x["chat_history"]),
+        "context": _search_query | retriever | _combine_documents,
+    }
+).with_types(input_type=ChatHistory)
 
 chain = _inputs | ANSWER_PROMPT | ChatOpenAI() | StrOutputParser()
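Taken together, the finished runnable can be exercised roughly like this (inputs are invented): on the first turn the empty history makes the branch pass the question straight to the retriever, and on a follow-up turn the history plus new question are condensed into a standalone query before retrieval.

# Invented example inputs, shown only to illustrate the chain's behavior.
first_answer = chain.invoke({"question": "What is Pinecone?", "chat_history": []})

follow_up_answer = chain.invoke(
    {
        "question": "How do I create an index there?",
        "chat_history": [("What is Pinecone?", first_answer)],
    }
)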