diff --git a/libs/langchain/langchain/chains/qa_generation/base.py b/libs/langchain/langchain/chains/qa_generation/base.py
index bdc3444cf70..b66b8a54425 100644
--- a/libs/langchain/langchain/chains/qa_generation/base.py
+++ b/libs/langchain/langchain/chains/qa_generation/base.py
@@ -3,6 +3,7 @@ from __future__ import annotations
 import json
 from typing import Any, Dict, List, Optional
 
+from langchain_core._api import deprecated
 from langchain_core.callbacks import CallbackManagerForChainRun
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import BasePromptTemplate
@@ -14,8 +15,53 @@ from langchain.chains.llm import LLMChain
 from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
 
 
+@deprecated(
+    since="0.2.7",
+    alternative=(
+        "example in API reference with more detail: "
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"  # noqa: E501
+    ),
+    removal="1.0",
+)
 class QAGenerationChain(Chain):
-    """Base class for question-answer generation chains."""
+    """Base class for question-answer generation chains.
+
+    This class is deprecated. See below for an alternative implementation.
+
+    Advantages of this implementation include:
+
+    - Supports async and streaming;
+    - Surfaces prompt and text splitter for easier customization;
+    - Use of JsonOutputParser supports JSONPatch operations in streaming mode,
+      as well as robustness to markdown.
+
+    .. code-block:: python
+
+        from langchain.chains.qa_generation.prompt import CHAT_PROMPT as prompt
+        # Note: import PROMPT if using a legacy non-chat model.
+        from langchain_core.output_parsers import JsonOutputParser
+        from langchain_core.runnables import (
+            RunnableLambda,
+            RunnableParallel,
+            RunnablePassthrough,
+        )
+        from langchain_core.runnables.base import RunnableEach
+        from langchain_openai import ChatOpenAI
+        from langchain_text_splitters import RecursiveCharacterTextSplitter
+
+        llm = ChatOpenAI()
+        text_splitter = RecursiveCharacterTextSplitter(chunk_overlap=500)
+        split_text = RunnableLambda(
+            lambda x: text_splitter.create_documents([x])
+        )
+
+        chain = RunnableParallel(
+            text=RunnablePassthrough(),
+            questions=(
+                split_text | RunnableEach(bound=prompt | llm | JsonOutputParser())
+            )
+        )
+    """
 
     llm_chain: LLMChain
     """LLM Chain that generates responses from user input and context."""
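
For reviewers, a minimal usage sketch of the replacement chain from the new docstring. This is an illustration, not part of the diff: ``sample_text`` is a hypothetical input, ``OPENAI_API_KEY`` is assumed to be set, and the shape of each parsed item follows the JSON format the QA-generation prompt requests.

.. code-block:: python

    from langchain.chains.qa_generation.prompt import CHAT_PROMPT as prompt
    from langchain_core.output_parsers import JsonOutputParser
    from langchain_core.runnables import (
        RunnableLambda,
        RunnableParallel,
        RunnablePassthrough,
    )
    from langchain_core.runnables.base import RunnableEach
    from langchain_openai import ChatOpenAI
    from langchain_text_splitters import RecursiveCharacterTextSplitter

    # Same construction as the docstring example above.
    llm = ChatOpenAI()
    text_splitter = RecursiveCharacterTextSplitter(chunk_overlap=500)
    split_text = RunnableLambda(lambda x: text_splitter.create_documents([x]))
    chain = RunnableParallel(
        text=RunnablePassthrough(),
        questions=split_text | RunnableEach(bound=prompt | llm | JsonOutputParser()),
    )

    sample_text = "..."  # hypothetical long document to generate QA pairs from
    result = chain.invoke(sample_text)
    result["text"]       # the original input, passed through unchanged
    result["questions"]  # one parsed dict per chunk, e.g.
                         # [{"question": "...", "answer": "..."}, ...]

Because the chain is composed of runnables, async and streaming come along for free (per the docstring's stated advantages): ``await chain.ainvoke(sample_text)`` and ``chain.astream(sample_text)`` work the same way, with ``JsonOutputParser`` emitting progressively completed JSON in streaming mode.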