diff --git a/docs/docs/versions/migrating_chains/conversation_chain.ipynb b/docs/docs/versions/migrating_chains/conversation_chain.ipynb
index e984fc139ce..ba56524614a 100644
--- a/docs/docs/versions/migrating_chains/conversation_chain.ipynb
+++ b/docs/docs/versions/migrating_chains/conversation_chain.ipynb
@@ -54,12 +54,9 @@
"id": "00df631d-5121-4918-94aa-b88acce9b769",
"metadata": {},
"source": [
- "import { ColumnContainer, Column } from \"@theme/Columns\";\n",
+ "## Legacy\n",
"\n",
- "\n",
- "\n",
- "\n",
- "#### Legacy\n"
+ ""
]
},
{
@@ -111,12 +108,11 @@
"id": "f8e36b0e-c7dc-4130-a51b-189d4b756c7f",
"metadata": {},
"source": [
- " \n",
+ "\n",
"\n",
- "\n",
+ "## LCEL\n",
"\n",
- "#### LCEL\n",
- "\n"
+ ""
]
},
{
@@ -174,10 +170,6 @@
"id": "6b386ce6-895e-442c-88f3-7bec0ab9f401",
"metadata": {},
"source": [
- "\n",
- " \n",
- "\n",
- "\n",
"The above example uses the same `history` for all sessions. The example below shows how to use a different chat history for each session."
]
},
@@ -230,6 +222,8 @@
"id": "b2717810",
"metadata": {},
"source": [
+ "\n",
+ "\n",
"## Next steps\n",
"\n",
"See [this tutorial](/docs/tutorials/chatbot) for a more end-to-end guide on building with [`RunnableWithMessageHistory`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html).\n",
diff --git a/docs/docs/versions/migrating_chains/conversation_retrieval_chain.ipynb b/docs/docs/versions/migrating_chains/conversation_retrieval_chain.ipynb
index 91a331232ee..70de0606e78 100644
--- a/docs/docs/versions/migrating_chains/conversation_retrieval_chain.ipynb
+++ b/docs/docs/versions/migrating_chains/conversation_retrieval_chain.ipynb
@@ -83,13 +83,9 @@
"id": "8bc06416",
"metadata": {},
"source": [
- "import { ColumnContainer, Column } from \"@theme/Columns\";\n",
+ "## Legacy\n",
"\n",
- "\n",
- "\n",
- "\n",
- "\n",
- "#### Legacy"
+ ""
]
},
{
@@ -165,12 +161,11 @@
"id": "43a8a23c",
"metadata": {},
"source": [
- " \n",
+ "\n",
"\n",
- "\n",
+ "## LCEL\n",
"\n",
- "#### LCEL\n",
- "\n"
+ ""
]
},
{
@@ -253,9 +248,7 @@
"id": "b2717810",
"metadata": {},
"source": [
- " \n",
- "\n",
- "\n",
+ "\n",
"\n",
"## Next steps\n",
"\n",
@@ -263,6 +256,14 @@
"\n",
"Next, check out the [LCEL conceptual docs](/docs/concepts/#langchain-expression-language-lcel) for more background information."
]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7bfc38bd-0ff8-40ee-83a3-9d7553364fd7",
+ "metadata": {},
+ "outputs": [],
+ "source": []
}
],
"metadata": {
diff --git a/docs/docs/versions/migrating_chains/index.mdx b/docs/docs/versions/migrating_chains/index.mdx
index 6d3aabe178b..4f809972e0a 100644
--- a/docs/docs/versions/migrating_chains/index.mdx
+++ b/docs/docs/versions/migrating_chains/index.mdx
@@ -2,33 +2,48 @@
sidebar_position: 1
---
-# How to migrate chains to LCEL
+# How to migrate from v0.0 chains
:::info Prerequisites
This guide assumes familiarity with the following concepts:
- [LangChain Expression Language](/docs/concepts#langchain-expression-language-lcel)
-
+- [LangGraph](https://langchain-ai.github.io/langgraph/)
:::
-LCEL is designed to streamline the process of building useful apps with LLMs and combining related components. It does this by providing:
+LangChain maintains a number of legacy abstractions. Many of these can be reimplemented via short combinations of LCEL and LangGraph primitives.
+
+### LCEL
+[LCEL](/docs/concepts/#langchain-expression-language-lcel) is designed to streamline the process of building useful apps with LLMs and combining related components. It does this by providing:
1. **A unified interface**: Every LCEL object implements the `Runnable` interface, which defines a common set of invocation methods (`invoke`, `batch`, `stream`, `ainvoke`, ...). This makes it possible to also automatically and consistently support useful operations like streaming of intermediate steps and batching, since every chain composed of LCEL objects is itself an LCEL object.
2. **Composition primitives**: LCEL provides a number of primitives that make it easy to compose chains, parallelize components, add fallbacks, dynamically configure chain internals, and more.
-LangChain maintains a number of legacy abstractions. Many of these can be reimplemented via short combinations of LCEL primitives. Doing so confers some general advantages:
+### LangGraph
+[LangGraph](https://langchain-ai.github.io/langgraph/), built on top of LCEL, allows for performant orchestrations of application components while maintaining concise and readable code. It includes built-in persistence, support for cycles, and prioritizes controllability.
+If LCEL grows unwieldy for larger or more complex chains, those chains may benefit from a LangGraph implementation.
+
+### Advantages
+Using these frameworks for existing v0.0 chains confers some advantages:
- The resulting chains typically implement the full `Runnable` interface, including streaming and asynchronous support where appropriate;
- The chains may be more easily extended or modified;
- The parameters of the chain are typically surfaced for easier customization (e.g., prompts) over previous versions, which tended to be subclasses and had opaque parameters and internals.
+- If using LangGraph, the chain supports built-in persistence, allowing for conversational experiences via a "memory" of the chat history.
+- If using LangGraph, the steps of the chain can be streamed, allowing for greater control and customizability.
-The LCEL implementations can be slightly more verbose, but there are significant benefits in transparency and customizability.
-The below pages assist with migration from various specific chains to LCEL:
+The below pages assist with migration from various specific chains to LCEL and LangGraph:
- [LLMChain](/docs/versions/migrating_chains/llm_chain)
- [ConversationChain](/docs/versions/migrating_chains/conversation_chain)
- [RetrievalQA](/docs/versions/migrating_chains/retrieval_qa)
- [ConversationalRetrievalChain](/docs/versions/migrating_chains/conversation_retrieval_chain)
+- [StuffDocumentsChain](/docs/versions/migrating_chains/stuff_docs_chain)
+- [MapReduceDocumentsChain](/docs/versions/migrating_chains/map_reduce_chain)
+- [MapRerankDocumentsChain](/docs/versions/migrating_chains/map_rerank_docs_chain)
+- [RefineDocumentsChain](/docs/versions/migrating_chains/refine_docs_chain)
+- [LLMRouterChain](/docs/versions/migrating_chains/llm_router_chain)
+- [MultiPromptChain](/docs/versions/migrating_chains/multi_prompt_chain)
-Check out the [LCEL conceptual docs](/docs/concepts/#langchain-expression-language-lcel) for more background information.
\ No newline at end of file
+Check out the [LCEL conceptual docs](/docs/concepts/#langchain-expression-language-lcel) and [LangGraph docs](https://langchain-ai.github.io/langgraph/) for more background information.
\ No newline at end of file
diff --git a/docs/docs/versions/migrating_chains/llm_chain.ipynb b/docs/docs/versions/migrating_chains/llm_chain.ipynb
index 48de1ac9b73..c288cadf0d1 100644
--- a/docs/docs/versions/migrating_chains/llm_chain.ipynb
+++ b/docs/docs/versions/migrating_chains/llm_chain.ipynb
@@ -52,13 +52,9 @@
"id": "e3621b62-a037-42b8-8faa-59575608bb8b",
"metadata": {},
"source": [
- "import { ColumnContainer, Column } from \"@theme/Columns\";\n",
+ "## Legacy\n",
"\n",
- "\n",
- "\n",
- "\n",
- "\n",
- "#### Legacy\n"
+ ""
]
},
{
@@ -98,13 +94,11 @@
"id": "cdc3b527-c09e-4c77-9711-c3cc4506cd95",
"metadata": {},
"source": [
+ " \n",
"\n",
- "\n",
+ "## LCEL\n",
"\n",
- "\n",
- "\n",
- "#### LCEL\n",
- "\n"
+ ""
]
},
{
@@ -143,10 +137,6 @@
"id": "3c0b0513-77b8-4371-a20e-3e487cec7e7f",
"metadata": {},
"source": [
- "\n",
- " \n",
- "\n",
- "\n",
"Note that `LLMChain` by default returns a `dict` containing both the input and the output. If this behavior is desired, we can replicate it using another LCEL primitive, [`RunnablePassthrough`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html):"
]
},
@@ -181,6 +171,8 @@
"id": "b2717810",
"metadata": {},
"source": [
+ "\n",
+ "\n",
"## Next steps\n",
"\n",
"See [this tutorial](/docs/tutorials/llm_chain) for more detail on building with prompt templates, LLMs, and output parsers.\n",
diff --git a/docs/docs/versions/migrating_chains/llm_router_chain.ipynb b/docs/docs/versions/migrating_chains/llm_router_chain.ipynb
new file mode 100644
index 00000000000..64561caa965
--- /dev/null
+++ b/docs/docs/versions/migrating_chains/llm_router_chain.ipynb
@@ -0,0 +1,283 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "575befea-4d98-4941-8e55-1581b169a674",
+ "metadata": {},
+ "source": [
+ "---\n",
+ "title: Migrating from LLMRouterChain\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "14625d35-efca-41cf-b203-be9f4c375700",
+ "metadata": {},
+ "source": [
+ "The [`LLMRouterChain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.router.llm_router.LLMRouterChain.html) routed an input query to one of multiple destinations-- that is, given an input query, it used a LLM to select from a list of destination chains, and passed its inputs to the selected chain.\n",
+ "\n",
+ "`LLMRouterChain` does not support common [chat model](/docs/concepts/#chat-models) features, such as message roles and [tool calling](/docs/concepts/#functiontool-calling). Under the hood, `LLMRouterChain` routes a query by instructing the LLM to generate JSON-formatted text, and parsing out the intended destination.\n",
+ "\n",
+ "Consider an example from a [MultiPromptChain](/docs/versions/migrating_chains/multi_prompt_chain), which uses `LLMRouterChain`. Below is an (example) default prompt:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "364814a5-d15c-41bb-bf3f-581df51a4721",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Given a raw text input to a language model select the model prompt best suited for the input. You will be given the names of the available prompts and a description of what the prompt is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response from the language model.\n",
+ "\n",
+ "<< FORMATTING >>\n",
+ "Return a markdown code snippet with a JSON object formatted to look like:\n",
+ "'''json\n",
+ "{{\n",
+ " \"destination\": string \\ name of the prompt to use or \"DEFAULT\"\n",
+ " \"next_inputs\": string \\ a potentially modified version of the original input\n",
+ "}}\n",
+ "'''\n",
+ "\n",
+ "REMEMBER: \"destination\" MUST be one of the candidate prompt names specified below OR it can be \"DEFAULT\" if the input is not well suited for any of the candidate prompts.\n",
+ "REMEMBER: \"next_inputs\" can just be the original input if you don't think any modifications are needed.\n",
+ "\n",
+ "<< CANDIDATE PROMPTS >>\n",
+ "\n",
+ "animals: prompt for animal expert\n",
+ "vegetables: prompt for a vegetable expert\n",
+ "\n",
+ "\n",
+ "<< INPUT >>\n",
+ "{input}\n",
+ "\n",
+ "<< OUTPUT (must include '''json at the start of the response) >>\n",
+ "<< OUTPUT (must end with ''') >>\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "from langchain.chains.router.multi_prompt import MULTI_PROMPT_ROUTER_TEMPLATE\n",
+ "\n",
+ "destinations = \"\"\"\n",
+ "animals: prompt for animal expert\n",
+ "vegetables: prompt for a vegetable expert\n",
+ "\"\"\"\n",
+ "\n",
+ "router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(destinations=destinations)\n",
+ "\n",
+ "print(router_template.replace(\"`\", \"'\")) # for rendering purposes"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "934937d1-fc0a-4d3f-b297-29f96e6a8f5e",
+ "metadata": {},
+ "source": [
+ "Most of the behavior is determined via a single natural language prompt. Chat models that support [tool calling](/docs/how_to/tool_calling/) features confer a number of advantages for this task:\n",
+ "\n",
+ "- Supports chat prompt templates, including messages with `system` and other roles;\n",
+ "- Tool-calling models are fine-tuned to generate structured output;\n",
+ "- Support for runnable methods like streaming and async operations.\n",
+ "\n",
+    "Now let's look at `LLMRouterChain` side-by-side with an LCEL implementation that uses tool-calling. Note that for this guide we will use `langchain-openai >= 0.1.20`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ed12b22b-5452-4776-aee3-b67d9f965082",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install -qU langchain-core langchain-openai"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b0edbba1-a497-49ef-ade7-4fe7967360eb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "from getpass import getpass\n",
+ "\n",
+ "os.environ[\"OPENAI_API_KEY\"] = getpass()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "5d4dc41c-3fdc-4093-ba5e-31a9ebb54e13",
+ "metadata": {},
+ "source": [
+ "## Legacy\n",
+ "\n",
+ ""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "c58c9269-5a1d-4234-88b5-7168944618bf",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser\n",
+ "from langchain_core.prompts import PromptTemplate\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "\n",
+ "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
+ "\n",
+ "router_prompt = PromptTemplate(\n",
+ " # Note: here we use the prompt template from above. Generally this would need\n",
+ " # to be customized.\n",
+ " template=router_template,\n",
+ " input_variables=[\"input\"],\n",
+ " output_parser=RouterOutputParser(),\n",
+ ")\n",
+ "\n",
+ "chain = LLMRouterChain.from_llm(llm, router_prompt)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "a22ebdca-5f53-459e-9cff-a97b2354ffe0",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "vegetables\n"
+ ]
+ }
+ ],
+ "source": [
+ "result = chain.invoke({\"input\": \"What color are carrots?\"})\n",
+ "\n",
+ "print(result[\"destination\"])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6fd48120-056f-4c58-a04f-da5198c23068",
+ "metadata": {},
+ "source": [
+ " \n",
+ "\n",
+ "## LCEL\n",
+ "\n",
+ ""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "5bbebac2-df19-4f59-8a69-f61cd7286e59",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from operator import itemgetter\n",
+ "from typing import Literal\n",
+ "\n",
+ "from langchain_core.prompts import ChatPromptTemplate\n",
+ "from langchain_core.runnables import RunnablePassthrough\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "from typing_extensions import TypedDict\n",
+ "\n",
+ "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
+ "\n",
+ "route_system = \"Route the user's query to either the animal or vegetable expert.\"\n",
+ "route_prompt = ChatPromptTemplate.from_messages(\n",
+ " [\n",
+ " (\"system\", route_system),\n",
+ " (\"human\", \"{input}\"),\n",
+ " ]\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# Define schema for output:\n",
+ "class RouteQuery(TypedDict):\n",
+ " \"\"\"Route query to destination expert.\"\"\"\n",
+ "\n",
+ " destination: Literal[\"animal\", \"vegetable\"]\n",
+ "\n",
+ "\n",
+ "# Instead of writing formatting instructions into the prompt, we\n",
+ "# leverage .with_structured_output to coerce the output into a simple\n",
+ "# schema.\n",
+ "chain = route_prompt | llm.with_structured_output(RouteQuery)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "88012e10-8def-44fa-833f-989935824182",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "vegetable\n"
+ ]
+ }
+ ],
+ "source": [
+ "result = chain.invoke({\"input\": \"What color are carrots?\"})\n",
+ "\n",
+ "print(result[\"destination\"])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "baf7ba9e-65b4-48af-8a39-453c01a7b7cb",
+ "metadata": {},
+ "source": [
+ " \n",
+ "\n",
+ "## Next steps\n",
+ "\n",
+ "See [this tutorial](/docs/tutorials/llm_chain) for more detail on building with prompt templates, LLMs, and output parsers.\n",
+ "\n",
+ "Check out the [LCEL conceptual docs](/docs/concepts/#langchain-expression-language-lcel) for more background information."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "353e4bab-3b8a-4e89-89e2-200a8d8eb8dd",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/versions/migrating_chains/map_reduce_chain.ipynb b/docs/docs/versions/migrating_chains/map_reduce_chain.ipynb
new file mode 100644
index 00000000000..0520e67f8d7
--- /dev/null
+++ b/docs/docs/versions/migrating_chains/map_reduce_chain.ipynb
@@ -0,0 +1,706 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "3270b34b-8958-425c-886a-ea4b9e26b475",
+ "metadata": {},
+ "source": [
+ "---\n",
+ "title: Migrating from MapReduceDocumentsChain\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2c7bdc91-9b89-4e59-bc27-89508b024635",
+ "metadata": {},
+ "source": [
+ "[MapReduceDocumentsChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain.html) implements a map-reduce strategy over (potentially long) texts. The strategy is as follows:\n",
+ "\n",
+ "- Split a text into smaller documents;\n",
+ "- Map a process onto the smaller documents;\n",
+ "- Reduce or consolidate the results of the process into a final result.\n",
+ "\n",
+ "Note that the map step is typically parallelized over the input documents.\n",
+ "\n",
+ "A common process applied in this context is summarization, in which the map step summarizes individual documents, and the reduce step generates a summary of the summaries.\n",
+ "\n",
+ "In the reduce step, `MapReduceDocumentsChain` supports a recursive \"collapsing\" of the summaries: the inputs would be partitioned based on a token limit, and summaries would be generated of the partitions. This step would be repeated until the total length of the summaries was within a desired limit, allowing for the summarization of arbitrary-length text. This is particularly useful for models with smaller context windows.\n",
+ "\n",
+    "LangGraph supports [map-reduce](https://langchain-ai.github.io/langgraph/how-tos/map-reduce/) workflows, and confers a number of advantages for this problem:\n",
+ "\n",
+ "- LangGraph allows for individual steps (such as successive summarizations) to be streamed, allowing for greater control of execution;\n",
+ "- LangGraph's [checkpointing](https://langchain-ai.github.io/langgraph/how-tos/persistence/) supports error recovery, extending with human-in-the-loop workflows, and easier incorporation into conversational applications.\n",
+ "- The LangGraph implementation is easier to extend, as we will see below.\n",
+ "\n",
+ "Below we will go through both `MapReduceDocumentsChain` and a corresponding LangGraph implementation, first on a simple example for illustrative purposes, and second on a longer example text to demonstrate the recursive reduce step.\n",
+ "\n",
+ "Let's first load a chat model:\n",
+ "\n",
+ "```{=mdx}\n",
+ "import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
+ "\n",
+ "\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "0bdf886b-aeeb-407e-81b8-28bad59ad57a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# | output: false\n",
+ "# | echo: false\n",
+ "\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "\n",
+ "llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "41cfb569-f7e6-48cb-a90e-45a482009971",
+ "metadata": {},
+ "source": [
+ "## Basic example (short documents)\n",
+ "\n",
+ "Let's generate some simple documents for illustrative purposes."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "b221a71f-982b-4c08-8597-96c890e00965",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_core.documents import Document\n",
+ "\n",
+ "documents = [\n",
+ " Document(page_content=\"Apples are red\", metadata={\"title\": \"apple_book\"}),\n",
+ " Document(page_content=\"Blueberries are blue\", metadata={\"title\": \"blueberry_book\"}),\n",
+    "    Document(page_content=\"Bananas are yellow\", metadata={\"title\": \"banana_book\"}),\n",
+ "]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c717514a-1b6d-4a0f-9093-ef594b9a0b17",
+ "metadata": {},
+ "source": [
+ "### Legacy\n",
+ "\n",
+ "\n",
+ " \n",
+ "Below we show an implementation with `MapReduceDocumentsChain`. We define the prompt templates for the map and reduce steps, instantiate separate chains for these steps, and finally instantiate the `MapReduceDocumentsChain`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "84ee3851-b4a9-4fbe-a78f-d05168715b91",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.chains import MapReduceDocumentsChain, ReduceDocumentsChain\n",
+ "from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n",
+ "from langchain.chains.llm import LLMChain\n",
+ "from langchain_core.prompts import ChatPromptTemplate\n",
+ "from langchain_text_splitters import CharacterTextSplitter\n",
+ "\n",
+ "# Map\n",
+ "map_template = \"Write a concise summary of the following: {docs}.\"\n",
+ "map_prompt = ChatPromptTemplate([(\"human\", map_template)])\n",
+ "map_chain = LLMChain(llm=llm, prompt=map_prompt)\n",
+ "\n",
+ "\n",
+ "# Reduce\n",
+ "reduce_template = \"\"\"\n",
+ "The following is a set of summaries:\n",
+ "{docs}\n",
+ "Take these and distill it into a final, consolidated summary\n",
+ "of the main themes.\n",
+ "\"\"\"\n",
+ "reduce_prompt = ChatPromptTemplate([(\"human\", reduce_template)])\n",
+ "reduce_chain = LLMChain(llm=llm, prompt=reduce_prompt)\n",
+ "\n",
+ "\n",
+ "# Takes a list of documents, combines them into a single string, and passes this to an LLMChain\n",
+ "combine_documents_chain = StuffDocumentsChain(\n",
+ " llm_chain=reduce_chain, document_variable_name=\"docs\"\n",
+ ")\n",
+ "\n",
+ "# Combines and iteratively reduces the mapped documents\n",
+ "reduce_documents_chain = ReduceDocumentsChain(\n",
+    "    # This is the final chain that is called.\n",
+ " combine_documents_chain=combine_documents_chain,\n",
+ " # If documents exceed context for `StuffDocumentsChain`\n",
+ " collapse_documents_chain=combine_documents_chain,\n",
+ " # The maximum number of tokens to group documents into.\n",
+ " token_max=1000,\n",
+ ")\n",
+ "\n",
+ "# Combining documents by mapping a chain over them, then combining results\n",
+ "map_reduce_chain = MapReduceDocumentsChain(\n",
+ " # Map chain\n",
+ " llm_chain=map_chain,\n",
+ " # Reduce chain\n",
+ " reduce_documents_chain=reduce_documents_chain,\n",
+ " # The variable name in the llm_chain to put the documents in\n",
+ " document_variable_name=\"docs\",\n",
+ " # Return the results of the map steps in the output\n",
+ " return_intermediate_steps=False,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "4f57ed52-08a5-49f6-ab19-1be51a853a2f",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Fruits come in a variety of colors, with apples being red, blueberries being blue, and bananas being yellow.\n"
+ ]
+ }
+ ],
+ "source": [
+ "result = map_reduce_chain.invoke(documents)\n",
+ "\n",
+ "print(result[\"output_text\"])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "46d29559-5948-4ce9-b7c5-fa6729cf2485",
+ "metadata": {},
+ "source": [
+ "In the [LangSmith trace](https://smith.langchain.com/public/8d88a2c0-5d26-41f6-9176-d06549b17aa6/r) we observe four LLM calls: one summarizing each of the three input documents, and one summarizing the summaries."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b5399533-8662-4fad-b885-e3df3d809c44",
+ "metadata": {},
+ "source": [
+ " \n",
+ "\n",
+ "### LangGraph\n",
+ "\n",
+ "Below we show a LangGraph implementation, using the same prompt templates as above. The graph includes a node for generating summaries which is mapped across a list of input documents. This node then flows to a second node that generates the final summary.\n",
+ "\n",
+ "\n",
+ "\n",
+ "We will need to install `langgraph`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "815f889b-7f19-4702-8e61-8dadcee7c729",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "%pip install -qU langgraph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "2c710a66-2d3d-44be-b9e7-9dfee8c22a50",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import operator\n",
+ "from typing import Annotated, List, TypedDict\n",
+ "\n",
+ "from langchain_core.output_parsers import StrOutputParser\n",
+ "from langchain_core.prompts import ChatPromptTemplate\n",
+ "from langgraph.constants import Send\n",
+ "from langgraph.graph import END, START, StateGraph\n",
+ "\n",
+ "map_template = \"Write a concise summary of the following: {context}.\"\n",
+ "\n",
+ "reduce_template = \"\"\"\n",
+ "The following is a set of summaries:\n",
+ "{docs}\n",
+ "Take these and distill it into a final, consolidated summary\n",
+ "of the main themes.\n",
+ "\"\"\"\n",
+ "\n",
+ "map_prompt = ChatPromptTemplate([(\"human\", map_template)])\n",
+ "reduce_prompt = ChatPromptTemplate([(\"human\", reduce_template)])\n",
+ "\n",
+ "map_chain = map_prompt | llm | StrOutputParser()\n",
+ "reduce_chain = reduce_prompt | llm | StrOutputParser()\n",
+ "\n",
+ "# Graph components: define the components that will make up the graph\n",
+ "\n",
+ "\n",
+ "# This will be the overall state of the main graph.\n",
+ "# It will contain the input document contents, corresponding\n",
+ "# summaries, and a final summary.\n",
+ "class OverallState(TypedDict):\n",
+ " # Notice here we use the operator.add\n",
+    "    # This is because we want to combine all the summaries we generate\n",
+ " # from individual nodes back into one list - this is essentially\n",
+ " # the \"reduce\" part\n",
+ " contents: List[str]\n",
+ " summaries: Annotated[list, operator.add]\n",
+ " final_summary: str\n",
+ "\n",
+ "\n",
+ "# This will be the state of the node that we will \"map\" all\n",
+ "# documents to in order to generate summaries\n",
+ "class SummaryState(TypedDict):\n",
+ " content: str\n",
+ "\n",
+ "\n",
+ "# Here we generate a summary, given a document\n",
+ "async def generate_summary(state: SummaryState):\n",
+ " response = await map_chain.ainvoke(state[\"content\"])\n",
+ " return {\"summaries\": [response]}\n",
+ "\n",
+ "\n",
+ "# Here we define the logic to map out over the documents\n",
+    "# We will use this as an edge in the graph\n",
+ "def map_summaries(state: OverallState):\n",
+ " # We will return a list of `Send` objects\n",
+ " # Each `Send` object consists of the name of a node in the graph\n",
+ " # as well as the state to send to that node\n",
+ " return [\n",
+ " Send(\"generate_summary\", {\"content\": content}) for content in state[\"contents\"]\n",
+ " ]\n",
+ "\n",
+ "\n",
+ "# Here we will generate the final summary\n",
+ "async def generate_final_summary(state: OverallState):\n",
+ " response = await reduce_chain.ainvoke(state[\"summaries\"])\n",
+ " return {\"final_summary\": response}\n",
+ "\n",
+ "\n",
+ "# Construct the graph: here we put everything together to construct our graph\n",
+ "graph = StateGraph(OverallState)\n",
+ "graph.add_node(\"generate_summary\", generate_summary)\n",
+ "graph.add_node(\"generate_final_summary\", generate_final_summary)\n",
+ "graph.add_conditional_edges(START, map_summaries, [\"generate_summary\"])\n",
+ "graph.add_edge(\"generate_summary\", \"generate_final_summary\")\n",
+ "graph.add_edge(\"generate_final_summary\", END)\n",
+ "app = graph.compile()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "934cf1a5-ce6d-48ac-8151-942d14586052",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/4gHYSUNDX1BST0ZJTEUAAQEAAAHIAAAAAAQwAABtbnRyUkdCIFhZWiAH4AABAAEAAAAAAABhY3NwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAA9tYAAQAAAADTLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlkZXNjAAAA8AAAACRyWFlaAAABFAAAABRnWFlaAAABKAAAABRiWFlaAAABPAAAABR3dHB0AAABUAAAABRyVFJDAAABZAAAAChnVFJDAAABZAAAAChiVFJDAAABZAAAAChjcHJ0AAABjAAAADxtbHVjAAAAAAAAAAEAAAAMZW5VUwAAAAgAAAAcAHMAUgBHAEJYWVogAAAAAAAAb6IAADj1AAADkFhZWiAAAAAAAABimQAAt4UAABjaWFlaIAAAAAAAACSgAAAPhAAAts9YWVogAAAAAAAA9tYAAQAAAADTLXBhcmEAAAAAAAQAAAACZmYAAPKnAAANWQAAE9AAAApbAAAAAAAAAABtbHVjAAAAAAAAAAEAAAAMZW5VUwAAACAAAAAcAEcAbwBvAGcAbABlACAASQBuAGMALgAgADIAMAAxADb/2wBDAAMCAgMCAgMDAwMEAwMEBQgFBQQEBQoHBwYIDAoMDAsKCwsNDhIQDQ4RDgsLEBYQERMUFRUVDA8XGBYUGBIUFRT/2wBDAQMEBAUEBQkFBQkUDQsNFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBT/wAARCAEvANADASIAAhEBAxEB/8QAHQABAAIDAQEBAQAAAAAAAAAAAAUGBAcIAwIBCf/EAFQQAAEDBAADAQkLBwkHAQkAAAECAwQABQYRBxIhEwgUFRYiMUFWlBdRU1VhkpOV0tPUMjdUcXaz0QkjOGJydYGhtDZCUnSRsbLDGCQmJzQ1hcHw/8QAGwEBAQADAQEBAAAAAAAAAAAAAAECAwQFBgf/xAA0EQACAAMDCQYGAwEAAAAAAAAAAQIDERJRkQQTFCExUmGh0RVBU3GxwQUjMjOB4SJC8EP/2gAMAwEAAhEDEQA/AP6p0pSgFKUoBSlKAUpWBerw1ZYYeW25IdWtLTMZgAuPOKPRCQSB75JJASAVEgAkVJxOiBn1HPZFaY6yh25w2lj/AHVyEA/96ifE9V9HbZM6LgpQ/wDtzSlCE1/V5enan0FS/P1ISgHlqQbxCxMp5W7LbkJ3vSYjYG/+lb7MqHVE2/L/AHsXUffjVZfjiB7Sj+NPGqy/HED2lH8aeKtl+J4HsyP4U8VbL8TwPZkfwp8njyLqHjVZfjiB7Sj+NPGqy/HED2lH8aeKtl+J4HsyP4U8VbL8TwPZkfwp8njyGoeNVl+OIHtKP408arL8cQPaUfxp4q2X4ngezI/hTxVsvxPA9mR/CnyePIaj7ayO0vrCW7pCcUfQiQgn/vUjUQ5iNieQUOWW3LQevKqI2R/2qO8SUWQdrjL3gdxPXvEEmE7/AFS15m/7TfKR03zAcpWZUWpNrz/3sTUWilR1kvKLzGcUWXIsllZakRXdc7Kx6DroQQQQR0III89SNaYoXC6MgpSlYgUpSgFKUoBSlKAUpSgFKUoBVXjau/ECYtzSmbPFbaZSfQ89tTive/IS0AfOOZY6bO7RVYso7zzrJI69gym401s66KHIWlAH3wWhv+0PfrolbI33090vSpV3lnpSlc5D8JABJOgK1Q53TmCzsXya8WG4vX4WOA9PW3HgygiQhB5dtOdkQ4kr0krb5wN7PQGtrOBKm1BSedJBBTrex72q5R4aWfI3xlmFYpZMrtXDqTjMxuNbsygd7KtdwcPK3GivK8p1kpWskbWlHKNL66oDbGLd0hil34UWvOLo5NtESSmO0805a5nMJLjSXC0
ygshb6fKOltpUlWjo+epb/wBoDh+METmaskYRjXfibeuctl1PYyFOBsNuoKOdohShvnSnlB2dDrWlEZZlsjgfw5s0PH85xyNZ1wLXlYgWh5u5pYbiqSrvTySpxBeQ2FOM7UEq2PTqqWzA727huXWxvFMqEaZxKst5jM3xh6TIfgqXDC3nFqKyrQZcK+ZRUga5+U9KA3TlPdX4xj2S4ZAah3iXbr+uYHZgsdwDjCWWyoFDPe5W7zL0PJHRPlebrW7UKC0JUN6I2NjVaY47IuNj4i8K8yYsd1vtpsU2e3cGrLEVLktJkRFNtuBlG1KSFgAkA63utxQJYnwY8oNOsB5tLgafQUOI2N6Uk9UqG+oPmNAe9KUoCsXLVozq0SkaSi7NuQHx18txCFPNK97olL4+XmHvVZ6rGRp79y3E4qASpiQ/cF6GwEIYWz1Po8qQj9ej8tWet836YHw92V9wpSlaCClKUApSlAKUpQClKUApSlAKhcgtD8l6HcreG/CsEqDQdUUodaXrtGVEeYK5UkHrpSEHRAIM1Ss4YnA6obCCS/Zs9s023TIrUyM6gsTrVPaBUkK6Ft1s76Hr74UOoJBBqqDubOE483DfFh/+IY+zVyvWK2vIFtuzI25LaSluWw4pl9sb2Ql1BC0jejoHXQVHHCHBsN5LfWk73yiShf8AmpBP+dbbMqLWoqefVdC6iCidzrwtgSmZMbh3jDEhlaXGnW7UylSFA7CgQnoQRvdbEqr+JMj1qv30zP3VPEmR61X76Zn7qmbl7/JiivLRSqLkuE33xcuvgPKrt4b70d7x77ea7HvjkPZ8+mt8vNy716N14YZhOS+KVn8Z8qufjF3q34R7wea73745R2nZ7a3y829b9FM3L3+TFFebBrX9y7nzhjeLjKnzuH+NTJ0p1b78h+1srcdcUSpS1KKdkkkkk+cmpjxJketV++mZ+6p4kyPWq/fTM/dUzcvf5MUV5AL7m/hS6oFfDjF1kAJ2q0sHoBoD8n0AAVae0sXDyxwrfFjx7ZCaHYQbbCaCSojqG2Wk+c/IB06k6GzWMMIdI05k19cTveu+G0/5pbB/zrPsuJWuwPLfix1KluJ5Vy5Lq331j3i4slWvk3r5KWZUOtxV8l7voxqPOwWqQmXKu9yQhFylpS32SFcyY7KSShsH0nyiVEecn3gKnKUrVHE43Vh6xSlKwIKUpQClKUApSlAKUpQClKUApSlAKUpQClKUBXeI0a3TOHuUMXec5bLS7a5SJk5n8uOyWlBxxPQ9Up2R0Pm81YPB6HZ7fwqxGLj9zevVjZtcdEG4yN9pJZDYCHFbA6qGj5h5/NUln8lmHgmSSJFpN+Yatslxy1JTzGakNKJYA0d848nWj+V5jWFwomx7jwzxaVEsCsVjPW1hxqyLRyGAkoBDJGhrl/J1oebzUBa6UpQClKUApSlAKUpQClKUApSlAKUpQClKUApSlAKUpQClKUApSlAROWN3Z7Fby3YHWmL6qE8m3uvjbaJBQeyKtg+SF8pPQ9PRWPgTOQR8JsTWVvsScmRCaTcnooAaXI5R2hRoAaKt66D9VefEaNbpnD3KGLvOctlpdtcpEycz+XHZLSg44noeqU7I6HzeasHg9Ds9v4VYjFx+5vXqxs2uOiDcZG+0kshsBDitgdVDR8w8/moC4UpSgFKUoBSlKAUpSgFKUoBSlKAUpSgFKVD5DkIsojsssGZcJSiliMFcgIGuZalaPKhII2dHzgAEkA5QwuN2YdoJilUk33LyelvsgHvGY8df49l1r88O5h+gWP2t77uurRY71ii0LvSqR4dzD9Asftb33dPDuYfoFj9re+7posd6xQoXelUjw7mH6BY/a3vu6eHcw/QLH7W993TRY71ihQu9KpHh3MP0Cx+1vfd08O5h+gWP2t77umix3rFChwd/Kj8DFW3IbVxRtkcmNcgm3XcpG+V9CdMuH+02nk35h2SfSqoz+S64KOX/AD65cSprakQLAhcKArR
AclutlLhB9IQ0sgg/DJPortjiviV74v8ADy+YferdZO8LpHLRcTKdKmVghTbidt65kLCVDfTaetYvBfBL3wR4a2XDrNBsrsW3tkLkuSXUrkOqJU44rTfnUonp10NDzCmix3rFChu2lUjw7mH6BY/a3vu6eHcw/QLH7W993TRY71ihQu9KpHh3MP0Cx+1vfd08O5h+gWP2t77umix3rFChd6VSPDuYfoFj9re+7p4dzD9Asftb33dNFjvWKFC70qkeHcw/QLH7W993Tw7mHxfYz8nfbw/9Kmix3rFChd6VCY5karwqRFlxu8bnGCS9HC+0QUq3yrQvQ5knRG9AggggembrmjgigdmLaQUpSsAKUpQCqVkR/wDmNZR71qm6+T+ei/8A9/gKutUnIvzj2b+6Zn76NXZkv3Pw/RlRJ0rU3HjOLtjE7CLPb72zicXILouHMyN9ltwQ0oYcdShIdBbC3VICElYIHXoTqtRw+OGdxsLjwItzlZRe73mU6x26+woUQl2DHaKi9GaUpplSj2SgOdZTzFwjmASmtjiSdCHWtK5cu3EPi9i2D3xdxTOty0XazR7Rer/AgCQ8mRLQ1IaeZiurbUlII0pPIohZ1ogKr3zzjHmXBSdnNll3ZWZy49rtc+zypcNhlxp2XMXEKFpa7NC0pUkLTvlP+6VdeYLSB05SuZ4+ZcXsateWSbkxfXbTHxq4TUXXIIFrjuwpzTRWz2aYrziXEK8raVo2ClPlKBNSr2SZXj/BizX+/cRJ4v8AkrdvTDZtliiyFokOIKzHis8g51rB6qdUpKezKtJGwFoHQLjiGW1OOKShCRtSlHQA98mvquM88zPLs27m/i5Z8kn3CLdMauUaOZEqFFYlSY7gjuoQ+20XGkqHa/lNkbCU+bygdkcTczzfEb/iXD2yXW+368TYcu5zr5DgW5dwUy24hKEIbdLMYdXQCrlJASPJJJUFoHQlK13wRuubXPG7gjObdJhzo09bUORMbjtPy4vKhSHHW2HHG0L5itJCVaPIDob1WL3QF4zGx4lbpOId+NgXJpN2lWyEibNjQeVfaOMMLBS4oK7PY0o8pUQkmrXVUGzqVo7COJk++cQeHtug5ajKsfuuO3Kc/PRCbY77eZkR0IWpISC2pAWtCkDlG97SCNCpReJ+dZHklmscXJvBhuWd5BY1y0wGHVtQ4rby2kICka5khsAKUD16q5/MZaQOnqVye/xB4l49hmcZPJznwknCsnFn7xctMZtFzjh2PzKfUlO0ucsjQLXIByDYOzrO4i8QuIUOLxqvtqzDwbFweY2qBbfBkd1t9Hecd5bby1J5iklatcpSoFR2ojlCVoHUVK0GznWSYDnV0s2U5q3OtL2HSMjF1lW1lrwY6y6htfKhoJ529OhQQrmV5GuY7qp4ZxgzuHkV8tVzuF6uEGViM2/2u4X+zRID6HWVIAU22yo7bIdB5XkhYKRvYJpaQOqKVzZEy3iFjPBPEeJ14zF28NON2q53q2It0ZthEB1AEgoKW+fnSl5Dqjza2yrlCUq5a2Zwvy265xl+fT1TA5i0G5Is9pjpbQAXGEDvp7nA5lBTqygbJA7HoBs7qiqC52c64lPD37Qnfy/zx1/3P/WrvVHs/wCct7+6B++NXitWVfWvJGTFKUrjMRSlKAVSci/OPZv7pmfvo1XaqtltrlJudvvcOOqYqI07Gfit67RTThQoqRvzqSW0+TsbBVrZCQerJmlM13P0ZUar7pDCblnOI2yJbbTdL0WLgmQ7FtVxiRXCkIWASmW2tl0AkEJWBo6UCCkVAYPwavuZcOHLLxGdnw3IV1TNx15mXHF0tSEISG1F6M2lrtAou65UkcqgDv0bbVmMdB0q130HXUCyyjr/ABDeq/PHON8WX76kl/dV2ZiNutllssqzvA+DcMRdsN2ybJL6hy5xbqqdcpjbkjtGHW3G0J02EIb20naUoG9qPQndZOXcEMYzu9X64XxqROTerMzZJUNTgSz2LTz
jyFp0ApLgW4Tzc3TlToAjZsHjnG+LL99SS/uqeOcb4sv31JL+6q5iPdYsu4q1q4JMQsfv9ouGYZXkUa8W5y1rVeLgh1UdlaVJJbAbSnn0o+WoKUdDZNZuScHrRkmG49j6p1yt5x9cd62XSC8hEuM6y2W0OBRQUElClJIKCkhR6VOeOcb4sv31JL+6p45xviy/fUkv7qmYj3WLLuKVC7nPGmLJmlqmT7zeY2Xtti6quMwOOLdQgp7ZCgkFKyOToPJHZo5UpAIP7cu59t93ttlTMyvKHr9ZnnHYGTd+tJuTCXEhLjXOGghTagkbSpB3rfnq6eOcb4sv31JL+6p45xviy/fUkv7qmYj3WLLuIBNsyzArPAteNRE5mhJcckXDKMgcYklal83nTGcCh1PQBISAABrzY8zH8u4i28xMhU9gKor6JEWbiN/Mh51XKtKkOB2IhPJpQPKQoE6PQpBqwT+IVstUGTNmxLzEhxm1PPyH7PKQ202kEqUpRb0AACST5gK+LVxItN9tkW422Pd7hb5TaXo8qLaJTjTzahtKkKS2QoEdQRTMTN1kssqLPc4Y9bbVjUez3a+WKfYTK73u8KUgy3RJXzyQ8XG1oX2i9KO09CBy8te2K9zvjmIy7FJiXC8SHbPeZ18YVMkpdU4/LaW26HFFHMpIDiiOvNvRKldd3LxzjfFl++pJf3VPHON8WX76kl/dUzEe6y2XcVe5cCLBdMUzHH3ZlyTCym7G8TXEOthxt4lk8rZKNBH8wjooKPVXXza+7zwOsN8s/EK2vy7ihjN1hy4qbcbCmj2DbH8ztBCfJbSfKCupPo6VZfHON8WX76kl/dU8c43xZfvqSX91TMR7rFl3EDlXBbHc0vL9wu3fUkSMfkY07F7QJaXFecbWtXRPMHAWk6UFADr03oiBg9zfaWLo3c5mU5Rebii1ybMZNxmtOFUN5ASWikNBI5SkLCgAoqA5iodKvnjnG+LL99SS/uqeOcb4sv31JL+6pmI91ksu4rOWYrLxrgsnDsasa8qS3a0WJqLNltsczHY9j2jzhABASAVcqdnZ0mpPg5w7Z4T8MMcxNpaXlWyIlt95G9OvnanXBvr5Tilq6+/Un45xviy/fUkv7qnjlGPmtd+J97wJLH/p0zEe2yy2XcZVn/OW9/dA/fGrxVTxW2ypN4lX2XGcgh2OiLHjPa7UIClKUtYG+XmJGk72AnZ0SUi2Vx5S046LuSIxSlK5CClKUApSlAKUpQClKUApSlAKUpQFG46/mR4hfs7cf9M5UR3Lv9HDhl+zsH9ympfjr+ZHiF+ztx/0zlRHcu/0cOGX7Owf3KaA2hSlKAUpSgFKUoBSlKAUpSgFKUoBSlKAUpSgFKUoBSlKAUpSgFKUoCjcdfzI8Qv2duP+mcqI7l3+jhwy/Z2D+5TUvx1/MjxC/Z24/wCmcqI7l3+jhwy/Z2D+5TQG0KUpQClKUApSlAKUpQClKUApSlAKUr5W4hsbWoJH9Y6oD6pXl30z8M384U76Z+Gb+cKtGD1pXl30z8M384U76Z+Gb+cKUYPWleXfTPwzfzhTvpn4Zv5wpRg9aV5d9M/DN/OFO+mfhm/nClGD1pXl30z8M384U76Z+Gb+cKUYOMO7U7sm48IrnkvDWXgBlxL3ZXG4d88L9mFtvsqbUvsuwPVC+ccvP15Qdjm6RncOd2NN4gzMM4SRcCW3HtFnDMu/JuvOG2o7PKHSz2I0FudmjXP0Lg6nXW+fyhnBNri3wWdvluSl3IsV557ARoqdjEDvhv5qQsenbeh+VUR/JucFGuG/CJzL7m2hq/ZUUvoDnRbMJO+xT183OSpzp5wpvfmpRg7CpXl30z8M384U76Z+Gb+cKUYPWleXfTPwzfzhTvpn4Zv5wpRg9aV5d9M/DN/OFO+mfhm/nClGD1pXl30z8M384U76Z+Gb+cKUYPWleXfTPwzfzhTvpn4Zv5wpRg9aV8IdQ5vkWlWvPyndfdQGLdJvg22S5fLzdgy
t3l9/lST/APqteWvErVfrdEuV5t8S8XKUyh56TOYS8ragCUp5h5KB5gkaGh7+zV5yr/Zi8f8AJvf+BqvY1/s5av8AlGv/AAFelk7cEtxQujqZbEYXufYt6tWf2Br7NPc+xb1as/sDX2arDPdC4DKy6PjMa+Kl3iRMVAZbjwpC2nH0b7RCXg32aijR5tKPLo82tV7L4+YC3lXi8rIWhcu+xA5uwe72753rsO+OTse0305Ofm301vpW3PzN94kq7yw+59i3q1Z/YGvs09z7FvVqz+wNfZqsS+6G4fQLzItb+QBuXGneDZJ7zkFmNJ5+QNvOhvkaJUQAVqAV6Cay8z444Rw/u5td8vgizkNB95tqK9IEZs70t5TaFBlJ0dFwpGhumfmb7xFXeTnufYt6tWf2Br7NPc+xb1as/sDX2agr7xzwnHb4xZpV5U9dJENq4MRIEKRMW9GdUpKHUBltfMnaFbI3oaJ0CCfwcdsG8dU4mq+dlfFSTCS07EfQ0uQN7aS+pAaUvofJCt/JTPzN94irvJ73PsW9WrP7A19mnufYt6tWf2Br7NU3A+PVrzjiTl2HohTYsqyTu82XlQZPZyAllK3FKcLQbb0pSkpSpXlBIUnYUKkbJx7wLI8kZsVvyFp+e+6tiOosPIjyXE75kMvqQGnVDR6IUo9D71M/M33iKu8sPufYt6tWf2Br7NPc+xb1as/sDX2an65+x7ulncqyTPJMZyPb8TxYOMqEqxXFct5aW2z2pUlASlIW4AWghTnKkq8kEGjnzF/Z4irvNxe59i3q1Z/YGvs09z7FvVqz+wNfZqlW3ugsXtuN4w5kV7juXu7WWPeEs2a3zHkyGnE7LrDQbU7yb2dKHMkaKgKmL1xzwiw41Zr/ACb321pvKC5AfgxH5ZfSACSENIUoAbG9ga9OqZ+ZvvEVd5O+59i3q1Z/YGvs09z7FvVqz+wNfZqoXrjE0/d+Fy8ZkQbtYcvuD0dc0BSj2SIjzwLZChyq52gDzA68oaB82zqqnzH/AGeIq7yA9z7FvVqz+wNfZp7n2LerVn9ga+zUTA4y4fdc3exKHdzJvrLzkdxlqK8WkuoQVra7bk7LnSkElHNsa81e0Ti3ic7GcbyFi689nyKSzDtcnvZ0d8OukhtPKUcydlJ6qAA11Ipn5m+8RV3kh7n2LerVn9ga+zT3PsW9WrP7A19mqbM7pzhpb5C2pOTJYDct2A4+uFJDDcltSkrZW72fIlzaFaQVAqGikEKBMizx6wR3Grvfl34RbZZ5DMW4rmRH47kRx1aEN9q04hLiAouJ0op5dEnegSJn5m+8RV3lh9z7FvVqz+wNfZp7n2LerVn9ga+zVJc7qHhq0qYhd+kokQ0hyRGVaJofaa1vti12POGtde11ydR5XUVMZbx1wfB4tsk3a98ke5Ru/Yr0SI/KQ4xoHtSWULCUaIPMrQ6+emfmb7xFXeT3ufYt6tWf2Br7NPc+xb1as/sDX2ahsk42YViYsXhG9p3fYy5drESO9KM1pIbJLQaQorOnUEJHUg7AIB1kQeLuJ3DGb/kDd0LdqsHaC5uSYrzC4pQ0l1QU2tAXvkWlQ0k7302elXPzN94irvM2VhNogxnH7Tb4tmuDSVLYlwGUMuNq1sdUjqDobSdgjoQRV0xq6m+45arkpKUKmRGpBSnegVoCtDf66hXH0Sbap5sktuNFaSUlJ0RsdD1H+NZPDj83mL/3VF/cprTPbjlWonVp9S7VrM/Kv9mLx/yb3/gar2Nf7OWr/lGv/AVZMjZXIx66NNpKnFxXUpSPSSggVWsXWlzGrSpJ2lURkg++OQVjJ+y/P2J3HInD8PWbIMHw3LBcrBjGNZS+9Y5E3HZjLs6Stx9EZp2UUlgbL6jtCj2nk+Yk1l8LuF0K3Wa38Ps4xjiNOu0e4qbffi3CebFJT3wXW5e0vBhKfyVlOgoKB8kmt+23ueeH1pyZu/R8fHhFqUZrXazJDrDUgkq7VDC3C0heyTzJSCD
1FbGrFQXkOWMqwy/SOA3HyA1Yri7cLlk86TAioiOF2UgqjlDjSQNrB5TpSdjyTrzV4ZBh6sX4pcRHMmx/iFeoOQy259tlYbNmiPIbMdDSoz6I7qEIWkoICndApI8oAarq6lWyDSmA4AjEOPj4t1mlw8dg4NbbVBkPIWtCA3JkEsB1W+ZSU9mSOYnXKT6K1Dm8HMMhuQk3u0Z3c8jtWaRp/YRGHvA0a2MzkqbWwhBDb6uxCT0C3Qoq2AAa7JpVcINAwIF3tvEvi7jTtovEY5itEi036PCccgoBtyGSXHkghtSXGiNK0TtOt7qocE8DtTjGDY3kuIcRo2Q4+phbpn3Cc5ZI0qKjaHm1Ke7BTalI8hLYOucDlA3XVteUqKzOjPRpDSH47yC2404naVpI0QR6QRSyD1rSWB49dIdg45NyLbMYcuORXF+EhxhaTKbVBjpStsEeWkqSoAp2CQR6KtQ7nbhakgjh5jII6gi1M/ZrYdWldoOc+BmK3q0Ztw5kT7PPhMxOFkK3SHZEVbaWZSXWSphZIHK4ACSg+UNHpVQxiBl2N4Fw9tFzt2ZW3Ew9e1XKPjEV9Fw7cz3FREOdmA80yptS1BSNA+TshJFdd0qWQch4JjGR4phPCyVJxTIP/hjM7oqfAVHU9Majye+w28ACe1QO+GypaCodVHZ0a68pWvV9zxwvcWpa+HuMqUo7KjamSSfm0SpsBre1i74/x673wqy5Vb7RdLvJdyeHdreU2hQ7NX/v0WQfM4taUeQhRCuYkpSRuqTjcTIIvDbgzgbuG5Ii74xk9v8ACslVscERlpl1wF1L2uVxBBCgpGwB+UU+nrqHDYt8RiLFZRHjMIS00y0kJShCRoJAHmAAA1XtSyDlhOGX73HmIRsVx78HE7wgY/ebnad7eGi52/LrfZ9n5fP5uXrvVfvGTDb9dL3xpXCsVxltXFGJd6KYiOLEkszVKe7PQ8vkToq1vlGt6FdTUpZBp2Vj9wc7oDNLh4Nkqt0nC4kRqV2Ciy68JEsqaSrWlKAUklIO9KHTqK1FYLfl1vxDhzYb9a85YxtnDIzTdvxll5h9d0G0rZlrRyrZSlHZ8oWpDeyrmPTVdf0pZBy3wZw++Q7h3PxuVguURdhxu8QZypcNaRDfBjNpSpRGk8wQvkO/LTsp2K9uNWG3CVx1tONQG0rx/iSiOu+p31QLY4l1xX6nmVNsn+yK6eqvQOH+P23MbllbFtQMiuDSWJFwcWtxfZpCQEI5iQ2nyEkpQACRs7PWpZ1UBOS//pHv7Cv+1evDj83mL/3VF/cprHuDiWYElxauVCGlKUT6AAd1mYAwuLgmNsuDlcbtsZCgfQQ0kGspv2fyvRmXcT9VOVw+T27i7Ze7lY2VqKzFhhhbIUepKUutL5dnrpJA2SddatlK4oJkUv6WStCm+IFw9c739BC/D08QLh653v6CF+Hq5UrdpMzhgugqU3xAuHrne/oIX4eniBcPXO9/QQvw9XKlNJmcMF0FSm+IFw9c739BC/D08QLh653v6CF+Hq5UppMzhgugqU3xAuHrne/oIX4eniBcPXO9/QQvw9XKlNJmcMF0FSm+IFw9c739BC/D08QLh653v6CF+Hq5UppMzhgugqar4k2S84dw7ym/w8vuzsu1WqVOZbfjwy2pbTKlpCgGAdEpG9EdPTWBwbh3ziJwnxDKLjl10Yn3i1Rpz7UaPDDSFuNhSgkKYJA2emyf11auOv5keIX7O3H/AEzlRHcu/wBHDhl+zsH9ymmkzOGC6CpPeIFw9c739BC/D08QLh653v6CF+Hq5UppMzhgugqU3xAuHrne/oIX4eniBcPXO9/QQvw9XKlNJmcMF0FSm+IFw9c739BC/D08QLh653v6CF+Hq5UppMzhgugqU3xAuHrne/oIX4eniBcPXO9/QQvw9XKlNJmcMF0FSm+IFw9c739BC/D08QLh653v6CF+Hq5UppMzhgugqVJrh6l0pTdL3cr1GB2qJLDCGnP
MQFhppBUNj8knR8xBHSrbSlao5scz6mK1FKUrUQUpSgFKUoBSlKAUpSgFKUoCjcdfzI8Qv2duP+mcqI7l3+jhwy/Z2D+5TUvx1/MjxC/Z24/6ZyojuXf6OHDL9nYP7lNAbQpSlAKUpQClKUApSlAKUpQClKUApSlAKUpQClKUApSlAKUpQClKUBRuOv5keIX7O3H/AEzlRHcu/wBHDhl+zsH9ymuHf5UfgkuyZjauJ1vZJh3kIt9zI68kptGmln+20jl+TsflqM/kvuC7+S8TJ3EaUlxu242hcWGsdA9LebUhQ36Qhpatj33EGgP6i0pSgFKUoBSlKAUpSgFKUoBSlKAUpSgFKUoBSlKAUpXhOmsW2FImSnAzGjtqddcV5kISNkn9QBqpNuiBiX7Irbi9vVNuktuHGB5QpeyVK9CUpGypR0egBNa4nce9vatuOyJDO9drOkJj8w98JAWdfrAPyVr+/ZHMzO6+FpwUgEERIqvNFaOvJ1/xHQKj6T08wSBhV9pkvwaVBAnlCrFdXUsBVI2B7vN49VYf1ur8PT3ebx6qw/rdX4etf0rv7LyLw+cXUlrgZfGbJnuNXDK/YbdcYhsx7mxyIki6KWqO6CFNugdgNlKwk62NgEbG6xeBF3d4D8MLPh1rxqHLTDSVyJpuam1Sn1HbjpT2B1s9ANnSQkbOq/KU7LyLw+cXUWuBsD3ebx6qw/rdX4enu83j1Vh/W6vw9a2RcoblwcgJlMKnNtpeXFDgLqUKJCVlO9hJKVAHzHR96smnZmRP/nzi6i1wNlwePaw7q5Y28yzvXaQZaZBHylKktnX6tn5DWx8eyW2ZVAEy1S0S2N8qtApWhX/CtCgFJPyKANc21lWa+zcUuqLtbhzPoAS8x5hKaB2W1fL5+VX+6T6QVA8OVfBpMcLchWYvPU8S1TOnKViWm6Rr3a4lwhr7SLKaS80vWtpUNjp6D181ZdfFNOF0YFKUqAUpSgFKUoBSlKAUpSgFKUoBVH41PrY4bXQJVyh5yNHWf6jkhtCh/ilRH+NXioLOMdOWYldLUlSUPSGSGVr/ACUOjym1H5AsJP8AhXTk0cMufLji2JpvEq2nO1K82HFON/zjS2HkkodZcGlNrSdKQflBBB/VVfvdyyyNcFt2mwWu4QgByvyruuOsnXUFAjrA6/1v+lfpsUShVX19DAslas4r5tfoOV2PF8dbnJkzYz86RItrEd6QlttSEhKEyFpb6lfUnZAA0OpIsJvWe9NYnY/l3kDv4Ssa64M/xAZgTr80vGL/AG15ZhTbBci660hSQFDnWykEK8xQUEeSDv3uabE5kFmXVPya89dAUg5XxDRGxm23F52xTbhkDlvTMlRIynpEPvVxxK1NoWtCHApJHknW0AkEEpP7L4j5RYmb5jXhFq431vIodjg3iVGQkIRJZQ6HHG0BKVKQkrHQAE8vTz72IjhtDLWPCTc7pcHrJNXPZkTJAcddcUhxBDhKeqdOK0E8utDXQarFvXCCx37xiMpyb2l6lR5y3WnghcV9htCGnGFAbQQEA7O+u/QdVzuTOS/jE68X3U694KxgVquln45ZJHu17cv8nwBBUmW7GbYVy9u/5JS2Ak6OzvQ6ED0bO3q17A4ezcJuUu+2iVNyu+TGGYT3jBcksp7FClqBCm2FaO161y6Pn6He5EXnPNK3iljB101kDvU+yfrrfJ+VDZiT2t9777wXGlVmz3TL5FxZbuePWmDBO+0kRry4+4jodaQYyAdnQ/KGt7661VikPd7sqXyLcI6JQ2OZSyegSkekk6AHvmuqGJRKq6eoN2cD3lOYAy0fyGJclpH9kPKIH+G9f4VfqrnDzG3MTw62218gykIU7IKTsds4ouOaPpAUogfIBVjr80yuOGZlEyODY2/Uze0UpSuQgpSlAKUpQClKUApSlAKUpQClKUBrviHwtOQyF3WzLZi3ZWu2ae2GZQAABUQCUrAAAWAdgaIPklOpptlvVqdLU+wXSOsf7zc
VUhv9fO0FJ1+siunaV7eS/Fp2TQKW1aSv2r8l1Pacr7f+L7l9Xv8A2Kbf+L7l9Xv/AGK6opXf29F4fP8ARKI5X2/8X3L6vf8AsU2/8X3L6vf+xXVFKdvReHz/AEKI5X2/8X3L6vf+xTb/AMX3L6vf+xXVFKdvReHz/QojmGDabxdXg1BsN1kLJ0FLhrYb+kcCU/51tXh9wqXZpTV1vimn7i31YiskqZjn/i2QOZevToAdde/WyaVw5V8XnZRC5cKsp44l1LYKUpXhkFKUoBSlKAUpSgP/2Q==",
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from IPython.display import Image\n",
+ "\n",
+ "Image(app.get_graph().draw_mermaid_png())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "cc38735a-004e-41cc-944d-288334e04850",
+ "metadata": {},
+ "source": [
+ "Note that calling the graph in streaming mode allows us to monitor steps and potentially take action on them during execution."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "7b1994f4-5a7a-4dda-bc0c-b9548cc8242f",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'generate_summary': {'summaries': ['Apples are typically red in color.']}}\n",
+ "{'generate_summary': {'summaries': ['Bananas are yellow in color.']}}\n",
+ "{'generate_summary': {'summaries': ['Blueberries are a type of fruit that are blue in color.']}}\n",
+ "{'generate_final_summary': {'final_summary': 'The main themes are the colors of different fruits: apples are red, blueberries are blue, and bananas are yellow.'}}\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Call the graph:\n",
+ "async for step in app.astream({\"contents\": [doc.page_content for doc in documents]}):\n",
+ " print(step)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c4d4e16c-4279-4a55-b3a8-5adbe63febe5",
+ "metadata": {},
+ "source": [
+ "In the [LangSmith trace](https://smith.langchain.com/public/8ecbe9fd-eb02-4c6e-90ae-659952c9360a/r) we recover the same four LLM calls as before."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b8027c94-c151-4fa4-8180-088fa52bb042",
+ "metadata": {},
+ "source": [
+ " \n",
+ "\n",
+ "## Summarizing long documents"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2b50a081-7dec-4680-88f2-2d43f0079e1c",
+ "metadata": {},
+ "source": [
+ "Map-reduce flows are particularly useful when texts are long compared to the context window of an LLM. `MapReduceDocumentsChain` supports a recursive \"collapsing\" of the summaries: the inputs are partitioned based on a token limit, and summaries are generated of the partitions. This step is repeated until the total length of the summaries is within a desired limit, allowing for the summarization of arbitrary-length text.\n",
+ "\n",
+ "This \"collapse\" step is implemented as a `while` loop within `MapReduceDocumentsChain`. We can demonstrate this step on a longer text, a [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng (as featured in the [RAG tutorial](/docs/tutorials/rag) and other documentation).\n",
+ "\n",
+ "First we load the post and chunk it into smaller \"sub documents\":"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "dfbb6ce8-9183-41d4-b022-924ee01669e0",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "USER_AGENT environment variable not set, consider setting it to identify your requests.\n",
+ "Created a chunk of size 1003, which is longer than the specified 1000\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Generated 14 documents.\n"
+ ]
+ }
+ ],
+ "source": [
+ "from langchain_community.document_loaders import WebBaseLoader\n",
+ "from langchain_text_splitters import CharacterTextSplitter\n",
+ "\n",
+ "loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n",
+ "documents = loader.load()\n",
+ "\n",
+ "text_splitter = CharacterTextSplitter.from_tiktoken_encoder(\n",
+ " chunk_size=1000, chunk_overlap=0\n",
+ ")\n",
+ "split_docs = text_splitter.split_documents(documents)\n",
+ "print(f\"Generated {len(split_docs)} documents.\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "06b84ce2-4405-4f0c-aae3-6eff92de6bd9",
+ "metadata": {},
+ "source": [
+ "### Legacy\n",
+ "\n",
+ "\n",
+ "We can invoke `MapReduceDocumentsChain` as before:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "ec58fdce-c6f1-4964-bea6-0fcba0e0ae8a",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "The article discusses the use of Large Language Models (LLMs) to power autonomous agents in various tasks, showcasing their capabilities in problem-solving beyond generating written content. Key components such as planning, memory optimization, and tool use are explored, with proof-of-concept demos like AutoGPT and GPT-Engineer demonstrating the potential of LLM-powered agents. Challenges include limitations in historical information retention and natural language interface reliability, while the potential of LLMs in enhancing reasoning, problem-solving, and planning proficiency for autonomous agents is highlighted. Overall, the article emphasizes the versatility and power of LLMs in creating intelligent agents for tasks like scientific discovery and experiment design.\n"
+ ]
+ }
+ ],
+ "source": [
+ "result = map_reduce_chain.invoke(split_docs)\n",
+ "\n",
+ "print(result[\"output_text\"])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "705b9c3b-6674-4c7e-8843-1d08dde078f8",
+ "metadata": {},
+ "source": [
+ "Consider the [LangSmith trace](https://smith.langchain.com/public/d8b3311d-2220-487a-8eaf-104ef90678dd/r) for the above invocation. When instantiating our `ReduceDocumentsChain`, we set a `token_max` of 1,000 tokens. This results in a total of 17 LLM calls:\n",
+ "\n",
+ "- 14 calls are for summarizing the 14 sub-documents generated by our text splitter.\n",
+ "- The generated summaries totaled about 1,000 - 2,000 tokens. Because we set a `token_max` of 1,000, there are two more calls to summarize (or \"collapse\") these summaries.\n",
+ "- One final call is for generating a final summary of the two \"collapsed\" summaries."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4100aedb-0170-45a1-97d5-aa7e9bdb0419",
+ "metadata": {},
+ "source": [
+ " \n",
+ "\n",
+ "### LangGraph\n",
+ "\n",
+ "\n",
+ "We can extend our original map-reduce implementation in LangGraph to implement the same recursive collapsing step. We make the following changes:\n",
+ "\n",
+ "- Add a `collapsed_summaries` key to the state to store the collapsed summaries;\n",
+ "- Update the final summarization node to summarize the collapsed summaries;\n",
+ "- Add a `collapse_summaries` node that partitions a list of documents based on a token length (1,000 tokens here, as before) and generates summaries of each partition and stores the result in `collapsed_summaries`.\n",
+ "\n",
+ "We add a conditional edge from `collapse_summaries` to itself to form a loop: if the collapsed summaries total more than the `token_max`, we re-run the node."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "id": "a1cb9fcf-3a27-45e0-84bc-83c66aa65421",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from typing import Literal\n",
+ "\n",
+ "from langchain.chains.combine_documents.reduce import (\n",
+ " acollapse_docs,\n",
+ " split_list_of_docs,\n",
+ ")\n",
+ "\n",
+ "\n",
+ "def length_function(documents: List[Document]) -> int:\n",
+ " \"\"\"Get number of tokens for input contents.\"\"\"\n",
+ " return sum(llm.get_num_tokens(doc.page_content) for doc in documents)\n",
+ "\n",
+ "\n",
+ "token_max = 1000\n",
+ "\n",
+ "\n",
+ "class OverallState(TypedDict):\n",
+ " contents: List[str]\n",
+ " summaries: Annotated[list, operator.add]\n",
+ " collapsed_summaries: List[Document] # add key for collapsed summaries\n",
+ " final_summary: str\n",
+ "\n",
+ "\n",
+ "# Add node to store summaries for collapsing\n",
+ "def collect_summaries(state: OverallState):\n",
+ " return {\n",
+ " \"collapsed_summaries\": [Document(summary) for summary in state[\"summaries\"]]\n",
+ " }\n",
+ "\n",
+ "\n",
+ "# Modify final summary to read off collapsed summaries\n",
+ "async def generate_final_summary(state: OverallState):\n",
+ " response = await reduce_chain.ainvoke(state[\"collapsed_summaries\"])\n",
+ " return {\"final_summary\": response}\n",
+ "\n",
+ "\n",
+ "graph = StateGraph(OverallState)\n",
+ "graph.add_node(\"generate_summary\", generate_summary) # same as before\n",
+ "graph.add_node(\"collect_summaries\", collect_summaries)\n",
+ "graph.add_node(\"generate_final_summary\", generate_final_summary)\n",
+ "\n",
+ "\n",
+ "# Add node to collapse summaries\n",
+ "async def collapse_summaries(state: OverallState):\n",
+ " doc_lists = split_list_of_docs(\n",
+ " state[\"collapsed_summaries\"], length_function, token_max\n",
+ " )\n",
+ " results = []\n",
+ " for doc_list in doc_lists:\n",
+ " results.append(await acollapse_docs(doc_list, reduce_chain.ainvoke))\n",
+ "\n",
+ " return {\"collapsed_summaries\": results}\n",
+ "\n",
+ "\n",
+ "graph.add_node(\"collapse_summaries\", collapse_summaries)\n",
+ "\n",
+ "\n",
+ "def should_collapse(\n",
+ " state: OverallState,\n",
+ ") -> Literal[\"collapse_summaries\", \"generate_final_summary\"]:\n",
+ " num_tokens = length_function(state[\"collapsed_summaries\"])\n",
+ " if num_tokens > token_max:\n",
+ " return \"collapse_summaries\"\n",
+ " else:\n",
+ " return \"generate_final_summary\"\n",
+ "\n",
+ "\n",
+ "graph.add_conditional_edges(START, map_summaries, [\"generate_summary\"])\n",
+ "graph.add_edge(\"generate_summary\", \"collect_summaries\")\n",
+ "graph.add_conditional_edges(\"collect_summaries\", should_collapse)\n",
+ "graph.add_conditional_edges(\"collapse_summaries\", should_collapse)\n",
+ "graph.add_edge(\"generate_final_summary\", END)\n",
+ "app = graph.compile()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c93f1c59-0e76-4528-9971-5aeb837b97aa",
+ "metadata": {},
+ "source": [
+ "LangGraph allows the graph structure to be plotted to help visualize its function:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "4c1704a1-d7e3-43a4-8ea8-7af765253194",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/4gHYSUNDX1BST0ZJTEUAAQEAAAHIAAAAAAQwAABtbnRyUkdCIFhZWiAH4AABAAEAAAAAAABhY3NwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAA9tYAAQAAAADTLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlkZXNjAAAA8AAAACRyWFlaAAABFAAAABRnWFlaAAABKAAAABRiWFlaAAABPAAAABR3dHB0AAABUAAAABRyVFJDAAABZAAAAChnVFJDAAABZAAAAChiVFJDAAABZAAAAChjcHJ0AAABjAAAADxtbHVjAAAAAAAAAAEAAAAMZW5VUwAAAAgAAAAcAHMAUgBHAEJYWVogAAAAAAAAb6IAADj1AAADkFhZWiAAAAAAAABimQAAt4UAABjaWFlaIAAAAAAAACSgAAAPhAAAts9YWVogAAAAAAAA9tYAAQAAAADTLXBhcmEAAAAAAAQAAAACZmYAAPKnAAANWQAAE9AAAApbAAAAAAAAAABtbHVjAAAAAAAAAAEAAAAMZW5VUwAAACAAAAAcAEcAbwBvAGcAbABlACAASQBuAGMALgAgADIAMAAxADb/2wBDAAMCAgMCAgMDAwMEAwMEBQgFBQQEBQoHBwYIDAoMDAsKCwsNDhIQDQ4RDgsLEBYQERMUFRUVDA8XGBYUGBIUFRT/2wBDAQMEBAUEBQkFBQkUDQsNFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBT/wAARCAHXARsDASIAAhEBAxEB/8QAHQABAAMAAwEBAQAAAAAAAAAAAAUGBwMECAECCf/EAFcQAAEEAQIDAggHCgoJAwMFAAEAAgMEBQYRBxIhEzEIFBYiQVFWlBUXVZOV0dMyQlJTVGGBs9LUCSM3OHF1dpKhtDM0NmJygpGxsiQ1dCZEw3ODheHw/8QAGgEBAQEBAQEBAAAAAAAAAAAAAAECBAMFB//EADMRAQABAgIHBQcFAQEAAAAAAAABAhEDkRIUIVFSYdEEEzFToSNBcbHB0uEVM4Gi8EIy/9oADAMBAAIRAxEAPwD+qaIiAiIgIiICIuhmsxFhaYmfHJYle9sUNaAAyTSOPRjQSB6ySSA0AuJABIsRNU2gd9R02o8TXeWS5SlE8fevsMB/xKifI92dHballGQc4f8At0TnClEN/ueXp2p9Bc/v6kNYDyqRj0jgoW8seFxzG777Nqxgb/8ARe+jhU7Kpmfh/vo1sffKrCfLFD3pn1p5VYT5Yoe9M+tffJbC/JFD3Zn1J5LYX5Ioe7M+pPY8/Q2PnlVhPlih70z608qsJ8sUPemfWvvkthfkih7sz6k8lsL8kUPdmfUnsefobHzyqwnyxQ96Z9aeVWE+WKHvTPrX3yWwvyRQ92Z9SeS2F+SKHuzPqT2PP0Nj9RakxE7w2LKUpHH71lhhP/dSSiZNJYKZhZJhce9h6lrqsZH/AGUb5Eswn8dpmb4Hkb18RBJpS/7pi7o/+KPlI6b8wHKWjhVbImY+Ph/v4TYtCKOwmZZma0jjDJVswvMVirLtzwvHoO3QggggjoQQR3qRXjVTNM2lBERZBERAREQEREBERAREQEREBERAREQEREBERAVYrbZfX9x79nQ4etHFC0+iabd0jvVvyNiAPeOZ46bnezqsYUeJ651JXfuDajrXozt0cOQxOAPrBiG//EPWujC8K599vrEfK6x71nRdTK5ajgsbZyGSuV8fQrMMs9q1K2KKJg73Oe4gNA9ZKpQ8IThYe7iXo8//AM9V+0XOi/Pe2NjnuIa1o3JPoCxat4SsWqOHGpNVaa0hqSanRxU+Sxt29Sjjq5FrNwHRntgeXccxa/kcWgkDdW6vx84ZXJ469biLpOzZlcI4oYs5Vc+RxOw
a0CTqSdgAse0Dwo1jNntXVa+lH8M9H5nT9unYwUmYjv035OZ2zbFWOMnsWBpfzbBnNu3zNxugv+h+N+Vy3BrB6tyehdTz5K3BVacfj6kEstt8kDXmeFrZy1sBJOxkcwj0gdN/tnwn9K0eHdrV9rH5ytXpZiPBX8ZJSHj9K2+RjOSSIO67dox3mF27XDl5j0Wb3NHcSc9wd0FpvKaEtNraYnpVczga+crM+H6sVZ8RMcjZABGJBFIYpSzmA2Pd1isHwK1fQ0tqLFVtEVdP1bmvsPqSljqV6u+GGkx9Xtm/dNAfGIHFzQNiXbML+9Bf9aeEVqTA624e42pw41IaudkvizRmip+OyCGEuYIv/Vhjeuz3c5Hmjp16LemO5mNcWlpI35T3hZLxr01qZ+tOHOsdNYPymk03cueNYmO3FWmlisVnRc7HylrN2O5SQSNweinDx84d0ia+W11pbD5SL+Lt461naglqzDo+J47T7prt2n84KDQEVBf4QPC6JwD+JOkGEgO2dnao6Ebg/wCk9IIKuWIzFDUGMr5HF3q2Sx9lnaQW6crZYpW/hNe0kOH5wUEJktsRrrEWWbNZlo5KE46+fJGx00TvV0a2cfn5h6lZ1WNRt8c1bpOqwEugnnyD9huAxkD4ep9HnWG/07H86s66MX/zRPL6z9Fn3CIi50EREBERAREQEREBERAREQEREBERAREQEREBQuoMTPZmp5LHiP4VolwiEri1ksT9u0icR3B3K0g9dnMYdiAQZpFqmqaJvB4IzEZylqGCQRbtmj82xTsN5ZoHfgyM9Hcdj3EdQSCCu18G1PyWD5sfUulmtLYvPyRy3K29mNpbHbgkdDPGCdyGysIe0b7HYHboFHO0PICez1LnYm778otMd/i5hP8AivbRwqtsVW+PX8LsT4x1RpBFaEEdQRGF2FVvIif2pz3z8X2SeRE/tTnvn4vsk7vD4/SVtG9aUVF1LojOeTmV+AtU5b4b8Ul8R8bnj7HxjkPZ8+0W/Lzcu+3o3XX0ZojUnklh/KfVOT8ovFI/hHxCePxfxjlHadnvFvy82+2/oTu8Pj9JLRvaEuu7H1XuLnVoXOJ3JMY3Kr3kRP7U575+L7JPIif2pz3z8X2Sd3h8fpJaN6wfBtT8lg+bH1Lq5fOUNOVojYkbG6Q8letEN5Z3fgRsHVx/MO7vOwBKihoiQjaTUudkbvvsbLG/4tYD/ipDC6TxeBmknq13OtyDlfbsyvnnePUZHku2/Nvt+ZNHCp2zVf4R9Z6SbHHgMVYbbtZfJMYzJW2tj7JjuZteFpJZGD6T5xLiO8n1AKcRF5V1TXN5SdoiIsIIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiIK7xGrY65w91RBl70mMxMuLtMuXofu68JicJJG9D1a3cjoe7uXR4P08Pj+FWkaun8nNmsHDi67KORsb9pZhEYDJHbgdXDY9w7+5SOv7MNPQmpLFjEnPQRY2zJJimt5jdaInEwAbHfnHm7bH7ruK6fCm5XyPDPS1qpgHaWrTY2vJFhHs5DQaYwRCW7Dbk+522Hd3ILWiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiIInVseWm0rmY8BLFBnXUpm4+WcbxssFh7Iu3B80P5Seh6ehdfQUOfr6JwUWq54LOpmUom5KaqAIn2OUdoWbADYu326D+hcfEatjrnD3VEGXvSYzEy4u0y5eh+7rwmJwkkb0PVrdyOh7u5dHg/Tw+P4VaRq6fyc2awcOLrso5Gxv2lmERgMkduB1cNj3Dv7kFwREQEREBERAREQEREBERAREQEREBERAREQEREBERARFD6h1CMKK8MMBuZC04tgrB3ICBtzPc7Y8rGgjc7HvAAJIB1TTNc6NPiJhFSTnNXk7ihhAPUbUx2/T2fVfPhzWH5Dg/epvs11arXvjOFsu6KkfDmsPyHB+9TfZp8Oaw/IcH71N9mmq17
4zgsu6KkfDmsPyHB+9TfZp8Oaw/IcH71N9mmq174zgsu6KkfDmsPyHB+9TfZp8Oaw/IcH71N9mmq174zgs8Hfwo3Ax2N1Di+KOMrk1skGY7Llo35Z2N2hkP8AxRt5N+4dk30uUZ/BdcFJM/r7JcSrsbmUMCx9LHu6gSW5Yy2Qg+kMieQQfxzT6F7Y4r6RzfF/h5nNH5rH4TxDKVzEZG2ZS6F4IdHI3ePbmY8NcN+m7eq6vBjQeb4I8NsLo7DU8LLVx8ZD7MliUPsSuJdJI7aPvc4np12Gw7gmq174zgs21FSPhzWH5Dg/epvs0+HNYfkOD96m+zTVa98ZwWXdFSPhzWH5Dg/epvs0+HNYfkOD96m+zTVa98ZwWXdFSPhzWH5Dg/epvs0+HNYfkOD96m+zTVa98ZwWXdFSPhzWH5Dg/epvs0+HNYD/AOxwZ/N41MP/AMaarXvjOCy7ooTTuo3Zh1irareI5OsGmauH9owtdvyvY/YczTykb7AggggembXNXRVROjV4oIiLAIiICIiAiIgIiICIiAiIgKlaiO/EbCj0DFXdvzfx1X/+v+iuqpOov5R8N/VNz9dWXZ2X9z+J+UtQk0WTceNcZbTF7RGHx+bh0nV1BlH07mo54Y5BTayCSVrGiUGMPlcwMaXggdehOyyOnxw13W0XXoVcna1Rm83rK9g8dnaVKoTLRrxFxmrROdFC5x7JwHO8t5jIRzANavSaoibMvWqLyzkeIPGDTuCfWyBv4sWdQ4bH4zN5/H0PGZWWZzFYjlhrSvjIZ5hDm8hPMR023Xc1fxl1hwfHErEXMm7WF7FV8RPhrlqpBDKH3p31yyVsXZxuDHsDh9zvvyl3pTSgemkXmevrLi9prF6ss5KDOy4mvprIXWZXUFDF15aV6KIvh7NtWaRsjHedu17NwWt85wJUrNqTVen+DGGz+e4iXxn9Sx49tOHGYKrYeyxIwvNerDyDne8Hq6Vzmt7Mu2aNwGkPQMkjIY3SSOaxjRu5zjsAPWSv0vGevNZ6u1t4N/FzD6kv5CrlNNZKtXNi1SqwWrNeQV5WMnjiMkTXDtfuoyNw1vd5wOkcTdZ630jn9JcPcJlc7nsxdp28nezlOhjn5B0McjGsYyOUw1h1lALuUkBo80klwaQ9CIs74I5XW2T03kGa5x1mnerX3xU7FyOvFPbq8rHMkljgkkjY/mL2kNdseQHYb7Lq+EBmNY4PSWOs6Q8cjAyUTctaxlJl27Wo8r+0kggeC2Rwd2e42ceUuIaSrfZcaciw7RHEy/nOIPD3HUdWs1Vp/K6dyV6e+ylHB43NDYrsY9zQ0GNzA97HMHKN992gjYVKrxP11qPUmGwdXU3wYclrvUGDfbbQglfFTqxzPiYwOZtzNEYAc4Hr1dz9xmlA9PIvJ8/EHiXp7RmuNT2dc/CTdFanGH8RkxNaNmTriWvzOnc1u7ZOWxsDFyAcg3B3O3e4i8QuIVOrxqzuK1h8G1dD3I3UMb8GV5Y52eJ15nxzPc3mLSXu25S1wLju4jlDWkPUSLA4ddak0DrrKYbVOtY72Jm0dY1GMraxsMXwZLDKyN/KyIN549pQ4MdzO8zbmO6qejOMGu6eos5isnkM1kKNrSN3P4vIZ/DVKE7JYXMAdHHC47xkSg8szQ8Fo33BKaUD1Qi82VNW8QtM8E9I8TsxrGXMRSR4rJ5rGMx1aOBlCVgFgsLY+fna2Zkrjzbbwu5Q1ruVaZwv1bldcav19fdcEmlqOSZh8TXbGwAyQMHjU3OBzODpXlg3JA7HoBud7FVxc8OduJUw9eIbv+f+OO3/AHP/AFV3VHw/8pc39UD9cVeF5dq/9x8IakREXGyIiICIiAiIgIiICIiAiIgKk6i/lHw39U3P11ZXZVbVuLtNyePzdOu646pFLWnqx7do6KQscXM373NdG3zdxuC7bchoPV2aYjE27p+UrDK/CQ0Tktc6RxlTG4nKZowZBtiWrisjUqyFoY8AltuN8MoBIIa
8DY7OBBaFAaH4NZ3WXDiTC8Rpb9OSllW3dOzQ264ymKYxjRG4zVo2xdoHGXblaRyuAO/o1t2sYGHZ2KzoO3UDDWjt+kR7L55Z1/krPfQlv7NdncVzN9GV0ZVb4j6NrBVMbldTakzrq+aqZ1tzJ3I5JjNXex8bOkYY2MmMbtY1u+5O4J3XZ1LwR0zrDJ6rt5mKxfj1Ljq2MvVHyARCOB8j43R7AOa8OlJ5uY7FrSNtutg8s6/yVnvoS39mnlnX+Ss99CW/s1e4r4TRncq2K4JQUtP5/EZDWGq9RVsxjpMW92YyDJXV4Xtc0mMCNrefZx89wc47Dcld3UnB7Eak0bp7T7r2Sx50++vNjMpRmYy3WlhjMbJA4sLCSxzmkFhaQ49FOeWdf5Kz30Jb+zTyzr/JWe+hLf2adxXwyaM7lKpeDnpqDCa0xVy/mczW1fHGMq7I3BJI+VjC3tmODQWvI5Og80dmzla0Ag/cl4PuPy+Nwrbmq9UTZ7DTSS0NTeOxNyUDZGhskXOIgx0bg0btcw77b96unlnX+Ss99CW/s08s6/yVnvoS39mncV8MmjO5ANxmrNBYehi9NVG6zY0ySWMhqjUEkFkvc/m721pA4dT0AaGgAAbd3Xuaf1dxFx5qahdNoF1Wdlird0jnzYmldyva5kglqMbybOB5SHAnY9C0FT+Q4hY3FULN27TzNSnWidNPYnw9pkcUbQS5znGPYAAEknuAX4xPEnE57GVcjja2XyGPtxNmr2q2IsyRTRuG7XNcIyHAjqCE7jE4ZTRlUofBw09jcVpqvh8tnMFfwJteL5elaYbcosv57ImMkb2P7R+zju3oQOXlXNpXwd9OaRt4KzUyGYsS4fM3s5A65ZbK6Se3E+OUSOLOZzQJHEdebfYlzuu9y8s6/wAlZ76Et/Zp5Z1/krPfQlv7NO4r4V0Z3KvkuBGAymlNY6fluZJtLVOWOYuyMljEkcxMJ5YyWbBn8Qzo4OPV3Xu2/eZ4HYHOYfiFjZ7eRZBrd4kyLo5Iw6I9hHB/E7sIb5sbT5wd1J9HRWXyzr/JWe+hLf2aeWdf5Kz30Jb+zTuK+E0Z3IHVXBbTutMzPkMt41ZFjT9jTUtXtA2J9WaSN73dG8wkBibs4OAHXpvsRA0fBvxMGUjydzVOqMzkWYuzhjZyN2KQupzMDTEWiINHKWh4cAHFwHMXDor55Z1/krPfQlv7NPLOv8lZ76Et/Zp3FfCmjO5WdWaVt6a4LN0dprBv1U2PFswUVW7bjg5oOx7HtJpCACA0Au5W7nc7NUnwc4dw8J+GGnNJxPbM7GVGxzzM32lnO7pZBv186Rz3dfWpPyzr/JWe+hLf2aeWVc92KzxP9S2h/wDjTuMTx0ZXRnc7WH/lLm/qgfrirwqppXG2rOYtZ23WkoiWuyrXrTbdqGBznOe8DflLiRs3fcBoJ2JLRa1x9pmJrtHuiEkREXIgiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiIKNx1/kR4hf2dyP8AlpFEeC7/ADcOGX9naP6lql+Ov8iPEL+zuR/y0iiPBd/m4cMv7O0f1LUGoIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiCjcdf5EeIX9ncj/AJaRRHgu/wA3Dhl/Z2j+papfjr/IjxC/s7kf8tIojwXf5uHDL+ztH9S1BqCIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAi432Io3cr5WMPqc4BfnxyD8fH/AHwraRzIuHxyD8fH/fCeOQfj4/74S0jmRcPjkH4+P++E8cg/Hx/3wlpHMi4fHIPx8f8AfCeOQfj4/wC+EtI8Y+Gp4ZGR4RZPUvDa3oA3KmbwskdPOfC3Zh8c8Lo3P7LsD1Y/nHLz9eUHcc3SM8BzwxrvEG5ozhJV0E+OviMOIbeeblecRxV4eUSmHsRsHydmzbn6GQdTt1vn8IXwVg4t8Fpc3jgyXUWlee/AGEF
0tcgeMR/3Wh49O8ew+6UT/BwcE4eGnCN+rcoyOLPaq5Z2CQgPhpN/0Le/pz7mQ7d4czfq1LSPYKLh8cg/Hx/3wnjkH4+P++EtI5kXD45B+Pj/AL4TxyD8fH/fCWkcyLh8cg/Hx/3wnjkH4+P++EtI5kXD45B+Pj/vhfplmKRwa2VjnH0BwJS0jkREUBERAREQEREBERAREQEREBERAVa15kbFHF1IK0zq0l+5FUM0Z2exrty4tOx2dytIB9BO6sqqHEb/AEWnv63h/wDCRdPZoicWmJWPFFt4faYA87T2Mld6XzVGSPcfWXOBJP5yd19+L7S3s3iPcIv2VLZLJVMNjrN+/Zip0qsbpp7E7wyOJjRu5znHoAACSSqPp/j9oLU1HJ3aOeDaWNq+O2bVypPVibB+Na+VjQ9n+80kFd/f4nHOZed6xfF9pb2bxHuEX7KfF9pb2bxHuEX7KplnwgNN53Q+ssppLJR38tgcRPkxSv1J6ziGxPfG8xytje6NxZtzN6H0FceL4rZa7q7hZipK9IV9U6ftZW65rH88csUdVzWxHm2Dd537hwcejeo67zWMTjnMvO9d/i+0t7N4j3CL9lPi+0t7N4j3CL9lV6Dj3oKzqluno9QxOyT7Rosd2EwrPsAkGFtgs7J0m4I5A8ncbbb9FyXuOmh8frA6XkzfaZptiOpJDWqTzRwzPIDI5JWMMcbySPNc4HqE7/E45zLzvTvxfaW9m8R7hF+ynxfaW9m8R7hF+yqbwy49YviPrLVmnIqV6nbw2SlpQvfRsiOeOOOJzpHSuibHG7mkcBGXcxDQ4bhwK02zZhpVpbFiVkEETDJJLI4NaxoG5JJ7gB6VYx8SfCucy870J8X2lvZvEe4Rfsp8X2lvZvEe4RfsqrY3wieH2W09lc5Vzr34nGNhfZtOx9pjQyZ/ZxPYHRgyNc7oHMDh0J32Vh1HxK03pLI2qOWyPilqriLGdmj7CR/LSgLRNLu1pB5S9vmjzjv0BTv8TjnMvO92Pi+0t7N4j3CL9lPi+0t7N4j3CL9lUxvhO8Nn2hWj1BLLZki7evDFi7b33I/w64ERNhvp3i5gACe4KTt8etB0sLgcs7Pslo55spxjq1aad9sx7c7GMYwuLwTtybc24I23B2nf4nHOZed6wfF9pb2bxHuEX7KfF9pb2bxHuEX7KqGI8JXhvnbVCClqQSuu2RSje6lZZGywXFogle6MNhlJGwjkLXHcbDqFI5TjtobCaybpW/nPFc0bEdTklqTiETSAGOMz8nZB7g5uzS/c7j1p3+JxzmXnenvi+0t7N4j3CL9lPi+0t7N4j3CL9lQ1/jXo7HaysaTkyc0uoq8sMU1Ctj7M74jK1ro3OMcbg1hD27vJ5RvsSCoLSnHjBWtD5PVebzlBmIjzNjHVJKtK3DKQ1/LHA6CVgldY23DmsYRuDsNgU7/E45zLzvXb4vtLezeI9wi/ZTyA0wAeXTuKYT98ynG0jruNiBuOoBVbHhA6AOlp9QHULG46C2yhIx1acWW2XdWQmsWdtzuHUN5NyOoGyt2ltUYzWeCq5nD2HWcfZ5uzkfC+JxLXFjgWPAc0hzSCCAeivf4k/wDc5l53pHQd6eerlKE877Rxl11Rk0ri+RzOzjlaHuPVxAlDeY7k8oJJJJNnVO4e/wCv6w/rgf5OqriuDtMRGLNuXrEE+IiIuZBERAREQEREBERAREQEREBVDiN/otPf1vD/AOEit6qPEVhNbBP+8jy0BcfVuHNH+LgP0rq7N+9SseKgeEfozK8QOCmp8HhIW2snYiikiqveGCz2c0crodz0HaNY5nXp53XoqHxP1DkOOHCjM4XB6I1RQvVPE8i6jm8YaUdrsLUUr6jXPOz3ObG4Dl3YenndV6GRe8xdHmfUuPznG7WefzOH0xmsHQg0Pk8GJM9SdRlu27RaY4Wsk2JYzkJL/ud3dCe9cmFhzc2W4EZx2mc/Tr1MLe09kGvouFjG2JGVo2SSx97Yuau
89p9ztyu7iF6URTRHkDg9wyoUsLpzQms9KcRJc3jLbWTyMyF9+Bc6KUyRWmu7YQchLWP5QOYOP3PTdX3g9nMlwkdk9FZnRupbeSn1DctR5nHY11indis2XSNsyTg8rC1jwHteQ4CPoD0C9BIkU2GLcLZ7+juLPETAZLA5hrc9nnZihloqL3498LqcLSHTjzWPDoXN5XbEkt233Wt52GCxhMhFaqOyFZ9eRstRjeZ07C0gsA6blw3G3518z2AxmqcTYxeYoVspjbAAmqW4hJFIAQ4czT0OxAP6FVsRwM4dYDJ1sjjdDafoX6zxLBZrY2JkkTx3Oa4N3B/OFbTGwecJdPax1Bwx13o/TOG1VNomriac2Go6ro+LXq9mKy176UDnbOmjEUY5S7m2OzQ4hTvEi3l+J2sNU5LFaQ1PXx54Y5vGwzZDETV3T25HwlsDGOHMXnboNvO68vMASvVSLOiMNxunMpHxO4K2nYu42rj9K361uc13hlaV0dINjkdtsxx5H7NOxPK71FUrhpozPY/WHDKWzgsjWr0dSatmnfLUkYyvFM+YwvcSNmtfzDlJ2DtxtvuvU6K6I8sZ3Rmel4K8TqcWCyL79riJ4/VrspyGWaD4Wqv7aNu27mcjXO5x05QTvsCq/wAbMbrHVcevKeSxGustmoMzDNhaWJilbhm42GaGVsnmERzylrZCWu55OflDWjYFex0Umm4yfhphLdXjfxcy8+Os1qmSfiPFbc9d0bbDWU9nBjnAc3K4kEDuO4OxWI5fhvqJjK+fsYLUljF4jiHqC9cx+FknqZGSpZfIyK1XMbmSPDeYHzDu5j3bbglexkVmm48y29EaLs6HymdbpPibFYuZeo4XpfHLOahmrseYLkcc0j5WsZ2j2dW7ncgsLdita4E5TVmY4cUrOs4Z4sx207GPt1m1rE1dsrhBLNC3pHI6MNLmjuJ7h3LQEViLDo8Pf9f1h/XA/wAnVVxVQ4fMItark72S5fdp2PXarXYf8WkfoVvXh2n92fhHyhZERFyoIiICIiAiIgIiICIiAiIgLr5DH1srSmqXIWWK0zeV8bxuCP8A/eldhFYmYm8Cnu4f2mHlg1dnIIh9zHy1JOUermfA5x/pJJ/OvnkBf9s838zR/dlcUXTrOLyyjo1eVO8gL/tnm/maP7snkBf9s838zR/dlcV+ZHiONzyCQ0EkNBJ/QB1Kazicso6F5VDyAv8Atnm/maP7ss1uZqzrDK610foDiFen13p2CJ0jctj4DQimeSRHI9lZpJ2HXlPTmB67OAm6+Ty3hEaOxOU09lNT8M6MGZ7SbxrHsht5KrEdwGCTcxxyO5TuR1Ac0tIK1uKrDXlnkihjjkncHyvY0AyODQ0Fx9J5WtG59AA9Ca1icso6F5UbDcPM/FiqjMrrrJWsk2JoszU6dOGF8m3nFjHQvLW79wLifzru+QF/2zzfzNH92VxRNZxOWUdC8qd5AX/bPN/M0f3ZPIC/7Z5v5mj+7K4oms4nLKOheVO8gL/tnm/maP7suvkeHuakoWWUdcZWvddG4QS2KlOWNj9vNLmCBpcAdtwHDf1jvV5RNZxOWUdC8sAr5q/oGXRWnOJfEO1X1nqWaatWdhcdCKEsrXjkY1z67i0lr2bcxG7ubbZad5AX/bPN/M0f3ZWyerDZdE6aGOV0L+0jL2gljtiOYb9x2JG49ZWUZC9mOAGmdWajzeV1JxHxc2TFuvQrUY5beNryOHaNHLymSNhLndw5WtAA6EprOJyyjoXla/IC/wC2eb+Zo/uyeQF/2zzfzNH92VsqWW3KsNhjZGMlY17WyxujeARvs5rgC0+sEAj0rlTWcTllHQvKneQF/wBs838zR/dl+maBt77S6uzczD3sLKjN+vrbACP0FW9E1nE5ZR0Ly6mLxdXC0IaVKEQVohs1gJPedyST1JJJJJ3JJJJJK7aIuaZmZvLIiIoCIiAiIgIiICIiAiIgIiICIiAiKl8Y9Q6o0pw4y+W0ZhW
6i1JW7F1XFvBIsAzMEjehBB7MvIO/QjfY9xDu8QNYWtJ6TzmRw2Gn1XmsdXbPHgqErRYnLjs0de4HZx32JIY7YOI2NdxXDp+rNX6S4i6idlsRqGhiex8m48kX0KliVp7Zxaw8sjwHFnNvykNadtwCJnR/DDTmktR5/U+OxQq6g1G+OfJ2nyvkke5rQAwFxPK0dTyt2G57u7a4ICIiAiIgIiICIiAiIgz/ACfDZmI4g5biNibGXu5yXDOpHA/CJZRuvZ50J5Heax4PM0O6NHaOJG5JMjw01rkdX6Ow2R1FgJ9HZ662QS4O9Mx0rHscWu5SPumnbmB2B5SCQFb1U9Y8LdM69zWm8vmsaLOU07b8dxltkr4pK8nTfZzSN2nYbtO4Ow3HRBbEVH4M6k1Xq3QVXJ61wTdN5+WxYbJjWgjso2zPbEepO5LA07+nfuCvCAiIgIiICIiAiIgIiICIiAiIgIiICIiAqRxqxmZzPDDOU9P6nh0ZmJWRivnLDg1lUiVhJJPraC3/AJld1mHhMeR3xHao+MDxzyQ7OHx/xDftuXt4+Tl26/d8n6N0GlVWubWhD3iV4YA54++O3euVcFHs/Eq/Y79j2beTfv5dun+C50BERAREQEREBERAREQEReEf4Ufgi/P6TxHEzHRF9rCAY7JbdSar3kxP/oZK9w//AHvzIPWXBDFZvC8OqNTUOq4da5Vs1gyZmu4OZK0zPLWgj8BpDP8AlV8X8dv4P7gvPxW4+4rKStkZhtKSR5izMzoDMx4NePf1ukaHbelsb1/YlAREQEREBERAREQEREBERAREQEREBERAVI41ZPM4bhhnLmn9MQ6zzETIzXwdhocy0TKwEEH1NJd/yq7qkcasZmczwwzlPT+p4dGZiVkYr5yw4NZVIlYSST62gt/5kFyquc6tCXsETywFzB96du5cq4qrXNrQh7xK8MAc8ffHbvXKgIiIK3mtV2K199DE48ZO3CAZ3Sz9hBDuAQ1z+VxLiDvytadhsTtu3eO8qNW+zmH+mpf3VdXTp5srqgnv+Fngn0naKID/AAAH6FOL6uhh4dqZoifDfu5TDWyEb5Uat9nMP9NS/uqeVGrfZzD/AE1L+6qSXTOZx7cu3Em9WGUdAbTaJmb25hDg0yBm/NyBxA5tttyAnsvLjOrqX5OHyo1b7OYf6al/dU8qNW+zmH+mpf3Vc2IzOP1Bjochi71bJUJwTFapzNlikAJB5XNJB6gjofQu4nsvLjOrqX5I3yo1b7OYf6al/dU8qNW+zmH+mpf3VSSJbC8uP7dS/JG+VGrfZzD/AE1L+6qL1Q/O6y05k8FltKYW3jMlWkqWYXZuXz43tLXD/Veh2Pf6FZkS2F5cf26l+TDfBi4LZjwZ9CWMDRxeIzF65bfauZN+TkhdMe6NvJ4u7ZrWgDbmPUuPTm2GweVGrfZzD/TUv7qpJEtheXH9upfkjfKjVvs5h/pqX91Tyo1b7OYf6al/dVJIlsLy4/t1L8kb5Uat9nMP9NS/uqeVGrfZzD/TUv7qpJEtheXH9upfkjfKjVvs5h/pqX91Tyo1b7OYf6al/dV2clkqeGx9i/kLUFGjWjMs9mzII4omAblznOIDQB3krnilZPEyWJ7ZI3tDmvYdw4HuIPpCey8uM6upfkj/ACo1b7OYf6al/dV+ma1y+P8A47NYOvVoN/0tihedaMQ/CcwxMPKPSRvsOuykF08y0Ow94OAcDBICCNwfNKsU4VU20IznqXjcZfi/orCaQy2qbGp8bJp/EyCG9fqTizHXkJYAx3Z8xDt5Gebtv5w9aiMtx0wVE6Dkx+OzmoqWszG7HXcNjnzwxRP7LaawTsYWATNcS4bgB3TzSF2+FOgNNaa4fY+ti8BjqFfIwQ3bkUFZjW2J3MYTJINvOduB1PqHqV6a0MaGtAa0DYADYAL5ldOjVNO5mVKxms9TX+JWd0/NomzS07QqiWrqeW7GYbsxEZ7JsI88bc793HpvGR6QqjW4z6j
0HpzT9jinpmPEZXO6hiwVZmn5hcrw9q0dlLM9xBa0uEgJAO2zfWtkUFro51ujc2/SzKsmpY6cz8Yy43eF1kMPZtd1bsC7Yb7jbdYE6ih9HTZmxpPDSairw1c+6nCchDXfzxMscg7QMPpbzb7fmUwgIiICIiAsw8JjyO+I7VHxgeOeSHZw+P8AiG/bcvbx8nLt1+75P0brT1SONWTzOG4YZy5p/TEOs8xEyM18HYaHMtEysBBB9TSXf8qC4Uez8Sr9jv2PZt5N+/l26f4LnXFVc51aEvYInlgLmD707dy5UBERBQNOf+6ao/raT9VEs0zGc1jxE4v6l0lp3U/kZi9MUqctm1BQhtWbliyJHtH8cHNbG1sfXZvMST1Gy0vTn/umqP62k/VRKsa04KY3Vuqm6lp5zPaUzrqwpWLun7bIXW4GklrJWvY9ruUuds7YOG52K+ti+OXyWfFSZ8pxC1xr3V2msNrSPTbdHUqML7TMXBMcpdmr9s6SUSBwjhA5Ryx7Hcu87oFC8INey8UOMmhdV2K7atnK8N5bE0Me/K2Tx+AP5d+vLzA7b+jZX/UXg8YjO2zag1JqfCW7GOhxeRsYzIhkmUgiaWs8Zc9ji54DnDtG8r/OPnKYg4Ladx2f0llsSbmEm01Rdi6sNCflimpkN/iJmuB52Asa4dQeYb7rwtKML4f6su4DwZOGGPw+oMnh87lJbEVWrhMVDkLt0Nkmc9kbJj2bA0bOdI/zQBt0Lguzj+L2vtQ8PtI1nZiTBail1/JpTIZB2PrmaSBjLB3dD58bJCGR78hIDmnYlp2Ok1/Bm0/jcbjamKz2osQ/FZCzexVqpcjMuPbYbtNWi543DsXd/K8OIJ6EdNqvrXwbJKOH0ziNKX86+u7W0WocjdkyMZtVAa0zJp45JBu4l5Y4g85Lnu2HLuBm1UQOhqDW/FLT8/EDRuKyUmq85h4cZkqeWjx8HjwpWJXtsN7FobDJMxsT3MHKA7fuJAB6eY43Z+TA6HwOktRZPV2X1DdyEdjL1sRUhyVRlRrXSQGrO6GFk4MjAecDZocQw7hajiuAOMw2GzletqbU7c3mrENi9qY5BvwnIYduyZziPkDGjcBgZy7OcCDuumfBl0v5PQUW5LOR5eHKy5uPUsd0NybbsjQySXtAzk85gDCzk5C0AcvRW0jvcD8try9UzlbW+PvQsq2WfBl/Jw1YLVuFzAXdrHWlkjDmPBG7SA4Fp5Qd1zcfMrrHDaIgs6MbZ8aGQgGQmx9Rlu5BR3PbSV4X+bJIPN2aQehdsCQF2YMHqjh7hYKWnGya5nmmkmtXNV550EzSQ0NDTHWkby9D5oawDbpvuV1reE1lxDpux+oIjoSOF7bEGS0nqJ09l0g3HI5slNjeQhxJB5gSB09K17rCoaS4n38trLhPQx2sPKrC5rH5uW9eNCOs+1JXfXEQfHygxPj7R7HNHLuQd29wFascUdc5bNQ4mlqMY51riRf04LPiMEpiox0nyNY1pbsXNc3cOO5325uZu7TosPg36eoYnA18Zl87icphrVu5Bna1pjrssto72TKZI3Mf2h2JBZt5rdttlyad8HTTumn42SDJZq1LR1FPqZsly0yV8tuWB0LxI4s3czZxO2/NzffbdFLVDIMxr/iZpfSPFDPya7ORHD/LitFWlxFVgycIZBM4WHNYCHcs/IDF2e3Lud99hNcRtccQI8lxwu4fWJw9HQsFe7j6DMZXmbPvj47Ekcr3tLiwuDtuXZwLz5xADRqOa4EYDO6a1/hLFzJMqa0tG5kHxyxiSJ5iij2hJYQ0bQt+6DupPXu27OW4MYTMxcRY5rV9o11A2vkuzkYOxa2qKwMO7DynkG/nc3nfm6JaRn2I19qfR+tKFTVOsIshhczpK3n5LVjHxQtxUsBhLyzswC6HlmJ5XlzvMHnHcqH4R8VtZz8TcVhsxkszm8Bn8LayNG9nMLVxry+F0RD4GQuLuyc2X7mZoePNO53K1rO8FdO6lu0
Z8kbdmKrgrWnTVdI0RzVbAjEnPs3m59omgFpG256d20RprwesXpzUmCzsmp9TZnI4avLSqOyl2ORgrSM5DCWNiaNhsx3MAHksbzOcBslpGPxZniFqPwPMnr/Na6fZyVrTcl3xAYag+oQ0F2z2Phdzl7W7PB83zzytGwKl9QcT9fal13l9NaSiztKlpuhQEsunsbjbJmsWK4mHai3NGGxhpaA2Ju5If5w2AWvV+C2ErcFTwwbayBwBxbsT4yZGeNdk5paXc3Jy82x7+Xb8y6Oo+AeJzWoGZzHZ/UOlMu6nHj7dvA3GQOvQxgiMTB0bmlzdzs9oa4bkA7bKaMjL9QcSOJuIuaRu61yE/DTBTYqL4QvUsXBfrMyfbuY+O28l/YROZ2Za4EAF5Bk6L0jlzviLpHd2D/8AxKz7XPAbHcQa8FLJan1RHhxRix9vFV8kBXvxMJP8dzMc4udvs57XNc4bAnotAyrQzDXGtADRXeAB6ByleuHExVF1jxSmhf8AYjT39XV/1TVOKD0L/sRp7+rq/wCqamP11pvLahs4CjqHFXc7WjM0+Mr3YpLMTA4NL3RB3M1oc5oJI23IHpXDjfuVfGSfFOIi62TtvoY23airSXJIYnyNrw7c8pAJDG7+k7bD+leSMy4G0tLady3EPTundSXc7bq6glvZKtc5nDHTWWtkFeN5aA5gA/CcQdwSD0WrKicGH2cpoanqHK6Pq6J1Jnd72VxteMCTtj5odK7la5zyxrN+Ybju3O26vaAiIgIiICpHGrGZnM8MM5T0/qeHRmYlZGK+csODWVSJWEkk+toLf+ZXdZh4THkd8R2qPjA8c8kOzh8f8Q37bl7ePk5duv3fJ+jdBpVVrm1oQ94leGAOePvjt3rlXBR7PxKv2O/Y9m3k37+Xbp/gudAREQUK61+kcxlZbFazNjshY8ajsVa75+zcWMY5j2saXDq3mDtttiQSNhvweXeJ9WR+i7X2a0RF3R2imYjTpvPxt9JavHvZ35d4n1ZH6LtfZp5d4n1ZH6LtfZrREV1jC4Jz/BsZ35d4n1ZH6LtfZp5d4n1ZH6LtfZrRETWMLgnP8Gxnfl3ifVkfou19mnl3ifVkfou19mtERNYwuCc/wbGd+XeJ9WR+i7X2aeXeJ9WR+i7X2a0RE1jC4Jz/AAbGXYbivpjUVBl7FXp8nSeXNbZp0bEsbi0kOAc2Mg7EEH84Xd8u8T6sj9F2vs10fBpyul8zwixlvRunrWlsA6xaEOMub9pG8WJBI47ud908OcOvcVqSaxhcE5/g2M78u8T6sj9F2vs08u8T6sj9F2vs1oiJrGFwTn+DYzvy7xPqyP0Xa+zTy7xPqyP0Xa+zWiImsYXBOf4NjO/LvE+rI/Rdr7NPLvE+rI/Rdr7NaIiaxhcE5/g2M78u8T6sj9F2vs1+LGofKCpNQw9O9YuWGOiY6alNBDFuNud8j2BoA3326k7dAStHRNYojbFM3+P4gvD+XvhwaL496Bs3LFzVGVzHDAu7Co7FSmCvWgJ2jgswx7dWjZokcCHdPO5jyiG/g4Is5p3UnEbXuHwcuqXYXCQY84WnKGW7L7NqNzTHzDl2aytK525B6NAB3O39VbtKvkac9S3BFaqzsMcsEzA9kjCNi1zT0II6EFZvwi8HXRnA3P6ryejqk+Mi1G6u+zju1560Doe02MII5mhxmeSC4gdA0NA2XDMzM3llz5PjdjdO+QMOcwmcxmQ1f2ccFUUXTeIzP7ICKy5m4iIdKBuenmv/AASoDiNq1nFLP5rhTpHVt/Ses8aauQvXYaMo5KrXwyOZFN0ZzObLGOhd0LgQRvtsyKAiIgIiICIiAqRxqyeZw3DDOXNP6Yh1nmImRmvg7DQ5lomVgIIPqaS7/lV3VG43Y/LZThbnq2C1TBonKvjjMOesvDI6m0rC5ziegBaHN/5kF0quc6tCXsETywFzB96du5cq62OmbYx9WVlhlpj4mubPG4ObICBs4EdCD37/AJ12UBE
RAREQEREBERAREQERfiaaOvE+WV7YomNLnvedmtA6kk+gIKdwgta2uaEpy8QqdKhqkyzieDHkGEMErhERs5w3MfIT17ye5XRZr4O+LoYfhVjauN1q/iFTbPZczPvl7QzkzvJbzczt+Qks7/vfQtKQEREBERAREQEREBERAREQEREBERAREQF0M9gcbqjD3MTl6NfJ4y5GYrFS1GJI5WHvDmnoV30QZhW0fqfh3mtB4DQNHB1OGlCGWpk6Fl0vjULduaOSJ+55jzAgh3UmQk797bboniJpriRjrF7TGbp5urWsSVJpKknN2crDs5rh3g9Nxv3ggjcEFWJUHX2gMzY09aj4d5ejobPWMizJWLjcbHNFdeNg9s7OhdzgNBeDzeaOqC/IqVheLeBzPE3NcP2Pts1NiKkV2eOanJHDLC8N/jInkcrmguDT179wN9jtA8LPCV0Jxm1rqrTOlMm7I29PFna2Whvi9tp3Dn13hxMjGuHKXbAEkFpc1wcQ1NERAREQEREBFlvGPwkdGcC85pPFaoszQ2dSXBUrui7MR1m8zWusWHve0RwtLxu7qdgdgdjtO664p0dC6k0ngpcXl8rkNSXDVrtxlJ0zIGt2Mk0zx0Yxgc0nrvsSQCASAkOInEXT3CnSV3Uup8gzGYioBzzOaXOc4nZrGtaCXOJ6AAKFZQ1VqnXDrc2Qw8/C63hxG3EyUXut25pfunSl+wawM2Abt1Ejg5u4BXJoXQGcwl/VVjVGq59YQ5bJeN0aVqrHHBjYWH+KijaB1I2YS7pu5ocACXF17QRemdMYnRmCp4XBY6vicTTZ2denUjDI42/mA9JO5J7ySSepUoiICIiAiIgIiICIiAiIgIiICIiAi62SutxuOtW3NLmwRPlLR6Q0E7f4LO8fpahqXHVMnnK7crkLULJpH2CXsYXNB5Y2k7MYN9gAB6zuSSenCwYxImqqbRn9YW29pqLOfi50x8hUfmQnxc6Y+QqPzIXvq+FxzlH3LsaMizn4udMfIVH5kJ8XOmPkKj8yE1fC45yj7jY0ZFnPxc6Y+QqPzIT4udMfIVH5kJq+FxzlH3GxlHh88R9b6F4VQ0NA4bLWMtnHyVrmZxlB8/wfTa3+M/jWHeKR5ewMcQfNEpBa5rSv5l+Dxxcv+D7xkweqeynbBWl7DI1Ni101V/SRux23O3nN36czWn0L+xfxc6Y+QqPzIXHNww0lYG0uncdKPU+u0pq+FxzlH3GxoGMyVXNY2pkKM7LVK3CyeCeI7tkjc0Oa4H0ggg/pXaWcM4baWjY1jMBQaxo2DWwgAD1L78XOmPkKj8yE1fC45yj7jY0ZFnPxc6Y+QqPzIT4udMfIVH5kJq+FxzlH3Gxoy4rVmGlWlsWJWQQQsMkksjg1rGgbkknuAHpWffFzpj5Co/MhPi50x8hUfmQmr4XHOUfcbH8hfCk4zW/CF425jPwCWfGNf4hh4GsJIqxuIZs3bfd5LpCPQXkepf0J/g4tVazucJruk9WaZyWIr6cfG3GZTIRTx+OwzGR5jAkGxMXKBuw7cskY5QRu/bIeF+ka42i05joh/uV2j/suX4udMfIVH5kJq+FxzlH3GxoyLOfi50x8hUfmQnxc6Y+QqPzITV8LjnKPuNjRkWc/Fzpj5Co/MhPi50x8hUfmQmr4XHOUfcbGjIs5+LnTHyFR+ZCfFzpj5Co/MhNXwuOco+42NGRZvNo3HYmtLZwtduIvxNL4Zqu7BzDrs5o6OadtiCD0/wCqvGncr8O6fxmS5QzxyrFY5R3DnYHbf4rxxcGKI0qZvHwt9ZS25IIiLlQREQEREBERAREQReqv9mMx/wDDm/8AAqvaZ/2cxX/xIv8AwCsOqv8AZjMf/Dm/8Cq9pn/ZzFf/ABIv/AL6OD+zPx+jXuUuHwhdA2tXV9M1s463mLFx1CGOvSsPiknZv2jGzCPs3Fmx5tnHl2PNtsuZ/HzQMeqvJ52oYhkvGxQ5uwm8W8Z327Dxjk7HtN+nJz8
2/TbfovOPD8TYbUGh9G6sGSwGmNNapnmwdi7p25DLesvknZWiltFpgG5ncd2OPaeb3EldvhdwupY7DY/h9rjTHEa9lq+RdHPPVyF84Ky3xgyx292zCBrfuXluwcHA+aSsRVMst9t+ENw+oZmxi59QCO3WvfBtk+J2DDWs8/II5pRHyREuIAL3AO9BK7es+OOiOH+XOLzmcFW8yITzRxVZrArRnfZ8zo2OELTsdjIWjYbrE9VaMz1jgNx8oRYLIy5DJanvWaFVlSQy2mF1cskiaBu8HlOzm7jzTt3Lg1Bo92l+KXESTU2n+IWao6htx38ba0bduivYjNdkTq07K8rGMe0sIDpdgWkecANk0pG5Z3jnonTucgw1rMumylinFkIKlClYuPmrSuc1krBDG/mbux25G+w2J2BBPwcdtDeWrdJuznZZx1k0mxS1J2RPsDfeJs7mCJz+h80O3/MqjoHQDNIcfJxjsNbp6do6GxuKo2JmPexgjs2CYBK7fmc1vZkjmJ25SfQsh1vR1hqHJCzm8RrvJ6jxWtK1/sKkE3wNWxkN5ro3wMYRHO7sQ09A+UOLtwACrNUwN40Hx6xeuOJOrtHspXatrCXvE4ZnUbPZ2A2Fr5HOkMQjj2c5zWtc7zg0ObuHBSOE496C1HqSHBY/UMU9+eV8FdxgmZXsyN35mQzuYIpXDY9GOceh9SoNChl8bxL4u6alxGYrHWL2WMTnq9KSSiwHHMhJkmaCI3NkiI2dsTu3bfdVDgnoPFSQaG03qXSHEatqHT7oHym/kL0mErWqrN2TRudN2Do3OZ5jYwducDlA3S8j1avP2nvCWl1VqTXlmtJXx+k9LCSFwtYLIvtzPbHGe1LmsDWtD5ADEGOk5Wl3mggr0CsS0Hp7KU8BxyjsY25BJkdRZGekySB7TajdRrta+MEee0ua4At3BII9C1NxM43wgtL43TemJNRZuvJm8tha+YbDhsfcmbYikbuZYIhG6Xk33OzhzNGxcApjNcc9EYHTWGz9nN9ticywyUJ6NSe2Z2gAkhkTHOAG433A29Oyy/gZpXNYjW3Dmxfw9+lDU4WUsdYlsVXxthtNlhLoHkgcsgAJLD5w2PRVDTFDV2m9BcPcRk8drLG6TE2bdkq+mKs7Mh25vyOqMk7MCaKF0bnuDmbA+buQ0hY0pG25rjFFPl+Fz9M2KOWwOr8hNXfdAc49kypNMDGQ4crueIA8wO3nDYHu05eQ9CaY1HpTRPCy1Z0pqD/6Y1nlHX6Dq7prkVez42I5gAT2rB4xGXPYXDq47nYr14tUzfxFKocZdH5XW82kqeXNnOwzSV5IYqsxibKxhe+LtuTsudrQSWc2427lzVOLek72mdN6hgyvPh9RWYaeLs+LSjxiWUkRt5SzmbuWnq4ADbqQskxYy+n+PXi+isLqrH4jKZezLqenlseW4hw7N3/rqtg90j3tZ5jHEO5iS1pG6pOm6moKvDbgzoOXRupGZfTGp8f8K2XYyQVIYoZZAZWzbcsjCCHBzNwB90W+maUjb7nhOcNMfYfFZ1M2AR25aEk76VkQR2Y3Oa+F8vZ8jZN2O2YXAuGxaCHAmRh49aEl01l88/PCrjMPYhq5F9ypPXkqSSvYyPtYpGNkYHGRuzi3l2JO+wJGMN0ZnviegpHBZHxwcTvhA1/E5O08W+GjJ2/Ltv2fZ+fz93L132X3jJo3PZTN8aX0sFkbcWRZpLxR0FSR4smG6503Z7Dz+RuxdtvyjbfYKaUjUZPCh4axOuMfnrLLFNoksVnYi6J4ott+2MXY84i269rtydR53UKY1bx10PoerjLOWzfJXyVbx2rNUqT2mSQbA9qTCx4azYg8zth171WrWn8hJ4QGtMh8G2XY6zoupUitdg4wyzCxbLomu22c4BzSWg77OHTqFkWAx+rsfpDhzgc9i9cwabh0ZWijx+mYZoJ35QbtfDbezlfC1rOz5Q9zI9y7mPTZW8j0PqTjZorSYwXwjm2752s+3ix
UrzWjdiaIyTEImOLztKwho6kHcAgHbsUeLuk8hpnP6gjyhjxWA7QZOSzVmgfVLImyuDo3sD9+R7XDZp336bnosL4M6PzlPIeD8clgMlUfgdN5ijedbpvaKc4NaNrXOI2bzBj+Q7+e3ct3C5uNWjcha464nTVCNr9P8SWV351u/VgxkjZZHf0TQujhP/CE0ptcelJZ2WcY+aMkxyQl7SWlp2Ldx0PUfpXb4c/ye6X/AKrq/qmrguf6nP8A/pu/7Ln4c/ye6X/qur+qatYv7P8AMfKWvcsSIi+cyIiICIiAiIgIiII3UsbptOZWNgLnuqStAHpJYVW9LvEmmcQ5p3a6nCQR6RyBXZVCfQU1eVww+bs4mo4lwpiGKWKMnv5OZu7Rv97vsPQAOi7cDEpimaKpt7/9ZqPCyi43weeH2J1NHnq+nx8IxWjdi7W5YlgisEl3asgfIYmP3JPM1oIPULRlH+RWc9rJvcIfqTyKzntZN7hD9S947qPCuMp6FuaQRR/kVnPayb3CH6k8is57WTe4Q/Ul8LzI9ehbmkEUf5FZz2sm9wh+pPIrOe1k3uEP1JfC8yPXoW5pBcVqrDerTVrETJ68zDHJFI3dr2kbEEekELqeRWc9rJvcIfqTyKzntZN7hD9SXwvMj16FuamDwduFrSCOHmmQR1BGKh/ZWhqP8is57WTe4Q/UnkVnPayb3CH6k9lH/cZT0S0b0gip/EPHah0ZoDU2oINTPsT4nGWb8cMlGINe6KJzw07DfYluy6PCIaj4jcLdJ6ptakdUs5nGV78kENGIsjdJGHFrSRvsN/Sl8LzI9ei25r8s9f4PHC+R7nv4e6Zc5x3LjioSSf7quXkVnPayb3CH6k8is57WTe4Q/Unsp/7jKeiWje7VOnBj6kFWrCyvWgY2KKGJoa1jGjYNAHcAABsuZR/kVnPayb3CH6k8is57WTe4Q/Ul8LzI9ei25pBFH+RWc9rJvcIfqTyKzntZN7hD9SXwvMj16FuaQRR/kVnPayb3CH6k8is57WTe4Q/Ul8LzI9ehbmkFXqHD/T+N1jktVwY1g1FkImwWMhI98j+zaGgMZzEiNvmNJawAEjc7nqpHyKzntZN7hD9SeRWc9rJvcIfqS+FxxlPRLRvdnIPbHQsvcQ1rYnEk+gbFdrh9E6HQWmo3gtezGVmuB9BETVHx6Bnt7xZjOWcpSd0kp9hFFHMPwZOVu5b62ggEEg7gkK4LxxsSnQ0KZvtv/rr7rCIi4WRERAREQEREBERAREQEREBERAREQEREBERBRuOv8iPEL+zuR/y0iiPBd/m4cMv7O0f1LVL8df5EeIX9ncj/AJaRRHgu/wA3Dhl/Z2j+pag1BERAREQEREBERAREQEREBERAREQEREBERAREQEREBERAREQEREBERAREQUbjr/IjxC/s7kf8tIojwXf5uHDL+ztH9S1S/HX+RHiF/Z3I/wCWkUR4Lv8ANw4Zf2do/qWoNQREQEREBERAREQEREBERAREQEREBERAREQEREBERAREQEREBERARdLMZqhgKEl3JW4aVSP7qWZ4aNz3AesnuAHUnuWd3uPNBspbjsLkL0YOwnl5K7XfnAcef/q0LrweyY/aP2qZn5ZrZqKLIfj7sey8vvzP2U+Pux7Ly+/M/ZXX+lds4PWOpZ4g/hQ+CcunuIGP4l0mPfj9QNZTvk9RFbijDY+voD4mDYD0xPPpUf8AwYHCO7qbixc13K6SHD6aicyLYkNntzRPiDdu4hsT5SfSC5nrXrXjfqCpxx4YZzRuU05LWiyEQ7G220x7q0zSHRygbDflcBuNxuNxuN11PB/yNTwf+F2L0djdPSXjWL5rV82GROtzvO7pC3Y7dOVoG52a1o3OyfpXbOD1jqWeo0WQ/H3Y9l5ffmfsp8fdj2Xl9+Z+yn6V2zg9Y6lmvIsto8eab5Wtv4PIU4ydjLC5k7W/nIBDtv6AVoeEz2P1Jj2XcZbiu1XHYSRO32PpaR3gj0g
7ELkxuyY/Z9uLTMR6ZlnfREXIgiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiIC4rVqKlWmsTyNighYZJJHHYNaBuSf6AuVUfjVZfW4a5UMO3bvr1X/nZLPHG8fpa8j9K98DD77Fow+KYjOVjbLI9Saqta3yYyNnnjrN3NOo8bdgw+kj8Nw6k+jfYdB1jURfpuHh04VMUURaIYmbiIss4r62z1HVeD0vp2O82zdrT3rFjGwV5rDY43MaGsbYe2PqX9SdyABsOpImJiRhU6Uo1NFhp1XxDZW0zjcjNLgruQ1BJj23LVSs6axT8Vkka90bHvYyQOaR5p23YCQQS0/bfEfVGChzmmvhGLI52PUVPB0cxarMaGMswslEkkbA1rnMaXjoACeXp378+t0+MxMdbXsraGZKnJkJKDbUDr0cbZn1RIDK1jiQ15bvuGktcAe47H1LsrIdBYrKYfjlqSvls3Jn7PwBRc23LWjgdy9vP5pbGA07Hc77DoQPRudeXvhVziUzMxbbKC7uB1Hd0dlRlKHPJsNrNNp821GPvSO7nH3ru8Hp3FwPSRbropxKZori8SsTZ6ex9+vlaFa7VkE1axG2WKQffNcNwf+hXYVC4IWXTcP68Tvua1qzAz/hEz+UfoBA/Qr6vzLtGF3ONXh7pmG58RERc6CIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAoHXennaq0hlMXGWieeHeAu7hK0h0ZP5g9rVPIt0Vzh1xXT4xtHlaCUzRNc6N8T+50cg2cxw6FpHoIO4P9Cr2byWrK2QfHicBi8hSAHLPay767yduoLBXeB1/3v+i9AcQ+FkmXsy5bBdlHkJPOsVZXFsdg7AcwOx5X7D1bO6b7fdLJb1a9iZTFkcVkKEgOxE1Z5b+h7QWO/Q4r9E7P2zC7ZRE0VWn3xsv6+7mW3KWc1r3ptpPB/n31BL+6LrZXQ0/ECGhez0T9MZ/GzPNK7gMkZZYmOaA4c74Wgh3cWFhHmg7+q4fCtb8J/wA076k+Fa34T/mnfUuucLSi1czMfx0TRncrrOG1MxaeFnJ5TITYS6+/DYuWBJLLI5kjCJCW9W7SO2DeXbYbdBsurmuEGDz3lEbUl3tM1ar3nyxTBj6s8EbGRSQOA3YQGA7nfrv6DsrZ8K1vwn/NO+pPhWt+E/5p31Kzg0TFpj/Wt8jRncpFDh7d0TkredxFq7qvOXIIaU3lBkmwt7FjnuBDo4HbHd+23Lse/od95EZnXmzt9KYMHbptqCXqfdP6VZvhWt+E/wCad9SfCtb8J/zTvqUjC0dlEzEfx9YNGdyEw+U1fYyMMeT09iaNE79pYrZmSeRnQ7bMNZgO52H3Q23367bKx2Jm14HyuDnBg35WDdx/MB6SfQFyUYbeWlbHj8bfvSE7bQVXlo/pcQGgfnJC1Th9woloXIMvqBsZtwnnrUI3c7IXeh73dznj0Aea09fOPKW8vaO14XY6JnEqvO7Zf0+a6O9beHWnpdL6NxtCwALYa6awBt0lkcXvHT1FxH6FZERfneJXOLXNdXjM3PEREXmCIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiD/2Q==",
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from IPython.display import Image\n",
+ "\n",
+ "Image(app.get_graph().draw_mermaid_png())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9fcc664b-9d8e-4b16-90bd-5074c778d4ec",
+ "metadata": {},
+ "source": [
+ "As before, we can stream the graph to observe its sequence of steps. Below, we will simply print out the name of the step.\n",
+ "\n",
+ "Note that because we have a loop in the graph, it can be helpful to specify a [recursion_limit](https://langchain-ai.github.io/langgraph/reference/errors/#langgraph.errors.GraphRecursionError) on its execution. This is analogous to [ReduceDocumentsChain.token_max](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.reduce.ReduceDocumentsChain.html#langchain.chains.combine_documents.reduce.ReduceDocumentsChain.token_max) to will raise a specific error when the specified limit is exceeded."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "bf2b5408-580b-4fc6-857d-55ceae94ba23",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "['generate_summary']\n",
+ "['generate_summary']\n",
+ "['generate_summary']\n",
+ "['generate_summary']\n",
+ "['generate_summary']\n",
+ "['generate_summary']\n",
+ "['generate_summary']\n",
+ "['generate_summary']\n",
+ "['generate_summary']\n",
+ "['generate_summary']\n",
+ "['generate_summary']\n",
+ "['generate_summary']\n",
+ "['generate_summary']\n",
+ "['generate_summary']\n",
+ "['collect_summaries']\n",
+ "['collapse_summaries']\n",
+ "['generate_final_summary']\n"
+ ]
+ }
+ ],
+ "source": [
+ "async for step in app.astream(\n",
+ " {\"contents\": [doc.page_content for doc in split_docs]},\n",
+ " {\"recursion_limit\": 10},\n",
+ "):\n",
+ " print(list(step.keys()))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "id": "e86c67ff-eaf8-410e-99e5-150602022283",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'generate_final_summary': {'final_summary': 'The summaries discuss the use of Large Language Models (LLMs) to power autonomous agents in various tasks such as problem-solving, planning, and tool use. Key components like planning, memory, and task decomposition are highlighted, along with challenges such as inefficient planning and hallucination. Techniques like Algorithm Distillation and Maximum Inner Product Search are explored for optimization, while frameworks like ReAct and Reflexion show improvements in knowledge-intensive tasks. The importance of accurate interpretation of user input and well-structured code for functional autonomy is emphasized, along with the potential of LLMs in prompting, reasoning, and emergent social behavior in simulation environments. Challenges in real-world scenarios and the use of LLMs with expert-designed tools for tasks like organic synthesis and drug discovery are also discussed.'}}\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(step)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "47df3079-bd67-4384-8609-1de3d966420b",
+ "metadata": {},
+ "source": [
+ "In the corresponding [LangSmith trace](https://smith.langchain.com/public/9d7b1d50-e1d6-44c9-9ab2-eabef621c883/r) we can see the same 17 LLM calls as before, this time grouped under their respective nodes.\n",
+ "\n",
+ " \n",
+ "\n",
+ "## Next steps\n",
+ "\n",
+ "Check out the [LangGraph documentation](https://langchain-ai.github.io/langgraph/) for detail on building with LangGraph, including [this guide](https://langchain-ai.github.io/langgraph/how-tos/map-reduce/) on the details of map-reduce in LangGraph.\n",
+ "\n",
+ "See [this tutorial](/docs/tutorials/summarization/) for more LLM-based summarization strategies."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1d9ed018-f9a8-4cc9-9f42-405da6f05206",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/versions/migrating_chains/map_rerank_docs_chain.ipynb b/docs/docs/versions/migrating_chains/map_rerank_docs_chain.ipynb
new file mode 100644
index 00000000000..43b3408b2ba
--- /dev/null
+++ b/docs/docs/versions/migrating_chains/map_rerank_docs_chain.ipynb
@@ -0,0 +1,341 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "9db5ad7a-857e-46ea-9d0c-ba3fbe62fc81",
+ "metadata": {},
+ "source": [
+ "---\n",
+ "title: Migrating from MapRerankDocumentsChain\n",
+ "---\n",
+ "\n",
+ "[MapRerankDocumentsChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain.html) implements a strategy for analyzing long texts. The strategy is as follows:\n",
+ "\n",
+ "- Split a text into smaller documents;\n",
+ "- Map a process to the set of documents, where the process includes generating a score;\n",
+ "- Rank the results by score and return the maximum.\n",
+ "\n",
+ "A common process in this scenario is question-answering using pieces of context from a document. Forcing the model to generate score along with its answer helps to select for answers generated only by relevant context.\n",
+ "\n",
+ "An [LangGraph](https://langchain-ai.github.io/langgraph/) implementation allows for the incorporation of [tool calling](/docs/concepts/#functiontool-calling) and other features for this problem. Below we will go through both `MapRerankDocumentsChain` and a corresponding LangGraph implementation on a simple example for illustrative purposes."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "39f11f9f-ac24-485e-bc15-285bebb9c12e",
+ "metadata": {},
+ "source": [
+ "## Example\n",
+ "\n",
+ "Let's go through an example where we analyze a set of documents. We first generate some simple documents for illustrative purposes:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "ef975d40-6ea3-4280-84cb-fae4c285c72b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_core.documents import Document\n",
+ "\n",
+ "documents = [\n",
+ " Document(page_content=\"Alice has blue eyes\", metadata={\"title\": \"book_chapter_2\"}),\n",
+ " Document(page_content=\"Bob has brown eyes\", metadata={\"title\": \"book_chapter_1\"}),\n",
+ " Document(\n",
+ " page_content=\"Charlie has green eyes\", metadata={\"title\": \"book_chapter_3\"}\n",
+ " ),\n",
+ "]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e3b99cfc-b99c-4da8-9c87-903e0249d227",
+ "metadata": {},
+ "source": [
+ "### Legacy\n",
+ "\n",
+ "\n",
+ "\n",
+ "Below we show an implementation with `MapRerankDocumentsChain`. We define the prompt template for a question-answering task and instantiate a [LLMChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.llm.LLMChain.html) object for this purpose. We define how documents are formatted into the prompt and ensure consistency among the keys in the various prompts."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "3b65e056-d739-4985-8bfc-0edf783f2b16",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.chains import LLMChain, MapRerankDocumentsChain\n",
+ "from langchain.output_parsers.regex import RegexParser\n",
+ "from langchain_core.prompts import PromptTemplate\n",
+ "from langchain_openai import OpenAI\n",
+ "\n",
+ "document_variable_name = \"context\"\n",
+ "llm = OpenAI()\n",
+ "# The prompt here should take as an input variable the\n",
+ "# `document_variable_name`\n",
+ "# The actual prompt will need to be a lot more complex, this is just\n",
+ "# an example.\n",
+ "prompt_template = (\n",
+ " \"What color are Bob's eyes? \"\n",
+ " \"Output both your answer and a score (1-10) of how confident \"\n",
+ " \"you are in the format: \\nScore: .\\n\\n\"\n",
+ " \"Provide no other commentary.\\n\\n\"\n",
+ " \"Context: {context}\"\n",
+ ")\n",
+ "output_parser = RegexParser(\n",
+ " regex=r\"(.*?)\\nScore: (.*)\",\n",
+ " output_keys=[\"answer\", \"score\"],\n",
+ ")\n",
+ "prompt = PromptTemplate(\n",
+ " template=prompt_template,\n",
+ " input_variables=[\"context\"],\n",
+ " output_parser=output_parser,\n",
+ ")\n",
+ "llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
+ "chain = MapRerankDocumentsChain(\n",
+ " llm_chain=llm_chain,\n",
+ " document_variable_name=document_variable_name,\n",
+ " rank_key=\"score\",\n",
+ " answer_key=\"answer\",\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "fe94c2e5-4c56-4604-a16c-055c196f4a57",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/langchain/libs/langchain/langchain/chains/llm.py:369: UserWarning: The apply_and_parse method is deprecated, instead pass an output parser directly to LLMChain.\n",
+ " warnings.warn(\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "'Brown'"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "response = chain.invoke(documents)\n",
+ "response[\"output_text\"]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "317e51c2-810f-463b-9da2-604fe95a8b48",
+ "metadata": {},
+ "source": [
+ "Inspecting the [LangSmith trace](https://smith.langchain.com/public/7a071bd1-0283-4b90-898c-6e4a2b5a0593/r) for the above run, we can see three LLM calls-- one for each document-- and that the scoring mechanism mitigated against hallucinations.\n",
+ "\n",
+ " \n",
+ "\n",
+ "### LangGraph\n",
+ "\n",
+ "\n",
+ "\n",
+ "Below we show a LangGraph implementation of this process. Note that our template is simplified, as we delegate the formatting instructions to the chat model's tool-calling features via the [.with_structured_output](/docs/how_to/structured_output/) method.\n",
+ "\n",
+ "Here we follow a basic [map-reduce](https://langchain-ai.github.io/langgraph/how-tos/map-reduce/) workflow to execute the LLM calls in parallel.\n",
+ "\n",
+ "We will need to install `langgraph`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b8fab4f6-eed1-4662-8d3f-82846a2edfb3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pip install -qU langgraph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "b8493533-7ab3-4f75-aab1-390340bff2ea",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import operator\n",
+ "from typing import Annotated, List, TypedDict\n",
+ "\n",
+ "from langchain_core.prompts import ChatPromptTemplate\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "from langgraph.constants import Send\n",
+ "from langgraph.graph import END, START, StateGraph\n",
+ "\n",
+ "\n",
+ "class AnswerWithScore(TypedDict):\n",
+ " answer: str\n",
+ " score: Annotated[int, ..., \"Score from 1-10.\"]\n",
+ "\n",
+ "\n",
+ "llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
+ "\n",
+ "prompt_template = \"What color are Bob's eyes?\\n\\n\" \"Context: {context}\"\n",
+ "prompt = ChatPromptTemplate.from_template(prompt_template)\n",
+ "\n",
+ "# The below chain formats context from a document into a prompt, then\n",
+ "# generates a response structured according to the AnswerWithScore schema.\n",
+ "map_chain = prompt | llm.with_structured_output(AnswerWithScore)\n",
+ "\n",
+ "# Below we define the components that will make up the graph\n",
+ "\n",
+ "\n",
+ "# This will be the overall state of the graph.\n",
+ "# It will contain the input document contents, corresponding\n",
+ "# answers with scores, and a final answer.\n",
+ "class State(TypedDict):\n",
+ " contents: List[str]\n",
+ " answers_with_scores: Annotated[list, operator.add]\n",
+ " answer: str\n",
+ "\n",
+ "\n",
+ "# This will be the state of the node that we will \"map\" all\n",
+ "# documents to in order to generate answers with scores\n",
+ "class MapState(TypedDict):\n",
+ " content: str\n",
+ "\n",
+ "\n",
+ "# Here we define the logic to map out over the documents\n",
+ "# We will use this an edge in the graph\n",
+ "def map_analyses(state: State):\n",
+ " # We will return a list of `Send` objects\n",
+ " # Each `Send` object consists of the name of a node in the graph\n",
+ " # as well as the state to send to that node\n",
+ " return [\n",
+ " Send(\"generate_analysis\", {\"content\": content}) for content in state[\"contents\"]\n",
+ " ]\n",
+ "\n",
+ "\n",
+ "# Here we generate an answer with score, given a document\n",
+ "async def generate_analysis(state: MapState):\n",
+ " response = await map_chain.ainvoke(state[\"content\"])\n",
+ " return {\"answers_with_scores\": [response]}\n",
+ "\n",
+ "\n",
+ "# Here we will select the top answer\n",
+ "def pick_top_ranked(state: State):\n",
+ " ranked_answers = sorted(\n",
+ " state[\"answers_with_scores\"], key=lambda x: -int(x[\"score\"])\n",
+ " )\n",
+ " return {\"answer\": ranked_answers[0]}\n",
+ "\n",
+ "\n",
+ "# Construct the graph: here we put everything together to construct our graph\n",
+ "graph = StateGraph(State)\n",
+ "graph.add_node(\"generate_analysis\", generate_analysis)\n",
+ "graph.add_node(\"pick_top_ranked\", pick_top_ranked)\n",
+ "graph.add_conditional_edges(START, map_analyses, [\"generate_analysis\"])\n",
+ "graph.add_edge(\"generate_analysis\", \"pick_top_ranked\")\n",
+ "graph.add_edge(\"pick_top_ranked\", END)\n",
+ "app = graph.compile()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "ccf10061-f0dc-4333-9053-ebebe3f2e196",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/4gHYSUNDX1BST0ZJTEUAAQEAAAHIAAAAAAQwAABtbnRyUkdCIFhZWiAH4AABAAEAAAAAAABhY3NwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAA9tYAAQAAAADTLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlkZXNjAAAA8AAAACRyWFlaAAABFAAAABRnWFlaAAABKAAAABRiWFlaAAABPAAAABR3dHB0AAABUAAAABRyVFJDAAABZAAAAChnVFJDAAABZAAAAChiVFJDAAABZAAAAChjcHJ0AAABjAAAADxtbHVjAAAAAAAAAAEAAAAMZW5VUwAAAAgAAAAcAHMAUgBHAEJYWVogAAAAAAAAb6IAADj1AAADkFhZWiAAAAAAAABimQAAt4UAABjaWFlaIAAAAAAAACSgAAAPhAAAts9YWVogAAAAAAAA9tYAAQAAAADTLXBhcmEAAAAAAAQAAAACZmYAAPKnAAANWQAAE9AAAApbAAAAAAAAAABtbHVjAAAAAAAAAAEAAAAMZW5VUwAAACAAAAAcAEcAbwBvAGcAbABlACAASQBuAGMALgAgADIAMAAxADb/2wBDAAMCAgMCAgMDAwMEAwMEBQgFBQQEBQoHBwYIDAoMDAsKCwsNDhIQDQ4RDgsLEBYQERMUFRUVDA8XGBYUGBIUFRT/2wBDAQMEBAUEBQkFBQkUDQsNFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBT/wAARCAEvAJ8DASIAAhEBAxEB/8QAHQABAAIDAQEBAQAAAAAAAAAAAAYHBAUIAwIJAf/EAFUQAAEDAwICAgoMCwQIBwEAAAECAwQABQYHERIhEzEIFBUWIkFVVpTTFyNRVGFxdZKTldLUMjY3OEJTgaGys9EJUnJ0MzWCkZaisbQYJUNXY3PV8P/EABoBAQEAAwEBAAAAAAAAAAAAAAABAgMEBgX/xAA0EQACAAMDCgMIAwEAAAAAAAAAAQIDERJRkQQTFCExQVJhodEFgbEVIzNCU3HB4SIy8PH/2gAMAwEAAhEDEQA/AP1TpSlAKUpQCv4SEgknYDrJrWX299yGWkssKmz5CujjREK4S4rxkq58KEjmpWx2HUCSEnWpwlm6kPZE6b2+SFdrujaI0fcQz1EfCviV8PiG6GBUtRui6lpebRzJbQyspcusJCh1pVIQD/1r576rL5Ygeko/rXw3iNiZQEN2W3IQOpKYjYA/dX33q2XyPA9GR/Ssvc8+g1DvqsvliB6Sj+tO+qy+WIHpKP6071bL5HgejI/pTvVsvkeB6Mj+lPc8+hdQ76rL5Ygeko/rTvqsvliB6Sj+tO9Wy+R4HoyP6U71bL5HgejI/pT3PPoNQ76rL5Ygeko/rXoxkFrlOBDNyhvLPUlt9Cif2A1596tl8jwPRkf0rzew6wSUcD1jtrqf7q4jZH7xT3PPoNRuKVFziruPJ6bG3VR0oHO1POExHRv1DcEtHxAo8H3UqrdWa7s3uCmSylxo7lDjDw4XGVjkpCxudlA+4SD1gkEE4RQJK1C6r/bSUM6lKVqIKUpQClKUApSlARix7XbL77cHNldoKTbI3XugcCHXSP8AEpaAf/qTUnqMYkntO95TCVuFiemWncbbtuso2I93wkOD/ZqT10T/AO1OS9EVise4XCNaYEmdNfbiw4zSnnn3VBKG0JBKlKJ6gACSfgrIrUZfGiTcTvcefbnbxBdgvtyLcwniclNltQU0kbjcqG6QNxzPWK5yFVZd2V+IW3SbKs1xxyRkSbHGbe7WVBlRg6XN+hPEtnfo1cKvbACjYHnUqm684basOgZNPmXCFbJr5isB6zTUyHHQCSlMcs9KeSVHfg22BO+1c+R8czbL9G9WMHsltyi
Rh6cfbZxtrMIPalxRI4V8cNBUEqdbSlLYStY61cPEoDepxqFn2QZnZsIlwLNn1gxFya8xkTNttMiPeUlLCVMJShKemSypwkKcaH6OwUASaAsqZ2QWnsDELLlD2SsCw3mUYMGYhl1YdkBLii0UhBUlftTg4VAHiHD+EQDGZHZSY61qhj+Kog3dUO72p2eicqyzw4hwSG2UNlntfiSk8SypxWyU8KeLbiBNNYHgl9bjYhFfxXIozMTVqTeejvEdx95qE5EkONSHXd1hXhOICllR2cJBPFVvamyLhhXZA4lmasevV8sSsfn2V5djgrmOx31vxnWy42gFQQoNLHFtsCOe1AXhSlKAVGE7WjUHo29ks3mGt9aRvzfYLaOL3N1NrSPiaFSeoxcE9u6h2dtO57RgyH3TtyHSKbQgb/DwufNrok7Yk9lH6autCok9KUrnIKUpQClKUApSlAaG/WySzPYvdta6edHQWXo3EE9tMEglAJIAWkjiQVctypJKQsqT8TYmN6m49Jt1whw75anVJTJt89gLCVpIWEutLG6VJISeFQBBAqQ1prviVsvUgSn2VszUgATIjy474A6gVoIJA9wkjmeXOt6ihiShmbt5fuRBPY26UIO6dN8WSdiNxaWOo8j+jWXZ9A9NceukW5WvAsct9wiuB1iVGtjLbjSx1KSoJ3BHuitr3jupGyMnvyE+IdsNq/epsn99O8mR51X76Zn1VXNy+PoxRXkopUX7yZHnVfvpmfVVH8/wnLe827d5+U3Dvn6E9od03m+1ul3G3ScLW+22/VTNy+PoxRXlkUqGWnCbr3Khd0cqvPdDoUds9A810fS8I4+Hdrq332+CsvvJkedV++mZ9VTNy+PoxRXmikdjnpXLkOvv6dYw886orW4u0sFSlE7kk8PMk15/+GvSf/23xY/HaGPs1Ie8mR51X76Zn1VO8da9g7kl+eSP0e2kt7/tQhJ/fTNy+PoxRXmbKuVsw+BCt0aOlJQ0liDaYKEhxSUgJShtG4ASBsNzslI5kgDevrHLM9b0yps8tru09YdkqZJLaNhslpBOxKUDlvsNyVK2BUQPWy4zbcf6Qwo3A64AHJDq1OvObdXE4slavH1k9dbSsYooUnDBv3j7ClKVpIKUpQClKUApSlAKUpQClKUAqBa8QLDdNH8qiZReX8ex92GUzbpG36SM3uPCTslXPfbxHrqe1CNbLhFtWlOTS5uMKzSIzEKnLAhvjM0bj2sJ4Vb+71HqoCRYq1FYxeztQZCpkJENlLEhf4TrYQOFZ5DmRsf21ta1mMuofxu1OtQjbW1xGlJhEbGOCgbN7bDbh6urxVs6AUpSgFKUoBSlKAUpSgFKUoBSlKAUpSgFKUoBUY1Mj5RKwK9tYVJiw8qXHItz80Asod3GxUClQ2238RqT1AteIFhumj+VRMovL+PY+7DKZt0jb9JGb3HhJ2Srnvt4j10BMLIic3ZoCbmtDlyEdsSlt/gqd4Rxkchy4t/FWbWqxVqKxi9nagyFTISIbKWJC/wnWwgcKzyHMjY/tra0ApSlAKUpQClKUApSlAKV/FKCElSiEpA3JJ5AVCjmF7uwEiy2yCbavmzIuElbbjyfEsNpbPCk9Y3O5HWBW6XKim1s9i0qTalQju7mHvCx+lverp3dzD3hY/S3vV1u0WO9YoUJvSoR3dzD3hY/S3vV07u5h7wsfpb3q6aLHesUKE3pUI7u5h7wsfpb3q6d3cw94WP0t71dNFjvWKFCb1+WP9p9o5LxjVqLqCwhbtqyZltl9w8wzLZbS3wfAFNIQR7pS57lfov3dzD3hY/S3vV1X2vOmd51901uOIXmJZorchSHo81qQ6pyK8g7pcSC3z8aSPGlShuN6aLHesUKHMv9lroWCq8aqXSP1cdrs/SJ8fLp3k7/ALGwR/8AKK/RKqn08sV90yweyYrZrXY2rbaYqIrO8p0KXsOa1bNbFSlbqJ8ZUTUh7u5h7wsfpb3q6aLHesUKE3pUI7u
5h7wsfpb3q6d3cw94WP0t71dNFjvWKFCb0qEd3cw94WP0t71dO7uYe8LH6W96umix3rFChN6VCO7uYe8LH6W96und3MPeFj9Le9XTRY71ihQm9KiELLrpBlMov1viR4r7iWkTIMhbqULUQEhxKkJKQSQAoE8yN9hzqX1omSopbpEKUNXlBKcZu5B2IhvEEf4DUexkAY3agAABEa2A/wAAqQ5V+LF4/wAm9/Aaj2Nfi5av8o1/AK7JPwX9/wADcbKlcmYlrBmeVX7T+49+zbj9+yWTb7lg0SHGDttjM9PvxKKS8ODoUdIV9fSDh4eRP8xDV7V/UGBb80x+0XyXbp07iYs3aNsTa1Qg+W1AyFSRKDobClcfCBxjbo9qxtoh1pSuWsj1K1Ct+KanZsxloRDw7JZEKPZO5sctSorbjRUh1wp49+FwhJQUkbAkq3rKyzU/UnLs/wA0gYaxfY8DGpSbcwm0W62yGpMjoUOKMlUuQhwJ3cAAaA8Eb8RJ2FtIHTdK5+teRak5vqzEx2VfFYOy3h9uvFygQ4kaS6zPcfeQ62hxxKxweBsd+LkhPDsSSY5kutGVWjUaJcLJfrrkOJKyqPYZjS7JFZtbIdfDC225PGJDjralfhgKQVJIO1LQOokuIUtSApJUnbiSDzG/VvX1XN2nDVzxPU3XfJZOS3GdbrTcjKftPa8YIkgW5lxO6g1xgoTshPCoAhAKuIkk6/TPUPWPKJOG5Eq23qfaL47Hfnw5MG2M2yLDfSFFyO63JMk9GFJUOkSorAO6Uk7BaB1DSlctzNaMqg6l2adab9dclwy45UmwOl6xxY9rbDjqmuFiQFiQ4ttYAK9lNqKFcxyFVugOpKVy1kepWoVvxTU7NmMtCIeHZLIhR7J3NjlqVFbcaKkOuFPHvwuEJKCkjYElW9enZAax5Xh15yq54ff7rcmcXaZeuFpjWSK5bY54UrU3JlOKS6VKQeLZncoCk7ipaQOlY95t8y5TLcxOjP3CEltUqI28lTrAWCUFaAd0hQSrbcDfY7dVZlc8BN/n6ma+KxKZ3PyNVlsjtueLSHB0wYkqQkpWCkhRHCeXIKJHPavM6z5dqoe2dNlMkw8MN4egvNoUl25yd0xYy1KG6S30L5ICkgkpCjtS0DoC7XiBYLe9Puc2NboLIBdlS3UtNNgkAcSlEAcyBz92suuTMszq9XXsdMsm9/Ey636zy4ZuVsvuOQ2JEbjWhKosiOtooKFFfGlYTz4Bss7E1O3M2yq2a/PWrJ8jk4zYJM1pnH4QtLTlvu7ZZBU2ZZBW3I6Tj8AqTySOEK3paBa+oB2xOaR1hTRHwHpUVYlV1qB+KU742v5qasWmUfCg+79ITLcavKvxYvH+Te/gNR7GvxctX+Ua/gFS6bERPhvxnd+iebU2rbr2I2P/AFqv4lyk4vCjWy52u5OvRW0siVBguSWnwkABY6JKinfbmlQBB3HMbE3J/wCUtwLbULWjnzFNL9Rsc1VbuFktl4ssaReS/dJ12utsmw5UEulTiU8DAlqWpO3Dxq8E7AqIFWxYNALVimQdvWTIsltVp7eVce9uLcAm2h5SuNeyODjCFKJUWwsIJJ8Gph35xvJl++pJfqqd+cbyZfvqSX6qtqyeNfKxZdxF7loRYLphebYw7MuSYGW3B65TnEOth1tx3g4g0SjYJ9rTsFBR5nma8cn0EtV+yu5ZDb8hyPE591bbbuYx+emOifwJ4UKcCkKIWE+CFoKVbeOpd35xvJl++pJfqqd+cbyZfvqSX6qrmI+Fiy7jwhYBb4Ofy8vQ/LXc5NrYtC23FpLXRNOOOJUBw8XGS6rclRGwHIcyYFcexfx64KkNJyDJYlsVdO7Ua1RpyExYU3punLzSS2Sd3OJXA4VoBUSEg7EWJ35xvJl++pJfqqd+cbyZfvqSX6qmYjfysWXcaJOjtsY1EuOXRbpdoi7oEd07O0+g2+epLJZSt1tSCdwjYeCpIPCncHatVh2hMTTeUw9YMiy
V2228Oqt+My7p/wCWslSVAN8my4UDiOwWpYTyIG4FTLvzjeTL99SS/VVhXrU6y43apNzuzd1tdtjI6R+ZMtMpplpPuqWpsAD4SaZiPhYsu41AyLVLcb4NjIHjIyt7/wDPrQL7F/Hi5HbbyDJWLbBuqb1bbU1OQItvlB/p+NpHR7qBWV+C4VgBatgDzE/jZ5AmxmpEeDe32HUBxt1uyy1JWkjcEEN7EEc969O/ON5Mv31JL9VTMTN8LJZZF7loRYLphebYw7MuSYGW3B65TnEOth1tx3g4g0SjYJ9rTsFBR5nma1uY9jZj2aTsmXIvGQQLbkgCrraLfOSzElOhtLYeI4CsK4UI3AUEq4BxJVz3nXfnG8mX76kl+qp35xvJl++pJfqqZiPhLZdxHUaZx8RyCbmdpcu90v4syID9vEtptu7qYSehU6FJSgPc1JCwUJHGdxtUa0Z0ScxTA8tjXBDmOXfLrnMuktFnme225Lyj0bLT4A5tp25gbBSlbcudWP35xvJl++pJfqqd+cbyZfvqSX6qmYj4WLLuIIOxtsD+M5Xabher/d5WTqjd0rxOlNrmOJjkFlCSGwhKU7EfgfpK35862l50Qt2RZtFyG6ZDkM6PFuDV1j2J6ak29qU0kBtxKODjHCRxBPHw8XPbnUn7843ky/fUkv1VO/ON5Mv31JL9VTMR8LFl3HxqB+KU742v5qasWq6eL+boRbo1unxYanW1yZc+KuMlLaVpUUpS4ApSlbcI2Gw3JJ5AKsWufKdUEMD2pvrTsHsoKUpXAYilKUApSlAKUpQCqZ7Mr817Uf5LV/EmrmqmezK/Ne1H+S1fxJoCxNOPyeYv8lxf5KakVR3Tj8nmL/JcX+SmpFQClKUApSlAKUpQClKUApSlAKUpQClKUApSlAKpnsyvzXtR/ktX8SauaqZ7Mr817Uf5LV/EmgLE04/J5i/yXF/kpqRVHdOPyeYv8lxf5KakVAKUpQClKUApSlAKUpQClKUApXmZLSSQXUAjkQVCv520z+ub+cKtGD1pXl20z+ub+cKdtM/rm/nClGD1pXl20z+ub+cKdtM/rm/nClGD1rgbs7+zDkY1JznRp7CFqbmQWW2b8u5FAWh1ttwrSz0PMJUVI5L5lB5jqHenbTP65v5wriz+0z0RazjTmFn9qbS7ecb9qlpa5qegrVzPLmejWeL3AlbhPVSjBuew67Mi6a83yHhsTT3uZbrLa0GbelXgupbCEBCAG+107qWoDZPGNgFHnw7Hr+uauwM0UZ0X0OhSJ6G2cjyPhuc/i2C20FPtDJ6iOBB3IPUpxYrpDtpn9c384UowetK8u2mf1zfzhTtpn9c384UowetK8u2mf1zfzhTtpn9c384UowetK8u2mf1zfzhX9S+0tQSlxCifEFA0owelKUqAVDszeVcL9abEtakQpEeRMkIQopLwbU0lLZI/QJd3UNxvwgHdJUDMahOR/lHsfyTO/nRK68lXvfJ+jMkYfsf4udt8btB2AA3gtdQ5D9GnsfYt5tWf0Br7NeOd6jY9ppb4M3I56oEedLRAjFEd19Tr6kqUlsJbSo7kIVty6xt1kAwfMOyLxmLpjkOS2K+RW37YsRlquttnFMOQobpEqO2107aCOe5SB8NdjnzF87xJV3k+9j7FvNqz+gNfZp7H2LebVn9Aa+zWmzPWnD9Op8W3ZDeBHuj7HbAhxIr8p0Nb7F1SGULUhvfccSgByPPkajsPXeDDynUEX2XEhYtjzNqeiTW2XVPPCW0pWxSOIrUVcIQlCAo77bE0z8zjeIq7yd+x9i3m1Z/QGvs09j7FvNqz+gNfZreMupfaQ4kKCVpCgFpKVbH3QdiD8B51WN716tdg1tiafSoU7jftqZYmsQZL46ZbyG0N+1tFIRsokulXAk8iQaufmL5niKu8mXsfYt5tWf0Br7NPY+xbzas/oDX2aj83XvArdlpxqRkLSLqmUiCsBh0x25CtuFlcgI6JLh3A4FLCtyBtvWP
fuyJ0/wAYvd0tVzvjkaXankMXBXc+UtmIpaErT0ryWi2hJStJ4lKCesb7g7TPzON4irvJR7H2LebVn9Aa+zT2PsW82rP6A19mt4y83IaQ60tLjS0hSVoO4UDzBB8YqGZfrPiGCZA3Yrxc3Wry5FE1uBGgyJTzjJUpHElLTairYoVuBuQBuQBzq5+YvneIq7zb+x9i3m1Z/QGvs09j7FvNqz+gNfZrQSNd8Gh5ojFJF87Xva5KYaWnYj6GS+obpa6co6LjO42TxbncDavC/dkLp/jF4uVruV/7XmWx9EaekQ5C0Q1LShSC8tLZS2ghxOy1EJJ3G+6VATPzON4irvJN7H2LebVn9Aa+zT2PsW82rP6A19moY9rTHsGfahwsikQrbjGMW22TkzQhZdJkl8KCtiePm0gIShPESrbwiRW4y7W/C8FfiMXq7ORJUmMJiYqIMh55tg/+o6222pTSesbuBI3BHWDTPzON4irvN37H2LebVn9Aa+zQaf4wAeHHbUgnxohNpPXuOYT7oB/ZWnybWzCcRttln3C/sqj3pHSW3tJpyYuWjhCittDKVqUgAglQGw3G551iaDakydWtNouTSUxQZM2cy0YaFJbU01KdabUApRO5QhJPPrJ5Dqpn5laWniKu8nuBzXltXe2vPLkptc3tZp11ZW4Wyy26kKUeaikO8O53JCQSSdzUoqHYD/rjMvlRv/so1TGuHKUlMdOXVJh7RUJyP8o9j+SZ386JU2qF5Kgp1BsTh5JNsmtjl1q6WKdv9wP/APA1lkvxPJ+jKio+yhuwsLeltwVElzkx81iOGPAZLz6wIsrcIQOaj8A5nxVWWoeP5FqRi+u2V2/FL3bo17sUK02y2S4K259wcYLilvGNsVj/AEoQncbkJPIV1FkOI2nKnbQ5dInbS7TORcoR6RaOikIStCV+CRxbJcWNlbjn1chW4ra4amJQjlxuOkeuOd3y44pf7/a8oj29yBPsFvVOLJjslpcZ1KPCb8Lw0kjhPGdyCKgeY6Z5Vkerua6h2yBdW37H3FvVqsE2MExro63HV0zRJBCnkIK20lJPA4vfr2Ndb0pZqDDs1zTerRCuCGJMVEplDwYmMqZebCkg8K0KAKVDfYpPMGqizZ+fhvZE2XKXLDeLvZJmNvWYv2aEuWY8jtpt1PSpRuUIKd/DPLcc6mN40N07yG6SbldMHx+4XCSsuPypNtacccUesqUU7k/HUosGPWvFLRHtVmt8a1WyPxBmHDaS003uoqPClIAG5JPxk1aNg5RveOZIzpHmGj7eH3uTkt3vspyNe0wlKtzjT87thE1yV+AkoQRuknj4mwAD11nzMzftOSdkBjsPEr/lNyvFwTGiottvU9GU45a47YS87+A0NyCor2HCeW/VXV9aey4jaceul7uNvidrzL1JTLnudItXTOpbS0FbKJCfAQkbJAHLfr3NY2QYOmGNysN01xOwTnhIm2q0xIL7qTuFuNspQog+MbpNQ9FgmHsqHb0q3Pm3jDEQ0XAsq6EO9vLWpoObbcXDwqKd99tjW+vGhuneQ3STcrpg+P3C4SVlx+VJtrTjjij1lSincn46lFgx614paI9qs1vjWq2R+IMw4bSWmm91FR4UpAA3JJ+MmsqA5G1VgZhkjuSC7WjO7rfoGUMS4EW3MPdx2rUxLacbcQlBDb7haSSR4bvGeSQBynWQYleJOKdk8ymyznXbwl421sRVlU09yWkJ6Ebe2e2ApHDv4QI666PpUsg49v8Ao7l2Q6gXvJoDFwamY9ZceuVutkyOUwrvNjpeUplwqT4S0J40DY7oW+lR2IFbG6211vVbIswvmOakLsmW2+3Srd3ruz4z8RbTHRuRJbDDiFIWFeEFLHD4avCHOusaVLAOardjyNDtT8dvdvwvIpmHSMTbs8VmBGcuMu0viSuQtt5AUpYSsOAFYKhxNgE7AGp52LttuFr0gjNXS1zbNLcul0kGFcGCy82lye+tHEk9W6VJIPUQQQS
DvVs0rJQ0YMHAf9cZl8qN/wDZRqmNRDAkHunlzg5oXdEbHY+KJHSf3g1L60ZT8TyXojJ7RWuvdii3+KhmSFpU0sOsvsrKHGXACAtCh1HYkHxEKUkggkHY0rnhicLrC9ZiQ5WAzyfBzG9JGwAHQwj/ANY9fzvAuHnne/oIX3eplSujSZnLBdi1Ib3gXDzzvf0EL7vTvAuHnne/oIX3eplSmkzOWC7CpDe8C4eed7+ghfd6d4Fw88739BC+71MqU0mZywXYVIb3gXDzzvf0EL7vUA19VkGlWjeWZbassuUm42mEZLDUyNDU0pQIGyglhJI5+IirxqmezK/Ne1H+S1fxJppMzlguwqb/ABPFrrfsWs1zfzG8IfmwmZLiW2IQSFLQFEDeOTtufdra94Fw88739BC+71sNOPyeYv8AJcX+SmpFTSZnLBdhUhveBcPPO9/QQvu9O8C4eed7+ghfd6mVKaTM5YLsKkN7wLh553v6CF93p3gXDzzvf0EL7vUypTSZnLBdhUhveBcPPO9/QQvu9fSMBmg7OZdenUHrT0cNPj91LAIqYUppMzlguwqYlqtUWyQGoUJroY7e+wKiokkkqUpRJKlKJJKiSSSSSSTWXSlczbidXtIKUpUApSlAKUpQClKUAqmezK/Ne1H+S1fxJq5qpnsyvzXtR/ktX8SaAsTTj8nmL/JcX+SmpFUd04/J5i/yXF/kpqRUApSlAKUpQClKUApSlAKUpQClKUApSlAKUpQCqZ7Mr817Uf5LV/Emrmr8qf7TvSOZi2s7OdI43rXlLDaVL25MyWGkNFHVyBbS2obnmeP+7QH6aacfk8xf5Li/yU1Iq/OL+y40J7buV11Uusc9HE4rbZuNPW4pOz7w+JKg2COR43B1iv0doBSlKAUpSgFKUoBSlKAUpSgFKUoBWoyTK7ViUISrpLTHQo8LaACtx0+4hA3Kj8Qr3v8Ae42OWWbdJiimNFaU6vhG6jsOQA8ZJ2AHjJFc43O7Tcjubl1uagqa6OEIB3SwjfcNI+Ae74zzNfY8O8PeWROKJ0hX+ohzLCm69ul09z8Zdda57KnTEsKPw8KEucvjO/wVi+zzePNWH9bq+71X9K9WvCsiSo5dfN9xa5FgezzePNWH9bq+71WfZEvO9kPpfPxC44/DtzjjjciJcRcVPKiPoPJYQWBvukqSRuOSzzFZlKvsvIvp9Yu5LXI2+l2bOaT6fWHEbPicMQLTGSwlZuykl1XWtxQEf8JaypR+FRqU+zzePNWH9bq+71X9Y0e5Q5cuVFYlMPSohSmQw24FLZKk8SQtIO6dwdxv1jnT2ZkX0+sXcteRZPs83jzVh/W6vu9Z9u16QXQLrj8mI0Tt00J9MkJHuqSQhW3+EKPwVWFKxi8KyNqigp5v8ti1yOl7LfIGRW9udbZTcyKvqcbPUfGCOsEeMHmKz65qxvKpGDXXuoxxKiqKe3oyRv0zQ61Af30jcg+Pbh6jy6RYfbksNvNLS404kLQtJ3CgRuCDXkcvyF5FGqOsL2P8F5npSlK+WQUpSgFKUoBSlKArjXd9TeGxWB/o5FxYQ4PEQklwf8yE1UFX5qfjb2U4ZOiRU8c5ookxk/3nG1BYT/tAFP8AtVQDD6ZLKHUb8KxuNxsR8BHiPwV7nwWOF5M4VtT1+dP95B7D7pUXut0zFi4PN27HLRMhJPtT8i9OMOLG3jQIywnnv+kaxTes98WJ2P8A4gd+519pzYU6a8H2MCJ6i5xkrmoScVx1u5tIjW5FwkyLTGiPvqK3FIQnaS4lAQOAkkBRJIHg7c8GPkuoFxuuDWO5S1YxPuSLoJrgix3HXUMFosuhO7iG1lKuY3UkFSuR8HaXXLTx3NJUK+XNcrEsmjNri9tY9cekKo5VxdGpbjIC07+FsUcj1Gtuxp/CZumN3Bc24SpdiYkR2HJL4cU8HggLU6ojdSvAGxBHWevxcblTYonFV0bW+mqq3bU6VKVnD1Gyq4xbdizNyZbyGVkU+yq
vi4qDwsRkqdU6GvwOkKAlIG3Dvudq3mkUCdbdRdTI1yuarxLRJgcU1xlDKnB2okjdKAEggbDkBvtvsK3M/ReyT4Epjtu5RpDt4dvjM+M+luRFkufhFpQTsE7bjhUFbg896/lswifp+9dJ9g6bKLnd3mlzXL/cwyR0bfAkpUhhXiA8HYD3NuqsYZc2GOGKPXTm3qo1s3uu8E/pUM7tZ95p2L/iF37nWysNxyiVOKLxYrZbonASHol1XJXxcthwGO2NuvnxeLqruUyFumvB9iEhIBGx5ir10fkLk6aWErO/RsFhP+FC1IT/AMqRVDu9MoJajNF+W8oNMMp61uKOyR/vP7Bua6TxOxJxjGbXaUr6TtOOhlTm23GoDwlftO5/bXn/AByOFSYIN7dfKn7M1sNtSlK8YBSlKAUpSgFKUoBVX5/pM7cZj92x/oW5jxK5EF5XA0+rxrSoA8Cz4+XCo7E8J4lG0KV05PlMzJY7cp6/UHME213a2Oqbm2K6x1pJBIhOOo+e2FI/fWNu/wCT7l9Xv/YrqilegXjsdNctYjUcr7v+T7l9Xv8A2Kbv+T7l9Xv/AGK6opV9vRfT6/oURyvu/wCT7l9Xv/Ypu/5PuX1e/wDYrqilPb0X0+v6FEcr7v8Ak+5fV7/2KzrfYb5eHQ3AsNydUd/DfjKjNj4St0JBHxb/ABGumqVjF47HT+MtV+//AAaiAaeaYJxh4XO6ONTLyU8KA1uWYoI2UGyQCpR6isgHbkAkFXFP6Urz0+fMyiNzJjqwKUpWgClKUB//2Q==",
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from IPython.display import Image\n",
+ "\n",
+ "Image(app.get_graph().draw_mermaid_png())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "5d7bf45e-d615-45f7-a3d6-54700993f69e",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'answer': 'Bob has brown eyes.', 'score': 10}"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "result = await app.ainvoke({\"contents\": [doc.page_content for doc in documents]})\n",
+ "result[\"answer\"]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "337f8f18-f048-495a-ade2-870790c01f70",
+ "metadata": {},
+ "source": [
+ "Inspecting the [LangSmith trace](https://smith.langchain.com/public/b64bf9aa-7558-4c1b-be5c-ba8924069039/r) for the above run, we can see three LLM calls as before. Using the model's tool-calling features has also enabled us to remove the parsing step.\n",
+ "\n",
+ " \n",
+ "\n",
+ "## Next steps\n",
+ "\n",
+ "See these [how-to guides](/docs/how_to/#qa-with-rag) for more on question-answering tasks with RAG.\n",
+ "\n",
+ "Check out the [LangGraph documentation](https://langchain-ai.github.io/langgraph/) for detail on building with LangGraph, including [this guide](https://langchain-ai.github.io/langgraph/how-tos/map-reduce/) on the details of map-reduce in LangGraph."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "54e88df8-7d12-4ee1-9e0a-bf8a7baacba7",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/versions/migrating_chains/multi_prompt_chain.ipynb b/docs/docs/versions/migrating_chains/multi_prompt_chain.ipynb
new file mode 100644
index 00000000000..c1613464382
--- /dev/null
+++ b/docs/docs/versions/migrating_chains/multi_prompt_chain.ipynb
@@ -0,0 +1,362 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "575befea-4d98-4941-8e55-1581b169a674",
+ "metadata": {},
+ "source": [
+ "---\n",
+ "title: Migrating from MultiPromptChain\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "14625d35-efca-41cf-b203-be9f4c375700",
+ "metadata": {},
+ "source": [
+ "The [`MultiPromptChain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.router.multi_prompt.MultiPromptChain.html) routed an input query to one of multiple LLMChains-- that is, given an input query, it used an LLM to select from a list of prompts, formatted the query into the prompt, and generated a response.\n",
+ "\n",
+ "`MultiPromptChain` does not support common [chat model](/docs/concepts/#chat-models) features, such as message roles and [tool calling](/docs/concepts/#functiontool-calling).\n",
+ "\n",
+ "A [LangGraph](https://langchain-ai.github.io/langgraph/) implementation confers a number of advantages for this problem:\n",
+ "\n",
+ "- Supports chat prompt templates, including messages with `system` and other roles;\n",
+ "- Supports the use of tool calling for the routing step;\n",
+ "- Supports streaming of both individual steps and output tokens.\n",
+ "\n",
+ "Now let's look at them side-by-side. Note that for this guide we will use `langchain-openai >= 0.1.20`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "cba0c648-63e9-4f4a-b4ba-cd36fcb21466",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install -qU langchain-core langchain-openai"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d05ae3aa-96cc-49f2-8dd0-601c2503b7ca",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "from getpass import getpass\n",
+ "\n",
+ "os.environ[\"OPENAI_API_KEY\"] = getpass()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "707283ee-fbd8-43e0-9796-343fc2534658",
+ "metadata": {},
+ "source": [
+ "## Legacy\n",
+ "\n",
+ ""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "4cca7a1a-3ce5-4c60-9664-cbbff14fc7d5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.chains.router.multi_prompt import MultiPromptChain\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "\n",
+ "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
+ "\n",
+ "prompt_1_template = \"\"\"\n",
+ "You are an expert on animals. Please answer the below query:\n",
+ "\n",
+ "{input}\n",
+ "\"\"\"\n",
+ "\n",
+ "prompt_2_template = \"\"\"\n",
+ "You are an expert on vegetables. Please answer the below query:\n",
+ "\n",
+ "{input}\n",
+ "\"\"\"\n",
+ "\n",
+ "prompt_infos = [\n",
+ " {\n",
+ " \"name\": \"animals\",\n",
+ " \"description\": \"prompt for an animal expert\",\n",
+ " \"prompt_template\": prompt_1_template,\n",
+ " },\n",
+ " {\n",
+ " \"name\": \"vegetables\",\n",
+ " \"description\": \"prompt for a vegetable expert\",\n",
+ " \"prompt_template\": prompt_2_template,\n",
+ " },\n",
+ "]\n",
+ "\n",
+ "chain = MultiPromptChain.from_prompts(llm, prompt_infos)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "fcb635d2-0402-4afb-ab64-044811d5348c",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'input': 'What color are carrots?',\n",
+ " 'text': 'Carrots are most commonly orange, but they can also be found in a variety of other colors including purple, yellow, white, and red. The orange variety is the most popular and widely recognized.'}"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "chain.invoke({\"input\": \"What color are carrots?\"})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "10ff8cd1-7e47-4133-a793-1ccd61a57f04",
+ "metadata": {},
+ "source": [
+ "In the [LangSmith trace](https://smith.langchain.com/public/e935238b-0b63-4984-abc8-873b2170a32d/r) we can see the two steps of this process, including the prompts for routing the query and the final selected prompt.\n",
+ "\n",
+ " \n",
+ "\n",
+ "## LangGraph\n",
+ "\n",
+ ""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4a864796-ec89-4962-87b3-633f90687e1d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install -qU langgraph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "cf1edd2b-7592-47f4-ba8d-94a56742a585",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from operator import itemgetter\n",
+ "from typing import Literal\n",
+ "\n",
+ "from langchain_core.output_parsers import StrOutputParser\n",
+ "from langchain_core.prompts import ChatPromptTemplate\n",
+ "from langchain_core.runnables import RunnableConfig\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "from langgraph.graph import END, START, StateGraph\n",
+ "from typing_extensions import TypedDict\n",
+ "\n",
+ "llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
+ "\n",
+ "# Define the prompts we will route to\n",
+ "prompt_1 = ChatPromptTemplate.from_messages(\n",
+ " [\n",
+ " (\"system\", \"You are an expert on animals.\"),\n",
+ " (\"human\", \"{input}\"),\n",
+ " ]\n",
+ ")\n",
+ "prompt_2 = ChatPromptTemplate.from_messages(\n",
+ " [\n",
+ " (\"system\", \"You are an expert on vegetables.\"),\n",
+ " (\"human\", \"{input}\"),\n",
+ " ]\n",
+ ")\n",
+ "\n",
+ "# Construct the chains we will route to. These format the input query\n",
+ "# into the respective prompt, run it through a chat model, and cast\n",
+ "# the result to a string.\n",
+ "chain_1 = prompt_1 | llm | StrOutputParser()\n",
+ "chain_2 = prompt_2 | llm | StrOutputParser()\n",
+ "\n",
+ "\n",
+ "# Next: define the chain that selects which branch to route to.\n",
+ "# Here we will take advantage of tool-calling features to force\n",
+ "# the output to select one of two desired branches.\n",
+ "route_system = \"Route the user's query to either the animal or vegetable expert.\"\n",
+ "route_prompt = ChatPromptTemplate.from_messages(\n",
+ " [\n",
+ " (\"system\", route_system),\n",
+ " (\"human\", \"{input}\"),\n",
+ " ]\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# Define schema for output:\n",
+ "class RouteQuery(TypedDict):\n",
+ " \"\"\"Route query to destination expert.\"\"\"\n",
+ "\n",
+ " destination: Literal[\"animal\", \"vegetable\"]\n",
+ "\n",
+ "\n",
+ "route_chain = route_prompt | llm.with_structured_output(RouteQuery)\n",
+ "\n",
+ "\n",
+ "# For LangGraph, we will define the state of the graph to hold the query,\n",
+ "# destination, and final answer.\n",
+ "class State(TypedDict):\n",
+ " query: str\n",
+ " destination: RouteQuery\n",
+ " answer: str\n",
+ "\n",
+ "\n",
+ "# We define functions for each node, including routing the query:\n",
+ "async def route_query(state: State, config: RunnableConfig):\n",
+ " destination = await route_chain.ainvoke(state[\"query\"], config)\n",
+ " return {\"destination\": destination}\n",
+ "\n",
+ "\n",
+ "# And one node for each prompt\n",
+ "async def prompt_1(state: State, config: RunnableConfig):\n",
+ " return {\"answer\": await chain_1.ainvoke(state[\"query\"], config)}\n",
+ "\n",
+ "\n",
+ "async def prompt_2(state: State, config: RunnableConfig):\n",
+ " return {\"answer\": await chain_2.ainvoke(state[\"query\"], config)}\n",
+ "\n",
+ "\n",
+ "# We then define logic that selects the prompt based on the classification\n",
+ "def select_node(state: State) -> Literal[\"prompt_1\", \"prompt_2\"]:\n",
+ "    if state[\"destination\"][\"destination\"] == \"animal\":\n",
+ " return \"prompt_1\"\n",
+ " else:\n",
+ " return \"prompt_2\"\n",
+ "\n",
+ "\n",
+ "# Finally, assemble the multi-prompt chain. This is a sequence of two steps:\n",
+ "# 1) Select \"animal\" or \"vegetable\" via the route_chain, and collect the answer\n",
+ "# alongside the input query.\n",
+ "# 2) Route the input query to chain_1 or chain_2, based on the\n",
+ "# selection.\n",
+ "graph = StateGraph(State)\n",
+ "graph.add_node(\"route_query\", route_query)\n",
+ "graph.add_node(\"prompt_1\", prompt_1)\n",
+ "graph.add_node(\"prompt_2\", prompt_2)\n",
+ "\n",
+ "graph.add_edge(START, \"route_query\")\n",
+ "graph.add_conditional_edges(\"route_query\", select_node)\n",
+ "graph.add_edge(\"prompt_1\", END)\n",
+ "graph.add_edge(\"prompt_2\", END)\n",
+ "app = graph.compile()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "92ef8d86-daa6-4ff3-b722-468e7cf8bcb2",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/4gHYSUNDX1BST0ZJTEUAAQEAAAHIAAAAAAQwAABtbnRyUkdCIFhZWiAH4AABAAEAAAAAAABhY3NwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAA9tYAAQAAAADTLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlkZXNjAAAA8AAAACRyWFlaAAABFAAAABRnWFlaAAABKAAAABRiWFlaAAABPAAAABR3dHB0AAABUAAAABRyVFJDAAABZAAAAChnVFJDAAABZAAAAChiVFJDAAABZAAAAChjcHJ0AAABjAAAADxtbHVjAAAAAAAAAAEAAAAMZW5VUwAAAAgAAAAcAHMAUgBHAEJYWVogAAAAAAAAb6IAADj1AAADkFhZWiAAAAAAAABimQAAt4UAABjaWFlaIAAAAAAAACSgAAAPhAAAts9YWVogAAAAAAAA9tYAAQAAAADTLXBhcmEAAAAAAAQAAAACZmYAAPKnAAANWQAAE9AAAApbAAAAAAAAAABtbHVjAAAAAAAAAAEAAAAMZW5VUwAAACAAAAAcAEcAbwBvAGcAbABlACAASQBuAGMALgAgADIAMAAxADb/2wBDAAMCAgMCAgMDAwMEAwMEBQgFBQQEBQoHBwYIDAoMDAsKCwsNDhIQDQ4RDgsLEBYQERMUFRUVDA8XGBYUGBIUFRT/2wBDAQMEBAUEBQkFBQkUDQsNFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBT/wAARCAEvAOoDASIAAhEBAxEB/8QAHQABAAEFAQEBAAAAAAAAAAAAAAYDBAUHCAIBCf/EAFoQAAEDBAADAgcICwsJBwUAAAEAAgMEBQYRBxIhEzEIFBYiQVaUFRcyUVXS09RCU1RhcXWBkpOV0QkjMzc4c5GxsrO0JDQ2UmJ0doKhNUNFcneDw0aWwdXw/8QAGwEBAQADAQEBAAAAAAAAAAAAAAECAwQFBgf/xAA0EQEAAQMABgcHBAMBAAAAAAAAAQIDERIhMVFhkQQTFEFSodEFFSMzU3HhIkOxwTKB8EL/2gAMAwEAAhEDEQA/AP1TREQEREBERAREQERYG53KtuNwktVoeIJYg01de+PnZTgjYYwHo6UjRAPRoIc4HbWvzppmucKzNRUw0kfaTyshj/1pHBo/pKsDlNlB0bvQA/7yz9qsKbh9YmP7aroWXesI06run+UynrvoX7DRv0NAA0NAaCv/ACWsp/8ACKD2Zn7FtxZjvmeRqPKqy/LFB7Sz9qeVVl+WKD2ln7U8lbL8j0HszP2J5K2X5HoPZmfsT4PHyXUeVVl+WKD2ln7U8qrL8sUHtLP2p5K2X5HoPZmfsTyVsvyPQezM/YnwePkajyqsvyxQe0s/anlVZflig9pZ+1PJWy/I9B7Mz9ieStl+R6D2Zn7E+Dx8jUuqO50dw34rVwVOhs9jI1/9RV0sDWYHjlfozWSh7QEFsscDY5GEdxa9oDmn74KtCavCgHzVVRcrCOj5Kg9pUUXX4TnnrJEPSXbc3vJc3fK0KK9Vude6f6/6ExE7EpRfAdhfVzoIiICIiAiIgIiICIiAiIgIiICIiC0u1xjs9qra+YExUsL53gf6rWlx/qWNwm3SW7GaLxjldXVDPGquRu/Pnk8+Q9fRzEgD0AAdwCucptb73jN3t0ehJWUc1O3fdt7C0f1pjFzZecbtdcwFraimjk5XDRaS0Egj0EHoR6NLo/Z1b9fLV/a9zKIiLnRGs94jY7wys8NzyS4i30k9Qykh5YZJ5ZpnAlscccbXPe4hrjprSdAn0LXWYeFNjOMX/AaaGCvuNpyhtVN4/S2yslfDHDG4jULIHPc4vbylug5gBcRrqsr4RVqtNzxK1PudsymrmpLnHU0Nfh9M6or
7ZUNZJy1IY0EloBcwjleD2gBaRsjVguOftoeCOe5ljd4uldZqy6RXaG2Wwvrmwzwyw0s8lJHstc5rYy9rfgl56DuAblyjwgcBwvJmWC9373OuJMTXdrR1HYRGXXZiScRmKMu5hrncO9XGQcb8MxnL34rXXSbyiZHDMbdS2+pqZezlc5rH6ijdtu2kF3c3pzFvMN80+EHQZjxHouJtrrbLn1dNV0UXkpa7PDNBa3U5p2Pe6pLS1r5hJ2odFMS7zWhjSSFufhxaa2o8ILMMjntNdS0VdjFkjpqytpJIuZwdVOli24Dz27j52d7SRsBBk+CnhB2rjNcMjoqWhr6CqtVxq6VjZ6CqjjlghkEbZDLJCxjZHE7MO+dvpHQlbXWj+A1RcMSy3P8AE7tj96pKiryi53qlubqF5t89NPIJIy2oA5ObTtFm+YFp6LeCAvMkbJo3RyNa+N4LXNcNgg94IXpEEZwKV0Nqq7W9xe60VclA1xJJ7IafCCT1JET4wT6SCfSpMoxg47d+Q3Ab7Kuu0r4yRrYiZHT7/ATASD6QQVJ1vv8AzJ8/v3+aztERFoQREQEREBERAREQEREBERAREQFFucYPV1DpQG49VTOnMo3/AJFK8kvL/iic4l3N9i5zt+adtlKLZRXo5idcSsSieVcMMK4kSUlbkWM2bJHxR8tPUV9HHUFrD101zgeh7+iwh8G7hSWBh4cYuWAkhvuTBoE62fg/eH9CkUvD+1skfJb31dke8kuFrqXwRkk7J7IHs9k9SeXZ2evUrwcJqCSfKm/D7wmh+iWzQtTsr5x6ZMQ9YdwyxHh6+qfi+M2nHnVYaKh1so44DKG75eblA3rmOt/GVJlF/Imo9ar9+mh+iTyJqPWq/fpofok6u34/KTEb0oRc+8Sr1kOJ8duD+IUOUXU2nK3XYXB0ronSjxalbLH2buzAb5xO9g7HxLbXkTUetV+/TQ/RJ1dvx+UmI3srkmM2jMbNPab7bKS8Wuo5e1o66FssUnK4ObzNcCDpwBH3wFCh4NnCcd3DfFh+C0wfNUg8iaj1qv36aH6JPImo9ar9+mh+iTq7fj8pMRvY6wcC+HWK3emutmwbH7Vc6Yl0NZR22KKWMkEEtc1oI6Ejp8ay11vb7tUTWeyTNfWDzKqrYeZlE307I6drr4LO8dC7prdM4FT1HSvu14uMfTcU1c5jHfhbHyAj7x2D8Sz9Bb6W1UcVJRU0VJSxDljggYGMYPiAHQJ8O3ricz5fk1Q82y209nt1NQ0kYipqaNsUbB6GgaCukRaJmZnMoIiKAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIg5344fytPBt/nMi/wAAxdELnfjh/K08G3+cyL/AMXRCAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiIOd+OH8rTwbf5zIv8AAMXRC5344fytPBt/nMi/wDF0QgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiit1yuukr6iisdDT1jqV3JUVNZO6KJj9A8jeVri9wBG+4DetkggWXu7mH3BY/a5vo11U9GuTGdUf7hcJuihHu7mH3BY/a5vo093cw+4LH7XN9Gsuy1745wYTdY7I8focsx66WO5w+MW250stFVRb1zxSMLHt2O7bXEKM+7uYfcFj9rm+jT3dzD7gsftc30adlr3xzgw/EzjNwruXBvijfsNuDHPqLfUmOGXl/ziF3WKQf+Zhadejeu8L9gfA/4KngRwKslhqWFl5q93O6A/Y1MrW7Z/yMbHH9/k36VDuJng9S8UeMWF8Q7nQWZlyxw+dTtqJTHWhpL4BJuPp2chLvv70emluP3dzD7gsftc30adlr3xzgwm6KEe7uYfcFj9rm+jT3dzD7gsftc30adlr3xzgwm6KEe7uYfcFj9rm+jT3dzD7gsftc30adlr3xzgwm6KFeV19tTHVN3tdE+3xgumkt1TJJLE0d7hG6Mc4HUkA70OgceimUUrJ
42SRvbJG8BzXtOw4HuIK03LVVvGkYw9oiLSgiIgIiICIiAiIgIiICIiAiIg19iR2L2T3+69Z1/wDecFnlgMQ+Be/xxW/3zlrWiueZ5dx+zWw0+WSWbGcfhtNSykpqGnklldM2R0kfaSMcQxwjO/stlvK5ujv1704rllVtbpRct0HFjPjw1s/GGoyKF1ir7tBFJiHufEIoqGatFK0Nn12pnaHNeSXcu9jl0steeLeWUnATidkcV15LzZsnuFuoanxaI9jTx3EQxs5eTldqM8u3Ak95JPVaNKGLowSMMjow5pe0Alu+oB3o6/If6F6XPmBWW6QeE7xauTspuAoKOO1zT21tNTGOpY6mm5I3O7LnAj+xLXAn7IuUTwLixxhzyksGY2u03urtt1q4pXWh9FbGWtlC6XldyVHjPjXaNj27mc3Rc3XIAejSHV6IuWcy4kcRrvgfFXPrNlseOW7F6u4W23WWK2QVAl8VPZummkkBdzueHFrW6a0Buw7qFZnA6mRabsfEC/1mUcX6Sav56ew2+gntrOxjHYPkoXSvOw3btvAOnb13DQ6LV1r41cRc9ixSz2Z98dVQ4jar1drhYLfbZqipqqqIu85tXLHGyPzCf3tpJLiNsAG5pQOtUXNsOX8V77kXC3G7ncPIi6XihvMl57OjpppXCmkhEEsbSZWMe5rwS3mc0do7o7TdVMr4r5bw8i4g4jU3c3bLT4gcSramnhZJUCtLaZm2MYGOMNQ2Vx234JG+nRNIdDXAA0FSCAQYnbB/AVd8PnF+A405x242ymJP/tNWN7CamsXY1FQ6snjpuSSoe1rXSuDdF5DQACT10AB16LI8O/4v8Z/FdL/dNVu/J/3H8SvckKIi85BERAREQEREBERAREQEREBERBr3EPgXv8cVv985UrNgdvsebZJlEE1S+4X6Kkiqo5HNMTBTte1nIA0EEiR29k9w1pXM8VRh9wuIkoKutt1ZUvq4p6GB07o3P6vY9jAXfC2QQCCDroR1p+WdN8mX79SVf0S9qqmbs6dEZiWUxMzmEEpvBpxmlvFNMLnfJLDS3I3enxd9Y02uGq5zIHtj5OfQkJeGF5YHHfKqGU+DBj+UxZDRyX/JLfZr7Wm51dnoayNlKaova90oBiLvOc0OLS4s2d8u9ETq48RLXZ7fU19fS3iioaWJ009TUWeqjjijaNue5xj01oAJJPQAK2x/itYcstEF1sgud3tlQCYayhtVTNDIAdHle2Mg9QR0K19RX4ZNGdy2q+ElBLxJOa0V4vFouM8UENfSUNQxtLcGwl3ZduxzHEloe4baWnR0sRivAC14PeYqmx5Fktvs0FU+shxmK4AWyOR5cXBrOTnDC5xd2fPybPwVMfLOm+TL9+pKv6JPLOm+TL9+pKv6JXqK/DJozuRryi4peouMf/dc/wD+vWu+NHg1TX7Cc+qcUuF9oLzkVLJUVGM0FyjZbayucwAvcJGDRcQOYh7A7XnBbp8s6b5Mv36kq/ok8s6b5Mv36kq/olJsXJ20ymjKHX/gLbMmuVdczer9YZ7vb4rfd6a0VbIoq5kbHNZ2m2OcHND3N5o3NJHQ7CtZvBtsMVNjJtF9yHHLnYrRDYortaauOOpqqOJoDI5+aNzH6I5t8gIJOtKd+WdN8mX79SVf0S+HNKVoJNsvoA7ybJV/Rq9RX4ZXRncxtNwttsGQ4len19zq67GrfUW6mkq6kSmdkwiD3zOc0ue/95b52x3u2D01EMj4a1+e+ELjOTXKxxUFlw6Co8TuD6pj5LnNOyPlAib1YyIiQ7eQS/RA11UpxfjNi+bUc9Xj1RW32lgmdTzTW23VFQyOUAEscWMIDgCDo9eoWZ8s6b5Mv36kq/ok6ivwymjLMV/+Y1P827+oq54d/wAX+M/iul/umqOzXqqvlPJR2q1XIVU7TG2auoZaWKLY1zuMjRsDe9AEnWlN7NbY7LaKG3xEuipIGQMJGthrQ0f1LTf/AEW9CduV2QvERF5zERE
QEREBERAREQEREBERARFGeI3EfHuE+I1uTZRcG2yz0YHaTljnkuJ01rWtBJJJAAAQSZQTiHxOqMUslLV43jVfn1bUXJtr8TsksR7CTrzume52o2s5SCT3HQOt7HllRmWQ59a6ygnsnvYzWrtZRIyU3CrnkBLOUEBsbGt5T16nnI16W5LhpwuxjhBi8eP4na47VbGyOndG17nvlldrmke9xLnOOgNk9wAHQAILO38O6+HiXfMor8sutztdfRMoqfGJxGLfSN03ncGgbe9xafOJ3p7gdjWppDDHTQsiiY2KKNoaxjBprQOgAA7gvaICIiAiIgIiIIPxN4b1eb437n2HJ7lglwbXMrxcrI1gfJI3e2ytI1Ix2+rTrehvY2D6pc/uTOJlfilbil1pbVT29tdBlUjozQ1GuUSMcQQY3guGmkbIa46DQCZsra426lvFvqqCup46uiqonQT08zQ5ksbgWua4HoQQSCPvoPVFXU1yo4KujqIqqlnYJIp4Hh7JGEbDmuHQgjqCFXWqG8Nr5wpsGJ2HhJTWW3Y9RXN0lztt4fO8vpZXl0nYy7cQ5pe5wDt75WjehoynGOK+L5jmOS4rarmKi/Y7IyO40bonsdFzNBa4FwAc3rrYJ6g/e2EuREQEREBERAREQEREBERAREQQnKc8vFk4g4tjduw653qiuvayV17hexlJbYmDveSfOeXFmmdCQSW8xaQvOCcNqnFmX/3bye55m+7XJ1eG3gMdFSNDgYooYw3lYGBrO7vc3mAbvSw3Am14va3cRPJjIarIDU5hcKi6Cp3/AJDXu7PtqVm2t8xmm67+/vK2igIiICIiAiIgIiICIiAiIgLA5diUWVY/ebdFW1djqrpSmkfdrU8Q1kTdO5SyTRILeZxHxcx13rPIg1fBkOT8Na/h/iNVaL3nlNXROpLhmDBE0087WgtfPENaY4B+3b6aaNvc5bQUZ4n0tsreGuW096uEtps01oq462vh+HTQGF4klb0PVrduHQ93cVS4TUlpoOF2IU1huUt4skNopI6G4z/wlVAIWiOV3QdXN0T0Hf3IJWiIgIiICIiAiLy+RkY29waP9o6QekVLxqH7dH+cE8ah+3R/nBXEiqtD+Fj4TVf4MNgsN6iwx2VW24VElLUTi5eKClkDQ6Np/epObnAl+LXZ+nfTefjUP26P84KDcbuGlq418LMhw64TxRsuVOWwzkg9hO0h0Un/ACvDSR6RselMSOJ+H/7pRTG/+42J8DaeC55Hde2fBR34MNZXTua0yO1SdXuPLtx+JfosvzQ/c5PBzq6fitkWX5VRilOJTy2qmgn1/wBofBkcPQezZsfhlaR8FfpV41D9uj/OCYkVUVLxqH7dH+cE8ah+3R/nBMSKqKkKmEnQlYT/AOYKqoCIiAiIgIiICLw+aOM6dI1p+IkBefGoft0f5wVxIqoqXjUP26P84J41D9uj/OCYkcf+FX4cEnAzO7ngV74XMyOzVtA18dVPeOxjr6aVhbIDF4u8AB3aRkcx3y76b0qfgp+HC7jdnlp4fWLhYzG7NR0L3Pqaa8CaK300MfLGBF4uzbS7sowA4a5wfRpZf90U4JxcVeDDsitrWy5BinPWxtZoumpSB4wz8ga2QfzZA+Erb9zi4KRcMuDpym5sZFfsrLaoB/R8NG3fYM+9zbdJ07w9m+rUxI67RUvGoft0f5wTxqH7dH+cExIqoqXjUP26P84L0yeOQ6bI1x+IOBTEj2iIoLW6VvubbKur5ebsIXy8vx8rSf8A8LXlrxK1X63UlyvNvpLxcqqFk01TXQNmdtwBLW8w81g7g0aGh8eypzlX+jF4/wBzm/sFR7Gv9HLV/ukX9gL0ujzNFuaqZxOWWyFl732LerVn9gi+anvfYt6tWf2CL5qilr8I7h5eLnSUNNf3drVVj7fDLPQVMNO+pa8sMPbPjEfaczSA0u2emgdjeTg404fV5jU4tT3SWqv1LVCjqaSnoKiTxeQsa8do9sZaxpa
4ae4hpIIB20gbevueOeaZnezHvfYt6tWf2CL5qe99i3q1Z/YIvmqP4zx6wPMcjjsVnyGKsuMxlbTjsJWQ1Rj32ggmcwRzcuiT2bnaAJ9CxLfCk4YONMRk2o6pzo4JzQVQhllbvcLZOy5XS9COyB596HLshO0V+OeZmd6be99i3q1Z/YIvmp732LerVn9gi+ao4eP2BsxSXI330xWqG4MtUz5aKoZLBVOIDYpITGJGOPM34TR0IPd1Vjd/CIxODAcxyO01E10mxmkdPVWuSjqKaqa/lJia+J8QkY15Gu0LOUDmcTppIdfc8c8zM70x977FvVqz+wRfNT3vsW9WrP7BF81WfDHiFQ8TsPob7QxVMLZWMEsVVRz0xZIWNc4NEzGF7RzDTwC0+gnqshmWSyYlj1RcobTX32oYWsht1sjD5p5HODWtGyA0bOy5xAaASToK9fc26U8zM71P3vcWP/01Z/YIvmq8xIiyZLU2KnJbbXUbauCnJ2IHc5a9rPiadtIbvQO9aBUX4G8QqvitwrsWV11HFb6m5NlkdSwuLmxBsz2Nbs95AaNnps7Oh3KTWr+Ms/ig/wB8FJrqu26oqnMYyuZnanCIi8hiIiICjeeXOot9ngjppnU81bVw0fbM+FG17tOLeh07lDtHXQkKSKIcSf8AMrJ+N6b+0V09GiKr1MSsbWLHD7GNefj9smeerpJ6Vkkjz8bnOBLj98klffe+xb1as/sEXzVcZdllqwXGrjkF8qvEbRb4jPU1HZvk7Ng7zysBcfwAFR7G+NmGZZcK2htt3c6to6U1slPU0c9NI6nHfNG2VjTLH3eczmHUdeoXf19zxzzMzvZn3vsW9WrP7BF81Pe+xb1as/sEXzVGLR4Q3D6+49WX6kyAGxUdKysmuc1HUQ0wjcQGgSvjDXP2Q0xtJeHeaWg9FUoeP+A3DHbzfGX8Q2+zGIXE1dJPTy0gkIEbpIpGNka1xPR3LogE70Dqdfc8c8zM70j977FvVqz+wRfNT3vsW9WrP7BF81Qp/hP8No5aqF1+qBU0zBNJS+5Nb2/YkE9s2Psed0Wh/CtBYOnndQsre+PODWB9oZUXp9Q+7UBudA230NRWGppgWgyMEMb9jz2nXfrZ1oEh19zxzzMzvSD3vsW9WrP7BF81Pe+xb1as/sEXzVr/ADvwl8ZxWxYTera6TILTk10bQx1dBT1EwijAcZH8scbnGRpaG9kQHkl2geRwG1rbcIbtbqWup+08XqYmTR9tE6J/K4AjmY8BzTo9WuAI7iAVYv3J/wDc8zM72J977FvVqz+wRfNXx3D3FyPNx62RO9EkNIyN7T8bXNAIP3wdqJcXMg4jY3FcLrjEOLx2C1W2SvqZb26d01Q9ge50TBGWtiAY0HncXdXfB6FS/Acp8uMGx7I/FH0Huvbqev8AFZDt0PaxtfyE9N65tb+8nX3M40p5yZnezmB3GorbXVwVMzqmSgrJaMTSHb3tbotLjobdyuAJ9JG1JVD+HH8HkP43m/sRqYLg6RERdqiCdrF5V/oxeP8Ac5v7BUexr/Ry1f7pF/YCkmRwvqMeukUbS6R9LK1rR6SWEBRrF3tkxq0uadtdSQkH4xyBb7PyZ+/9Hc49x6vuOf8Ag/jhpZcXv1TdLhklWBeXUDmW2kjbeZJn1BqT5u2BpHKPO5hrXx7pwXCblVVXHWmfSz2me+3iWOirZ4XRiWN1vgjbIxxHnNa8v6jY2Hena2pieI2nB7Ky02Sk8St7JpqhsPaPk0+WV0sh28k9Xvcdb0N6GhoLMJFO9HK2OUd+ymxcFcIiwm92C5YZcKKqvFfX0RhooGUlO+J4hn+DMZnOAHZl3RxLtL1i+GX6n4McEqGWxXGOtt+bNq6ymfRyCSmh7etPayN1tjNPYeY6HnDr1C6nRNEch8XqSvxrK85u9RabgaGbiDilVSCKmdutDIqUP7DehI7naW9D8IaPVSa52K+8ZMo4l5Fbcbu1it9Xg02L0LL5SminuFU
8yv5xE/TmsZzNaHOA2XnXRb+ybEbTmNNRU93pPG4qKtguMDe0ezkqIXiSJ/mkb5XAHR2D6QQswpo6xAOB+SyX7h5Z6eeyXmxVlto6eiqKe80ElK/tGRNDuTmHntBBHM3YKn6jeW8NMTz2WmlyTGrVfpKZrmwvuNHHOYwdbDeYHW9Du+JfcS4b4pgL6p+NY5a7C6qDRObdSMgMobvl5uUDeuY638ZWUZEL8Fay3DHeAOJW660FTbLhBFMJaSshdFLGTUSEczHAEbBB6juIWwrV/GWfxQf74LKrGWhhfxIkeOrY7SA7p3c03m/08rv6Fsp1UVRwWE2REXlIIiICiHEn/MrJ+N6b+0VL1EuI7C632h+vNju1KXHXdt/KP+rgPyrq6L86llTtat8Lb+TXxD/FUn9YUPu9TdeL3E7HrtbsTyCy2zGLNdW1VZere6kdVTVMDI46aFrvOk0Wl5cNt81uiSVvXLsTtWdY1ccfvlL49aLhEYKmn7R8faMPeOZhDh+EELLNaGtAHQAaC3TGZYuaX4DfG+Ctwpgp7DVz3DGpLJd6/H+x7OpnbA5j54ezfr986udyO1tzdd5UY4v2fIOLTuIeVWfEr9RW12NUNjp6WvtskFZcahtwE73MpyO05I2HXMWjfM7WwNrr5E0RqpljrneFFVXV1vqDan4ZFSeOmF3YGXx2Rxi59cvNykHl3vR2tB8JL1WcKL5wi928cyGetg4fVtNPbaC1yz1kDvHacjngA5wOgHd0Lm76dR2isPLiNpmy6nyd9Ju+U9FJboqrtH+bTveyR7OXfKdujYdkb6dDolJp3DmZmHZPaOH9nyqfF7o1zuJL8wmsFND21dSUMvasA7Jm9yAPbI5jdkczvSCuo7HdmX60UlwjpqujZUxiQQV0DoJ2A+h8burT94q+UKvnBLh9k11qLnd8JsFzuNS4Omq6u3RSSyHQG3OLdnoAPyJEY2DWHhR5PcK6vx7BhjeU3HFrjIKvI6+w2aprA6kYSW0bXRNI5pXNAf1Bazf+uFuvD75T5JjVBcaW3V1oppWER0Vyo3Uk8LWktAdC4As+D0BHdo+lV8dxm04haYbXY7bSWi2wlxjpKKFsUTC4lztNaABskn8qySsRryLLhx/B5D+N5v7EamCiPDlhFPfX/YSXactOu/Qa0/8AVpH5FLlo6T82pZ2iidVw+b28j7Ze7lY4XuLzS0YgfCHHqS1ssT+XZ66aQNknXVSxFpouVW/8ZM4Q3yAuHrne/wBBRfV08gLh653v9BRfV1MkW7tNzhyj0Mob5AXD1zvf6Ci+rp5AXD1zvf6Ci+rqZInabnDlHoZc88BLvkvFN3EYXTKq+m8m8yuOO0nilNSN7Snp+z5Hyc0LtvPOdkaHdoBbU8gLh653v9BRfV1qjwOPh8cv/VC9/wDwrolO03OHKPQyhvkBcPXO9/oKL6unkBcPXO9/oKL6upkidpucOUehlDhgFw31zO96/maL6us7YcepcfgkbAZJ55nc89VUO5pZndwLjodw6AAAAdwCyiLCu/crjRmdX2iP4MiIi0IIiICt6+gp7pRzUlXCyoppmlkkUg2HBXCKxMxOYEPdw+qmHlp8tvdPCPgxkUsvKPi5nwOcfwuJP3158gLh653v9BRfV1MkXT2m7w5R6LlDfIC4eud7/QUX1dPIC4eud7/QUX1dTJE7Tc4co9DLU3FK03rB+GOX5HQ5fdZq2z2esuEEdRT0ZjdJFC+RocBACWktG9EHXpCo8Irde8/4VYdk1fl10hrrzZ6S4Tx01PRiJkksLXuDQYCQ0Fx1sk69Kz/hC/xBcS/+Gbn/AIWRW3g0/wAnbhh/wzbf8NGnabnDlHoZZfyAuHrne/0FF9XTyAuHrne/0FF9XUyRO03OHKPQyhvkBcPXO9/oKL6uvreH1W88s+XXueI/CjDaSPmHxczIA4fhBB++piidpu8OUehlbW+301pooaSjhZT00LeVkbBoAf8A96fSrlEXNMzM5lB
ERQEREBERBzt4HHw+OX/qhe//AIV0SuUrZeMh8EDLM5q8lx+a/cNMpySsyN2TWRrpZrQ+oLeaOqp/hdm0MH74zf5S4NHS2KZdZc6sNLeseulLeLVVN5oaujlEjHfGNjuI7iD1B6HSDLoiICIiAiIgIiICIiAiIgIi8ySNijc97gxjQXOc46AA7ySggPhC/wAQXEv/AIZuf+FkVt4NP8nbhh/wzbf8NGtW8UuOUvG+gyThlwhtAzOquFJPa7rkr5DFZrUyWMseTOAe2kDXEhke/j2dELefC7D5OHvDXFMXlqW1ktltVLbn1LGlrZXRRNYXAEnQPLvSCUIiICIiAiIgIiICIiAiIg+Oa17S1wDmkaII2CFz1lngx1+H36qy/gleYsFyGZ3a1lhmaXWS6n4pYB/BOPdzxga66AJJXQyINFcNvCjortkkWE8RLRLw34hdGtttyeDS1/XQfSVHwJAT3N3vfQc2iVvVRTiTwsxXi7jkljy2y015t79lrZm6khd/rxvGnMd99pBWgL3cuIXgaWqa51lzk4mcIKPlEpuVRHHerPGXBjQ2R5a2pZtzWhpIdstA5QOodVIvzm8FHw87/l3hDXa0ZnXubjWXVzzaqaoexwtMx02ngY8NbthY1kZ6Db9P0HPkLv0ZQEREBERAREQEXH/7oT4UlVwbxGmw/Fbi+izC+xl8tVTOAloKPq0vae9j3kFrXDq0NeQWuDSsPwZ8J3ij4U2F27HsEpKLHr3b6SGDKMyu0kU3YSkOaJKalYBzPlDHPHM1rGnmZ9iHoOjeMXhB4hwVgp4bxUzV9/rdNt+O2qPxi4VrydNEcQ66J6cztD0b30WroeFPEfwlpG1vFiqkwnBXkPhwCzVJE9UzvHj9S3RO/TGzQ7vguC2Rwf8AByxXhDPUXaLxnIswrdmvym9SeMV9S4/C08/Ab6OVuugG962tqIMVjGK2fCrHS2aw2yltFqpW8kNJRxCONg/APSe8nvJ6lZVEQEREBERAREQEREBERAREQEREBad8J7wfqrwjsHpsZizO4YnRNqBPVRUtOyaGtAG2NmZtrnBjgHNAeG76lriGOZuJQbitms2K2mCkt7wy7XEujhkLQ7sGNHny6PQlu2gb2OZ7dggELdZs19IuRao2yPzk4gfudsuDVZprXxGpb1doyHiljtj4ZI+4gvIkc1nx+c4EjRAK7TwbjLllkw2z2++2ulvt6paWOGquRrjT+MvA0Xlgjfonpvr1Ozob0IxHE2IO1slzi97nEuc9xOy5xPUkkkknqSV6X29n2P0W3TiuNKeMzH8YMxubA9/m8eqtH+t3fV09/m8eqtH+t3fV1r9Fv919C+n51eqaXBsD3+bx6q0f63d9XT3+bx6q0f63d9XWv15llZBE+SR7Y42Auc9x0Ggd5J9AT3X0L6fnV6mlwbC9/m8eqtH+t3fV19HHm7debFaTX+zdnE/9YAtcUVdTXOjgq6Ooiq6SdgkingeHskYRsOa4dCCOoIVZPdnQp/b86vU0uDl/O/BVyPjfxcv+SZhnNFY3XWqL4amopHyRRx90cW2u0xrGBrQXkDp3k9/Tvgr+BFW+DTl9VfI+JNdd6aspjDVWemt7aalqXfYOl5nyF3IS8tLeRwLvhcpe1/sgEEEbB9C2BwfzGW3XKHGat/NQzMPucT/3LmjZgH+yWguaPseVw7uUDxfaHsmi3RN3o/dtjhwXa3MiIvlQREQEREBERAREQEREBERAREQEREBaH4xTOm4jdm4+bBa4OzafRzyzcx/LyNH/ACrfC09xxsL4LlbMhjYTC6P3PqnAfA87mhcfiHM57fwvava9kV009LiKu+JiPv8A9qVrpFRrX1EVFUPpIY6iqbG4xRSyGNj3681rnAO5QToE6OviPcooy854XtDsUsYbvqRkEpIH4PFF93VXFO3+JlrTFc823iPxHy2mkyOwW661NK6skZS2xtJQ+IyQRzOjIfK+YTh5DSS4AAO6BpHU7UF6z3Y
3iljA/wCIJfqatqHhBb7TfJK62Xq+WujlrPH5LPSVgZRvmLuZx5eXmAcRstDg07PTS5LsV3pjQmYjv7vtthUDyDMsyhtPEnIKTIhBT4rc5I6a2mhhcyeNkUMjmSPI5tEPIBaWkdSS7oBlL/fMkz285nQ2m+DHbTYKSON0baOOeSsmlp+2POX/AAWBrmtAbok7PMOimVXwstNbYsvtL6isFPk88lRWOa9nPG58bIyIzy6A1G3Ww7qSrO+cHbZeLzUXOnu15slRWUzKSuFrqmxsrGMaWs7UFjvODSQHN0delaps3t8zH3nj+OQueCn8T2E/iak/uWqaKD0NDkmE2u32DHrFb7nZ7bSw0tPVXC8ugne1jA3z2NpnDfTvB69+h3Kubznmm6xSxk667yCXofZPwLqt1xRRFMxOYjdPoiYr3STOpbxZqhnSSK5Uhb8Z3MxpH5QSPyrE4/VXeronvvVvpLbVCQhsVHWOqmFmhpxc6OPR3sa0e4deuhLcEsL8lzW2QBpNNQysuFS/XRoYdxDfxmQNIHxMd8St65TTZqrq2YllTtdGIiL8vUREQEREBERAREQEREBERAREQEREBUK6hp7nRT0lXCyopp2GOSKQba9pGiCFXRWJmJzA0VlXCi82CeSW0QuvVs72xteBVQj4iHECQD4web4wT1MOfFWQkiW03aFw+xkttQ0/2Ov5F1Mi+is+271FOjcpirjsldU7XK+5/k+5fq+f5ibn+T7l+r5/mLqhFv8Af1X0/P8ACYhyvuf5PuX6vn+Ym5/k+5fq+f5i6oRPf1X0/P8ABiHK+5/k+5fq+f5i+gVDujbbc3H4m26ck/gHJ1XU6J79q+n5/gxDnSw4HkmSytEFsltdMT51XdIzCGj06iOpHH7xDR/tBbwxHEaHDrWKOjDpHvPPPUSa7SZ+vhOI/oAHQAABZtF5PTPaN7pn6atVO6P7BEReWCIiAiIgIiICIiD/2Q==",
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from IPython.display import Image\n",
+ "\n",
+ "Image(app.get_graph().draw_mermaid_png())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8a665dd1-7459-4511-8556-418f15cfec57",
+ "metadata": {},
+ "source": [
+ "We can invoke the chain as follows:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "61838f81-4e60-445f-9c05-563e3520ab33",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'destination': 'vegetable'}\n",
+ "Carrots are most commonly orange, but they can also come in a variety of other colors, including purple, red, yellow, and white. The different colors often indicate varying flavors and nutritional profiles. For example, purple carrots contain anthocyanins, while orange carrots are rich in beta-carotene, which is converted to vitamin A in the body.\n"
+ ]
+ }
+ ],
+ "source": [
+ "state = await app.ainvoke({\"query\": \"what color are carrots\"})\n",
+ "print(state[\"destination\"])\n",
+ "print(state[\"answer\"])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e7e46205-9d80-45b8-a3d5-cfbc8ebbe19a",
+ "metadata": {},
+ "source": [
+ "In the [LangSmith trace](https://smith.langchain.com/public/1017a9d2-2d2a-4954-a5fd-5689632b4c5f/r) we can see the tool call that routed the query and the prompt that was selected to generate the answer.\n",
+ "\n",
+ " \n",
+ "\n",
+ "## Overview\n",
+ "\n",
+ "- Under the hood, `MultiPromptChain` routes the query by instructing the LLM to generate JSON-formatted text, and parses out the intended destination. It takes a registry of string prompt templates as input.\n",
+ "- The LangGraph implementation, implemented above via lower-level primitives, uses tool-calling to route to arbitrary chains. In this example, the chains include chat model templates and chat models."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "cf89de23-377b-4933-839c-d2f2483d09d2",
+ "metadata": {},
+ "source": [
+ "## Next steps\n",
+ "\n",
+ "See [this tutorial](/docs/tutorials/llm_chain) for more detail on building with prompt templates, LLMs, and output parsers.\n",
+ "\n",
+ "Check out the [LangGraph documentation](https://langchain-ai.github.io/langgraph/) for detail on building with LangGraph."
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/versions/migrating_chains/refine_docs_chain.ipynb b/docs/docs/versions/migrating_chains/refine_docs_chain.ipynb
new file mode 100644
index 00000000000..51fd1bbcc43
--- /dev/null
+++ b/docs/docs/versions/migrating_chains/refine_docs_chain.ipynb
@@ -0,0 +1,452 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "32eee276-7847-45d8-b303-dccc330c8a1a",
+ "metadata": {},
+ "source": [
+ "---\n",
+ "title: Migrating from RefineDocumentsChain\n",
+ "---\n",
+ "\n",
+ "[RefineDocumentsChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.refine.RefineDocumentsChain.html) implements a strategy for analyzing long texts. The strategy is as follows:\n",
+ "\n",
+ "- Split a text into smaller documents;\n",
+ "- Apply a process to the first document;\n",
+ "- Refine or update the result based on the next document;\n",
+ "- Repeat through the sequence of documents until finished.\n",
+ "\n",
+ "A common process applied in this context is summarization, in which a running summary is modified as we proceed through chunks of a long text. This is particularly useful for texts that are large compared to the context window of a given LLM.\n",
+ "\n",
+ "A [LangGraph](https://langchain-ai.github.io/langgraph/) implementation confers a number of advantages for this problem:\n",
+ "\n",
+ "- Where `RefineDocumentsChain` refines the summary via a `for` loop inside the class, a LangGraph implementation lets you step through the execution to monitor or otherwise steer it if needed.\n",
+ "- The LangGraph implementation supports streaming of both execution steps and individual tokens.\n",
+ "- Because it is assembled from modular components, it is also simple to extend or modify (e.g., to incorporate [tool calling](/docs/concepts/#functiontool-calling) or other behavior).\n",
+ "\n",
+ "Below we will go through both `RefineDocumentsChain` and a corresponding LangGraph implementation on a simple example for illustrative purposes.\n",
+ "\n",
+ "Let's first load a chat model:\n",
+ "\n",
+ "```{=mdx}\n",
+ "import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
+ "\n",
+ "\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "34fc8315-4354-4d4e-952a-c0465d93b23c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# | output: false\n",
+ "# | echo: false\n",
+ "\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "\n",
+ "llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9950d6e1-7ca0-4b46-8622-813b3c30b85d",
+ "metadata": {},
+ "source": [
+ "## Example\n",
+ "\n",
+ "Let's go through an example where we summarize a sequence of documents. We first generate some simple documents for illustrative purposes:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "abb1abb0-0c5e-4179-8431-c2b2d52bd57b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_core.documents import Document\n",
+ "\n",
+ "documents = [\n",
+ " Document(page_content=\"Apples are red\", metadata={\"title\": \"apple_book\"}),\n",
+ " Document(page_content=\"Blueberries are blue\", metadata={\"title\": \"blueberry_book\"}),\n",
+ " Document(page_content=\"Bananas are yellow\", metadata={\"title\": \"banana_book\"}),\n",
+ "]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ffd939c9-1717-4afd-a615-4a1d560ca814",
+ "metadata": {},
+ "source": [
+ "### Legacy\n",
+ "\n",
+ "\n",
+ "\n",
+ "Below we show an implementation with `RefineDocumentsChain`. We define the prompt templates for the initial summarization and successive refinements, instantiate separate [LLMChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.llm.LLMChain.html) objects for these two purposes, and instantiate `RefineDocumentsChain` with these components."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "4b2dd248-8bf3-40a5-9569-df32558b5d21",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.chains import LLMChain, RefineDocumentsChain\n",
+ "from langchain_core.prompts import ChatPromptTemplate, PromptTemplate\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "\n",
+ "# This controls how each document will be formatted. Specifically,\n",
+ "# it will be passed to `format_document` - see that function for more\n",
+ "# details.\n",
+ "document_prompt = PromptTemplate(\n",
+ " input_variables=[\"page_content\"], template=\"{page_content}\"\n",
+ ")\n",
+ "document_variable_name = \"context\"\n",
+ "# The prompt here should take as an input variable the\n",
+ "# `document_variable_name`\n",
+ "summarize_prompt = ChatPromptTemplate(\n",
+ " [\n",
+ " (\"human\", \"Write a concise summary of the following: {context}\"),\n",
+ " ]\n",
+ ")\n",
+ "initial_llm_chain = LLMChain(llm=llm, prompt=summarize_prompt)\n",
+ "initial_response_name = \"existing_answer\"\n",
+ "# The prompt here should take as an input variable the\n",
+ "# `document_variable_name` as well as `initial_response_name`\n",
+ "refine_template = \"\"\"\n",
+ "Produce a final summary.\n",
+ "\n",
+ "Existing summary up to this point:\n",
+ "{existing_answer}\n",
+ "\n",
+ "New context:\n",
+ "------------\n",
+ "{context}\n",
+ "------------\n",
+ "\n",
+ "Given the new context, refine the original summary.\n",
+ "\"\"\"\n",
+ "refine_prompt = ChatPromptTemplate([(\"human\", refine_template)])\n",
+ "refine_llm_chain = LLMChain(llm=llm, prompt=refine_prompt)\n",
+ "chain = RefineDocumentsChain(\n",
+ " initial_llm_chain=initial_llm_chain,\n",
+ " refine_llm_chain=refine_llm_chain,\n",
+ " document_prompt=document_prompt,\n",
+ " document_variable_name=document_variable_name,\n",
+ " initial_response_name=initial_response_name,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "06ee50d8-5f37-4bcd-9181-5280b54b1b44",
+ "metadata": {},
+ "source": [
+ "We can now invoke our chain:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "8686f56f-992f-4556-a74c-8d3903d0db38",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'Apples are typically red in color, blueberries are blue, and bananas are yellow.'"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "result = chain.invoke(documents)\n",
+ "result[\"output_text\"]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "22d5f5d2-b7f3-431c-bb25-fa1b4d353663",
+ "metadata": {},
+ "source": [
+ "The [LangSmith trace](https://smith.langchain.com/public/8ec51479-9420-412f-bb21-cb8c9f59dfde/r) is composed of three LLM calls: one for the initial summary, and two more updates of that summary. The process completes when we update the summary with content from the final document."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8f5caba4-f363-4bcf-8dd4-1d015e27a18d",
+ "metadata": {},
+ "source": [
+ " \n",
+ "\n",
+ "### LangGraph\n",
+ "\n",
+ "\n",
+ "\n",
+ "Below we show a LangGraph implementation of this process:\n",
+ "\n",
+ "- We use the same two templates as before.\n",
+ "- We generate a simple chain for the initial summary that plucks out the first document, formats it into a prompt and runs inference with our LLM.\n",
+ "- We generate a second `refine_summary_chain` that operates on each successive document, refining the initial summary.\n",
+ "\n",
+ "We will need to install `langgraph`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bda06266-f4fe-43cf-9044-0ce5ee76c793",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install -qU langgraph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "3477bef6-97cc-492f-87fe-cf5336edd581",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import operator\n",
+ "from typing import List, Literal, TypedDict\n",
+ "\n",
+ "from langchain_core.output_parsers import StrOutputParser\n",
+ "from langchain_core.prompts import ChatPromptTemplate\n",
+ "from langchain_core.runnables import RunnableConfig\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "from langgraph.constants import Send\n",
+ "from langgraph.graph import END, START, StateGraph\n",
+ "\n",
+ "llm = ChatOpenAI(model=\"gpt-4o-mini\", temperature=0)\n",
+ "\n",
+ "# Initial summary\n",
+ "summarize_prompt = ChatPromptTemplate(\n",
+ " [\n",
+ " (\"human\", \"Write a concise summary of the following: {context}\"),\n",
+ " ]\n",
+ ")\n",
+ "initial_summary_chain = summarize_prompt | llm | StrOutputParser()\n",
+ "\n",
+ "# Refining the summary with new docs\n",
+ "refine_template = \"\"\"\n",
+ "Produce a final summary.\n",
+ "\n",
+ "Existing summary up to this point:\n",
+ "{existing_answer}\n",
+ "\n",
+ "New context:\n",
+ "------------\n",
+ "{context}\n",
+ "------------\n",
+ "\n",
+ "Given the new context, refine the original summary.\n",
+ "\"\"\"\n",
+ "refine_prompt = ChatPromptTemplate([(\"human\", refine_template)])\n",
+ "\n",
+ "refine_summary_chain = refine_prompt | llm | StrOutputParser()\n",
+ "\n",
+ "\n",
+ "# For LangGraph, we will define the state of the graph to hold the\n",
+ "# document contents, the current index, and the running summary.\n",
+ "class State(TypedDict):\n",
+ " contents: List[str]\n",
+ " index: int\n",
+ " summary: str\n",
+ "\n",
+ "\n",
+ "# We define functions for each node, including a node that generates\n",
+ "# the initial summary:\n",
+ "async def generate_initial_summary(state: State, config: RunnableConfig):\n",
+ " summary = await initial_summary_chain.ainvoke(\n",
+ " state[\"contents\"][0],\n",
+ " config,\n",
+ " )\n",
+ " return {\"summary\": summary, \"index\": 1}\n",
+ "\n",
+ "\n",
+ "# And a node that refines the summary based on the next document\n",
+ "async def refine_summary(state: State, config: RunnableConfig):\n",
+ " content = state[\"contents\"][state[\"index\"]]\n",
+ " summary = await refine_summary_chain.ainvoke(\n",
+ " {\"existing_answer\": state[\"summary\"], \"context\": content},\n",
+ " config,\n",
+ " )\n",
+ "\n",
+ " return {\"summary\": summary, \"index\": state[\"index\"] + 1}\n",
+ "\n",
+ "\n",
+ "# Here we implement logic to either exit the application or refine\n",
+ "# the summary.\n",
+ "def should_refine(state: State) -> Literal[\"refine_summary\", END]:\n",
+ " if state[\"index\"] >= len(state[\"contents\"]):\n",
+ " return END\n",
+ " else:\n",
+ " return \"refine_summary\"\n",
+ "\n",
+ "\n",
+ "graph = StateGraph(State)\n",
+ "graph.add_node(\"generate_initial_summary\", generate_initial_summary)\n",
+ "graph.add_node(\"refine_summary\", refine_summary)\n",
+ "\n",
+ "graph.add_edge(START, \"generate_initial_summary\")\n",
+ "graph.add_conditional_edges(\"generate_initial_summary\", should_refine)\n",
+ "graph.add_conditional_edges(\"refine_summary\", should_refine)\n",
+ "app = graph.compile()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "0f1f0f00-5378-4687-987f-5feec0805d7a",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/4gHYSUNDX1BST0ZJTEUAAQEAAAHIAAAAAAQwAABtbnRyUkdCIFhZWiAH4AABAAEAAAAAAABhY3NwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAA9tYAAQAAAADTLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlkZXNjAAAA8AAAACRyWFlaAAABFAAAABRnWFlaAAABKAAAABRiWFlaAAABPAAAABR3dHB0AAABUAAAABRyVFJDAAABZAAAAChnVFJDAAABZAAAAChiVFJDAAABZAAAAChjcHJ0AAABjAAAADxtbHVjAAAAAAAAAAEAAAAMZW5VUwAAAAgAAAAcAHMAUgBHAEJYWVogAAAAAAAAb6IAADj1AAADkFhZWiAAAAAAAABimQAAt4UAABjaWFlaIAAAAAAAACSgAAAPhAAAts9YWVogAAAAAAAA9tYAAQAAAADTLXBhcmEAAAAAAAQAAAACZmYAAPKnAAANWQAAE9AAAApbAAAAAAAAAABtbHVjAAAAAAAAAAEAAAAMZW5VUwAAACAAAAAcAEcAbwBvAGcAbABlACAASQBuAGMALgAgADIAMAAxADb/2wBDAAMCAgMCAgMDAwMEAwMEBQgFBQQEBQoHBwYIDAoMDAsKCwsNDhIQDQ4RDgsLEBYQERMUFRUVDA8XGBYUGBIUFRT/2wBDAQMEBAUEBQkFBQkUDQsNFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBT/wAARCAEvAQsDASIAAhEBAxEB/8QAHQABAAICAwEBAAAAAAAAAAAAAAYHBQgBAwQCCf/EAFYQAAEDBAADAggICAgMBQUAAAEAAgMEBQYRBxIhEzEIFBUWIkGU0xcyUVRVVmGVI1NxkpPR0tQzQlJ0dYKxsgkkJTY3OHJzgZGztDQ1Q2JjZIOho9X/xAAbAQEBAAMBAQEAAAAAAAAAAAAAAQIDBAYFB//EADQRAQABAgMFBgMIAwEAAAAAAAABAhEDElEEExQxkSFBUmGh0QUVUyMzcYGxweHwIjLxQv/aAAwDAQACEQMRAD8A/VNERAREQEREBERAXVUVUNJGZJ5o4Ix/HkcGj/mVhblcay518tqtEniz4eXxu4OjD20+xsMYD0dKQQdHYaCC4HYa7rpuHthjk7aqoGXWsI06ruf+Myn1nTn75Rv+K3QGhoDQW+KKaYviTbyhbavecqsoOjeKDf8AOWfrXHnVZPpig9qZ+tcnF7MTs2ig3/NmfqTzWsv0RQezM/Ur9j5+i9jjzqsn0xQe1M/WnnVZPpig9qZ+tc+a1l+iKD2Zn6k81rL9EUHszP1J9j5+h2OPOqyfTFB7Uz9aedVk+mKD2pn61z5rWX6IoPZmfqTzWsv0RQezM/Un2Pn6HY486rJ9MUHtTP1r10dzo7hvxWrgqddT2MjX/wBhXl81rL9EUHszP1Ly1mCY7XaMtkoRICC2WKBscjSO4te3TgftBT7Ge+fT+E7GeRRcvq8L5XVFVPcrDsNdLUHnqKLZ1zPf3yRfK47czW3FzSSyULXXRl7Ym8STAiItaCIiAiIgIiICIiAiIgIiICIiAiIgIiIC8V6ukdks9fcZgTDRwSVDwPW1jS4/2L2rEZfan37E73bI/wCErKGenbv5XxuaP7VnhxTNcRVyusc3XhlsktWN0UdQWurpWeMVcjd/hJ3+nI7r11zE6HqGh6lm1j8eujL3YbdcI9hlVTxzAOGiOZoOiPURvRHqWKyriZh+C1MNNkmV2PHqidnaRRXW4w0z5G71zND3Akb6bCuJNU11TVzuTzSVQribxWtvC+Gztqrdc71cbxWeI2+12eFstTUyhjpHaD3saAGscSXOHcvKfCC4XBgeeJOIBhJAd5dpdEjWx/C
faP8AmofxSyPF+MmMR2/GLVa+LwpauOaogx/I6WCrth5X9lUxSiQcj+YaBD2nRd1OiDrR05r4QV9sPEPhvabdgt/rbfkdBW1tVSGmgjrWuiazliaJKhga5nMXSB3qczlJPMBJs54+W7h3f5aO84xk8VmglghqMmjt7XWyB0paGl0nPzloL2guawgHYJ6FVvTYPxUx+g4M5NcrY7N8nxmC5Ul3omXGGOpdHVNaIndtIWxyPjbFG152C47I2oZxn4FZzxBPEaOowOLJ75d5mVFgyGvvELYLVStjiIo44nOLo5Q9kreZrQ15k254CDYCt46W+LibccEoMcyC9Xu3NpJKp9BTwmnhiqN8srpHytAa3XpD43eWtcA7WF4Acar7xWrcsp7xidys0dsvNdRwVkrIG07Y4ZRG2B/LO95nAJLiG8mwdO7gslw+xO9W/jVxIyavtrqC2XyiszKN8k0T3OfDFOJmEMcSCwyNGz0O/RJCjfDuounBO951S5fQ0dnw+vyGuvdNl1XdqaGk5aqRr2QPY94e2QOLm93KdDR6oL2RQBvhB8LXnTeJWIOOidC/UvcOp/8AUXtsfGfh9k91p7ZZs6xq7XKoJENHQ3enmmkIBJDWNeSdAE9B3AoJfLEyeJ8UrGyRvaWuY8bDge8EesKO4DK9lmntsjzI+01UtAHOJJMbDuLZPUnsnR7J7zsqSqMYKO3ZfbgN9nXXWeSPY1tsYbBv8h7EkH1gg+tdFH3VV/Lr/wAuscknREXOgiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgiolbgtTUGfTMdqJXTifrqikeS5/P6hE5xLubuaXHehoiRGGmrmRylkVQxzQWP0HAg9QQfkXoUZk4fWuOR8lvfWWRzyS5tsqXwxkk7J7IHs9k9d8u/tXRmoxO2ubTrzv+P97V582d8m0nzWH9GP1LshpoaffZRMi338jQNqOOwmcknzovw36hPF0/8A1rjzIn+tN+/Txe6Td4fj9JW0apSii3mRP9ab9+ni90qm4iXnIMX4/wDCPDaPJ7qbRlLLu6vMr4zKDTUzZIuR3IA30id7B2PkTd4fj9JLRq2CXxLEyZhZIxsjT/FcNhRnzIn+tN+/Txe6TzIn+tN+/Txe6Td4fj9JLRqkHk2k+aw/ox+pfUdDTRPD2U8THDuc1gBCjvmRP9ab9+ni90vrzCpp+lddbxco+m4p657GO18rY+UEfYdg/ImTDjnX6f8AEtGrsut5kvE81nsswdVD0KutYdsom9xGx0M2vis9XRzumg7NW6309pt9NRUkQhpaaNsUUbe5rWjQH/ILmgt9La6OKkoqaGkpYm8scEDAxjB8gaOgC9CwrriYy08v1/voCIi1IIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgLXfjP8A64Pg5/7vI/8AsWLYha78Z/8AXB8HP/d5H/2LEGxCIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIiICIiAtd+M/8Arg+Dn/u8j/7Fi2IWu/Gf/XB8HP8A3eR/9ixBsQiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgIuHODGlziGtA2Se4KFuy+93YCosttofJr+sNRcKh7HzN9TxG1h5WnvGzsjvAW7DwqsW+VYi6aooR5czD5jY/apvdp5czD5jY/apvdrdwtesdYLJuihHlzMPmNj9qm92nlzMPmNj9qm92nC16x1gsm6KEeXMw+Y2P2qb3aeXMw+Y2P2qb3acLXrHWCybooR5czD5jY/apvdp5czD5jY/apvdpwtesdYLJuihHlzMPmNj9qm92nlzMPmNj9qm92nC16x1gskmT47Q5fjd2sVzjM1tulJLQ1UbXcpdFIwseN+rbXFfhLxc4W3ThHxPvuF3GN0lbbqswxua3/AMRGesUjR8j2FrgPt13r9tvLmYfMbH7VN7tU5xF8HmXiVxpw7iRcqGzNuePD0qZs8pjrCwl0Bf8Ag+h
jeS4a7+gPQJwtesdYLJr4JHBQcBuB1jx6dgbeKgG43U//AFUobzN/qNayPfr7PfrVyqEeXMw+Y2P2qb3aeXMw+Y2P2qb3acLXrHWCybooR5czD5jY/apvdp5czD5jY/apvdpwtesdYLJuihHlzMPmNj9qm92nlzMPmNj9qm92nC16x1gsm6KEeXMw+Y2P2qb3aeXMw+Y2P2qb3acLXrHWCybooR5czD5jY/apvdp5czD5jY/apvdpwtesdYLJuihHlzMPmNj9qm92vpuQZbCeeS12ioYOpjhrZGPI/wDaXR639h0PtCcLXrHWCyaovFZrvT323RVtNzCN+2lkjeV7HNJa5jh6iHAgj5QvauSYmmbTzQREUBERAREQYvKCW4zdyDoijmII/wBgqPYyAMbtIAAApIug/wBgKQ5V/mxeP5nN/cKj2M/5uWr+aRf3Avo4P3M/j+zLuZJFqZiXGDM8qv3D+4+e0ck9+yWpt9ywako6YS22mh7ffM4tMw5OxZ2hf39oOXl6E8Yhxe4v8QaC35pj9ovlXbq6u5oLN4jbG2t1EJzG4GodUiqEojDnc/KBzjXZ6WOeGLbRFq1kfEriFb8U4nZtBloZR4dktRRU9k8m05iqqWOSIuZLIW8++WQhpYWkaBJdterLOJ/EnLs/zSgw2C+09BjVU23QNtFuttRFU1HYskcal1XUMkDdyAARAeiN8xJ0LmgbNotfrXkXEnN+LNJjtVfHYPDHh9uvFyoKOkpqmWGvknmZLGySRrxyeho75ujG8uiSTHMl40ZVaOI1JcLJfrrkOJOyqnsNZE+yUsNrhEs4gfHHU84qJJY3O+OA5hc0g6TMNomyMc9zA5pc3XM0HqN9219LW7hxFc8T4m8d8lqcluNdbrTcjVT2nxemDKkC3QyN24Rc4LG6Y3lcAQwF3MSScfwz4h8Y8oqcNyJ1tvVfaL5LTz19HU0NshtlLRztDjJTyx1JqT2Yc1w7Rri8A7a0nQZhtCvJQXiguktZFRVtNVy0c3i9SyCVr3QS8odyPAPou05p0euiD6161qjT5pmNgo8hobdkEDLrPxThx511ktNKHvppaWAkyMjYxsjxz9Hn0jytBcQNKzNhtLcrlR2a31FfcKqChoaaMyz1NTII4omAbLnOcQGgDvJXbFUwzCMxyskErO0YWuB529PSHyjqOv2hawcQcqzG3YTx0xusyg3SoxW2U1zo7nVWuje+ohmgle6mnhMRhe3cLhvsweV3y9VlWWK73XwqMfqKXKK21RDCYqp1NS0tKY3xNq4g+n9OIkMeepIIcO5rmjopmGwdHeKC41VbTUlbTVVTRSCKqhhla98Dy0ODXgHbSWuB0ddCD61xd7zb7Bb5K+6V1NbaGItD6mrmbFGwucGt25xAG3EAfKSAtcYc2uWK03F6KryqWiucWVUtut1xorFSz11Q+Wmp3sp2QsYxs0pD3RtdJzEAAuJDSonlefZTkvBHizj+WGtmrsfudoZDU3Okp6WrfDNPTStE0dO50XMDzaLD1BbsA7CmYbjIiofivm2VYrxZt7bhkdThuASU9M2nulPaYqylnq3TESQ1krgXU4LezDHDlbtx27Y0s5mwvhFr9cuKWUU/Cvj7eI7ny3HF7pcae0TeLxHxaOKjgkjHLy6fp73Hbw4nejsaCwuS5xxBulXxVmteZuslNiFiortSU8Vsppu3lfRPmeyRz2E9mXRno3TgX9HAABY5oGzaLXG38Qc5x27Ysbpkzb3T5didwvDYDb4YG22qghglHYlo26MifXLIXn0Qeb1LDY7lPE67jgy+XiI9reIFtfNWtZZqT/EnNoxUh1P6HxjotPac7fSJDR0aGYbTotaBxYySTA6u01mWV0GW0mXVmO0tVZbLBVV93bAHOHJA/UMbuQtc95AY0MPdzBYul4zcRLjwupI23DyZldPxBp8Tlra+3wB8sL5GdZ4WOdGHcsoDhE4fF9Fw3tM0DatFSN4ze9cE84tsWX5TU5B
ilxstdKK2spaaGSGtpeaocNwxsGn0xeADvrT/ACuO51warMiufDDHrhlc/bX+vpvHalvZtj7HtXGRkOmgD8GxzY962eTZJJJVib9glPDY/wCTbuPULtV6H/3NqXKIcNf/AC68f0vV/wB9S9aNp++qWeYiIuVBERAREQYvKv8ANi8fzOb+4VHsZ/zctX80i/uBS6tpI6+jnppd9lNG6N2u/RGj/aq/pblU4tRU1suVruUs1LG2EVNDRSVMU4aAA8dm0lu9dWuAIOx1Gifo7P8A5Yc0RzuyjtizXzFOF/EbHOKsdwslsvFlpqi8me6V12utsraOqoTKXSNbyQCrc9zdcvO70ToFxAVsWDgBasUyDx6yZFktqtPjzrj5t0twDbaJnO536Zyc4Y5xLjGHhhJPoqYeedP9FX77kq/dp550/wBFX77kq/drbGz1x/5kyzoi9y4EWC6YXm2MS1lybQZbcJrlXSMljEsckvJzCIlmg38G3QcHHqepXTk/AS1X7K7lkNvyHI8Tr7rHHHcxj9e2nZX8jeVjpA5jiHhvoh7C12vWpd550/0VfvuSr92nnnT/AEVfvuSr92ruK/CZZ0dFFgFvoc/q8vZPVvudTa4LQ+OR7TF2UUkkjXAcvNzkyu2S4jQHQdSYFcfBfx64OqIm5BktJbHXTy1TWqmrmNpaKt7btzNE0xkncnM7kkL2AuJDQdEWJ550/wBFX77kq/dp550/0VfvuSr92m4r8MmWdGCbwdtkHES45dS3S7Uj7oGeU7PFOw2+vc2EwtfLG5hOwzQ9FzQeVuwdLFYdwJpOG9VBNYMiyWW228Sut+M1d0/ybCXNcBH0jMhYOY6D3PDehA2Apl550/0VfvuSr92nnnT/AEVfvuSr92m4r8MmWdEbGRcUtjeDYyB6yMrm/wD5665eBFgmqaic1lyD58qiy9wEsehVxxxxtYPQ/gtRt2PjbJ9JZXJOLWP4dZp7tfvKNltcHL2tbcLZUQQx8zg1u3uYANkgDr1JAXrt/EO23agp62io7zV0dTG2aGogs9U+ORjhtrmuEeiCCCCE3GJ30ymWWFyLgnY8mkz59VV3CM5pboLZcOxkjHZRRMlY0w7YeVxEztl3MNgdB6/rJuDNtyG/2K+U95vVgvFopDb2VlpqGRvqKYua4wyh7HBzeZjT0AIPcQpB550/0VfvuSr92nnnT/RV++5Kv3abivwyuWdESvnADH76b/K+vu1JW3a809/bW0s7GTUVZDEyKN8B5CAOVnUPDweZ3qOh4o/Brxt9szCir7rfru3LIYGXSeurQ+WSWEns52ODByPHogBumARs00a6zrzzp/oq/fclX7tPPOn+ir99yVfu03FfhMs6I46p4h46yG12vHrVkNBRxRwRXW8ZLJDWVQawAyTMZQuaHkgk6Oj39N6GJyPgzNxbhZUZncbvaIqhkcVfi9ovPb2uoZHKXs5i+Bj/AEuhdy8hPQEkAFZ6w8asVym4XSgs1TWXautUvY19NRW+eWSkfsgNla1hLDtrho6+KfkKzfnnT/RV++5Kv3abjE76ZTLKB5n4NlhzOTKmPv2RWi25R6d1tdrrI4qaebs2x9tp0bnB3KxmwHcruUczXdd5w8FrIW5s3xq4ay22w2uu/CM/BxRU74GmL0OjuV5JLuYb10A6KQeedP8ARV++5Kv3aeedP9FX77kq/dpuK/CuWdGBruDFkr5sWkkqq8Ox20VVmpOWRmnw1EUUT3Seh1eGwtII0Nk7B6ALVwZstoZw6bDVV7hg1M6ltvPIw9sx1N4sTNpg5jydfR5fS+zos9550/0VfvuSr92nnnT/AEVfvuSr92m4r8JlnRC67wd7DUGWelu97tV0N+qshgulDURNqKWoqGdnMyPmjLTG5nTle1x+1fFs8HDHbVSGmjut8nidkdNlLzVVbZnvroeTbi5zC4tkLGl7d9/xeQdFN/POn+ir99yVfu0886f6Kv33JV+7TcV+Eyzor3wguG1fxl8
2sTdY4p8ebcqe53C8T1TGiBkTjzwsi6ve+RhczfRoDzs+pXCAAAANALAeedP9FX77kq/drlmWtnPJT2W+zzHo2M2uaHmPyc8rWsH5XOA+1NzXHbZLSyXDX/y68f0vV/31L1hMQsk1itDo6ksNXUTy1U4jO2tfI8u5QdDYaCG70N63obWbXBtFUV4tUxyJ5iIi50EREBERAREQEREBERAREQFEOJPFXG+E1st9dkdZJTsuFdDbqSGCB88088h01rI2AudobJ0O4HvOgfjKeK9hxHOMWxGtdVy33I3yiigpKV8wayNu3yyOaNMYDygk9xcCegJHn4YYVkmO2isZmuUDNLtLc5q6CpdRRwR0cZ9GOOJo6jTd9SSdvcN67w+LHh2TVeVZjPmF6t1/xa4SQx2iwNt7RFSQsGy6Uu2XyOeeu9j0Gka3ytnvciICIiAiIghHEvD79dcVu4wK7UeJZbVSQ1Dbq+iZK2d8Zbpkw0S5rmt5C7qQ09Ae5fVk4q2Ot4g1nD6e4A5nbrfDX1NP4tJDHNG/QMkJdsOaHEb053LzAEkg6mqw2VWCa/WO501vr32O7VVHJS094p4mPnpS4dHN5h10dO19nq70GZRVbj3EMcPLhgnDzOb3Pec2vFFIWXiK2uhpK2aLq5nM0crX8uzrp0aSeXmaDaSAiIgIiICIiAiIgIiICIiAiIgIiICIiAiIgKurtxPor/nl64Z2SpuVFlMVofWOvENv7Wktz3+jDzOcORzySXBvUHkcCQeisVQvDxmvn1mpyEUXmx21N5vGn123Z9ke37XXXfaa1v1IMhw+xOsw7ELTarpfqzKrnRQmKW9XFrRUVBLuZxPL3DegBsnTW7LiNmSIiAiIgIiICIiAiIg+JImy62BzNJLXaBLTojY369E/81TrMoPg1YraaTOsnv8AmsFzvbqSnvctuEjqGOXZibUuiHVoI5efWyXgBoaOlyqG8XxmZ4dXj4PhRHMNReIeUNdhvtWc/Nvp/B8+vt0gmIOxsdQuV8Rc3ZM59c+hza+X1r7QEREBERAREQEREBERAREQEREBERAREQFWXDi1WSi4rcUKq3ZXNerpV1NC642eRxLLS5sBEbWj1do30j+RWaqy4cXWyVvFbihS27FJrLdKSpoW3G8SNIZdnOgJjc0+vs2+ifyoLNREQEREBF1vqIo3cr5WNd8jnAFfPjkH4+P88K2kdyLp8cg/Hx/nhPHIPx8f54S0juRdPjkH4+P88J45B+Pj/PCWkdy/PLwxfDduFBU5xwiuOCV1nmjqGQx3qhyDspnwtlZNFMxvix5e0Y1uxzHQeRs6X6D+OQfj4/zwtEv8J7wRjyTGrVxKs7Gy3G1ctvubItEvpnuPZSdP5Eji37RKPU1LSLV8ErwyK7wnchvFtZgL8etloomzTXTyt40DK54bHEW9izRc0Su3s/wZ6dVs8qG8DTgvT8B+CFptdYIoshuX+UbqS4czZngcsR6/+mzlboHXMHEd6vPxyD8fH+eEtI7kXT45B+Pj/PCeOQfj4/zwlpHci6fHIPx8f54TxyD8fH+eEtI7kXT45B+Pj/PC+45o5d8j2v138p2lpH2iIoCLgnQ2egWNx/J7NllE+ssd2obzSMkMTqi31LJ42vABLS5hI3og6+0IMmiIgIiICIiAiIgKF4f57efOa+cPiXmv21N5veL67Xs+yPb9rrrvtNa36lNFWXDi1WSi4rcUKq3ZXNerpV1NC642eRxLLS5sBEbWj1do30j+RBZqIiAozntxnpLdQ0tPO+mfcayOjdNGdPYwhzncp9RLWEA+rexo6Kkyh/Eb42M/0xH/ANGZdOzRE4sXWObGjh/jGhzY9a5D63S0cb3H7S4gkn7SnwfYt9W7R7BF+yu/L8vtGBY1X5BfavxC0UDO0qKns3ydm3YG+VgLj1I7gVGZeO2EQY7NfZbxJFamVTaKOd9DUNNVK5oc1tO0x81Rtp2DEHgjZB6Fd+/xPHPUvOrP/B9i31b
tHsEX7KfB9i31btHsEX7KrbiR4T+NYzwor8ux6oF7mZVMtsFOaSpHZVb3NaG1DBH2kXKHBxDw0u6Nbtzmg+vH+LlbJkuF2K53e0yV10tdVdaxkdmuFG6aFujC6nEoc1haN9oyV3ONt00b0pxGJ456l51T74PsW+rdo9gi/ZT4PsW+rdo9gi/ZUbxLwgMBzq6Wu32S/isqLpEZqFzqSeKKqDWc7mxyvjDHPa3ZcwO5m6IcAQdfb+PmAx5V5vOyGIXLxsUHN2E3i3jO9dh4xydj2m+nJz82+mt9E3+J456l51SH4PsW+rdo9gi/ZT4PsW+rdo9gi/ZWfUKoOMuH3XN5sSo7uam+wzSU8kMVLMYmysYXvi7bk7Lna0ElnNsa7ld/ieKepedWW+D7Fvq3aPYIv2U+D7Fvq3aPYIv2Vj6Ti3iddjON5DBdeez5FUw0drqfFpR4xLKSI28pZzN2Wnq4ADXUhR2s8Jzhpb6h8VTkzYBHVy0Ek76KpEEdTG5zXwvl7PkbJtjtMLgXDRaCHAmb/E8c9S86pl8H2LfVu0ewRfsp8H2LfVu0ewRfsqPQ8esElxq7359+FLbLPUQ0txfWUk9PJSSSvYyPtYpGNkYHGRunFvLok70CRipPCh4axOrGPv1Syoo2iSopnWitE8UWt9sYux5xFrr2uuTqPS6hOIxPHPUvOqbfB9i31btHsEX7KfB9i31btHsEX7KwOW8dcHweltlTdr3yU9ypvHaWakpJ6pkkGge1JhY8NZog8ztDr3rtyTjZhWJixeUb23d9pn1drFJTzVRrYmiMkxCJji86lYQ0dSDsAgHTf4njnqXnVmfg+xb6t2j2CL9lddThNoo6eSe1W+ls1wia58FZQQshkjd3g7aOo6DbTsOHQgjovFQ8XcTuGM3/ACCO6GO1WDtBc5KmlmgfSlkTZXB0b2B++R7XDTTvfTZ6KSRVsVytDKunLnQVEAljL2OY4tc3Y21wBB0e4gEetZRjYkz/ALT1W8onjl84pZ5w1vNYKOw4nfqtzH2CpMj6yF1O4McJZmaBDi3m00d2xvuKyVy4ZZHlNHgk13zy7W26WF8dRcjjzm0tPeJWmMkSsIP4Mlh2zuIe4etSXhr/AKOsV/oql/6LVJF8vGpinEqpjlEyk80NtnCTGbTxLu+fQUk5ye6UzaOoqZKuV0YhAjHI2Iu5Gg9kwkhu9jv6lQ/OODeK4lglG6wV7eGFjx67x5PWS2SHsoZRA3cjZo2kB7HMaObYPxB0OlcS89xt9NdrfU0NbAypo6mJ0M0Eo22RjgQ5pHrBBIWpHRYb5QZPY7feLXUtrLZcKeOrpahgIbLE9ocxw316gg9V71X3Bq9y11pvVnOFTYPQY7dJrPb6Qt1BVUsQaI6iE8rRyO2dAA613lWCgIiICIiAiIgKsuHF1slbxW4oUtuxSay3SkqaFtxvEjSGXZzoCY3NPr7Nvon8qs1QvD/Pbz5zXzh8S81+2pvN7xfXa9n2R7ftddd9prW/UgmiIiAofxG+NjP9MR/9GZTBRHiIwluOyfxI7vEXH5NxyNH/AOXAf8V1bN97H5/oyp5q28KWyV2R+D/mlttlBU3OuqaNrIqSkhdLLKe0YSGsaCT0B7go14SOG11bknDrI4LbfrnYLBPWRXGjxWpmguEbJ4WsjmiEL2PcGFmnNYd8sh6EbV8ot8xdi1gyvA6Gv4I5PXYpjOYR3O8XuzvqIsiNXU3CqbT11L+F5JnvkDGxh3fohrCSAAFYPE6x3G4cbeHNdS2+qqaGltN+jqKmGFz44XyR0oja9wGmlxa7lB7+U67lbyKZRrHjWH3yl4TeDXTPslwirrRdqN9whdSSNkooxQ1TXmZutxjmc1pLtDbgD3qMcLuF1FbrNb+H2cYxxGrrtT3F0c89LcK82Kpb4wZY6vbZhA1vxXlug4OB9ElbhopkgFrraxd8f49eL4VZcqt9oul3qZcno7tby20OHZu/x6lqD3SPe1noMcQ7mJL
WkbVkP8HjhfI9z38PcZc5x2XG1Qkk/mqeUdHBb6SClpYWU9NAxsUUMTQ1rGNGg0AdwAAGllaZGouN0mQUvDbgzgcuG5Iy74xk9v8AKtS62SCkhihlkBlbNrlkYQQ4OZsAfGLfXmW4ZfvgegojYrj44OJ3lA0/icnaeLeWjJ2/LrfZ9n6fP3cvXeltOixyjVnjJht+ul740vorFcauK4sxLxR0FJI8VJhrXOm7PQ9Pkbou1vlGt6CsSqx+4SeEBmlw8m1LrdU4XSUkVV2DjDLMKirLomu1pzgHNJaDvTh06hXEiuUagWC35db8Q4c2G/WvOYMbhwymijt+MwzQTvug218NW9nK+FrWdnyh7mR7LuY9NLN8GcPvlHcPB+NysFypH2HG7xQ1zquje0Uc4NNG1rnEabzBj+Q79Nuy3YW0iKZRrDxqw24VXHW041QRtfj/ABJZTvvrd9WC2SNlkd+SaF0cJ/2Qtmar/wANL/sH+xYOg4f4/bcxuWVwW1gyK4RNgqLhI98j+zaGgMZzEiNvoNJawAEjZ2eqzVdI2GiqJHuDWMjc5zj6gB1WdMdo7uGv+jrFf6Kpf+i1SRaK5j/hIbHwgttJh9vwu7XTILLTxUFUbjI2igEjIw0vZ0e97TrY21mwQR0O11eCN4X/ABH8JbjubZdqy049jVrttTc6i3W+g344A6OGON0sj3OZp87ZOZpG+z5dad05Mftxa/xn9Vnm3vRFCeMnEuzcJOHtxyO/RVtRbo3RUzoLaN1MrppGxNbGOZvpbfvoQeh0tCOjh9RZFNlmY3y4ZTR37GLpPAbDRUHK6Ohijj5JdvA9Jz39T6RHTprZCnqjXDjALJwuwq14vjlG6gs1AxzYKd7y9zeZ7nu24kkkuc4k79akqAiIgIiICIiAqy4cWqyUXFbihVW7K5r1dKupoXXGzyOJZaXNgIja0ertG+kfyKzVWXDi62St4rcUKW3YpNZbpSVNC243iRpDLs50BMbmn19m30T+VBZqIiAvNcrbTXehmo6yFtRTSjlfG/uPyfkIOiCOoIBC9KKxMxN4EPfgFUDqHLr3BGO5nLSSa/rPgLj/AMSSuPMCv+ud7/Q0P7spii6eJxfLpHsyvKHeYFf9c73+hof3ZPMCv+ud7/Q0P7spiicTieXSPYvKHeYFf9c73+hof3ZPMCv+ud7/AEND+7KYonE4nl0j2Lyh3mBX/XO9/oaH92UW4nYVntHg1zmwTJ6qvytoj8Sp7vHRtpnntGh/OWwNPRnORojqAraVa+Eda7LeuC+R0WQ5TNhdnlbB299p3Fr6XU8ZaQR/KcGs/rJxOJ5dI9i8svHgNyMbefMr0H6HMBDQ637MvrzAr/rne/0ND+7KWwACCMNdztDRp3y9O9dicTieXSPYvKHeYFf9c73+hof3ZPMCv+ud7/Q0P7spiicTieXSPYvKHeYFf9c73+hof3ZPMCv+ud7/AEND+7KYonE4nl0j2Lyh3mBX/XO9/oaH92XZFw9ZMWtul7uV6pgdupKsQMik7iA8RRMLh0+KTo9xBHRS1E4nF19Ij9i8qS8JHwTcO8JG0DypGbTklPHyUd+pIwZox3hkg6drHs/FJBGzyluzujfA/wDAouPDk8V7DxNx+huNpvMVJQUVwp6lp8cpg6V8zWPjcJomlwpy5p5Nlje/l6bvouVirO5cDqeOy4XaMZya/YbasXkZ2VFaav8AB1kILfwNRzgue3TSN736RPVZeDArtHxPrMnlzC5VFknpBTsxeRjPE4n6aO1B1zc3ok/1ipqiAiIgIiICIiAiIgKF4f57efOa+cPiXmv21N5veL67Xs+yPb9rrrvtNa36lNFTGQV9q8HnK77mF4rMhvNHm13oKLxeko31UNqeIzEx2m7LWPcWg6BJc5oAO0FzoiICIiAiIgIiICIiAq18I66WWy8F8jrchxabNLPE2Dt7FTtLn1W54w0AD+S4tf8A1VZSq3MM4u3ECxXq18IMkx6bKbTc4aC
5T1xdNFQNJDpfRaNPeG9AN62HjYc0gBZ0BBgjLW8jS0ab8nTuXYuGghoDjs66nWtrlAREQEREBERAREQEREBERAREQEREBERAXBAPeNrlEFTXmjquBoz/AD2S45Vmlrrnw1oxmnY2qfROGmSupwdO5OXldybAaGHv30s60XOK9WmiuEMc0UNXAydkdTE6KVrXNDgHscAWuAPVpGwehXrWu/hFcQsZ8Gq/ycVLtkl4muFbbXWqkwuKsBp7rMxwdG8McD2Qj53c8gGgHjoXODJA2IRfmF4FXhYXvIPCsvlRl9VAPhBLIZRBGIoYaqJnLSNYN9GhgMI3tzuZpc4nZP6eoCIiAiIgLqqamGippaiolZBTwsMkksrg1jGgbLiT0AA67K0c/wAKRxlFgwSzcOaGflrb7IK64Na4bbSRO/BtI+R8o2D/APCflXo8DDwosv8ACXzqmtWRX6koTYbJI6vtFPbgRfi54jNU+T4sXJzRc0beUFzyWjlcWxhsUMuvPGBuHX/hbltm8zWXOXy3UzUj5ZquKJxYYYN6ADnBwLuh1yOaSNh1h2HGLRi0FTDZrZSWuKqqZKydlJC2ISzSHmfI7Q6uce8nqvRabRQ2C2U1utlHT2630sYigpaWJscUTB3Na1oAAHyBetAREQEREBERAREQEREBERAREQEREBERAREQEREBa++FHw/4XcXaahtWZUlwut5tjZfExZZyyooxNyF5JJ7Ic3ZxnUgJ0NtHUqyOK+bTYta6ejt7xHdriXMil1zdhG3XaS6PQkbaBvpzOBIIBCpGKJsLSGg+k4vc4kkucTsuJPUkkkknqSdlei+HfDI2mnfY3+vdGv8AByao3HwInUF/prniGR1dlNJM2emdcnNqJ43tPMx/PG1gBBAPct6qTjtf46SFlTjVvnqWsaJZY7o9jXu11Ib2B5QT6tnXylQVF6L5XsX0/Wr3M3ksD4ebz9VaL74f+7p8PN5+qtF98P8A3dV+ivyvYvp+tXuZvJYHw83n6q0X3w/93T4ebz9VaL74f+7qvZZWQRPkke2ONgLnPcdBoHeSfUF10VdTXOjgq6Ooiq6SdgkingeHskYRsOa4dCCOoIU+WbFy3frV7mbyUDxu8HG/8eeLtxzTIL/DBSVLmMitdK0l0FOwBrYmSOGt62S7l6ucTobVweDrwI4NcG8tt1+bQ3yjyak520t1vdcJYI3SMdG7RhDGDbHuG5WAdeh3oqSIQHAgjYPeCtdfwnY64tTTl/CZ/eZM3k2l71yqc4PZlLQ3GLGKuQvo5WONuc7viLG7dAP/AG8oc5o/ihrhvXKBca8XtezV7Jizh1flOsAiIuMEREBERAREQEREBERAREQEREBERAREQEREFC8XpnT8R5GO+LBbYGsB305pJS4/8dAf1VE1YnHCwvp7tbcgjaTBJGLfUuA+IeYuhJ+QbdI3fyuaPWq1rX1EVFUPpIY6iqbG4xRSyGNj369FrnAO5QToE6OvkPcv0b4dXTXslE090W6JU7kUNF6z3fXE7Hr7Mgl/dFzHec7dIwSYrY2MJHM5t/lJA9Z14oNrs3tOk9J9mKqrbxH4j5bTSZHYLddamldWSMpbY2kofEZII5nRkPlfMJw8hpJcAAHdA0jqffkGZZlDaeJOQUmRCCnxW5yR01tNDC5k8bIoZHMkeRzaIeQC0tI6kl3QCeUPCC32m+SV1svV8tdHLWePyWekrAyjfMXczjy8vMA4jZaHBp2eml6qvhZaa2xZfaX1FYKfJ55Kisc17OeNz42RkRnl0BqNuth3UlcEYGNl7apv+PfaeX59yobf75kme3nM6G03wY7abBSRxujbRxzyVk0tP2x5y/4rA1zWgN0SdnmHRS/gp/oewn+hqT/otXmvnB22Xi81Fzp7tebJUVlMykrha6psbKxjGlrO1BY70g0kBzdHXrXbQ0OSYTa7fYMesVvudnttLDS09VcLy6Cd7WMDfTY2mcN9O8Hr36Hct1FNdGJNdfn
rPf2dnd2CcIoab1nvTWJ2P7d5BL+6KQWCqu9XROfebfS22qEhDYaOsdVMLNDTud0cZB3vpr1Dr16ddOJFU2i/SUZe3zupb9Yp4zqSO6Ugb37PNMxjgPytc4f8VtCtdsAsL8kze2xBhdS297bhUv10byk9k3fymQAj7I3fItiV5D45XTOLRTHOI7fz/vq2dwiIvNIIiICIiAiIgIiICIiAiIgIiICIiAiIgIiIPPX0FNdaKejq4WVFLOwxyRSDbXNPeCqRynhRerBM+S0wvvds72sa8CqiHyEOIEgHyg832E9TeyLv2TbcXY6r4fKecTyVqy+OshJEtpu8Lh05ZLZUNP8Ac6/lC+Oaf6Ouf3dP+wtqUX2/ntX0/X+EtDVbmn+jrn93T/sJzT/R1z+7p/2FtSifPavp+v8ABaGq3NP9HXP7un/YTmn+jrn93T/sLalE+e1fT9f4LQ1YaKl5022XRx/ktttQSfyDk6rO2HAckyWVohtstqpSfSrLpGY+Uevli2HuP2ENB/lBbFItdfxzFmLUURE9V7GFxPE6HD7UKKiDnFx55qiTRknfoAucR6+gGh0AAA0As0iLzlddWJVNdc3mUERFgCIiAiIgIiICIiAiIgIiIP/Z",
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from IPython.display import Image\n",
+ "\n",
+ "Image(app.get_graph().draw_mermaid_png())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9487118a-25ea-4ecf-982b-10c72bec3325",
+ "metadata": {},
+ "source": [
+ "We can step through the execution as follows, printing out the summary as it is refined:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "9572e4bd-5e7d-4884-8283-d516396d7c29",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Apples are typically red in color.\n",
+ "Apples are typically red in color, while blueberries are blue.\n",
+ "Apples are typically red in color, blueberries are blue, and bananas are yellow.\n"
+ ]
+ }
+ ],
+ "source": [
+ "async for step in app.astream(\n",
+ " {\"contents\": [doc.page_content for doc in documents]},\n",
+ " stream_mode=\"values\",\n",
+ "):\n",
+ " if summary := step.get(\"summary\"):\n",
+ " print(summary)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "cb390e43-58db-43af-9118-aed6f08ce351",
+ "metadata": {},
+ "source": [
+ "In the [LangSmith trace](https://smith.langchain.com/public/d6656f49-4fa1-44b9-b6d3-10af921037fa/r) we again recover three LLM calls, performing the same functions as before.\n",
+ "\n",
+ "Note that we can stream tokens from the application, including from intermediate steps:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "105d71ee-0eb8-40bf-aa82-c94121dba2f2",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Ap|ples| are| characterized| by| their| red| color|.|\n",
+ "\n",
+ "\n",
+ "Ap|ples| are| characterized| by| their| red| color|,| while| blueberries| are| known| for| their| blue| hue|.|\n",
+ "\n",
+ "\n",
+ "Ap|ples| are| characterized| by| their| red| color|,| blueberries| are| known| for| their| blue| hue|,| and| bananas| are| recognized| for| their| yellow| color|.|\n",
+ "\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "async for event in app.astream_events(\n",
+ " {\"contents\": [doc.page_content for doc in documents]}, version=\"v2\"\n",
+ "):\n",
+ " kind = event[\"event\"]\n",
+ " if kind == \"on_chat_model_stream\":\n",
+ " content = event[\"data\"][\"chunk\"].content\n",
+ " if content:\n",
+ " print(content, end=\"|\")\n",
+ " elif kind == \"on_chat_model_end\":\n",
+ " print(\"\\n\\n\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "5ad2a8f1-e453-4f0b-8911-8c8ddf607f64",
+ "metadata": {},
+ "source": [
+ " \n",
+ "\n",
+ "## Next steps\n",
+ "\n",
+ "See [this tutorial](/docs/tutorials/summarization/) for more LLM-based summarization strategies.\n",
+ "\n",
+ "Check out the [LangGraph documentation](https://langchain-ai.github.io/langgraph/) for detail on building with LangGraph."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f52a8a4b-4dbe-4a82-9267-e9a7c7fa188d",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/versions/migrating_chains/retrieval_qa.ipynb b/docs/docs/versions/migrating_chains/retrieval_qa.ipynb
index 477e3cf6247..7cab33d02b4 100644
--- a/docs/docs/versions/migrating_chains/retrieval_qa.ipynb
+++ b/docs/docs/versions/migrating_chains/retrieval_qa.ipynb
@@ -82,13 +82,9 @@
"id": "c7e16438",
"metadata": {},
"source": [
- "import { ColumnContainer, Column } from \"@theme/Columns\";\n",
+ "## Legacy\n",
"\n",
- "\n",
- "\n",
- "\n",
- "\n",
- "#### Legacy"
+ ""
]
},
{
@@ -128,12 +124,11 @@
"id": "081948e5",
"metadata": {},
"source": [
- " \n",
+ "\n",
"\n",
- "\n",
+ "## LCEL\n",
"\n",
- "#### LCEL\n",
- "\n"
+ ""
]
},
{
@@ -184,9 +179,6 @@
"id": "d6f44fe8",
"metadata": {},
"source": [
- " \n",
- "\n",
- "\n",
"The LCEL implementation exposes the internals of what's happening around retrieving, formatting documents, and passing them through a prompt to the LLM, but it is more verbose. You can customize and wrap this composition logic in a helper function, or use the higher-level [`create_retrieval_chain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.retrieval.create_retrieval_chain.html) and [`create_stuff_documents_chain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.stuff.create_stuff_documents_chain.html) helper method:"
]
},
@@ -231,6 +223,8 @@
"id": "b2717810",
"metadata": {},
"source": [
+ "\n",
+ "\n",
"## Next steps\n",
"\n",
"Check out the [LCEL conceptual docs](/docs/concepts/#langchain-expression-language-lcel) for more background information."
diff --git a/docs/docs/versions/migrating_chains/stuff_docs_chain.ipynb b/docs/docs/versions/migrating_chains/stuff_docs_chain.ipynb
new file mode 100644
index 00000000000..c7e255b8845
--- /dev/null
+++ b/docs/docs/versions/migrating_chains/stuff_docs_chain.ipynb
@@ -0,0 +1,281 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "ed78c53c-55ad-4ea2-9cc2-a39a1963c098",
+ "metadata": {},
+ "source": [
+ "---\n",
+ "title: Migrating from StuffDocumentsChain\n",
+ "---\n",
+ "\n",
+ "[StuffDocumentsChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.stuff.StuffDocumentsChain.html) combines documents by concatenating them into a single context window. It is a straightforward and effective strategy for combining documents for question-answering, summarization, and other purposes.\n",
+ "\n",
+ "[create_stuff_documents_chain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.stuff.create_stuff_documents_chain.html) is the recommended alternative. It functions the same as `StuffDocumentsChain`, with better support for streaming and batch functionality. Because it is a simple combination of [LCEL primitives](/docs/concepts/#langchain-expression-language-lcel), it is also easier to extend and incorporate into other LangChain applications.\n",
+ "\n",
+ "Below we will go through both `StuffDocumentsChain` and `create_stuff_documents_chain` on a simple example for illustrative purposes.\n",
+ "\n",
+ "Let's first load a chat model:\n",
+ "\n",
+ "```{=mdx}\n",
+ "import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
+ "\n",
+ "\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "dac0bef2-9453-46f2-a893-f7569b6a0170",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# | output: false\n",
+ "# | echo: false\n",
+ "\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "\n",
+ "llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d4022d03-7b5e-4c81-98ff-5b82a2a4eaae",
+ "metadata": {},
+ "source": [
+ "## Example\n",
+ "\n",
+ "Let's go through an example where we analyze a set of documents. We first generate some simple documents for illustrative purposes:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "24fa0ba9-e245-47d1-bc2e-6286dd884117",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_core.documents import Document\n",
+ "\n",
+ "documents = [\n",
+ " Document(page_content=\"Apples are red\", metadata={\"title\": \"apple_book\"}),\n",
+ " Document(page_content=\"Blueberries are blue\", metadata={\"title\": \"blueberry_book\"}),\n",
+        "    Document(page_content=\"Bananas are yellow\", metadata={\"title\": \"banana_book\"}),\n",
+ "]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3a769128-205f-417d-a25d-519e7cb03be7",
+ "metadata": {},
+ "source": [
+ "### Legacy\n",
+ "\n",
+ "\n",
+ "\n",
+ "Below we show an implementation with `StuffDocumentsChain`. We define the prompt template for a summarization task and instantiate a [LLMChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.llm.LLMChain.html) object for this purpose. We define how documents are formatted into the prompt and ensure consistency among the keys in the various prompts."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "id": "9734c0f3-64e7-4ae6-8578-df03b3dabb26",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.chains import LLMChain, StuffDocumentsChain\n",
+ "from langchain_core.prompts import ChatPromptTemplate, PromptTemplate\n",
+ "\n",
+ "# This controls how each document will be formatted. Specifically,\n",
+ "# it will be passed to `format_document` - see that function for more\n",
+ "# details.\n",
+ "document_prompt = PromptTemplate(\n",
+ " input_variables=[\"page_content\"], template=\"{page_content}\"\n",
+ ")\n",
+ "document_variable_name = \"context\"\n",
+ "# The prompt here should take as an input variable the\n",
+ "# `document_variable_name`\n",
+ "prompt = ChatPromptTemplate.from_template(\"Summarize this content: {context}\")\n",
+ "\n",
+ "llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
+ "chain = StuffDocumentsChain(\n",
+ " llm_chain=llm_chain,\n",
+ " document_prompt=document_prompt,\n",
+ " document_variable_name=document_variable_name,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0cb733bf-eb71-4fae-a8f4-d522924020cb",
+ "metadata": {},
+ "source": [
+ "We can now invoke our chain:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "id": "d7d1ce10-bbee-4cb0-879d-7de4f69191c4",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'This content describes the colors of different fruits: apples are red, blueberries are blue, and bananas are yellow.'"
+ ]
+ },
+ "execution_count": 19,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "result = chain.invoke(documents)\n",
+ "result[\"output_text\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "id": "79b10d40-1521-433b-9026-6ec836ffeeb3",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+      "{'input_documents': [Document(metadata={'title': 'apple_book'}, page_content='Apples are red'), Document(metadata={'title': 'blueberry_book'}, page_content='Blueberries are blue'), Document(metadata={'title': 'banana_book'}, page_content='Bananas are yellow')], 'output_text': 'This content describes the colors of different fruits: apples are red, blueberries are blue, and bananas are yellow.'}\n"
+ ]
+ }
+ ],
+ "source": [
+ "for chunk in chain.stream(documents):\n",
+ " print(chunk)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b4cb6a5b-37ea-48cc-a096-b948d3ff7e9f",
+ "metadata": {},
+ "source": [
+ " \n",
+ "\n",
+ "### LCEL\n",
+ "\n",
+ "\n",
+ "\n",
+ "Below we show an implementation using `create_stuff_documents_chain`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "id": "de38f27a-c648-44be-8c37-0a458c2920a9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.chains.combine_documents import create_stuff_documents_chain\n",
+ "from langchain_core.prompts import ChatPromptTemplate\n",
+ "\n",
+ "prompt = ChatPromptTemplate.from_template(\"Summarize this content: {context}\")\n",
+ "chain = create_stuff_documents_chain(llm, prompt)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9d0e6996-9bf8-4097-9c1a-1c539eac3ed1",
+ "metadata": {},
+ "source": [
+ "Invoking the chain, we obtain a similar result as before:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "id": "f2d2bdfb-3a6a-464b-b4c2-e4252b2e53a0",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'This content describes the colors of different fruits: apples are red, blueberries are blue, and bananas are yellow.'"
+ ]
+ },
+ "execution_count": 24,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "result = chain.invoke({\"context\": documents})\n",
+ "result"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "493e6270-c61d-46c5-91b3-0cf7740a88f9",
+ "metadata": {},
+ "source": [
+ "Note that this implementation supports streaming of output tokens:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "id": "b5adcabd-9bc1-4c91-a12b-7be82d64e457",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ " | This | content | describes | the | colors | of | different | fruits | : | apples | are | red | , | blue | berries | are | blue | , | and | bananas | are | yellow | . | | "
+ ]
+ }
+ ],
+ "source": [
+ "for chunk in chain.stream({\"context\": documents}):\n",
+ " print(chunk, end=\" | \")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "181c5633-38ea-4692-a869-32f4f78398e4",
+ "metadata": {},
+ "source": [
+ " \n",
+ "\n",
+ "## Next steps\n",
+ "\n",
+ "Check out the [LCEL conceptual docs](/docs/concepts/#langchain-expression-language-lcel) for more background information.\n",
+ "\n",
+ "See these [how-to guides](/docs/how_to/#qa-with-rag) for more on question-answering tasks with RAG.\n",
+ "\n",
+ "See [this tutorial](/docs/tutorials/summarization/) for more LLM-based summarization strategies."
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/sidebars.js b/docs/sidebars.js
index a9a9d11cd3b..670768d07cd 100644
--- a/docs/sidebars.js
+++ b/docs/sidebars.js
@@ -93,7 +93,7 @@ module.exports = {
},
{
type: "category",
- label: "Migrating to LCEL",
+ label: "Migrating from v0.0 chains",
link: {type: 'doc', id: 'versions/migrating_chains/index'},
collapsible: false,
collapsed: false,
diff --git a/libs/langchain/langchain/agents/agent_toolkits/vectorstore/base.py b/libs/langchain/langchain/agents/agent_toolkits/vectorstore/base.py
index 79c2a477b59..a9b7fdc4528 100644
--- a/libs/langchain/langchain/agents/agent_toolkits/vectorstore/base.py
+++ b/libs/langchain/langchain/agents/agent_toolkits/vectorstore/base.py
@@ -2,6 +2,7 @@
from typing import Any, Dict, Optional
+from langchain_core._api import deprecated
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
@@ -15,6 +16,16 @@ from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.chains.llm import LLMChain
+@deprecated(
+ since="0.2.13",
+ removal="1.0",
+ message=(
+ "See API reference for this function for a replacement implementation: "
+ "https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_agent.html " # noqa: E501
+ "Read more here on how to create agents that query vector stores: "
+ "https://python.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/#agents"
+ ),
+)
def create_vectorstore_agent(
llm: BaseLanguageModel,
toolkit: VectorStoreToolkit,
@@ -26,6 +37,44 @@ def create_vectorstore_agent(
) -> AgentExecutor:
"""Construct a VectorStore agent from an LLM and tools.
+    Note: this function is deprecated. See below for a replacement that uses tool
+ calling methods and LangGraph. Install LangGraph with:
+
+ .. code-block:: bash
+
+ pip install -U langgraph
+
+ .. code-block:: python
+
+ from langchain_core.tools import create_retriever_tool
+ from langchain_core.vectorstores import InMemoryVectorStore
+ from langchain_openai import ChatOpenAI, OpenAIEmbeddings
+ from langgraph.prebuilt import create_react_agent
+
+ llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+
+ vector_store = InMemoryVectorStore.from_texts(
+ [
+ "Dogs are great companions, known for their loyalty and friendliness.",
+ "Cats are independent pets that often enjoy their own space.",
+ ],
+ OpenAIEmbeddings(),
+ )
+
+ tool = create_retriever_tool(
+ vector_store.as_retriever(),
+ "pet_information_retriever",
+ "Fetches information about pets.",
+ )
+
+ agent = create_react_agent(llm, [tool])
+
+ for step in agent.stream(
+ {"messages": [("human", "What are dogs known for?")]},
+ stream_mode="values",
+ ):
+ step["messages"][-1].pretty_print()
+
Args:
llm (BaseLanguageModel): LLM that will be used by the agent
toolkit (VectorStoreToolkit): Set of tools for the agent
@@ -56,6 +105,16 @@ def create_vectorstore_agent(
)
+@deprecated(
+ since="0.2.13",
+ removal="1.0",
+ message=(
+ "See API reference for this function for a replacement implementation: "
+ "https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_router_agent.html " # noqa: E501
+ "Read more here on how to create agents that query vector stores: "
+ "https://python.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/#agents"
+ ),
+)
def create_vectorstore_router_agent(
llm: BaseLanguageModel,
toolkit: VectorStoreRouterToolkit,
@@ -67,6 +126,59 @@ def create_vectorstore_router_agent(
) -> AgentExecutor:
"""Construct a VectorStore router agent from an LLM and tools.
+    Note: this function is deprecated. See below for a replacement that uses tool
+ calling methods and LangGraph. Install LangGraph with:
+
+ .. code-block:: bash
+
+ pip install -U langgraph
+
+ .. code-block:: python
+
+ from langchain_core.tools import create_retriever_tool
+ from langchain_core.vectorstores import InMemoryVectorStore
+ from langchain_openai import ChatOpenAI, OpenAIEmbeddings
+ from langgraph.prebuilt import create_react_agent
+
+ llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+
+ pet_vector_store = InMemoryVectorStore.from_texts(
+ [
+ "Dogs are great companions, known for their loyalty and friendliness.",
+ "Cats are independent pets that often enjoy their own space.",
+ ],
+ OpenAIEmbeddings(),
+ )
+
+ food_vector_store = InMemoryVectorStore.from_texts(
+ [
+ "Carrots are orange and delicious.",
+ "Apples are red and delicious.",
+ ],
+ OpenAIEmbeddings(),
+ )
+
+ tools = [
+ create_retriever_tool(
+ pet_vector_store.as_retriever(),
+ "pet_information_retriever",
+ "Fetches information about pets.",
+ ),
+ create_retriever_tool(
+ food_vector_store.as_retriever(),
+ "food_information_retriever",
+ "Fetches information about food.",
+ )
+ ]
+
+ agent = create_react_agent(llm, tools)
+
+ for step in agent.stream(
+ {"messages": [("human", "Tell me about carrots.")]},
+ stream_mode="values",
+ ):
+ step["messages"][-1].pretty_print()
+
Args:
llm (BaseLanguageModel): LLM that will be used by the agent
toolkit (VectorStoreRouterToolkit): Set of tools for the agent which have routing capability with multiple vector stores
diff --git a/libs/langchain/langchain/chains/__init__.py b/libs/langchain/langchain/chains/__init__.py
index f1c8061d594..39b9156bc65 100644
--- a/libs/langchain/langchain/chains/__init__.py
+++ b/libs/langchain/langchain/chains/__init__.py
@@ -59,6 +59,7 @@ _module_lookup = {
"OpenAIModerationChain": "langchain.chains.moderation",
"NatBotChain": "langchain.chains.natbot.base",
"create_citation_fuzzy_match_chain": "langchain.chains.openai_functions",
+ "create_citation_fuzzy_match_runnable": "langchain.chains.openai_functions",
"create_extraction_chain": "langchain.chains.openai_functions",
"create_extraction_chain_pydantic": "langchain.chains.openai_functions",
"create_qa_with_sources_chain": "langchain.chains.openai_functions",
diff --git a/libs/langchain/langchain/chains/api/base.py b/libs/langchain/langchain/chains/api/base.py
index 697d68f1ada..94896102dc6 100644
--- a/libs/langchain/langchain/chains/api/base.py
+++ b/libs/langchain/langchain/chains/api/base.py
@@ -5,6 +5,7 @@ from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence, Tuple
from urllib.parse import urlparse
+from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
@@ -53,6 +54,15 @@ def _check_in_allowed_domain(url: str, limit_to_domains: Sequence[str]) -> bool:
try:
from langchain_community.utilities.requests import TextRequestsWrapper
+ @deprecated(
+ since="0.2.13",
+ message=(
+ "This class is deprecated and will be removed in langchain 1.0. "
+ "See API reference for replacement: "
+ "https://api.python.langchain.com/en/latest/chains/langchain.chains.api.base.APIChain.html" # noqa: E501
+ ),
+ removal="1.0",
+ )
class APIChain(Chain):
"""Chain that makes API calls and summarizes the responses to answer a question.
@@ -69,7 +79,117 @@ try:
what network access it has.
See https://python.langchain.com/docs/security for more information.
- """
+
+ Note: this class is deprecated. See below for a replacement implementation
+ using LangGraph. The benefits of this implementation are:
+
+ - Uses LLM tool calling features to encourage properly-formatted API requests;
+ - Support for both token-by-token and step-by-step streaming;
+ - Support for checkpointing and memory of chat history;
+ - Easier to modify or extend (e.g., with additional tools, structured responses, etc.)
+
+ Install LangGraph with:
+
+ .. code-block:: bash
+
+ pip install -U langgraph
+
+ .. code-block:: python
+
+ from typing import Annotated, Sequence
+ from typing_extensions import TypedDict
+
+ from langchain.chains.api.prompt import API_URL_PROMPT
+ from langchain_community.agent_toolkits.openapi.toolkit import RequestsToolkit
+ from langchain_community.utilities.requests import TextRequestsWrapper
+ from langchain_core.messages import BaseMessage
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_openai import ChatOpenAI
+ from langchain_core.runnables import RunnableConfig
+ from langgraph.graph import END, StateGraph
+ from langgraph.graph.message import add_messages
+ from langgraph.prebuilt.tool_node import ToolNode
+
+ # NOTE: There are inherent risks in giving models discretion
+ # to execute real-world actions. We must "opt-in" to these
+ # risks by setting allow_dangerous_request=True to use these tools.
+ # This can be dangerous for calling unwanted requests. Please make
+ # sure your custom OpenAPI spec (yaml) is safe and that permissions
+ # associated with the tools are narrowly-scoped.
+ ALLOW_DANGEROUS_REQUESTS = True
+
+ # Subset of spec for https://jsonplaceholder.typicode.com
+ api_spec = \"\"\"
+ openapi: 3.0.0
+ info:
+ title: JSONPlaceholder API
+ version: 1.0.0
+ servers:
+ - url: https://jsonplaceholder.typicode.com
+ paths:
+ /posts:
+ get:
+ summary: Get posts
+ parameters: &id001
+ - name: _limit
+ in: query
+ required: false
+ schema:
+ type: integer
+ example: 2
+ description: Limit the number of results
+ \"\"\"
+
+ llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+ toolkit = RequestsToolkit(
+ requests_wrapper=TextRequestsWrapper(headers={}), # no auth required
+ allow_dangerous_requests=ALLOW_DANGEROUS_REQUESTS,
+ )
+ tools = toolkit.get_tools()
+
+ api_request_chain = (
+ API_URL_PROMPT.partial(api_docs=api_spec)
+ | llm.bind_tools(tools, tool_choice="any")
+ )
+
+ class ChainState(TypedDict):
+ \"\"\"LangGraph state.\"\"\"
+
+ messages: Annotated[Sequence[BaseMessage], add_messages]
+
+
+ async def acall_request_chain(state: ChainState, config: RunnableConfig):
+ last_message = state["messages"][-1]
+ response = await api_request_chain.ainvoke(
+ {"question": last_message.content}, config
+ )
+ return {"messages": [response]}
+
+ async def acall_model(state: ChainState, config: RunnableConfig):
+ response = await llm.ainvoke(state["messages"], config)
+ return {"messages": [response]}
+
+ graph_builder = StateGraph(ChainState)
+ graph_builder.add_node("call_tool", acall_request_chain)
+ graph_builder.add_node("execute_tool", ToolNode(tools))
+ graph_builder.add_node("call_model", acall_model)
+ graph_builder.set_entry_point("call_tool")
+ graph_builder.add_edge("call_tool", "execute_tool")
+ graph_builder.add_edge("execute_tool", "call_model")
+ graph_builder.add_edge("call_model", END)
+ chain = graph_builder.compile()
+
+ .. code-block:: python
+
+ example_query = "Fetch the top two posts. What are their titles?"
+
+ events = chain.astream(
+ {"messages": [("user", example_query)]},
+ stream_mode="values",
+ )
+ async for event in events:
+ event["messages"][-1].pretty_print()
+ """ # noqa: E501
api_request_chain: LLMChain
api_answer_chain: LLMChain
diff --git a/libs/langchain/langchain/chains/combine_documents/map_rerank.py b/libs/langchain/langchain/chains/combine_documents/map_rerank.py
index e05592caf11..0fa346dee8b 100644
--- a/libs/langchain/langchain/chains/combine_documents/map_rerank.py
+++ b/libs/langchain/langchain/chains/combine_documents/map_rerank.py
@@ -25,7 +25,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
Example:
.. code-block:: python
- from langchain.chains import StuffDocumentsChain, LLMChain
+ from langchain.chains import MapRerankDocumentsChain, LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_community.llms import OpenAI
from langchain.output_parsers.regex import RegexParser
@@ -39,7 +39,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
prompt_template = (
"Use the following context to tell me the chemical formula "
"for water. Output both your answer and a score of how confident "
- "you are. Context: {content}"
+ "you are. Context: {context}"
)
output_parser = RegexParser(
regex=r"(.*?)\nScore: (.*)",
diff --git a/libs/langchain/langchain/chains/combine_documents/stuff.py b/libs/langchain/langchain/chains/combine_documents/stuff.py
index 544adc50b7a..5ffd86c9718 100644
--- a/libs/langchain/langchain/chains/combine_documents/stuff.py
+++ b/libs/langchain/langchain/chains/combine_documents/stuff.py
@@ -2,6 +2,7 @@
from typing import Any, Dict, List, Optional, Tuple
+from langchain_core._api import deprecated
from langchain_core.callbacks import Callbacks
from langchain_core.documents import Document
from langchain_core.language_models import LanguageModelLike
@@ -95,6 +96,15 @@ def create_stuff_documents_chain(
).with_config(run_name="stuff_documents_chain")
+@deprecated(
+ since="0.2.13",
+ removal="1.0",
+ message=(
+ "This class is deprecated. Use the `create_stuff_documents_chain` constructor "
+ "instead. See migration guide here: "
+ "https://python.langchain.com/v0.2/docs/versions/migrating_chains/stuff_docs_chain/" # noqa: E501
+ ),
+)
class StuffDocumentsChain(BaseCombineDocumentsChain):
"""Chain that combines documents by stuffing into context.
diff --git a/libs/langchain/langchain/chains/elasticsearch_database/base.py b/libs/langchain/langchain/chains/elasticsearch_database/base.py
index 2e4b97c8dee..89875f2d8a4 100644
--- a/libs/langchain/langchain/chains/elasticsearch_database/base.py
+++ b/libs/langchain/langchain/chains/elasticsearch_database/base.py
@@ -6,14 +6,14 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
-from langchain_core.output_parsers import BaseLLMOutputParser
+from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
from langchain_core.output_parsers.json import SimpleJsonOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import root_validator
+from langchain_core.runnables import Runnable
from langchain.chains.base import Chain
from langchain.chains.elasticsearch_database.prompts import ANSWER_PROMPT, DSL_PROMPT
-from langchain.chains.llm import LLMChain
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
@@ -35,9 +35,9 @@ class ElasticsearchDatabaseChain(Chain):
db_chain = ElasticsearchDatabaseChain.from_llm(OpenAI(), database)
"""
- query_chain: LLMChain
+ query_chain: Runnable
"""Chain for creating the ES query."""
- answer_chain: LLMChain
+ answer_chain: Runnable
"""Chain for answering the user question."""
database: Any
"""Elasticsearch database to connect to of type elasticsearch.Elasticsearch."""
@@ -135,9 +135,9 @@ class ElasticsearchDatabaseChain(Chain):
intermediate_steps: List = []
try:
intermediate_steps.append(query_inputs) # input: es generation
- es_cmd = self.query_chain.run(
- callbacks=_run_manager.get_child(),
- **query_inputs,
+ es_cmd = self.query_chain.invoke(
+ query_inputs,
+ config={"callbacks": _run_manager.get_child()},
)
_run_manager.on_text(es_cmd, color="green", verbose=self.verbose)
@@ -154,9 +154,9 @@ class ElasticsearchDatabaseChain(Chain):
_run_manager.on_text("\nAnswer:", verbose=self.verbose)
answer_inputs: dict = {"data": result, "input": input_text}
intermediate_steps.append(answer_inputs) # input: final answer
- final_result = self.answer_chain.run(
- callbacks=_run_manager.get_child(),
- **answer_inputs,
+ final_result = self.answer_chain.invoke(
+ answer_inputs,
+ config={"callbacks": _run_manager.get_child()},
)
intermediate_steps.append(final_result) # output: final answer
@@ -183,7 +183,7 @@ class ElasticsearchDatabaseChain(Chain):
*,
query_prompt: Optional[BasePromptTemplate] = None,
answer_prompt: Optional[BasePromptTemplate] = None,
- query_output_parser: Optional[BaseLLMOutputParser] = None,
+ query_output_parser: Optional[BaseOutputParser] = None,
**kwargs: Any,
) -> ElasticsearchDatabaseChain:
"""Convenience method to construct ElasticsearchDatabaseChain from an LLM.
@@ -199,11 +199,9 @@ class ElasticsearchDatabaseChain(Chain):
"""
query_prompt = query_prompt or DSL_PROMPT
query_output_parser = query_output_parser or SimpleJsonOutputParser()
- query_chain = LLMChain(
- llm=llm, prompt=query_prompt, output_parser=query_output_parser
- )
+ query_chain = query_prompt | llm | query_output_parser
answer_prompt = answer_prompt or ANSWER_PROMPT
- answer_chain = LLMChain(llm=llm, prompt=answer_prompt)
+ answer_chain = answer_prompt | llm | StrOutputParser()
return cls(
query_chain=query_chain,
answer_chain=answer_chain,
diff --git a/libs/langchain/langchain/chains/example_generator.py b/libs/langchain/langchain/chains/example_generator.py
index 8aae0565bfa..9cd4e6f01ee 100644
--- a/libs/langchain/langchain/chains/example_generator.py
+++ b/libs/langchain/langchain/chains/example_generator.py
@@ -1,11 +1,10 @@
from typing import List
from langchain_core.language_models import BaseLanguageModel
+from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts.few_shot import FewShotPromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
-from langchain.chains.llm import LLMChain
-
TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
@@ -19,5 +18,5 @@ def generate_example(
input_variables=[],
example_prompt=prompt_template,
)
- chain = LLMChain(llm=llm, prompt=prompt)
- return chain.predict()
+ chain = prompt | llm | StrOutputParser()
+ return chain.invoke({})
diff --git a/libs/langchain/langchain/chains/llm_checker/base.py b/libs/langchain/langchain/chains/llm_checker/base.py
index 2e2fa61d725..ea2bc546a57 100644
--- a/libs/langchain/langchain/chains/llm_checker/base.py
+++ b/libs/langchain/langchain/chains/llm_checker/base.py
@@ -5,6 +5,7 @@ from __future__ import annotations
import warnings
from typing import Any, Dict, List, Optional
+from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
@@ -63,6 +64,15 @@ def _load_question_to_checked_assertions_chain(
return question_to_checked_assertions_chain
+@deprecated(
+ since="0.2.13",
+ message=(
+ "See LangGraph guides for a variety of self-reflection and corrective "
+ "strategies for question-answering and other tasks: "
+ "https://langchain-ai.github.io/langgraph/tutorials/rag/langgraph_self_rag/"
+ ),
+ removal="1.0",
+)
class LLMCheckerChain(Chain):
"""Chain for question-answering with self-verification.
diff --git a/libs/langchain/langchain/chains/llm_summarization_checker/base.py b/libs/langchain/langchain/chains/llm_summarization_checker/base.py
index da310232cf7..f177f401529 100644
--- a/libs/langchain/langchain/chains/llm_summarization_checker/base.py
+++ b/libs/langchain/langchain/chains/llm_summarization_checker/base.py
@@ -6,6 +6,7 @@ import warnings
from pathlib import Path
from typing import Any, Dict, List, Optional
+from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.prompt import PromptTemplate
@@ -65,6 +66,15 @@ def _load_sequential_chain(
return chain
+@deprecated(
+ since="0.2.13",
+ message=(
+ "See LangGraph guides for a variety of self-reflection and corrective "
+ "strategies for question-answering and other tasks: "
+ "https://langchain-ai.github.io/langgraph/tutorials/rag/langgraph_self_rag/"
+ ),
+ removal="1.0",
+)
class LLMSummarizationCheckerChain(Chain):
"""Chain for question-answering with self-verification.
diff --git a/libs/langchain/langchain/chains/loading.py b/libs/langchain/langchain/chains/loading.py
index da6bd0cc228..2371edf24ab 100644
--- a/libs/langchain/langchain/chains/loading.py
+++ b/libs/langchain/langchain/chains/loading.py
@@ -7,6 +7,7 @@ from pathlib import Path
from typing import TYPE_CHECKING, Any, Union
import yaml
+from langchain_core._api import deprecated
from langchain_core.prompts.loading import (
_load_output_parser,
load_prompt,
@@ -649,6 +650,14 @@ type_to_loader_dict = {
}
+@deprecated(
+ since="0.2.13",
+ message=(
+ "This function is deprecated and will be removed in langchain 1.0. "
+ "At that point chains must be imported from their respective modules."
+ ),
+ removal="1.0",
+)
def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
"""Load chain from Config Dict."""
if "_type" not in config:
@@ -662,6 +671,14 @@ def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
return chain_loader(config, **kwargs)
+@deprecated(
+ since="0.2.13",
+ message=(
+ "This function is deprecated and will be removed in langchain 1.0. "
+ "At that point chains must be imported from their respective modules."
+ ),
+ removal="1.0",
+)
def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
"""Unified method for loading a chain from LangChainHub or local fs."""
if isinstance(path, str) and path.startswith("lc://"):
diff --git a/libs/langchain/langchain/chains/mapreduce.py b/libs/langchain/langchain/chains/mapreduce.py
index 359133f0de2..1eaccf67a85 100644
--- a/libs/langchain/langchain/chains/mapreduce.py
+++ b/libs/langchain/langchain/chains/mapreduce.py
@@ -8,6 +8,7 @@ from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
+from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun, Callbacks
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
@@ -22,6 +23,16 @@ from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
+@deprecated(
+ since="0.2.13",
+ removal="1.0",
+ message=(
+ "Refer here for a recommended map-reduce implementation using langgraph: "
+ "https://langchain-ai.github.io/langgraph/how-tos/map-reduce/. See also "
+ "migration guide: "
+ "https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_reduce_chain/" # noqa: E501
+ ),
+)
class MapReduceChain(Chain):
"""Map-reduce chain."""
diff --git a/libs/langchain/langchain/chains/openai_functions/__init__.py b/libs/langchain/langchain/chains/openai_functions/__init__.py
index 0bed9159a75..6312b619890 100644
--- a/libs/langchain/langchain/chains/openai_functions/__init__.py
+++ b/libs/langchain/langchain/chains/openai_functions/__init__.py
@@ -6,6 +6,7 @@ from langchain.chains.openai_functions.base import (
)
from langchain.chains.openai_functions.citation_fuzzy_match import (
create_citation_fuzzy_match_chain,
+ create_citation_fuzzy_match_runnable,
)
from langchain.chains.openai_functions.extraction import (
create_extraction_chain,
@@ -32,6 +33,7 @@ __all__ = [
"create_extraction_chain_pydantic",
"create_extraction_chain",
"create_citation_fuzzy_match_chain",
+ "create_citation_fuzzy_match_runnable",
"create_qa_with_structure_chain",
"create_qa_with_sources_chain",
"create_structured_output_chain",
diff --git a/libs/langchain/langchain/chains/openai_functions/citation_fuzzy_match.py b/libs/langchain/langchain/chains/openai_functions/citation_fuzzy_match.py
index 05bb27d7a9a..038489d13a6 100644
--- a/libs/langchain/langchain/chains/openai_functions/citation_fuzzy_match.py
+++ b/libs/langchain/langchain/chains/openai_functions/citation_fuzzy_match.py
@@ -1,10 +1,12 @@
from typing import Iterator, List
-from langchain_core.language_models import BaseLanguageModel
+from langchain_core._api import deprecated
+from langchain_core.language_models import BaseChatModel, BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers.openai_functions import PydanticOutputFunctionsParser
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
+from langchain_core.runnables import Runnable
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
@@ -61,6 +63,57 @@ class QuestionAnswer(BaseModel):
)
+def create_citation_fuzzy_match_runnable(llm: BaseChatModel) -> Runnable:
+ """Create a citation fuzzy match Runnable.
+
+ Example usage:
+
+ .. code-block:: python
+
+ from langchain.chains import create_citation_fuzzy_match_runnable
+ from langchain_openai import ChatOpenAI
+
+ llm = ChatOpenAI(model="gpt-4o-mini")
+
+ context = "Alice has blue eyes. Bob has brown eyes. Charlie has green eyes."
+ question = "What color are Bob's eyes?"
+
+ chain = create_citation_fuzzy_match_runnable(llm)
+ chain.invoke({"question": question, "context": context})
+
+ Args:
+ llm: Language model to use for the chain. Must implement bind_tools.
+
+ Returns:
+ Runnable that can be used to answer questions with citations.
+ """
+    if type(llm).bind_tools is BaseChatModel.bind_tools:
+ raise ValueError(
+ "Language model must implement bind_tools to use this function."
+ )
+ prompt = ChatPromptTemplate(
+ [
+ SystemMessage(
+ "You are a world class algorithm to answer "
+ "questions with correct and exact citations."
+ ),
+ HumanMessagePromptTemplate.from_template(
+ "Answer question using the following context."
+ "\n\n{context}"
+ "\n\nQuestion: {question}"
+ "\n\nTips: Make sure to cite your sources, "
+ "and use the exact words from the context."
+ ),
+ ]
+ )
+ return prompt | llm.with_structured_output(QuestionAnswer)
+
+
+@deprecated(
+ since="0.2.13",
+ removal="1.0",
+ alternative="create_citation_fuzzy_match_runnable",
+)
def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
"""Create a citation fuzzy match chain.
diff --git a/libs/langchain/langchain/chains/openai_functions/openapi.py b/libs/langchain/langchain/chains/openai_functions/openapi.py
index 79c78fdc2ef..de4ba38dcd7 100644
--- a/libs/langchain/langchain/chains/openai_functions/openapi.py
+++ b/libs/langchain/langchain/chains/openai_functions/openapi.py
@@ -6,6 +6,7 @@ from collections import defaultdict
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
import requests
+from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
@@ -242,6 +243,15 @@ class SimpleRequestChain(Chain):
return {self.output_key: response}
+@deprecated(
+ since="0.2.13",
+ message=(
+ "This function is deprecated and will be removed in langchain 1.0. "
+ "See API reference for replacement: "
+ "https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.openapi.get_openapi_chain.html" # noqa: E501
+ ),
+ removal="1.0",
+)
def get_openapi_chain(
spec: Union[OpenAPISpec, str],
llm: Optional[BaseLanguageModel] = None,
@@ -255,13 +265,90 @@ def get_openapi_chain(
) -> SequentialChain:
"""Create a chain for querying an API from a OpenAPI spec.
+    Note: this function is deprecated. See below for a replacement implementation.
+ The benefits of this implementation are:
+
+ - Uses LLM tool calling features to encourage properly-formatted API requests;
+ - Includes async support.
+
+ .. code-block:: python
+
+ from typing import Any
+
+ from langchain.chains.openai_functions.openapi import openapi_spec_to_openai_fn
+ from langchain_community.utilities.openapi import OpenAPISpec
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_openai import ChatOpenAI
+
+ # Define API spec. Can be JSON or YAML
+ api_spec = \"\"\"
+ {
+ "openapi": "3.1.0",
+ "info": {
+ "title": "JSONPlaceholder API",
+ "version": "1.0.0"
+ },
+ "servers": [
+ {
+ "url": "https://jsonplaceholder.typicode.com"
+ }
+ ],
+ "paths": {
+ "/posts": {
+ "get": {
+ "summary": "Get posts",
+ "parameters": [
+ {
+ "name": "_limit",
+ "in": "query",
+ "required": false,
+ "schema": {
+ "type": "integer",
+ "example": 2
+ },
+ "description": "Limit the number of results"
+ }
+ ]
+ }
+ }
+ }
+ }
+ \"\"\"
+
+ parsed_spec = OpenAPISpec.from_text(api_spec)
+ openai_fns, call_api_fn = openapi_spec_to_openai_fn(parsed_spec)
+ tools = [
+ {"type": "function", "function": fn}
+ for fn in openai_fns
+ ]
+
+ prompt = ChatPromptTemplate.from_template(
+ "Use the provided APIs to respond to this user query:\\n\\n{query}"
+ )
+ llm = ChatOpenAI(model="gpt-4o-mini", temperature=0).bind_tools(tools)
+
+ def _execute_tool(message) -> Any:
+ if tool_calls := message.tool_calls:
+                    tool_call = tool_calls[0]
+ response = call_api_fn(name=tool_call["name"], fn_args=tool_call["args"])
+ response.raise_for_status()
+ return response.json()
+ else:
+ return message.content
+
+ chain = prompt | llm | _execute_tool
+
+ .. code-block:: python
+
+ response = chain.invoke({"query": "Get me top two posts."})
+
Args:
spec: OpenAPISpec or url/file/text string corresponding to one.
llm: language model, should be an OpenAI function-calling model, e.g.
`ChatOpenAI(model="gpt-3.5-turbo-0613")`.
prompt: Main prompt template to use.
request_chain: Chain for taking the functions output and executing the request.
- """
+ """ # noqa: E501
try:
from langchain_community.utilities.openapi import OpenAPISpec
except ImportError as e:
diff --git a/libs/langchain/langchain/chains/openai_functions/qa_with_structure.py b/libs/langchain/langchain/chains/openai_functions/qa_with_structure.py
index 9ef856d8b96..f13e2f9e522 100644
--- a/libs/langchain/langchain/chains/openai_functions/qa_with_structure.py
+++ b/libs/langchain/langchain/chains/openai_functions/qa_with_structure.py
@@ -1,5 +1,6 @@
from typing import Any, List, Optional, Type, Union, cast
+from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
@@ -25,6 +26,15 @@ class AnswerWithSources(BaseModel):
)
+@deprecated(
+ since="0.2.13",
+ removal="1.0",
+ message=(
+ "This function is deprecated. Refer to this guide on retrieval and question "
+ "answering with structured responses: "
+ "https://python.langchain.com/v0.2/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
+ ),
+)
def create_qa_with_structure_chain(
llm: BaseLanguageModel,
schema: Union[dict, Type[BaseModel]],
@@ -95,6 +105,15 @@ def create_qa_with_structure_chain(
return chain
+@deprecated(
+ since="0.2.13",
+ removal="1.0",
+ message=(
+ "This function is deprecated. Refer to this guide on retrieval and question "
+ "answering with sources: "
+ "https://python.langchain.com/v0.2/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
+ ),
+)
def create_qa_with_sources_chain(
llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any
) -> LLMChain:
diff --git a/libs/langchain/langchain/chains/openai_functions/tagging.py b/libs/langchain/langchain/chains/openai_functions/tagging.py
index e96a138dfd1..aab5421156a 100644
--- a/libs/langchain/langchain/chains/openai_functions/tagging.py
+++ b/libs/langchain/langchain/chains/openai_functions/tagging.py
@@ -1,5 +1,6 @@
from typing import Any, Optional
+from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers.openai_functions import (
JsonOutputFunctionsParser,
@@ -29,6 +30,21 @@ Passage:
"""
+@deprecated(
+ since="0.2.13",
+ message=(
+ "LangChain has introduced a method called `with_structured_output` that "
+ "is available on ChatModels capable of tool calling. "
+ "See API reference for this function for replacement: "
+ " " # noqa: E501
+ "You can read more about `with_structured_output` here: "
+ ". "
+ "If you notice other issues, please provide "
+ "feedback here: "
+ ""
+ ),
+ removal="1.0",
+)
def create_tagging_chain(
schema: dict,
llm: BaseLanguageModel,
@@ -38,6 +54,32 @@ def create_tagging_chain(
"""Create a chain that extracts information from a passage
based on a schema.
+ This function is deprecated. Please use `with_structured_output` instead.
+ See example usage below:
+
+ .. code-block:: python
+
+ from typing_extensions import Annotated, TypedDict
+ from langchain_anthropic import ChatAnthropic
+
+ class Joke(TypedDict):
+ \"\"\"Tagged joke.\"\"\"
+
+ setup: Annotated[str, ..., "The setup of the joke"]
+ punchline: Annotated[str, ..., "The punchline of the joke"]
+
+ # Or any other chat model that supports tools.
+            # Please refer to the documentation on structured output
+ # to see an up to date list of which models support
+ # with_structured_output.
+ model = ChatAnthropic(model="claude-3-haiku-20240307", temperature=0)
+ structured_llm = model.with_structured_output(Joke)
+ structured_llm.invoke(
+ "Why did the cat cross the road? To get to the other "
+ "side... and then lay down in the middle of it!"
+ )
+ Read more here: https://python.langchain.com/v0.2/docs/how_to/structured_output/
+
Args:
schema: The schema of the entities to extract.
llm: The language model to use.
@@ -59,6 +101,21 @@ def create_tagging_chain(
return chain
+@deprecated(
+ since="0.2.13",
+ message=(
+ "LangChain has introduced a method called `with_structured_output` that "
+ "is available on ChatModels capable of tool calling. "
+ "See API reference for this function for replacement: "
+ " " # noqa: E501
+ "You can read more about `with_structured_output` here: "
+ ". "
+ "If you notice other issues, please provide "
+ "feedback here: "
+ ""
+ ),
+ removal="1.0",
+)
def create_tagging_chain_pydantic(
pydantic_schema: Any,
llm: BaseLanguageModel,
@@ -68,6 +125,30 @@ def create_tagging_chain_pydantic(
"""Create a chain that extracts information from a passage
based on a pydantic schema.
+ This function is deprecated. Please use `with_structured_output` instead.
+ See example usage below:
+
+ .. code-block:: python
+
+ from langchain_core.pydantic_v1 import BaseModel, Field
+ from langchain_anthropic import ChatAnthropic
+
+ class Joke(BaseModel):
+ setup: str = Field(description="The setup of the joke")
+ punchline: str = Field(description="The punchline to the joke")
+
+ # Or any other chat model that supports tools.
+            # Please refer to the documentation on structured output
+ # to see an up to date list of which models support
+ # with_structured_output.
+ model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
+ structured_llm = model.with_structured_output(Joke)
+ structured_llm.invoke(
+ "Why did the cat cross the road? To get to the other "
+ "side... and then lay down in the middle of it!"
+ )
+ Read more here: https://python.langchain.com/v0.2/docs/how_to/structured_output/
+
Args:
pydantic_schema: The pydantic schema of the entities to extract.
llm: The language model to use.
diff --git a/libs/langchain/langchain/chains/qa_with_sources/base.py b/libs/langchain/langchain/chains/qa_with_sources/base.py
index c7324a7e32f..aed2d57cf91 100644
--- a/libs/langchain/langchain/chains/qa_with_sources/base.py
+++ b/libs/langchain/langchain/chains/qa_with_sources/base.py
@@ -7,6 +7,7 @@ import re
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple
+from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
@@ -30,6 +31,15 @@ from langchain.chains.qa_with_sources.map_reduce_prompt import (
)
+@deprecated(
+ since="0.2.13",
+ removal="1.0",
+ message=(
+ "This class is deprecated. Refer to this guide on retrieval and question "
+ "answering with sources: "
+ "https://python.langchain.com/v0.2/docs/how_to/qa_sources/"
+ ),
+)
class BaseQAWithSourcesChain(Chain, ABC):
"""Question answering chain with sources over documents."""
@@ -198,6 +208,15 @@ class BaseQAWithSourcesChain(Chain, ABC):
return result
+@deprecated(
+ since="0.2.13",
+ removal="1.0",
+ message=(
+ "This class is deprecated. Refer to this guide on retrieval and question "
+ "answering with sources: "
+ "https://python.langchain.com/v0.2/docs/how_to/qa_sources/"
+ ),
+)
class QAWithSourcesChain(BaseQAWithSourcesChain):
"""Question answering with sources over documents."""
diff --git a/libs/langchain/langchain/chains/qa_with_sources/loading.py b/libs/langchain/langchain/chains/qa_with_sources/loading.py
index c24a20bb556..24578216849 100644
--- a/libs/langchain/langchain/chains/qa_with_sources/loading.py
+++ b/libs/langchain/langchain/chains/qa_with_sources/loading.py
@@ -4,6 +4,7 @@ from __future__ import annotations
from typing import Any, Mapping, Optional, Protocol
+from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
@@ -151,6 +152,21 @@ def _load_refine_chain(
)
+@deprecated(
+ since="0.2.13",
+ removal="1.0",
+ message=(
+ "This function is deprecated. Refer to this guide on retrieval and question "
+ "answering with sources: "
+ "https://python.langchain.com/v0.2/docs/how_to/qa_sources/"
+ "\nSee also the following migration guides for replacements "
+ "based on `chain_type`:\n"
+ "stuff: https://python.langchain.com/v0.2/docs/versions/migrating_chains/stuff_docs_chain\n" # noqa: E501
+ "map_reduce: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_reduce_chain\n" # noqa: E501
+ "refine: https://python.langchain.com/v0.2/docs/versions/migrating_chains/refine_chain\n" # noqa: E501
+ "map_rerank: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_rerank_docs_chain\n" # noqa: E501
+ ),
+)
def load_qa_with_sources_chain(
llm: BaseLanguageModel,
chain_type: str = "stuff",
diff --git a/libs/langchain/langchain/chains/query_constructor/base.py b/libs/langchain/langchain/chains/query_constructor/base.py
index 0d303f6a616..419cd6002ad 100644
--- a/libs/langchain/langchain/chains/query_constructor/base.py
+++ b/libs/langchain/langchain/chains/query_constructor/base.py
@@ -5,6 +5,7 @@ from __future__ import annotations
import json
from typing import Any, Callable, List, Optional, Sequence, Tuple, Union, cast
+from langchain_core._api import deprecated
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser
@@ -257,6 +258,11 @@ def get_query_constructor_prompt(
)
+@deprecated(
+ since="0.2.13",
+ alternative="load_query_constructor_runnable",
+ removal="1.0",
+)
def load_query_constructor_chain(
llm: BaseLanguageModel,
document_contents: str,
diff --git a/libs/langchain/langchain/chains/question_answering/chain.py b/libs/langchain/langchain/chains/question_answering/chain.py
index f83ae8fa8dd..c83895dc2a8 100644
--- a/libs/langchain/langchain/chains/question_answering/chain.py
+++ b/libs/langchain/langchain/chains/question_answering/chain.py
@@ -2,6 +2,7 @@
from typing import Any, Mapping, Optional, Protocol
+from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager, Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
@@ -216,6 +217,20 @@ def _load_refine_chain(
)
+@deprecated(
+ since="0.2.13",
+ removal="1.0",
+ message=(
+ "This class is deprecated. See the following migration guides for replacements "
+ "based on `chain_type`:\n"
+ "stuff: https://python.langchain.com/v0.2/docs/versions/migrating_chains/stuff_docs_chain\n" # noqa: E501
+ "map_reduce: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_reduce_chain\n" # noqa: E501
+ "refine: https://python.langchain.com/v0.2/docs/versions/migrating_chains/refine_chain\n" # noqa: E501
+ "map_rerank: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_rerank_docs_chain\n" # noqa: E501
+ "\nSee also guides on retrieval and question-answering here: "
+ "https://python.langchain.com/v0.2/docs/how_to/#qa-with-rag"
+ ),
+)
def load_qa_chain(
llm: BaseLanguageModel,
chain_type: str = "stuff",
diff --git a/libs/langchain/langchain/chains/retrieval_qa/base.py b/libs/langchain/langchain/chains/retrieval_qa/base.py
index 0b25dc00b0b..689dd8b0c21 100644
--- a/libs/langchain/langchain/chains/retrieval_qa/base.py
+++ b/libs/langchain/langchain/chains/retrieval_qa/base.py
@@ -28,6 +28,15 @@ from langchain.chains.question_answering import load_qa_chain
from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR
+@deprecated(
+ since="0.2.13",
+ removal="1.0",
+ message=(
+ "This class is deprecated. Use the `create_retrieval_chain` constructor "
+ "instead. See migration guide here: "
+ "https://python.langchain.com/v0.2/docs/versions/migrating_chains/retrieval_qa/"
+ ),
+)
class BaseRetrievalQA(Chain):
"""Base class for question-answering chains."""
@@ -194,7 +203,15 @@ class BaseRetrievalQA(Chain):
return {self.output_key: answer}
-@deprecated(since="0.1.17", alternative="create_retrieval_chain", removal="0.3.0")
+@deprecated(
+ since="0.1.17",
+ removal="1.0",
+ message=(
+ "This class is deprecated. Use the `create_retrieval_chain` constructor "
+ "instead. See migration guide here: "
+ "https://python.langchain.com/v0.2/docs/versions/migrating_chains/retrieval_qa/"
+ ),
+)
class RetrievalQA(BaseRetrievalQA):
"""Chain for question-answering against an index.
@@ -271,6 +288,15 @@ class RetrievalQA(BaseRetrievalQA):
return "retrieval_qa"
+@deprecated(
+ since="0.2.13",
+ removal="1.0",
+ message=(
+ "This class is deprecated. Use the `create_retrieval_chain` constructor "
+ "instead. See migration guide here: "
+ "https://python.langchain.com/v0.2/docs/versions/migrating_chains/retrieval_qa/"
+ ),
+)
class VectorDBQA(BaseRetrievalQA):
"""Chain for question-answering against a vector database."""
diff --git a/libs/langchain/langchain/chains/router/llm_router.py b/libs/langchain/langchain/chains/router/llm_router.py
index f08d34ab553..132f350e819 100644
--- a/libs/langchain/langchain/chains/router/llm_router.py
+++ b/libs/langchain/langchain/chains/router/llm_router.py
@@ -4,6 +4,7 @@ from __future__ import annotations
from typing import Any, Dict, List, Optional, Type, cast
+from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
@@ -19,8 +20,82 @@ from langchain.chains import LLMChain
from langchain.chains.router.base import RouterChain
+@deprecated(
+ since="0.2.12",
+ removal="1.0",
+ message=(
+ "Use RunnableLambda to select from multiple prompt templates. See example "
+ "in API reference: "
+ "https://api.python.langchain.com/en/latest/chains/langchain.chains.router.llm_router.LLMRouterChain.html" # noqa: E501
+ ),
+)
class LLMRouterChain(RouterChain):
- """A router chain that uses an LLM chain to perform routing."""
+ """A router chain that uses an LLM chain to perform routing.
+
+ This class is deprecated. See below for a replacement, which offers several
+ benefits, including streaming and batch support.
+
+ Below is an example implementation:
+
+ .. code-block:: python
+
+ from operator import itemgetter
+ from typing import Literal
+ from typing_extensions import TypedDict
+
+ from langchain_core.output_parsers import StrOutputParser
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.runnables import RunnableLambda, RunnablePassthrough
+ from langchain_openai import ChatOpenAI
+
+ llm = ChatOpenAI(model="gpt-4o-mini")
+
+ prompt_1 = ChatPromptTemplate.from_messages(
+ [
+ ("system", "You are an expert on animals."),
+ ("human", "{query}"),
+ ]
+ )
+ prompt_2 = ChatPromptTemplate.from_messages(
+ [
+ ("system", "You are an expert on vegetables."),
+ ("human", "{query}"),
+ ]
+ )
+
+ chain_1 = prompt_1 | llm | StrOutputParser()
+ chain_2 = prompt_2 | llm | StrOutputParser()
+
+ route_system = "Route the user's query to either the animal or vegetable expert."
+ route_prompt = ChatPromptTemplate.from_messages(
+ [
+ ("system", route_system),
+ ("human", "{query}"),
+ ]
+ )
+
+
+ class RouteQuery(TypedDict):
+ \"\"\"Route query to destination.\"\"\"
+ destination: Literal["animal", "vegetable"]
+
+
+ route_chain = (
+ route_prompt
+ | llm.with_structured_output(RouteQuery)
+ | itemgetter("destination")
+ )
+
+ chain = {
+ "destination": route_chain, # "animal" or "vegetable"
+ "query": lambda x: x["query"], # pass through input query
+ } | RunnableLambda(
+ # if animal, chain_1. otherwise, chain_2.
+ lambda x: chain_1 if x["destination"] == "animal" else chain_2,
+ )
+
+ chain.invoke({"query": "what color are carrots"})
+ """ # noqa: E501
llm_chain: LLMChain
"""LLM chain used to perform routing"""
diff --git a/libs/langchain/langchain/chains/router/multi_prompt.py b/libs/langchain/langchain/chains/router/multi_prompt.py
index fe10e1db7d6..214b9a2b372 100644
--- a/libs/langchain/langchain/chains/router/multi_prompt.py
+++ b/libs/langchain/langchain/chains/router/multi_prompt.py
@@ -4,6 +4,7 @@ from __future__ import annotations
from typing import Any, Dict, List, Optional
+from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
@@ -15,8 +16,82 @@ from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParse
from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE
+@deprecated(
+ since="0.2.12",
+ removal="1.0",
+ message=(
+ "Use RunnableLambda to select from multiple prompt templates. See example "
+ "in API reference: "
+ "https://api.python.langchain.com/en/latest/chains/langchain.chains.router.multi_prompt.MultiPromptChain.html" # noqa: E501
+ ),
+)
class MultiPromptChain(MultiRouteChain):
- """A multi-route chain that uses an LLM router chain to choose amongst prompts."""
+ """A multi-route chain that uses an LLM router chain to choose amongst prompts.
+
+ This class is deprecated. See below for a replacement, which offers several
+ benefits, including streaming and batch support.
+
+ Below is an example implementation:
+
+ .. code-block:: python
+
+ from operator import itemgetter
+ from typing import Literal
+ from typing_extensions import TypedDict
+
+ from langchain_core.output_parsers import StrOutputParser
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.runnables import RunnableLambda, RunnablePassthrough
+ from langchain_openai import ChatOpenAI
+
+ llm = ChatOpenAI(model="gpt-4o-mini")
+
+ prompt_1 = ChatPromptTemplate.from_messages(
+ [
+ ("system", "You are an expert on animals."),
+ ("human", "{query}"),
+ ]
+ )
+ prompt_2 = ChatPromptTemplate.from_messages(
+ [
+ ("system", "You are an expert on vegetables."),
+ ("human", "{query}"),
+ ]
+ )
+
+ chain_1 = prompt_1 | llm | StrOutputParser()
+ chain_2 = prompt_2 | llm | StrOutputParser()
+
+ route_system = "Route the user's query to either the animal or vegetable expert."
+ route_prompt = ChatPromptTemplate.from_messages(
+ [
+ ("system", route_system),
+ ("human", "{query}"),
+ ]
+ )
+
+
+ class RouteQuery(TypedDict):
+ \"\"\"Route query to destination.\"\"\"
+ destination: Literal["animal", "vegetable"]
+
+
+ route_chain = (
+ route_prompt
+ | llm.with_structured_output(RouteQuery)
+ | itemgetter("destination")
+ )
+
+ chain = {
+ "destination": route_chain, # "animal" or "vegetable"
+ "query": lambda x: x["query"], # pass through input query
+ } | RunnableLambda(
+ # if animal, chain_1. otherwise, chain_2.
+ lambda x: chain_1 if x["destination"] == "animal" else chain_2,
+ )
+
+ chain.invoke({"query": "what color are carrots"})
+ """ # noqa: E501
@property
def output_keys(self) -> List[str]:
diff --git a/libs/langchain/langchain/memory/summary.py b/libs/langchain/langchain/memory/summary.py
index 166c1b3d4dd..23c3f2bca1f 100644
--- a/libs/langchain/langchain/memory/summary.py
+++ b/libs/langchain/langchain/memory/summary.py
@@ -2,6 +2,7 @@ from __future__ import annotations
from typing import Any, Dict, List, Type
+from langchain_core._api import deprecated
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
@@ -14,6 +15,14 @@ from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import SUMMARY_PROMPT
+@deprecated(
+ since="0.2.12",
+ removal="1.0",
+ message=(
+ "Refer here for how to incorporate summaries of conversation history: "
+ "https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/" # noqa: E501
+ ),
+)
class SummarizerMixin(BaseModel):
"""Mixin for summarizer."""
diff --git a/libs/langchain/tests/integration_tests/chains/openai_functions/test_openapi.py b/libs/langchain/tests/integration_tests/chains/openai_functions/test_openapi.py
index 5d7ea309f3d..d3c294cbd56 100644
--- a/libs/langchain/tests/integration_tests/chains/openai_functions/test_openapi.py
+++ b/libs/langchain/tests/integration_tests/chains/openai_functions/test_openapi.py
@@ -1,25 +1,38 @@
-import os
-from pathlib import Path
+import json
+
+import pytest
from langchain.chains.openai_functions.openapi import get_openapi_chain
-
-def test_openai_opeanapi() -> None:
- chain = get_openapi_chain(
- "https://www.klarna.com/us/shopping/public/openai/v0/api-docs/"
- )
- output = chain.run("What are some options for a men's large blue button down shirt")
-
- assert isinstance(output, dict)
+api_spec = {
+ "openapi": "3.0.0",
+ "info": {"title": "JSONPlaceholder API", "version": "1.0.0"},
+ "servers": [{"url": "https://jsonplaceholder.typicode.com"}],
+ "paths": {
+ "/posts": {
+ "get": {
+ "summary": "Get posts",
+ "parameters": [
+ {
+ "name": "_limit",
+ "in": "query",
+ "required": False,
+ "schema": {"type": "integer", "example": 2},
+ "description": "Limit the number of results",
+ },
+ ],
+ }
+ }
+ },
+}
-def test_openai_opeanapi_headers() -> None:
- BRANDFETCH_API_KEY = os.environ.get("BRANDFETCH_API_KEY")
- headers = {"Authorization": f"Bearer {BRANDFETCH_API_KEY}"}
- file_path = str(
- Path(__file__).parents[2] / "examples/brandfetch-brandfetch-2.0.0-resolved.json"
- )
- chain = get_openapi_chain(file_path, headers=headers)
- output = chain.run("I want to know about nike.comgg")
+@pytest.mark.requires("openapi_pydantic")
+@pytest.mark.requires("langchain_openai")
+def test_openai_openapi_chain() -> None:
+ from langchain_openai import ChatOpenAI
- assert isinstance(output, str)
+ llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+ chain = get_openapi_chain(json.dumps(api_spec), llm)
+ output = chain.invoke({"query": "Fetch the top two posts."})
+ assert len(output["response"]) == 2
diff --git a/libs/langchain/tests/unit_tests/chains/test_imports.py b/libs/langchain/tests/unit_tests/chains/test_imports.py
index 797b81b440f..501c706fdf0 100644
--- a/libs/langchain/tests/unit_tests/chains/test_imports.py
+++ b/libs/langchain/tests/unit_tests/chains/test_imports.py
@@ -49,6 +49,7 @@ EXPECTED_ALL = [
"VectorDBQA",
"VectorDBQAWithSourcesChain",
"create_citation_fuzzy_match_chain",
+ "create_citation_fuzzy_match_runnable",
"create_extraction_chain",
"create_extraction_chain_pydantic",
"create_qa_with_sources_chain",