Change RunnableMap to RunnableParallel for consistency (#14142)

- **Description:** Replace instances of `RunnableMap` with `RunnableParallel`, as `RunnableParallel` is the name to use going forward. This makes usage consistent across the codebase.
Alex Kira 2023-12-01 13:36:40 -08:00, committed by GitHub
parent 96f6b90349
commit 0caef3cde7
13 changed files with 37 additions and 33 deletions

View File

@@ -146,7 +146,7 @@
"source": [
"### Branching and Merging\n",
"\n",
"You may want the output of one component to be processed by 2 or more other components. [RunnableMaps](https://api.python.langchain.com/en/latest/schema/langchain.schema.runnable.base.RunnableMap.html) let you split or fork the chain so multiple components can process the input in parallel. Later, other components can join or merge the results to synthesize a final response. This type of chain creates a computation graph that looks like the following:\n",
"You may want the output of one component to be processed by 2 or more other components. [RunnableParallels](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html#langchain_core.runnables.base.RunnableParallel) let you split or fork the chain so multiple components can process the input in parallel. Later, other components can join or merge the results to synthesize a final response. This type of chain creates a computation graph that looks like the following:\n",
"\n",
"```text\n",
" Input\n",

View File

@@ -317,7 +317,7 @@
"source": [
"## Simplifying input\n",
"\n",
"To make invocation even simpler, we can add a `RunnableMap` to take care of creating the prompt input dict for us:"
"To make invocation even simpler, we can add a `RunnableParallel` to take care of creating the prompt input dict for us:"
]
},
{
@@ -327,9 +327,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.runnable import RunnableMap, RunnablePassthrough\n",
"from langchain.schema.runnable import RunnableParallel, RunnablePassthrough\n",
"\n",
"map_ = RunnableMap(foo=RunnablePassthrough())\n",
"map_ = RunnableParallel(foo=RunnablePassthrough())\n",
"chain = (\n",
" map_\n",
" | prompt\n",

View File

@@ -171,7 +171,7 @@
"outputs": [],
"source": [
"from langchain.schema import format_document\n",
"from langchain.schema.runnable import RunnableMap"
"from langchain.schema.runnable import RunnableParallel"
]
},
{
@@ -256,7 +256,7 @@
"metadata": {},
"outputs": [],
"source": [
"_inputs = RunnableMap(\n",
"_inputs = RunnableParallel(\n",
" standalone_question=RunnablePassthrough.assign(\n",
" chat_history=lambda x: _format_chat_history(x[\"chat_history\"])\n",
" )\n",

View File

@@ -104,7 +104,7 @@
"source": [
"Here the input to prompt is expected to be a map with keys \"context\" and \"question\". The user input is just the question. So we need to get the context using our retriever and passthrough the user input under the \"question\" key.\n",
"\n",
"Note that when composing a RunnableMap with another Runnable we don't even need to wrap our dictionary in the RunnableMap class — the type conversion is handled for us."
"Note that when composing a RunnableParallel with another Runnable we don't even need to wrap our dictionary in the RunnableParallel class — the type conversion is handled for us."
]
},
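
The type conversion mentioned above can be seen in a self-contained sketch (toy lambdas, no models): a plain dict composed with a Runnable is coerced into a `RunnableParallel` automatically:

```python
from langchain.schema.runnable import RunnableLambda

double = RunnableLambda(lambda x: x * 2)

# The bare dict on the left is coerced into a RunnableParallel because it is
# composed with a Runnable on the right.
chain = {"original": RunnableLambda(lambda x: x), "doubled": double} | RunnableLambda(
    lambda d: f"{d['original']} -> {d['doubled']}"
)

print(chain.invoke(3))  # -> '3 -> 6'
```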
{
@@ -114,7 +114,7 @@
"source": [
"## Parallelism\n",
"\n",
"RunnableMaps are also useful for running independent processes in parallel, since each Runnable in the map is executed in parallel. For example, we can see our earlier `joke_chain`, `poem_chain` and `map_chain` all have about the same runtime, even though `map_chain` executes both of the other two."
"RunnableParallel are also useful for running independent processes in parallel, since each Runnable in the map is executed in parallel. For example, we can see our earlier `joke_chain`, `poem_chain` and `map_chain` all have about the same runtime, even though `map_chain` executes both of the other two."
]
},
{
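
A toy demonstration of that claim, using `time.sleep` in place of model calls (illustrative only): two one-second branches complete in roughly one second, because the map runs them concurrently:

```python
import time

from langchain.schema.runnable import RunnableLambda, RunnableParallel

def slow(x):
    time.sleep(1)  # stand-in for a slow model call
    return x

map_chain = RunnableParallel(a=RunnableLambda(slow), b=RunnableLambda(slow))

start = time.time()
map_chain.invoke("hi")
print(f"{time.time() - start:.1f}s")  # roughly 1.0s, not 2.0s
```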

View File

@@ -290,9 +290,9 @@
],
"source": [
"from langchain.schema.messages import HumanMessage\n",
"from langchain.schema.runnable import RunnableMap\n",
"from langchain.schema.runnable import RunnableParallel\n",
"\n",
"chain = RunnableMap({\"output_message\": ChatAnthropic(model=\"claude-2\")})\n",
"chain = RunnableParallel({\"output_message\": ChatAnthropic(model=\"claude-2\")})\n",
"chain_with_history = RunnableWithMessageHistory(\n",
" chain,\n",
" lambda session_id: RedisChatMessageHistory(session_id, url=REDIS_URL),\n",

View File

@@ -667,7 +667,11 @@
"from langchain.chat_models.openai import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnableLambda, RunnableMap, RunnablePassthrough\n",
"from langchain.schema.runnable import (\n",
" RunnableLambda,\n",
" RunnableParallel,\n",
" RunnablePassthrough,\n",
")\n",
"\n",
"# 6. Create anonymizer chain\n",
"template = \"\"\"Answer the question based only on the following context:\n",
@@ -680,7 +684,7 @@
"model = ChatOpenAI(temperature=0.3)\n",
"\n",
"\n",
"_inputs = RunnableMap(\n",
"_inputs = RunnableParallel(\n",
" question=RunnablePassthrough(),\n",
" # It is important to remember about question anonymization\n",
" anonymized_question=RunnableLambda(anonymizer.anonymize),\n",
@@ -882,7 +886,7 @@
"\n",
"\n",
"chain_with_deanonymization = (\n",
" RunnableMap({\"question\": RunnablePassthrough()})\n",
" RunnableParallel({\"question\": RunnablePassthrough()})\n",
" | {\n",
" \"context\": itemgetter(\"question\")\n",
" | retriever\n",

View File

@@ -846,7 +846,7 @@
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.schema.runnable import RunnableMap\n",
"from langchain.schema.runnable import RunnableParallel\n",
"\n",
"rag_chain_from_docs = (\n",
" {\n",
@@ -857,7 +857,7 @@
" | llm\n",
" | StrOutputParser()\n",
")\n",
"rag_chain_with_source = RunnableMap(\n",
"rag_chain_with_source = RunnableParallel(\n",
" {\"documents\": retriever, \"question\": RunnablePassthrough()}\n",
") | {\n",
" \"documents\": lambda input: [doc.metadata for doc in input[\"documents\"]],\n",

View File

@@ -2,7 +2,7 @@ from langchain.agents import AgentExecutor
from langchain.chat_models import ChatAnthropic
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableMap, RunnablePassthrough
from langchain.schema.runnable import RunnableParallel, RunnablePassthrough
from .agent_scratchpad import format_agent_scratchpad
from .output_parser import parse_output
@@ -29,7 +29,7 @@ chain = (
)
agent_chain = (
RunnableMap(
RunnableParallel(
{
"partial_completion": chain,
"intermediate_steps": lambda x: x["intermediate_steps"],

View File

@@ -14,7 +14,7 @@ from langchain.schema.runnable import (
ConfigurableField,
RunnableBranch,
RunnableLambda,
RunnableMap,
RunnableParallel,
RunnablePassthrough,
)
from langchain.schema.runnable.utils import ConfigurableFieldSingleOption
@@ -133,7 +133,7 @@ class ChatHistory(BaseModel):
question: str
_inputs = RunnableMap(
_inputs = RunnableParallel(
{
"question": lambda x: x["question"],
"chat_history": lambda x: _format_chat_history(x["chat_history"]),

View File

@@ -11,7 +11,7 @@ from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import (
RunnableBranch,
RunnableLambda,
RunnableMap,
RunnableParallel,
RunnablePassthrough,
)
from langchain.vectorstores import Pinecone
@@ -108,7 +108,7 @@ _search_query = RunnableBranch(
RunnableLambda(itemgetter("question")),
)
_inputs = RunnableMap(
_inputs = RunnableParallel(
{
"question": lambda x: x["question"],
"chat_history": lambda x: _format_chat_history(x["chat_history"]),

View File

@@ -5,7 +5,7 @@ from langchain.chat_models import ChatOpenAI
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.schema import BaseMessage, format_document
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableMap, RunnablePassthrough
from langchain.schema.runnable import RunnableParallel, RunnablePassthrough
from langchain.vectorstores.elasticsearch import ElasticsearchStore
from pydantic import BaseModel, Field
@@ -49,7 +49,7 @@ class ChainInput(BaseModel):
question: str = Field(..., description="The question to answer.")
_inputs = RunnableMap(
_inputs = RunnableParallel(
standalone_question=RunnablePassthrough.assign(
chat_history=lambda x: _format_chat_history(x["chat_history"])
)

View File

@@ -7,7 +7,7 @@ from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.pydantic_v1 import BaseModel, Field
from langchain.schema.messages import AIMessage, HumanMessage
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableLambda, RunnableMap
from langchain.schema.runnable import RunnableLambda, RunnableParallel
# Formatting for chat history
@@ -96,7 +96,7 @@ chat_history: List[Tuple[str, str]] = Field(..., extra={"widget": {"type": "chat"}})
chat_history: List[Tuple[str, str]] = Field(..., extra={"widget": {"type": "chat"}})
_inputs = RunnableMap(
_inputs = RunnableParallel(
{
"question": RunnableLambda(
lambda x: _deidentify_with_replace(

View File

@@ -13,7 +13,7 @@ from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import (
RunnableBranch,
RunnableLambda,
RunnableMap,
RunnableParallel,
RunnablePassthrough,
)
from langchain.vectorstores.timescalevector import TimescaleVector
@@ -136,7 +136,7 @@ def get_retriever_with_metadata(x):
_retriever = RunnableLambda(get_retriever_with_metadata)
_inputs = RunnableMap(
_inputs = RunnableParallel(
{
"question": lambda x: x["question"],
"chat_history": lambda x: _format_chat_history(x["chat_history"]),