document lcel fallbacks (#8942)
This commit is contained in:
parent e3056340da
commit 0a1be1d501
@@ -1648,6 +1648,186 @@
"source": [
"## Conversational Retrieval With Memory"
]
},
{
"cell_type": "markdown",
"id": "92c87dd8-bb6f-4f32-a30d-8f5459ce6265",
"metadata": {},
"source": [
"## Fallbacks\n",
"\n",
"With LCEL you can easily introduce fallbacks for any Runnable component, like an LLM."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "1b1cb744-31fc-4261-ab25-65fe1fcad559",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='To get to the other side.', additional_kwargs={}, example=False)"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"bad_llm = ChatOpenAI(model_name=\"gpt-fake\")\n",
"good_llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
"llm = bad_llm.with_fallbacks([good_llm])\n",
"\n",
"llm.invoke(\"Why did the the chicken cross the road?\")"
|
||||||
|
]
},
{
"cell_type": "markdown",
"id": "b8cf3982-03f6-49b3-8ff5-7cd12444f19c",
"metadata": {},
"source": [
"Looking at the trace, we can see that the first model failed but the second succeeded, so we still got an output: https://smith.langchain.com/public/dfaf0bf6-d86d-43e9-b084-dd16a56df15c/r\n",
"\n",
"We can add an arbitrary sequence of fallbacks, which will be executed in order:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "31819be0-7f40-4e67-b5ab-61340027b948",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='To get to the other side.', additional_kwargs={}, example=False)"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm = bad_llm.with_fallbacks([bad_llm, bad_llm, good_llm])\n",
"\n",
"llm.invoke(\"Why did the the chicken cross the road?\")"
|
||||||
|
]
},
{
"cell_type": "markdown",
"id": "acad6e88-8046-450e-b005-db7e50f33b80",
"metadata": {},
"source": [
"Trace: https://smith.langchain.com/public/c09efd01-3184-4369-a225-c9da8efcaf47/r\n",
"\n",
"We can continue to use our Runnable with fallbacks the same way we use any Runnable, mean we can include it in sequences:"
|
||||||
|
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "bab114a1-bb93-4b7e-a639-e7e00f21aebc",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='To show off its incredible jumping skills! Kangaroos are truly amazing creatures.', additional_kwargs={}, example=False)"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", \"You're a nice assistant who always includes a compliment in your response\"),\n",
" (\"human\", \"Why did the {animal} cross the road\"),\n",
" ]\n",
")\n",
"chain = prompt | llm\n",
"chain.invoke({\"animal\": \"kangaroo\"})"
]
},
{
"cell_type": "markdown",
"id": "58340afa-8187-4ffe-9bd2-7912fb733a15",
"metadata": {},
"source": [
"Trace: https://smith.langchain.com/public/ba03895f-f8bd-4c70-81b7-8b930353eabd/r\n",
"\n",
"Note, since every sequence of Runnables is itself a Runnable, we can create fallbacks for whole Sequences. We can also continue using the full interface, including asynchronous calls, batched calls, and streams:"
|
||||||
|
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "45aa3170-b2e6-430d-887b-bd879048060a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[\"\\n\\nAnswer: The rabbit crossed the road to get to the other side. That's quite clever of him!\",\n",
" '\\n\\nAnswer: The turtle crossed the road to get to the other side. You must be pretty clever to come up with that riddle!']"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.llms import OpenAI\n",
"from langchain.prompts import PromptTemplate\n",
"\n",
"chat_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", \"You're a nice assistant who always includes a compliment in your response\"),\n",
" (\"human\", \"Why did the {animal} cross the road\"),\n",
" ]\n",
")\n",
"chat_model = ChatOpenAI(model_name=\"gpt-fake\")\n",
"\n",
"prompt_template = \"\"\"Instructions: You should always include a compliment in your response.\n",
"\n",
"Question: Why did the {animal} cross the road?\"\"\"\n",
"prompt = PromptTemplate.from_template(prompt_template)\n",
"llm = OpenAI()\n",
"\n",
"bad_chain = chat_prompt | chat_model\n",
"good_chain = prompt | llm\n",
"chain = bad_chain.with_fallbacks([good_chain])\n",
"await chain.abatch([{\"animal\": \"rabbit\"}, {\"animal\": \"turtle\"}])"
]
},
{
"cell_type": "markdown",
"id": "af6731c6-0c73-4b1d-a433-6e8f6ecce2bb",
"metadata": {},
"source": [
"Traces: \n",
"1. https://smith.langchain.com/public/ccd73236-9ae5-48a6-94b5-41210be18a46/r\n",
"2. https://smith.langchain.com/public/f43f608e-075c-45c7-bf73-b64e4d3f3082/r"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d2fe1fe-506b-4ee5-8056-8b9df801765f",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -1666,7 +1846,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
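The last markdown cell added by this commit states that a Runnable with fallbacks keeps the full Runnable interface, including asynchronous calls, batching, and streaming, but the cells only demonstrate abatch. The sketch below is not part of the commit: it is a minimal, hedged illustration of the streaming path, assuming the same langchain version and OpenAI credentials as the cells above; it reuses the deliberately invalid "gpt-fake" model name to force the fallback, and the StrOutputParser import and the "duck" input are illustrative additions. Depending on the langchain version, the fallback may stream token by token or yield the whole answer in a single chunk.

from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You're a nice assistant who always includes a compliment in your response"),
        ("human", "Why did the {animal} cross the road"),
    ]
)

# "gpt-fake" is an intentionally invalid model name, as in the notebook cells above.
bad_chain = prompt | ChatOpenAI(model_name="gpt-fake") | StrOutputParser()
good_chain = prompt | ChatOpenAI(model_name="gpt-3.5-turbo") | StrOutputParser()
chain = bad_chain.with_fallbacks([good_chain])

# The first chain fails at the model call, so the fallback chain produces the output.
for chunk in chain.stream({"animal": "duck"}):
    print(chunk, end="", flush=True)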