Mirror of https://github.com/hwchase17/langchain.git (synced 2025-06-30 10:23:30 +00:00)

LCEL nits (#13155)

commit c63eb9d797 (parent 28cc60b347)

@@ -5,7 +5,7 @@
  "id": "39eaf61b",
  "metadata": {},
  "source": [
- "# Configuration\n",
+ "# Configure chain internals at runtime\n",
  "\n",
  "Oftentimes you may want to experiment with, or even expose to the end user, multiple different ways of doing things.\n",
  "In order to make this experience as easy as possible, we have defined two methods.\n",

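(Editorial aside, not part of the commit: the two methods the notebook refers to are `configurable_fields` and `configurable_alternatives`. A minimal sketch of the first, assuming the 2023-era `langchain.schema.runnable` import path and an OpenAI key in the environment; the field id and prompt text are illustrative only.)

from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.schema.runnable import ConfigurableField

# Expose the model's temperature as a runtime-configurable field.
model = ChatOpenAI(temperature=0).configurable_fields(
    temperature=ConfigurableField(
        id="llm_temperature",
        name="LLM Temperature",
        description="Sampling temperature of the LLM",
    )
)
chain = PromptTemplate.from_template("Pick a random number above {x}") | model

chain.invoke({"x": 0})  # runs with the default temperature (0)
chain.with_config(configurable={"llm_temperature": 0.9}).invoke({"x": 0})  # overridden per call
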
@@ -594,7 +594,7 @@
  "name": "python",
  "nbconvert_exporter": "python",
  "pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
  }
  },
  "nbformat": 4,

@@ -5,7 +5,7 @@
  "id": "fbc4bf6e",
  "metadata": {},
  "source": [
- "# Run arbitrary functions\n",
+ "# Run custom functions\n",
  "\n",
  "You can use arbitrary functions in the pipeline\n",
  "\n",

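(Editorial aside, not part of the commit: a minimal sketch of using an arbitrary function in a pipeline, assuming `RunnableLambda` from the 2023-era `langchain.schema.runnable` module; the function names are illustrative only.)

from langchain.schema.runnable import RunnableLambda

def char_count(text: str) -> int:
    # Any single-argument function can be wrapped as a Runnable.
    return len(text)

chain = RunnableLambda(char_count) | RunnableLambda(lambda n: n * 2)
chain.invoke("hello")  # -> 10
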
@@ -175,7 +175,7 @@
  "name": "python",
  "nbconvert_exporter": "python",
  "pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
  }
  },
  "nbformat": 4,

@@ -4,7 +4,7 @@
  "cell_type": "markdown",
  "metadata": {},
  "source": [
- "# Custom generator functions\n",
+ "# Stream custom generator functions\n",
  "\n",
  "You can use generator functions (ie. functions that use the `yield` keyword, and behave like iterators) in a LCEL pipeline.\n",
  "\n",

@@ -21,15 +21,7 @@
  "cell_type": "code",
  "execution_count": 1,
  "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "lion, tiger, wolf, gorilla, panda\n"
- ]
- }
- ],
+ "outputs": [],
  "source": [
  "from typing import Iterator, List\n",
  "\n",

@@ -43,16 +35,51 @@
  ")\n",
  "model = ChatOpenAI(temperature=0.0)\n",
  "\n",
- "\n",
- "str_chain = prompt | model | StrOutputParser()\n",
- "\n",
- "print(str_chain.invoke({\"animal\": \"bear\"}))"
+ "str_chain = prompt | model | StrOutputParser()"
  ]
  },
  {
  "cell_type": "code",
  "execution_count": 2,
  "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "lion, tiger, wolf, gorilla, panda"
+ ]
+ }
+ ],
+ "source": [
+ "for chunk in str_chain.stream({\"animal\": \"bear\"}):\n",
+ " print(chunk, end=\"\", flush=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'lion, tiger, wolf, gorilla, panda'"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "str_chain.invoke({\"animal\": \"bear\"})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
  "outputs": [],
  "source": [
  "# This is a custom parser that splits an iterator of llm tokens\n",

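(Editorial aside, not part of the commit: the body of the custom parser falls outside this hunk. A parser matching the comment above and the streamed ['lion'], ['tiger'], ... output seen further down would look roughly like this sketch, not necessarily the notebook's exact code.)

from typing import Iterator, List

def split_into_list(chunks: Iterator[str]) -> Iterator[List[str]]:
    # Buffer streamed tokens and yield a one-element list each time a
    # comma completes an item; flush whatever is left at the end.
    buffer = ""
    for chunk in chunks:
        buffer += chunk
        while "," in buffer:
            comma_index = buffer.index(",")
            yield [buffer[:comma_index].strip()]
            buffer = buffer[comma_index + 1 :]
    yield [buffer.strip()]

# Piping a generator function into a chain coerces it to a Runnable:
# list_chain = str_chain | split_into_list
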
@@ -77,22 +104,61 @@
  },
  {
  "cell_type": "code",
- "execution_count": 3,
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "list_chain = str_chain | split_into_list"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
  "metadata": {},
  "outputs": [
  {
  "name": "stdout",
  "output_type": "stream",
  "text": [
- "['lion', 'tiger', 'wolf', 'gorilla', 'panda']\n"
+ "['lion']\n",
+ "['tiger']\n",
+ "['wolf']\n",
+ "['gorilla']\n",
+ "['panda']\n"
  ]
  }
  ],
  "source": [
- "list_chain = str_chain | split_into_list\n",
- "\n",
- "print(list_chain.invoke({\"animal\": \"bear\"}))"
+ "for chunk in list_chain.stream({\"animal\": \"bear\"}):\n",
+ " print(chunk, flush=True)"
  ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['lion', 'tiger', 'wolf', 'gorilla', 'panda']"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "list_chain.invoke({\"animal\": \"bear\"})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
  }
  ],
  "metadata": {

@@ -111,9 +177,9 @@
  "name": "python",
  "nbconvert_exporter": "python",
  "pygments_lexer": "ipython3",
- "version": "3.11.5"
+ "version": "3.9.1"
  }
  },
  "nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
  }

@@ -5,7 +5,7 @@
  "id": "b022ab74-794d-4c54-ad47-ff9549ddb9d2",
  "metadata": {},
  "source": [
- "# Use RunnableParallel/RunnableMap\n",
+ "# Parallelize steps\n",
  "\n",
  "RunnableParallel (aka. RunnableMap) makes it easy to execute multiple Runnables in parallel, and to return the output of these Runnables as a map."
  ]

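(Editorial aside, not part of the commit: a minimal RunnableParallel sketch showing the "output as a map" behaviour, assuming 2023-era imports; the branch names and prompts are illustrative only.)

from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.runnable import RunnableParallel

model = ChatOpenAI()
joke_chain = ChatPromptTemplate.from_template("tell me a joke about {topic}") | model
poem_chain = ChatPromptTemplate.from_template("write a 2-line poem about {topic}") | model

# Both branches receive the same input and run in parallel;
# the result is a dict keyed by the names given here.
map_chain = RunnableParallel(joke=joke_chain, poem=poem_chain)
map_chain.invoke({"topic": "bear"})  # -> {"joke": AIMessage(...), "poem": AIMessage(...)}
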
@@ -195,7 +195,7 @@
  "name": "python",
  "nbconvert_exporter": "python",
  "pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
  }
  },
  "nbformat": 4,

@@ -5,7 +5,7 @@
  "id": "4b47436a",
  "metadata": {},
  "source": [
- "# Route between multiple Runnables\n",
+ "# Dynamically route logic based on input\n",
  "\n",
  "This notebook covers how to do routing in the LangChain Expression Language.\n",
  "\n",

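(Editorial aside, not part of the commit: one way to do the routing this notebook describes is RunnableBranch, which takes (condition, runnable) pairs plus a default. The sub-chains below are stand-ins for real prompt | model pipelines.)

from langchain.schema.runnable import RunnableBranch, RunnableLambda

# Placeholder branches; in the notebook these would be prompt | model chains.
langchain_chain = RunnableLambda(lambda x: "LangChain answer to: " + x["question"])
general_chain = RunnableLambda(lambda x: "General answer to: " + x["question"])

branch = RunnableBranch(
    (lambda x: "langchain" in x["question"].lower(), langchain_chain),
    general_chain,  # default when no condition matches
)
branch.invoke({"question": "How do I use LangChain?"})
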
@@ -8,7 +8,7 @@
  "---\n",
  "sidebar_position: 0\n",
  "title: Interface\n",
- "---\n"
+ "---"
  ]
  },
  {

@@ -31,26 +31,17 @@
  "- [`abatch`](#async-batch): call the chain on a list of inputs async\n",
  "- [`astream_log`](#async-stream-intermediate-steps): stream back intermediate steps as they happen, in addition to the final response\n",
  "\n",
- "The **input type** varies by component:\n",
+ "The **input type** and **output type** varies by component:\n",
  "\n",
- "| Component | Input Type |\n",
- "| --- | --- |\n",
- "|Prompt|Dictionary|\n",
- "|Retriever|Single string|\n",
- "|LLM, ChatModel| Single string, list of chat messages or a PromptValue|\n",
- "|Tool|Single string, or dictionary, depending on the tool|\n",
- "|OutputParser|The output of an LLM or ChatModel|\n",
+ "| Component | Input Type | Output Type |\n",
+ "| --- | --- | --- |\n",
+ "| Prompt | Dictionary | PromptValue |\n",
+ "| ChatModel | Single string, list of chat messages or a PromptValue | ChatMessage |\n",
+ "| LLM | Single string, list of chat messages or a PromptValue | String |\n",
+ "| OutputParser | The output of an LLM or ChatModel | Depends on the parser |\n",
+ "| Retriever | Single string | List of Documents |\n",
+ "| Tool | Single string or dictionary, depending on the tool | Depends on the tool |\n",
  "\n",
- "The **output type** also varies by component:\n",
- "\n",
- "| Component | Output Type |\n",
- "| --- | --- |\n",
- "| LLM | String |\n",
- "| ChatModel | ChatMessage |\n",
- "| Prompt | PromptValue |\n",
- "| Retriever | List of documents |\n",
- "| Tool | Depends on the tool |\n",
- "| OutputParser | Depends on the parser |\n",
  "\n",
  "All runnables expose input and output **schemas** to inspect the inputs and outputs:\n",
  "- [`input_schema`](#input-schema): an input Pydantic model auto-generated from the structure of the Runnable\n",

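(Editorial aside, not part of the commit: the interface methods and auto-generated schemas mentioned above, in one short sketch; 2023-era imports and pydantic v1-style .schema() assumed.)

from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate

chain = ChatPromptTemplate.from_template("tell me a joke about {topic}") | ChatOpenAI()

# Auto-generated Pydantic models describing what the chain accepts and returns.
chain.input_schema.schema()   # a dict with a "topic" field, per the prompt
chain.output_schema.schema()  # a chat message, per the model

# The standard Runnable methods listed above.
chain.invoke({"topic": "bears"})
chain.batch([{"topic": "bears"}, {"topic": "cats"}])
for chunk in chain.stream({"topic": "bears"}):
    print(chunk.content, end="", flush=True)
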
@@ -1161,7 +1152,7 @@
  "name": "python",
  "nbconvert_exporter": "python",
  "pygments_lexer": "ipython3",
- "version": "3.10.12"
+ "version": "3.9.1"
  }
  },
  "nbformat": 4,