From 526ba235f313c7449cfb7c787b4264353cd5059a Mon Sep 17 00:00:00 2001
From: Bagatur <22008038+baskaryan@users.noreply.github.com>
Date: Mon, 13 May 2024 18:40:53 -0400
Subject: [PATCH] docs: fix prereq links (#21630)
---
docs/docs/how_to/assign.ipynb | 21 +++++-----
docs/docs/how_to/binding.ipynb | 15 ++++---
docs/docs/how_to/chat_model_caching.ipynb | 21 +++++-----
.../how_to/chat_token_usage_tracking.ipynb | 15 ++++---
docs/docs/how_to/configure.ipynb | 23 +++++------
docs/docs/how_to/custom_chat_model.ipynb | 17 ++++----
docs/docs/how_to/few_shot_examples.ipynb | 23 +++++------
docs/docs/how_to/few_shot_examples_chat.ipynb | 25 ++++++------
docs/docs/how_to/functions.ipynb | 19 +++++----
docs/docs/how_to/inspect.ipynb | 19 +++++----
docs/docs/how_to/logprobs.ipynb | 15 ++++---
docs/docs/how_to/message_history.ipynb | 25 ++++++------
docs/docs/how_to/output_parser_json.ipynb | 23 +++++------
docs/docs/how_to/output_parser_xml.ipynb | 24 +++++------
docs/docs/how_to/output_parser_yaml.ipynb | 26 ++++++------
docs/docs/how_to/parallel.ipynb | 15 ++++---
docs/docs/how_to/passthrough.ipynb | 16 ++++----
docs/docs/how_to/prompts_composition.ipynb | 13 +++---
docs/docs/how_to/prompts_partial.ipynb | 17 ++++----
docs/docs/how_to/routing.ipynb | 27 ++++++-------
docs/docs/how_to/sequence.ipynb | 21 +++++-----
docs/docs/how_to/streaming.ipynb | 21 +++++-----
docs/docs/how_to/structured_output.ipynb | 40 ++++++++-----------
docs/docs/how_to/tool_calling.ipynb | 21 +++++-----
docs/src/theme/PrerequisiteLinks.js | 19 ---------
25 files changed, 239 insertions(+), 282 deletions(-)
delete mode 100644 docs/src/theme/PrerequisiteLinks.js
diff --git a/docs/docs/how_to/assign.ipynb b/docs/docs/how_to/assign.ipynb
index c63c1530b97..f1baa2f7832 100644
--- a/docs/docs/how_to/assign.ipynb
+++ b/docs/docs/how_to/assign.ipynb
@@ -16,21 +16,20 @@
"source": [
"# How to add values to a chain's state\n",
"\n",
- "An alternate way of [passing data through](/docs/how_to/passthrough) steps of a chain is to leave the current values of the chain state unchanged while assigning a new value under a given key. The [`RunnablePassthrough.assign()`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html#langchain_core.runnables.passthrough.RunnablePassthrough.assign) static method takes an input value and adds the extra arguments passed to the assign function.\n",
+ ":::info Prerequisites\n",
"\n",
- "This is useful in the common [LangChain Expression Language](/docs/concepts/#langchain-expression-language) pattern of additively creating a dictionary to use as input to a later step.\n",
- "\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```\n",
+ "\n",
+ ":::\n",
+ "\n",
+ "An alternate way of [passing data through](/docs/how_to/passthrough) steps of a chain is to leave the current values of the chain state unchanged while assigning a new value under a given key. The [`RunnablePassthrough.assign()`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html#langchain_core.runnables.passthrough.RunnablePassthrough.assign) static method takes an input value and adds the extra arguments passed to the assign function.\n",
+ "\n",
+ "This is useful in the common [LangChain Expression Language](/docs/concepts/#langchain-expression-language) pattern of additively creating a dictionary to use as input to a later step.\n",
"\n",
"Here's an example:"
]
@@ -184,9 +183,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
}
diff --git a/docs/docs/how_to/binding.ipynb b/docs/docs/how_to/binding.ipynb
index 2dfbdc062c0..e71fd2cba35 100644
--- a/docs/docs/how_to/binding.ipynb
+++ b/docs/docs/how_to/binding.ipynb
@@ -18,17 +18,16 @@
"source": [
"# How to attach runtime arguments to a Runnable\n",
"\n",
- "Sometimes we want to invoke a [`Runnable`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) within a [RunnableSequence](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableSequence.html) with constant arguments that are not part of the output of the preceding Runnable in the sequence, and which are not part of the user input. We can use the [`Runnable.bind()`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.bind) method to set these arguments ahead of time.\n",
+ ":::info Prerequisites\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```\n",
+ "\n",
+ ":::\n",
+ "\n",
+ "Sometimes we want to invoke a [`Runnable`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) within a [RunnableSequence](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableSequence.html) with constant arguments that are not part of the output of the preceding Runnable in the sequence, and which are not part of the user input. We can use the [`Runnable.bind()`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.bind) method to set these arguments ahead of time.\n",
"\n",
"## Binding stop sequences\n",
"\n",
@@ -228,7 +227,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/chat_model_caching.ipynb b/docs/docs/how_to/chat_model_caching.ipynb
index 25190ee4374..1d1839c3fad 100644
--- a/docs/docs/how_to/chat_model_caching.ipynb
+++ b/docs/docs/how_to/chat_model_caching.ipynb
@@ -7,21 +7,20 @@
"source": [
"# How to cache chat model responses\n",
"\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "- [Chat models](/docs/concepts/#chat-models)\n",
+ "- [LLMs](/docs/concepts/#llms)\n",
+ "\n",
+ ":::\n",
+ "\n",
"LangChain provides an optional caching layer for chat models. This is useful for two main reasons:\n",
"\n",
"- It can save you money by reducing the number of API calls you make to the LLM provider, if you're often requesting the same completion multiple times. This is especially useful during app development.\n",
"- It can speed up your application by reducing the number of API calls you make to the LLM provider.\n",
"\n",
- "This guide will walk you through how to enable this in your apps.\n",
- "\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```"
+ "This guide will walk you through how to enable this in your apps."
]
},
{
@@ -267,7 +266,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/chat_token_usage_tracking.ipynb b/docs/docs/how_to/chat_token_usage_tracking.ipynb
index ea440e5c14b..fc0245c3f99 100644
--- a/docs/docs/how_to/chat_token_usage_tracking.ipynb
+++ b/docs/docs/how_to/chat_token_usage_tracking.ipynb
@@ -7,15 +7,14 @@
"source": [
"# How to track token usage in ChatModels\n",
"\n",
- "Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls.\n",
+ ":::info Prerequisites\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```"
+ "\n",
+ ":::\n",
+ "\n",
+ "Tracking token usage to calculate cost is an important part of putting your app in production. This guide goes over how to obtain this information from your LangChain model calls."
]
},
{
@@ -365,7 +364,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/configure.ipynb b/docs/docs/how_to/configure.ipynb
index 8a09a9ab119..19aaf7fcc0b 100644
--- a/docs/docs/how_to/configure.ipynb
+++ b/docs/docs/how_to/configure.ipynb
@@ -18,23 +18,22 @@
"source": [
"# How to configure runtime chain internals\n",
"\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n",
+ "- [Chaining runnables](/docs/how_to/sequence/)\n",
+ "- [Binding runtime arguments](/docs/how_to/binding/)\n",
+ "\n",
+ ":::\n",
+ "\n",
"Sometimes you may want to experiment with, or even expose to the end user, multiple different ways of doing things within your chains.\n",
"This can include tweaking parameters such as temperature or even swapping out one model for another.\n",
"In order to make this experience as easy as possible, we have defined two methods.\n",
"\n",
"- A `configurable_fields` method. This lets you configure particular fields of a runnable.\n",
" - This is related to the [`.bind`](/docs/how_to/binding) method on runnables, but allows you to specify parameters for a given step in a chain at runtime rather than specifying them beforehand.\n",
- "- A `configurable_alternatives` method. With this method, you can list out alternatives for any particular runnable that can be set during runtime, and swap them for those specified alternatives.\n",
- "\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```"
+ "- A `configurable_alternatives` method. With this method, you can list out alternatives for any particular runnable that can be set during runtime, and swap them for those specified alternatives."
]
},
{
@@ -613,7 +612,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.5"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/custom_chat_model.ipynb b/docs/docs/how_to/custom_chat_model.ipynb
index d3b3759322b..e512c307f36 100644
--- a/docs/docs/how_to/custom_chat_model.ipynb
+++ b/docs/docs/how_to/custom_chat_model.ipynb
@@ -7,20 +7,19 @@
"source": [
"# How to create a custom chat model class\n",
"\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "- [Chat models](/docs/concepts/#chat-models)\n",
+ "\n",
+ ":::\n",
+ "\n",
"In this guide, we'll learn how to create a custom chat model using LangChain abstractions.\n",
"\n",
"Wrapping your LLM with the standard [`BaseChatModel`](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.chat_models.BaseChatModel.html) interface allow you to use your LLM in existing LangChain programs with minimal code modifications!\n",
"\n",
"As an bonus, your LLM will automatically become a LangChain `Runnable` and will benefit from some optimizations out of the box (e.g., batch via a threadpool), async support, the `astream_events` API, etc.\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```\n",
- "\n",
"## Inputs and outputs\n",
"\n",
"First, we need to talk about **messages**, which are the inputs and outputs of chat models.\n",
@@ -562,7 +561,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/few_shot_examples.ipynb b/docs/docs/how_to/few_shot_examples.ipynb
index 2dd0ac0e9b2..41025d03fda 100644
--- a/docs/docs/how_to/few_shot_examples.ipynb
+++ b/docs/docs/how_to/few_shot_examples.ipynb
@@ -17,23 +17,22 @@
"source": [
"# How to use few shot examples\n",
"\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "- [Prompt templates](/docs/concepts/#prompt-templates)\n",
+ "- [Example selectors](/docs/concepts/#example-selectors)\n",
+ "- [LLMs](/docs/concepts/#llms)\n",
+ "- [Vectorstores](/docs/concepts/#vectorstores)\n",
+ "\n",
+ ":::\n",
+ "\n",
"In this guide, we'll learn how to create a simple prompt template that provides the model with example inputs and outputs when generating. Providing the LLM with a few such examples is called few-shotting, and is a simple yet powerful way to guide generation and in some cases drastically improve model performance.\n",
"\n",
"A few-shot prompt template can be constructed from either a set of examples, or from an [Example Selector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.base.BaseExampleSelector.html) class responsible for choosing a subset of examples from the defined set.\n",
"\n",
"This guide will cover few-shotting with string prompt templates. For a guide on few-shotting with chat messages for chat models, see [here](/docs/how_to/few_shot_examples_chat/).\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```\n",
- "\n",
"## Create a formatter for the few-shot examples\n",
"\n",
"Configure a formatter that will format the few-shot examples into a string. This formatter should be a `PromptTemplate` object."
@@ -390,7 +389,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/few_shot_examples_chat.ipynb b/docs/docs/how_to/few_shot_examples_chat.ipynb
index 393470a818c..cf003eb7a90 100644
--- a/docs/docs/how_to/few_shot_examples_chat.ipynb
+++ b/docs/docs/how_to/few_shot_examples_chat.ipynb
@@ -17,24 +17,23 @@
"source": [
"# How to use few shot examples in chat models\n",
"\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "- [Prompt templates](/docs/concepts/#prompt-templates)\n",
+ "- [Example selectors](/docs/concepts/#example-selectors)\n",
+ "- [Chat models](/docs/concepts/#chat-model)\n",
+ "- [Vectorstores](/docs/concepts/#vectorstores)\n",
+ "\n",
+ ":::\n",
+ "\n",
"This guide covers how to prompt a chat model with example inputs and outputs. Providing the model with a few such examples is called few-shotting, and is a simple yet powerful way to guide generation and in some cases drastically improve model performance.\n",
"\n",
"There does not appear to be solid consensus on how best to do few-shot prompting, and the optimal prompt compilation will likely vary by model. Because of this, we provide few-shot prompt templates like the [FewShotChatMessagePromptTemplate](https://api.python.langchain.com/en/latest/prompts/langchain_core.prompts.few_shot.FewShotChatMessagePromptTemplate.html?highlight=fewshot#langchain_core.prompts.few_shot.FewShotChatMessagePromptTemplate) as a flexible starting point, and you can modify or replace them as you see fit.\n",
"\n",
"The goal of few-shot prompt templates are to dynamically select examples based on an input, and then format the examples in a final prompt to provide for the model.\n",
"\n",
- "**Note:** The following code examples are for chat models only, since `FewShotChatMessagePromptTemplates` are designed to output formatted [chat messages](/docs/concepts/#message-types) rather than pure strings. For similar few-shot prompt examples for pure string templates compatible with completion models (LLMs), see the [few-shot prompt templates](/docs/how_to/few_shot_examples/) guide.\n",
- "\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```"
+ "**Note:** The following code examples are for chat models only, since `FewShotChatMessagePromptTemplates` are designed to output formatted [chat messages](/docs/concepts/#message-types) rather than pure strings. For similar few-shot prompt examples for pure string templates compatible with completion models (LLMs), see the [few-shot prompt templates](/docs/how_to/few_shot_examples/) guide."
]
},
{
@@ -435,7 +434,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/functions.ipynb b/docs/docs/how_to/functions.ipynb
index 2c1a2681a07..8c8c23eab7c 100644
--- a/docs/docs/how_to/functions.ipynb
+++ b/docs/docs/how_to/functions.ipynb
@@ -18,6 +18,14 @@
"source": [
"# How to run custom functions\n",
"\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n",
+ "- [Chaining runnables](/docs/how_to/sequence/)\n",
+ "\n",
+ ":::\n",
+ "\n",
"You can use arbitrary functions as [Runnables](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable). This is useful for formatting or when you need functionality not provided by other LangChain components, and custom functions used as Runnables are called [`RunnableLambdas`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableLambda.html).\n",
"\n",
"Note that all inputs to these functions need to be a SINGLE argument. If you have a function that accepts multiple arguments, you should write a wrapper that accepts a single dict input and unpacks it into multiple argument.\n",
@@ -29,15 +37,6 @@
"- How to accept and use run metadata in your custom function\n",
"- How to stream with custom functions by having them return generators\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```\n",
- "\n",
"## Using the constructor\n",
"\n",
"Below, we explicitly wrap our custom logic using the `RunnableLambda` constructor:"
@@ -526,7 +525,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/inspect.ipynb b/docs/docs/how_to/inspect.ipynb
index 91916292fe3..f80e828a5ca 100644
--- a/docs/docs/how_to/inspect.ipynb
+++ b/docs/docs/how_to/inspect.ipynb
@@ -7,19 +7,18 @@
"source": [
"# How to inspect your runnables\n",
"\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n",
+ "- [Chaining runnables](/docs/how_to/sequence/)\n",
+ "\n",
+ ":::\n",
+ "\n",
"Once you create a runnable with [LangChain Expression Language](/docs/concepts/#langchain-expression-language), you may often want to inspect it to get a better sense for what is going on. This notebook covers some methods for doing so.\n",
"\n",
"This guide shows some ways you can programmatically introspect the internal steps of chains. If you are instead interested in debugging issues in your chain, see [this section](/docs/how_to/debugging) instead.\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```\n",
- "\n",
"First, let's create an example chain. We will create one that does retrieval:"
]
},
@@ -222,7 +221,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/logprobs.ipynb b/docs/docs/how_to/logprobs.ipynb
index a62b5df57dc..4cf228e44a8 100644
--- a/docs/docs/how_to/logprobs.ipynb
+++ b/docs/docs/how_to/logprobs.ipynb
@@ -7,15 +7,14 @@
"source": [
"# How to get log probabilities from model calls\n",
"\n",
- "Certain chat models can be configured to return token-level log probabilities representing the likelihood of a given token. This guide walks through how to get this information in LangChain.\n",
+ ":::info Prerequisites\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```"
+ "\n",
+ ":::\n",
+ "\n",
+ "Certain chat models can be configured to return token-level log probabilities representing the likelihood of a given token. This guide walks through how to get this information in LangChain."
]
},
{
@@ -170,7 +169,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/message_history.ipynb b/docs/docs/how_to/message_history.ipynb
index 6169d8eb9e3..23ea00c7097 100644
--- a/docs/docs/how_to/message_history.ipynb
+++ b/docs/docs/how_to/message_history.ipynb
@@ -7,6 +7,17 @@
"source": [
"# How to add message history\n",
"\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n",
+ "- [Chaining runnables](/docs/how_to/sequence/)\n",
+ "- [Configuring chain parameters at runtime](/docs/how_to/configure)\n",
+ "- [Prompt templates](/docs/concepts/#prompt-templates)\n",
+ "- [Chat Messages](/docs/concepts/#message-types)\n",
+ "\n",
+ ":::\n",
+ "\n",
"Passing conversation state into and out a chain is vital when building a chatbot. The [`RunnableWithMessageHistory`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html#langchain_core.runnables.history.RunnableWithMessageHistory) class lets us add message history to certain types of chains. It wraps another Runnable and manages the chat message history for it.\n",
"\n",
"Specifically, it can be used for any Runnable that takes as input one of:\n",
@@ -21,18 +32,6 @@
"* a sequence of `BaseMessage`\n",
"* a dict with a key that contains a sequence of `BaseMessage`\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```\n",
- "\n",
"Let's take a look at some examples to see how it works. First we construct a runnable (which here accepts a dict as input and returns a message as output):\n",
"\n",
"```{=mdx}\n",
@@ -667,7 +666,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/output_parser_json.ipynb b/docs/docs/how_to/output_parser_json.ipynb
index 034e66467f4..ac4499b255f 100644
--- a/docs/docs/how_to/output_parser_json.ipynb
+++ b/docs/docs/how_to/output_parser_json.ipynb
@@ -7,23 +7,22 @@
"source": [
"# How to parse JSON output\n",
"\n",
- "While some model providers support [built-in ways to return structured output](/docs/how_to/structured_output), not all do. We can use an output parser to help users to specify an arbitrary JSON schema via the prompt, query a model for outputs that conform to that schema, and finally parse that schema as JSON.\n",
+ ":::info Prerequisites\n",
"\n",
- ":::{.callout-note}\n",
- "Keep in mind that large language models are leaky abstractions! You'll have to use an LLM with sufficient capacity to generate well-formed JSON.\n",
- ":::\n",
- "\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```"
+ "\n",
+ ":::\n",
+ "\n",
+ "While some model providers support [built-in ways to return structured output](/docs/how_to/structured_output), not all do. We can use an output parser to help users to specify an arbitrary JSON schema via the prompt, query a model for outputs that conform to that schema, and finally parse that schema as JSON.\n",
+ "\n",
+ ":::{.callout-note}\n",
+ "Keep in mind that large language models are leaky abstractions! You'll have to use an LLM with sufficient capacity to generate well-formed JSON.\n",
+ ":::"
]
},
{
@@ -255,7 +254,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/output_parser_xml.ipynb b/docs/docs/how_to/output_parser_xml.ipynb
index ef57c04723e..9e9a0b98cb6 100644
--- a/docs/docs/how_to/output_parser_xml.ipynb
+++ b/docs/docs/how_to/output_parser_xml.ipynb
@@ -7,6 +7,17 @@
"source": [
"# How to parse XML output\n",
"\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "- [Chat models](/docs/concepts/#chat-models)\n",
+ "- [Output parsers](/docs/concepts/#output-parsers)\n",
+ "- [Prompt templates](/docs/concepts/#prompt-templates)\n",
+ "- [Structured output](/docs/how_to/structured_output)\n",
+ "- [Chaining runnables together](/docs/how_to/sequence/)\n",
+ "\n",
+ ":::\n",
+ "\n",
"LLMs from different providers often have different strengths depending on the specific data they are trianed on. This also means that some may be \"better\" and more reliable at generating output in formats other than JSON.\n",
"\n",
"This guide shows you how to use the [`XMLOutputParser`](https://api.python.langchain.com/en/latest/output_parsers/langchain_core.output_parsers.xml.XMLOutputParser.html) to prompt models for XML output, then and parse that output into a usable format.\n",
@@ -15,17 +26,6 @@
"Keep in mind that large language models are leaky abstractions! You'll have to use an LLM with sufficient capacity to generate well-formed XML.\n",
":::\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```\n",
- "\n",
"In the following examples, we use Anthropic's Claude-2 model (https://docs.anthropic.com/claude/docs), which is one such model that is optimized for XML tags."
]
},
@@ -274,7 +274,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/output_parser_yaml.ipynb b/docs/docs/how_to/output_parser_yaml.ipynb
index 34b766719d4..bc948a386de 100644
--- a/docs/docs/how_to/output_parser_yaml.ipynb
+++ b/docs/docs/how_to/output_parser_yaml.ipynb
@@ -7,24 +7,24 @@
"source": [
"# How to parse YAML output\n",
"\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "- [Chat models](/docs/concepts/#chat-models)\n",
+ "- [Output parsers](/docs/concepts/#output-parsers)\n",
+ "- [Prompt templates](/docs/concepts/#prompt-templates)\n",
+ "- [Structured output](/docs/how_to/structured_output)\n",
+ "- [Chaining runnables together](/docs/how_to/sequence/)\n",
+ "\n",
+ ":::\n",
+ "\n",
"LLMs from different providers often have different strengths depending on the specific data they are trianed on. This also means that some may be \"better\" and more reliable at generating output in formats other than JSON.\n",
"\n",
"This output parser allows users to specify an arbitrary schema and query LLMs for outputs that conform to that schema, using YAML to format their response.\n",
"\n",
":::{.callout-note}\n",
"Keep in mind that large language models are leaky abstractions! You'll have to use an LLM with sufficient capacity to generate well-formed YAML.\n",
- ":::\n",
- "\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```"
+ ":::\n"
]
},
{
@@ -165,7 +165,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/parallel.ipynb b/docs/docs/how_to/parallel.ipynb
index fa9f58fa55c..6ad764573d6 100644
--- a/docs/docs/how_to/parallel.ipynb
+++ b/docs/docs/how_to/parallel.ipynb
@@ -18,16 +18,15 @@
"source": [
"# How to invoke runnables in parallel\n",
"\n",
- "The [`RunnableParallel`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html) primitive is essentially a dict whose values are runnables (or things that can be coerced to runnables, like functions). It runs all of its values in parallel, and each value is called with the overall input of the `RunnableParallel`. The final return value is a dict with the results of each value under its appropriate key.\n",
+ ":::info Prerequisites\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```\n",
+ "\n",
+ ":::\n",
+ "\n",
+ "The [`RunnableParallel`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableParallel.html) primitive is essentially a dict whose values are runnables (or things that can be coerced to runnables, like functions). It runs all of its values in parallel, and each value is called with the overall input of the `RunnableParallel`. The final return value is a dict with the results of each value under its appropriate key.\n",
"\n",
"## Formatting with `RunnableParallels`\n",
"\n",
@@ -354,7 +353,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/passthrough.ipynb b/docs/docs/how_to/passthrough.ipynb
index 50dea1eb288..80ad9373eb4 100644
--- a/docs/docs/how_to/passthrough.ipynb
+++ b/docs/docs/how_to/passthrough.ipynb
@@ -18,18 +18,18 @@
"source": [
"# How to pass through arguments from one step to the next\n",
"\n",
- "When composing chains with several steps, sometimes you will want to pass data from previous steps unchanged for use as input to a later step. The [`RunnablePassthrough`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) class allows you to do just this, and is typically is used in conjuction with a [RunnableParallel](/docs/how_to/parallel/) to pass data through to a later step in your constructed chains.\n",
+ ":::info Prerequisites\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```\n",
+ "\n",
+ ":::\n",
+ "\n",
+ "\n",
+    "When composing chains with several steps, sometimes you will want to pass data from previous steps unchanged for use as input to a later step. The [`RunnablePassthrough`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html) class allows you to do just this, and is typically used in conjunction with a [RunnableParallel](/docs/how_to/parallel/) to pass data through to a later step in your constructed chains.\n",
"\n",
"See the example below:"
]
@@ -174,7 +174,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/prompts_composition.ipynb b/docs/docs/how_to/prompts_composition.ipynb
index 138b30e9d44..d28fb94d378 100644
--- a/docs/docs/how_to/prompts_composition.ipynb
+++ b/docs/docs/how_to/prompts_composition.ipynb
@@ -17,13 +17,14 @@
"source": [
"# How to compose prompts together\n",
"\n",
- "LangChain provides a user friendly interface for composing different parts of prompts together. You can do this with either string prompts or chat prompts. Constructing prompts this way allows for easy reuse of components.\n",
+ ":::info Prerequisites\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "- [Prompt templates](/docs/concepts/#prompt-templates)\n",
"\n",
- "\n",
- "```"
+ ":::\n",
+ "\n",
+ "LangChain provides a user friendly interface for composing different parts of prompts together. You can do this with either string prompts or chat prompts. Constructing prompts this way allows for easy reuse of components."
]
},
{
@@ -306,7 +307,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/prompts_partial.ipynb b/docs/docs/how_to/prompts_partial.ipynb
index aca86389ac5..5823b40bc01 100644
--- a/docs/docs/how_to/prompts_partial.ipynb
+++ b/docs/docs/how_to/prompts_partial.ipynb
@@ -17,6 +17,13 @@
"source": [
"# How to partially format prompt templates\n",
"\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "- [Prompt templates](/docs/concepts/#prompt-templates)\n",
+ "\n",
+ ":::\n",
+ "\n",
"Like partially binding arguments to a function, it can make sense to \"partial\" a prompt template - e.g. pass in a subset of the required values, as to create a new prompt template which expects only the remaining subset of values.\n",
"\n",
"LangChain supports this in two ways:\n",
@@ -26,14 +33,6 @@
"\n",
"In the examples below, we go over the motivations for both use cases as well as how to do it in LangChain.\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```\n",
- "\n",
"## Partial with strings\n",
"\n",
"One common use case for wanting to partial a prompt template is if you get access to some of the variables in a prompt before others. For example, suppose you have a prompt template that requires two variables, `foo` and `baz`. If you get the `foo` value early on in your chain, but the `baz` value later, it can be inconvenient to pass both variables all the way through the chain. Instead, you can partial the prompt template with the `foo` value, and then pass the partialed prompt template along and just use that. Below is an example of doing this:\n"
@@ -191,7 +190,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/routing.ipynb b/docs/docs/how_to/routing.ipynb
index 8fd0776edfd..7929cda3155 100644
--- a/docs/docs/how_to/routing.ipynb
+++ b/docs/docs/how_to/routing.ipynb
@@ -18,6 +18,17 @@
"source": [
"# How to route execution within a chain\n",
"\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n",
+ "- [Chaining runnables](/docs/how_to/sequence/)\n",
+ "- [Configuring chain parameters at runtime](/docs/how_to/configure)\n",
+ "- [Prompt templates](/docs/concepts/#prompt-templates)\n",
+ "- [Chat Messages](/docs/concepts/#message-types)\n",
+ "\n",
+ ":::\n",
+ "\n",
"Routing allows you to create non-deterministic chains where the output of a previous step defines the next step. Routing can help provide structure and consistency around interactions with models by allowing you to define states and use information related to those states as context to model calls.\n",
"\n",
"There are two ways to perform routing:\n",
@@ -25,19 +36,7 @@
"1. Conditionally return runnables from a [`RunnableLambda`](/docs/how_to/functions) (recommended)\n",
"2. Using a `RunnableBranch` (legacy)\n",
"\n",
- "We'll illustrate both methods using a two step sequence where the first step classifies an input question as being about `LangChain`, `Anthropic`, or `Other`, then routes to a corresponding prompt chain.\n",
- "\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```"
+ "We'll illustrate both methods using a two step sequence where the first step classifies an input question as being about `LangChain`, `Anthropic`, or `Other`, then routes to a corresponding prompt chain."
]
},
{
@@ -474,7 +473,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/sequence.ipynb b/docs/docs/how_to/sequence.ipynb
index 37f91fb9675..1b34aef88bd 100644
--- a/docs/docs/how_to/sequence.ipynb
+++ b/docs/docs/how_to/sequence.ipynb
@@ -16,20 +16,19 @@
"source": [
"# How to chain runnables\n",
"\n",
- "One point about [LangChain Expression Language](/docs/concepts/#langchain-expression-language) is that any two runnables can be \"chained\" together into sequences. The output of the previous runnable's `.invoke()` call is passed as input to the next runnable. This can be done using the pipe operator (`|`), or the more explicit `.pipe()` method, which does the same thing.\n",
+ ":::info Prerequisites\n",
"\n",
- "The resulting [`RunnableSequence`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableSequence.html) is itself a runnable, which means it can be invoked, streamed, or further chained just like any other runnable. Advantages of chaining runnables in this way are efficient streaming (the sequence will stream output as soon as it is available), and debugging and tracing with tools like [LangSmith](/docs/how_to/debugging).\n",
- "\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```\n",
+ "\n",
+ ":::\n",
+ "\n",
+ "One point about [LangChain Expression Language](/docs/concepts/#langchain-expression-language) is that any two runnables can be \"chained\" together into sequences. The output of the previous runnable's `.invoke()` call is passed as input to the next runnable. This can be done using the pipe operator (`|`), or the more explicit `.pipe()` method, which does the same thing.\n",
+ "\n",
+ "The resulting [`RunnableSequence`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableSequence.html) is itself a runnable, which means it can be invoked, streamed, or further chained just like any other runnable. Advantages of chaining runnables in this way are efficient streaming (the sequence will stream output as soon as it is available), and debugging and tracing with tools like [LangSmith](/docs/how_to/debugging).\n",
"\n",
"## The pipe operator\n",
"\n",
@@ -255,9 +254,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
}
diff --git a/docs/docs/how_to/streaming.ipynb b/docs/docs/how_to/streaming.ipynb
index 25e6e6094ee..1011792203b 100644
--- a/docs/docs/how_to/streaming.ipynb
+++ b/docs/docs/how_to/streaming.ipynb
@@ -17,6 +17,15 @@
"source": [
"# How to stream\n",
"\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "- [Chat models](/docs/concepts/#chat-models)\n",
+ "- [LangChain Expression Language](/docs/concepts/#langchain-expression-language)\n",
+ "- [Output parsers](/docs/concepts/#output-parsers)\n",
+ "\n",
+ ":::\n",
+ "\n",
"Streaming is critical in making applications based on LLMs feel responsive to end-users.\n",
"\n",
"Important LangChain primitives like [chat models](/docs/concepts/#chat-models), [output parsers](/docs/concepts/#output-parsers), [prompts](/docs/concepts/#prompt-templates), [retrievers](/docs/concepts/#retrievers), and [agents](/docs/concepts/#agents) implement the LangChain [Runnable Interface](/docs/concepts#interface).\n",
@@ -28,16 +37,6 @@
"\n",
"Let's take a look at both approaches, and try to understand how to use them.\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```\n",
- "\n",
"## Using Stream\n",
"\n",
"All `Runnable` objects implement a sync method called `stream` and an async variant called `astream`. \n",
@@ -1464,7 +1463,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/structured_output.ipynb b/docs/docs/how_to/structured_output.ipynb
index 4a2ab17b660..a3828ccd816 100644
--- a/docs/docs/how_to/structured_output.ipynb
+++ b/docs/docs/how_to/structured_output.ipynb
@@ -17,15 +17,15 @@
"source": [
"# How to return structured data from a model\n",
"\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "- [Chat models](/docs/concepts/#chat-models)\n",
+ "- [Function/tool calling](/docs/concepts/#functiontool-calling)\n",
+ ":::\n",
+ "\n",
"It is often useful to have a model return output that matches some specific schema. One common use-case is extracting data from arbitrary text to insert into a traditional database or use with some other downstrem system. This guide will show you a few different strategies you can use to do this.\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```\n",
"\n",
"## The `.with_structured_output()` method\n",
"\n",
@@ -41,7 +41,7 @@
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"\n",
"```"
]
@@ -58,7 +58,7 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
- "model = ChatOpenAI(\n",
+ "llm = ChatOpenAI(\n",
" model=\"gpt-4-0125-preview\",\n",
" temperature=0,\n",
")"
@@ -93,7 +93,7 @@
" rating: Optional[int] = Field(description=\"How funny the joke is, from 1 to 10\")\n",
"\n",
"\n",
- "structured_llm = model.with_structured_output(Joke)\n",
+ "structured_llm = llm.with_structured_output(Joke)\n",
"\n",
"structured_llm.invoke(\"Tell me a joke about cats\")"
]
@@ -133,7 +133,7 @@
}
],
"source": [
- "structured_llm = model.with_structured_output(\n",
+ "structured_llm = llm.with_structured_output(\n",
" {\n",
" \"name\": \"joke\",\n",
" \"description\": \"Joke to tell user.\",\n",
@@ -198,7 +198,7 @@
" output: Union[Joke, ConversationalResponse]\n",
"\n",
"\n",
- "structured_llm = model.with_structured_output(Response)\n",
+ "structured_llm = llm.with_structured_output(Response)\n",
"\n",
"structured_llm.invoke(\"Tell me a joke about cats\")"
]
@@ -262,7 +262,7 @@
}
],
"source": [
- "structured_llm = model.with_structured_output(Joke, method=\"json_mode\")\n",
+ "structured_llm = llm.with_structured_output(Joke, method=\"json_mode\")\n",
"\n",
"structured_llm.invoke(\n",
" \"Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys\"\n",
@@ -296,7 +296,7 @@
"source": [
"from typing import List\n",
"\n",
- "from langchain.output_parsers import PydanticOutputParser\n",
+ "from langchain_core.output_parsers import PydanticOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
@@ -395,7 +395,7 @@
}
],
"source": [
- "chain = prompt | model | parser\n",
+ "chain = prompt | llm | parser\n",
"\n",
"chain.invoke({\"query\": query})"
]
@@ -538,7 +538,7 @@
}
],
"source": [
- "chain = prompt | model | extract_json\n",
+ "chain = prompt | llm | extract_json\n",
"\n",
"chain.invoke({\"query\": query})"
]
@@ -554,12 +554,6 @@
"\n",
"To learn more, check out the other how-to guides in this section, or the conceptual guide on tool calling."
]
- },
- {
- "cell_type": "markdown",
- "id": "6e3759e2",
- "metadata": {},
- "source": []
}
],
"metadata": {
@@ -578,7 +572,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.4"
+ "version": "3.9.1"
}
},
"nbformat": 4,
diff --git a/docs/docs/how_to/tool_calling.ipynb b/docs/docs/how_to/tool_calling.ipynb
index 3a293612bb1..8ce1d8656c1 100644
--- a/docs/docs/how_to/tool_calling.ipynb
+++ b/docs/docs/how_to/tool_calling.ipynb
@@ -6,6 +6,14 @@
"source": [
"# How to use a chat model to call tools\n",
"\n",
+ ":::info Prerequisites\n",
+ "\n",
+ "This guide assumes familiarity with the following concepts:\n",
+ "- [Chat models](/docs/concepts/#chat-models)\n",
+ "- [LangChain Tools](/docs/concepts/#tools)\n",
+ "\n",
+ ":::\n",
+ "\n",
"```{=mdx}\n",
":::info\n",
"We use the term tool calling interchangeably with function calling. Although\n",
@@ -40,15 +48,6 @@
"LangChain implements standard interfaces for defining tools, passing them to LLMs, \n",
"and representing tool calls. This guide will show you how to use them.\n",
"\n",
- "```{=mdx}\n",
- "import PrerequisiteLinks from \"@theme/PrerequisiteLinks\";\n",
- "\n",
- "\n",
- "```\n",
- "\n",
"## Passing tools to chat models\n",
"\n",
"Chat models that support tool calling features implement a `.bind_tools` method, which \n",
@@ -706,9 +705,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.1"
+ "version": "3.9.1"
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
}
diff --git a/docs/src/theme/PrerequisiteLinks.js b/docs/src/theme/PrerequisiteLinks.js
deleted file mode 100644
index 30c71a4b849..00000000000
--- a/docs/src/theme/PrerequisiteLinks.js
+++ /dev/null
@@ -1,19 +0,0 @@
-import React from "react";
-import { marked } from "marked";
-import DOMPurify from "isomorphic-dompurify";
-import Admonition from '@theme/Admonition';
-
-export default function PrerequisiteLinks({ content }) {
- return (
-
-
- This guide will assume familiarity with the following concepts:
-