From aa212c3d0ea9ba9bf09cefb1cf180ab9559aee6d Mon Sep 17 00:00:00 2001
From: Bagatur <22008038+baskaryan@users.noreply.github.com>
Date: Wed, 25 Oct 2023 12:09:41 -0700
Subject: [PATCH] rm .html from local doc links (#12293)
---
docs/docs/get_started/quickstart.mdx | 2 +-
docs/docs/guides/deployments/index.mdx | 8 ++++----
docs/docs/integrations/callbacks/argilla.ipynb | 2 +-
.../integrations/document_loaders/apify_dataset.ipynb | 2 +-
.../integrations/document_loaders/pandas_dataframe.ipynb | 2 +-
docs/docs/integrations/document_loaders/psychic.ipynb | 4 ++--
docs/docs/integrations/llms/llm_caching.ipynb | 6 +++---
docs/docs/integrations/platforms/aws.mdx | 4 ++--
docs/docs/integrations/platforms/google.mdx | 4 ++--
docs/docs/integrations/platforms/microsoft.mdx | 4 ++--
docs/docs/integrations/providers/airtable.md | 2 +-
docs/docs/integrations/providers/analyticdb.mdx | 2 +-
docs/docs/integrations/providers/apify.mdx | 4 ++--
docs/docs/integrations/providers/arangodb.mdx | 2 +-
docs/docs/integrations/providers/argilla.mdx | 2 +-
docs/docs/integrations/providers/chroma.mdx | 2 +-
docs/docs/integrations/providers/clarifai.mdx | 4 ++--
docs/docs/integrations/providers/cohere.mdx | 2 +-
docs/docs/integrations/providers/comet_tracking.ipynb | 2 +-
docs/docs/integrations/providers/ctransformers.mdx | 2 +-
docs/docs/integrations/providers/dashvector.mdx | 2 +-
docs/docs/integrations/providers/databricks.md | 4 ++--
docs/docs/integrations/providers/dingo.mdx | 2 +-
docs/docs/integrations/providers/epsilla.mdx | 2 +-
docs/docs/integrations/providers/golden.mdx | 2 +-
docs/docs/integrations/providers/google_serper.mdx | 2 +-
docs/docs/integrations/providers/gpt4all.mdx | 2 +-
docs/docs/integrations/providers/gradient.mdx | 2 +-
docs/docs/integrations/providers/huggingface.mdx | 2 +-
docs/docs/integrations/providers/infino.mdx | 2 +-
docs/docs/integrations/providers/jina.mdx | 2 +-
docs/docs/integrations/providers/lancedb.mdx | 2 +-
docs/docs/integrations/providers/llamacpp.mdx | 4 ++--
docs/docs/integrations/providers/marqo.md | 2 +-
docs/docs/integrations/providers/milvus.mdx | 2 +-
docs/docs/integrations/providers/minimax.mdx | 4 ++--
docs/docs/integrations/providers/mlflow_ai_gateway.mdx | 6 +++---
docs/docs/integrations/providers/mlflow_tracking.ipynb | 2 +-
docs/docs/integrations/providers/momento.mdx | 4 ++--
docs/docs/integrations/providers/motherduck.mdx | 2 +-
docs/docs/integrations/providers/myscale.mdx | 2 +-
docs/docs/integrations/providers/neo4j.mdx | 6 +++---
docs/docs/integrations/providers/notion.mdx | 4 ++--
docs/docs/integrations/providers/openllm.mdx | 2 +-
docs/docs/integrations/providers/opensearch.mdx | 2 +-
docs/docs/integrations/providers/openweathermap.mdx | 2 +-
docs/docs/integrations/providers/pgvector.mdx | 2 +-
docs/docs/integrations/providers/pinecone.mdx | 2 +-
docs/docs/integrations/providers/promptlayer.mdx | 4 ++--
docs/docs/integrations/providers/providers/semadb.mdx | 2 +-
docs/docs/integrations/providers/psychic.mdx | 2 +-
docs/docs/integrations/providers/qdrant.mdx | 2 +-
docs/docs/integrations/providers/redis.mdx | 6 +++---
docs/docs/integrations/providers/runhouse.mdx | 4 ++--
docs/docs/integrations/providers/serpapi.mdx | 2 +-
docs/docs/integrations/providers/sklearn.mdx | 2 +-
docs/docs/integrations/providers/spacy.mdx | 2 +-
docs/docs/integrations/providers/spreedly.mdx | 2 +-
docs/docs/integrations/providers/stripe.mdx | 2 +-
docs/docs/integrations/providers/tair.mdx | 2 +-
docs/docs/integrations/providers/telegram.mdx | 2 +-
docs/docs/integrations/providers/tencentvectordb.mdx | 2 +-
docs/docs/integrations/providers/trello.mdx | 2 +-
docs/docs/integrations/providers/typesense.mdx | 2 +-
docs/docs/integrations/providers/upstash.mdx | 2 +-
.../integrations/providers/vectara/vectara_chat.ipynb | 2 +-
docs/docs/integrations/providers/weaviate.mdx | 2 +-
docs/docs/integrations/providers/wolfram_alpha.mdx | 2 +-
docs/docs/integrations/providers/xinference.mdx | 4 ++--
docs/docs/integrations/providers/zilliz.mdx | 2 +-
docs/docs/integrations/toolkits/openapi_nla.ipynb | 2 +-
docs/docs/integrations/toolkits/spark.ipynb | 2 +-
docs/docs/integrations/tools/apify.ipynb | 4 ++--
docs/docs/integrations/vectorstores/typesense.ipynb | 2 +-
.../agents/how_to/custom_agent_with_tool_retrieval.ipynb | 2 +-
.../modules/agents/how_to/sharedmemory_for_tools.ipynb | 4 ++--
docs/docs/modules/callbacks/custom_chain.mdx | 2 +-
docs/docs/modules/chains/how_to/async_chain.ipynb | 2 +-
docs/docs/modules/chains/how_to/call_methods.ipynb | 2 +-
.../data_connection/document_loaders/file_directory.mdx | 2 +-
docs/docs/modules/memory/agent_with_memory.ipynb | 4 ++--
docs/docs/modules/memory/agent_with_memory_in_db.ipynb | 6 +++---
docs/docs/use_cases/qa_structured/sql.ipynb | 4 ++--
docs/docs/use_cases/question_answering/vector_db_qa.mdx | 6 +++---
84 files changed, 117 insertions(+), 117 deletions(-)
diff --git a/docs/docs/get_started/quickstart.mdx b/docs/docs/get_started/quickstart.mdx
index c99a8b68af8..d86f6dc50da 100644
--- a/docs/docs/get_started/quickstart.mdx
+++ b/docs/docs/get_started/quickstart.mdx
@@ -18,7 +18,7 @@ import CodeBlock from "@theme/CodeBlock";
-For more details, see our [Installation guide](/docs/get_started/installation.html).
+For more details, see our [Installation guide](/docs/get_started/installation).
## Environment setup
diff --git a/docs/docs/guides/deployments/index.mdx b/docs/docs/guides/deployments/index.mdx
index 09841cff147..8299aa02439 100644
--- a/docs/docs/guides/deployments/index.mdx
+++ b/docs/docs/guides/deployments/index.mdx
@@ -20,11 +20,11 @@ This guide aims to provide a comprehensive overview of the requirements for depl
Understanding these components is crucial when assessing serving systems. LangChain integrates with several open-source projects designed to tackle these issues, providing a robust framework for productionizing your LLM applications. Some notable frameworks include:
-- [Ray Serve](/docs/ecosystem/integrations/ray_serve.html)
+- [Ray Serve](/docs/ecosystem/integrations/ray_serve)
- [BentoML](https://github.com/bentoml/BentoML)
-- [OpenLLM](/docs/ecosystem/integrations/openllm.html)
-- [Modal](/docs/ecosystem/integrations/modal.html)
-- [Jina](/docs/ecosystem/integrations/jina.html#deployment)
+- [OpenLLM](/docs/ecosystem/integrations/openllm)
+- [Modal](/docs/ecosystem/integrations/modal)
+- [Jina](/docs/ecosystem/integrations/jina#deployment)
These links will provide further information on each ecosystem, assisting you in finding the best fit for your LLM deployment needs.
diff --git a/docs/docs/integrations/callbacks/argilla.ipynb b/docs/docs/integrations/callbacks/argilla.ipynb
index dd487223364..a31b096945d 100644
--- a/docs/docs/integrations/callbacks/argilla.ipynb
+++ b/docs/docs/integrations/callbacks/argilla.ipynb
@@ -14,7 +14,7 @@
"> using both human and machine feedback. We provide support for each step in the MLOps cycle, \n",
"> from data labeling to model monitoring.\n",
"\n",
- "\n",
+ "\n",
"
\n",
""
]
diff --git a/docs/docs/integrations/document_loaders/apify_dataset.ipynb b/docs/docs/integrations/document_loaders/apify_dataset.ipynb
index ebf65f9d16c..3dc3cc99fe0 100644
--- a/docs/docs/integrations/document_loaders/apify_dataset.ipynb
+++ b/docs/docs/integrations/document_loaders/apify_dataset.ipynb
@@ -13,7 +13,7 @@
"\n",
"## Prerequisites\n",
"\n",
- "You need to have an existing dataset on the Apify platform. If you don't have one, please first check out [this notebook](/docs/integrations/tools/apify.html) on how to use Apify to extract content from documentation, knowledge bases, help centers, or blogs."
+ "You need to have an existing dataset on the Apify platform. If you don't have one, please first check out [this notebook](/docs/integrations/tools/apify) on how to use Apify to extract content from documentation, knowledge bases, help centers, or blogs."
]
},
{
diff --git a/docs/docs/integrations/document_loaders/pandas_dataframe.ipynb b/docs/docs/integrations/document_loaders/pandas_dataframe.ipynb
index e3d268c9e18..d415a26beac 100644
--- a/docs/docs/integrations/document_loaders/pandas_dataframe.ipynb
+++ b/docs/docs/integrations/document_loaders/pandas_dataframe.ipynb
@@ -7,7 +7,7 @@
"source": [
"# Pandas DataFrame\n",
"\n",
- "This notebook goes over how to load data from a [pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/index.html) DataFrame."
+ "This notebook goes over how to load data from a [pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/index) DataFrame."
]
},
{
diff --git a/docs/docs/integrations/document_loaders/psychic.ipynb b/docs/docs/integrations/document_loaders/psychic.ipynb
index d4e8773a91d..720d90dc125 100644
--- a/docs/docs/integrations/document_loaders/psychic.ipynb
+++ b/docs/docs/integrations/document_loaders/psychic.ipynb
@@ -5,10 +5,10 @@
"metadata": {},
"source": [
"# Psychic\n",
- "This notebook covers how to load documents from `Psychic`. See [here](/docs/ecosystem/integrations/psychic.html) for more details.\n",
+ "This notebook covers how to load documents from `Psychic`. See [here](/docs/ecosystem/integrations/psychic) for more details.\n",
"\n",
"## Prerequisites\n",
- "1. Follow the Quick Start section in [this document](/docs/ecosystem/integrations/psychic.html)\n",
+ "1. Follow the Quick Start section in [this document](/docs/ecosystem/integrations/psychic)\n",
"2. Log into the [Psychic dashboard](https://dashboard.psychic.dev/) and get your secret key\n",
"3. Install the frontend react library into your web app and have a user authenticate a connection. The connection will be created using the connection id that you specify."
]
diff --git a/docs/docs/integrations/llms/llm_caching.ipynb b/docs/docs/integrations/llms/llm_caching.ipynb
index d164678eaa1..45bccb916ba 100644
--- a/docs/docs/integrations/llms/llm_caching.ipynb
+++ b/docs/docs/integrations/llms/llm_caching.ipynb
@@ -319,7 +319,7 @@
"metadata": {},
"source": [
"### Standard Cache\n",
- "Use [Redis](/docs/ecosystem/integrations/redis.html) to cache prompts and responses."
+ "Use [Redis](/docs/ecosystem/integrations/redis) to cache prompts and responses."
]
},
{
@@ -405,7 +405,7 @@
"metadata": {},
"source": [
"### Semantic Cache\n",
- "Use [Redis](/docs/ecosystem/integrations/redis.html) to cache prompts and responses and evaluate hits based on semantic similarity."
+ "Use [Redis](/docs/ecosystem/integrations/redis) to cache prompts and responses and evaluate hits based on semantic similarity."
]
},
{
@@ -730,7 +730,7 @@
},
"source": [
"## `Momento` Cache\n",
- "Use [Momento](/docs/ecosystem/integrations/momento.html) to cache prompts and responses.\n",
+ "Use [Momento](/docs/ecosystem/integrations/momento) to cache prompts and responses.\n",
"\n",
"Requires momento to use, uncomment below to install:"
]
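For reference, a minimal sketch of the Redis cache wiring described in the hunks above (assuming a local Redis server, the `redis` client installed, and, for the semantic variant, an OpenAI key in the environment):

```python
import langchain
from redis import Redis
from langchain.cache import RedisCache, RedisSemanticCache
from langchain.embeddings import OpenAIEmbeddings

# Standard cache: exact prompt matches are served from Redis.
langchain.llm_cache = RedisCache(redis_=Redis())

# Semantic cache: hits are judged by embedding similarity instead of exact match.
langchain.llm_cache = RedisSemanticCache(
    redis_url="redis://localhost:6379", embedding=OpenAIEmbeddings()
)
```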
diff --git a/docs/docs/integrations/platforms/aws.mdx b/docs/docs/integrations/platforms/aws.mdx
index 44799080c48..5c6d292d0a5 100644
--- a/docs/docs/integrations/platforms/aws.mdx
+++ b/docs/docs/integrations/platforms/aws.mdx
@@ -75,9 +75,9 @@ from langchain.llms.sagemaker_endpoint import ContentHandlerBase
>[AWS S3 Directory](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-folders.html)
>[AWS S3 Buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingBucket.html)
-See a [usage example for S3DirectoryLoader](/docs/integrations/document_loaders/aws_s3_directory.html).
+See a [usage example for S3DirectoryLoader](/docs/integrations/document_loaders/aws_s3_directory).
-See a [usage example for S3FileLoader](/docs/integrations/document_loaders/aws_s3_file.html).
+See a [usage example for S3FileLoader](/docs/integrations/document_loaders/aws_s3_file).
```python
from langchain.document_loaders import S3DirectoryLoader, S3FileLoader
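# A rough usage sketch: the bucket name and prefix below are placeholders,
# and AWS credentials are assumed to be configured in the environment.
loader = S3DirectoryLoader("my-bucket", prefix="reports/")
docs = loader.load()
```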
diff --git a/docs/docs/integrations/platforms/google.mdx b/docs/docs/integrations/platforms/google.mdx
index b5df554e4e6..86f2e08d119 100644
--- a/docs/docs/integrations/platforms/google.mdx
+++ b/docs/docs/integrations/platforms/google.mdx
@@ -83,7 +83,7 @@ First, we need to install several python packages.
pip install google-api-python-client google-auth-httplib2 google-auth-oauthlib
```
-See a [usage example and authorizing instructions](/docs/integrations/document_loaders/google_drive.html).
+See a [usage example and authorizing instructions](/docs/integrations/document_loaders/google_drive).
```python
from langchain.document_loaders import GoogleDriveLoader
@@ -182,7 +182,7 @@ There exists a `GoogleSearchAPIWrapper` utility which wraps this API. To import this utility:
from langchain.utilities import GoogleSearchAPIWrapper
```
-For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/google_search.html).
+For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/google_search).
We can easily load this wrapper as a Tool (to use with an Agent). We can do this with:
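The tool form referred to here is loaded in one line; a minimal sketch, assuming `GOOGLE_API_KEY` and `GOOGLE_CSE_ID` are set in the environment:

```python
from langchain.agents import load_tools

# Exposes the GoogleSearchAPIWrapper as an agent tool.
tools = load_tools(["google-search"])
```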
diff --git a/docs/docs/integrations/platforms/microsoft.mdx b/docs/docs/integrations/platforms/microsoft.mdx
index ee3819bfe97..150dabe87b9 100644
--- a/docs/docs/integrations/platforms/microsoft.mdx
+++ b/docs/docs/integrations/platforms/microsoft.mdx
@@ -70,13 +70,13 @@ from langchain.chat_models import AzureChatOpenAI
pip install azure-storage-blob
```
-See a [usage example for the Azure Blob Storage](/docs/integrations/document_loaders/azure_blob_storage_container.html).
+See a [usage example for the Azure Blob Storage](/docs/integrations/document_loaders/azure_blob_storage_container).
```python
from langchain.document_loaders import AzureBlobStorageContainerLoader
```
-See a [usage example for the Azure Files](/docs/integrations/document_loaders/azure_blob_storage_file.html).
+See a [usage example for the Azure Files](/docs/integrations/document_loaders/azure_blob_storage_file).
```python
from langchain.document_loaders import AzureBlobStorageFileLoader
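# A rough usage sketch: the connection string and container name below are
# placeholders for your storage account.
loader = AzureBlobStorageContainerLoader(
    conn_str="<connection string>", container="<container name>"
)
docs = loader.load()
```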
diff --git a/docs/docs/integrations/providers/airtable.md b/docs/docs/integrations/providers/airtable.md
index ce1edcecbd0..635452adb55 100644
--- a/docs/docs/integrations/providers/airtable.md
+++ b/docs/docs/integrations/providers/airtable.md
@@ -25,4 +25,4 @@ pip install pyairtable
from langchain.document_loaders import AirtableLoader
```
-See an [example](/docs/integrations/document_loaders/airtable.html).
+See an [example](/docs/integrations/document_loaders/airtable).
diff --git a/docs/docs/integrations/providers/analyticdb.mdx b/docs/docs/integrations/providers/analyticdb.mdx
index b83e7a0a45e..cde6db5fb40 100644
--- a/docs/docs/integrations/providers/analyticdb.mdx
+++ b/docs/docs/integrations/providers/analyticdb.mdx
@@ -12,4 +12,4 @@ To import this vectorstore:
from langchain.vectorstores import AnalyticDB
```
-For a more detailed walkthrough of the AnalyticDB wrapper, see [this notebook](/docs/integrations/vectorstores/analyticdb.html)
+For a more detailed walkthrough of the AnalyticDB wrapper, see [this notebook](/docs/integrations/vectorstores/analyticdb)
diff --git a/docs/docs/integrations/providers/apify.mdx b/docs/docs/integrations/providers/apify.mdx
index cafd99179d9..6bc7486b376 100644
--- a/docs/docs/integrations/providers/apify.mdx
+++ b/docs/docs/integrations/providers/apify.mdx
@@ -32,7 +32,7 @@ You can use the `ApifyWrapper` to run Actors on the Apify platform.
from langchain.utilities import ApifyWrapper
```
-For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/apify.html).
+For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/apify).
### Loader
@@ -43,4 +43,4 @@ You can also use our `ApifyDatasetLoader` to get data from an Apify dataset.
from langchain.document_loaders import ApifyDatasetLoader
```
-For a more detailed walkthrough of this loader, see [this notebook](/docs/integrations/document_loaders/apify_dataset.html).
+For a more detailed walkthrough of this loader, see [this notebook](/docs/integrations/document_loaders/apify_dataset).
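A minimal sketch of the loader described above; the dataset ID and item fields are placeholders for whatever your Actor run produced:

```python
from langchain.document_loaders import ApifyDatasetLoader
from langchain.schema import Document

loader = ApifyDatasetLoader(
    dataset_id="<your dataset id>",
    # Map each raw dataset item onto a Document; field names depend on the Actor.
    dataset_mapping_function=lambda item: Document(
        page_content=item["text"] or "", metadata={"source": item["url"]}
    ),
)
docs = loader.load()
```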
diff --git a/docs/docs/integrations/providers/arangodb.mdx b/docs/docs/integrations/providers/arangodb.mdx
index 990e33ea36e..2bf68a9a2b9 100644
--- a/docs/docs/integrations/providers/arangodb.mdx
+++ b/docs/docs/integrations/providers/arangodb.mdx
@@ -13,7 +13,7 @@ pip install python-arango
Connect your ArangoDB Database with a chat model to get insights on your data.
-See the notebook example [here](/docs/use_cases/graph/graph_arangodb_qa.html).
+See the notebook example [here](/docs/use_cases/graph/graph_arangodb_qa).
```python
from arango import ArangoClient
diff --git a/docs/docs/integrations/providers/argilla.mdx b/docs/docs/integrations/providers/argilla.mdx
index 2024db079e2..209a69647be 100644
--- a/docs/docs/integrations/providers/argilla.mdx
+++ b/docs/docs/integrations/providers/argilla.mdx
@@ -22,7 +22,7 @@ If you don't you can refer to [Argilla - 🚀 Quickstart](https://docs.argilla.i
## Tracking
-See a [usage example of `ArgillaCallbackHandler`](/docs/integrations/callbacks/argilla.html).
+See a [usage example of `ArgillaCallbackHandler`](/docs/integrations/callbacks/argilla).
```python
from langchain.callbacks import ArgillaCallbackHandler
diff --git a/docs/docs/integrations/providers/chroma.mdx b/docs/docs/integrations/providers/chroma.mdx
index f642428b6f3..089f9fa64b1 100644
--- a/docs/docs/integrations/providers/chroma.mdx
+++ b/docs/docs/integrations/providers/chroma.mdx
@@ -18,7 +18,7 @@ whether for semantic search or example selection.
from langchain.vectorstores import Chroma
```
-For a more detailed walkthrough of the Chroma wrapper, see [this notebook](/docs/integrations/vectorstores/chroma.html)
+For a more detailed walkthrough of the Chroma wrapper, see [this notebook](/docs/integrations/vectorstores/chroma)
## Retriever
diff --git a/docs/docs/integrations/providers/clarifai.mdx b/docs/docs/integrations/providers/clarifai.mdx
index 69347f563c8..b54e096a58d 100644
--- a/docs/docs/integrations/providers/clarifai.mdx
+++ b/docs/docs/integrations/providers/clarifai.mdx
@@ -25,7 +25,7 @@ from langchain.llms import Clarifai
llm = Clarifai(pat=CLARIFAI_PAT, user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID)
```
-For more details, the docs on the Clarifai LLM wrapper provide a [detailed walkthrough](/docs/integrations/llms/clarifai.html).
+For more details, the docs on the Clarifai LLM wrapper provide a [detailed walkthrough](/docs/integrations/llms/clarifai).
### Text Embedding Models
@@ -37,7 +37,7 @@ There is a Clarifai Embedding model in LangChain, which you can access with:
from langchain.embeddings import ClarifaiEmbeddings
embeddings = ClarifaiEmbeddings(pat=CLARIFAI_PAT, user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID)
```
-For more details, the docs on the Clarifai Embeddings wrapper provide a [detailed walkthrough](/docs/integrations/text_embedding/clarifai.html).
+For more details, the docs on the Clarifai Embeddings wrapper provide a [detailed walkthrough](/docs/integrations/text_embedding/clarifai).
## Vectorstore
diff --git a/docs/docs/integrations/providers/cohere.mdx b/docs/docs/integrations/providers/cohere.mdx
index 768a6b64511..69e8d0b4d97 100644
--- a/docs/docs/integrations/providers/cohere.mdx
+++ b/docs/docs/integrations/providers/cohere.mdx
@@ -27,7 +27,7 @@ There exists a Cohere Embedding model, which you can access with
```python
from langchain.embeddings import CohereEmbeddings
```
-For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/cohere.html)
+For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/cohere)
## Retriever
diff --git a/docs/docs/integrations/providers/comet_tracking.ipynb b/docs/docs/integrations/providers/comet_tracking.ipynb
index a5ae494aaa9..f1f88aa7aec 100644
--- a/docs/docs/integrations/providers/comet_tracking.ipynb
+++ b/docs/docs/integrations/providers/comet_tracking.ipynb
@@ -20,7 +20,7 @@
"source": [
"In this guide we will demonstrate how to track your Langchain Experiments, Evaluation Metrics, and LLM Sessions with [Comet](https://www.comet.com/site/?utm_source=langchain&utm_medium=referral&utm_campaign=comet_notebook). \n",
"\n",
- "\n",
+ "\n",
"
\n",
"\n",
"\n",
diff --git a/docs/docs/integrations/providers/ctransformers.mdx b/docs/docs/integrations/providers/ctransformers.mdx
index 282d6ce38c5..35dc7725207 100644
--- a/docs/docs/integrations/providers/ctransformers.mdx
+++ b/docs/docs/integrations/providers/ctransformers.mdx
@@ -54,4 +54,4 @@ llm = CTransformers(model='marella/gpt-2-ggml', config=config)
See [Documentation](https://github.com/marella/ctransformers#config) for a list of available parameters.
-For a more detailed walkthrough of this, see [this notebook](/docs/integrations/llms/ctransformers.html).
+For a more detailed walkthrough of this, see [this notebook](/docs/integrations/llms/ctransformers).
diff --git a/docs/docs/integrations/providers/dashvector.mdx b/docs/docs/integrations/providers/dashvector.mdx
index d1ab24a0d0b..1d42ad84213 100644
--- a/docs/docs/integrations/providers/dashvector.mdx
+++ b/docs/docs/integrations/providers/dashvector.mdx
@@ -21,4 +21,4 @@ You may import the vectorstore by:
from langchain.vectorstores import DashVector
```
-For a detailed walkthrough of the DashVector wrapper, please refer to [this notebook](/docs/integrations/vectorstores/dashvector.html)
+For a detailed walkthrough of the DashVector wrapper, please refer to [this notebook](/docs/integrations/vectorstores/dashvector)
diff --git a/docs/docs/integrations/providers/databricks.md b/docs/docs/integrations/providers/databricks.md
index 6dbabbce60c..3c5c19de5c8 100644
--- a/docs/docs/integrations/providers/databricks.md
+++ b/docs/docs/integrations/providers/databricks.md
@@ -33,11 +33,11 @@ See [MLflow AI Gateway](/docs/integrations/providers/mlflow_ai_gateway).
Databricks as an LLM provider
-----------------------------
-The notebook [Wrap Databricks endpoints as LLMs](/docs/integrations/llms/databricks.html) illustrates the method to wrap Databricks endpoints as LLMs in LangChain. It supports two types of endpoints: the serving endpoint, which is recommended for both production and development, and the cluster driver proxy app, which is recommended for interactive development.
+The notebook [Wrap Databricks endpoints as LLMs](/docs/integrations/llms/databricks) illustrates the method to wrap Databricks endpoints as LLMs in LangChain. It supports two types of endpoints: the serving endpoint, which is recommended for both production and development, and the cluster driver proxy app, which is recommended for interactive development.
Databricks endpoints support Dolly, but are also great for hosting models like MPT-7B or any other models from the Hugging Face ecosystem. Databricks endpoints can also be used with proprietary models like OpenAI to provide a governance layer for enterprises.
Databricks Dolly
----------------
-Databricks’ Dolly is an instruction-following large language model trained on the Databricks machine learning platform that is licensed for commercial use. The model is available on Hugging Face Hub as databricks/dolly-v2-12b. See the notebook [Hugging Face Hub](/docs/integrations/llms/huggingface_hub.html) for instructions to access it through the Hugging Face Hub integration with LangChain.
+Databricks’ Dolly is an instruction-following large language model trained on the Databricks machine learning platform that is licensed for commercial use. The model is available on Hugging Face Hub as databricks/dolly-v2-12b. See the notebook [Hugging Face Hub](/docs/integrations/llms/huggingface_hub) for instructions to access it through the Hugging Face Hub integration with LangChain.
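A minimal sketch of the serving-endpoint wrapper the notebook covers, assuming an endpoint named `dolly` and Databricks credentials available in the environment:

```python
from langchain.llms import Databricks

# Wrap a Databricks serving endpoint as a LangChain LLM.
llm = Databricks(endpoint_name="dolly")
llm("How are you?")
```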
diff --git a/docs/docs/integrations/providers/dingo.mdx b/docs/docs/integrations/providers/dingo.mdx
index ed04973519d..ab5bffa65e3 100644
--- a/docs/docs/integrations/providers/dingo.mdx
+++ b/docs/docs/integrations/providers/dingo.mdx
@@ -16,4 +16,4 @@ To import this vectorstore:
from langchain.vectorstores import Dingo
```
-For a more detailed walkthrough of the DingoDB wrapper, see [this notebook](/docs/integrations/vectorstores/dingo.html)
+For a more detailed walkthrough of the DingoDB wrapper, see [this notebook](/docs/integrations/vectorstores/dingo)
diff --git a/docs/docs/integrations/providers/epsilla.mdx b/docs/docs/integrations/providers/epsilla.mdx
index 40e91329e45..fb4fa4039c2 100644
--- a/docs/docs/integrations/providers/epsilla.mdx
+++ b/docs/docs/integrations/providers/epsilla.mdx
@@ -20,4 +20,4 @@ To import this vectorstore:
from langchain.vectorstores import Epsilla
```
-For a more detailed walkthrough of the Epsilla wrapper, see [this notebook](/docs/integrations/vectorstores/epsilla.html)
\ No newline at end of file
+For a more detailed walkthrough of the Epsilla wrapper, see [this notebook](/docs/integrations/vectorstores/epsilla)
\ No newline at end of file
diff --git a/docs/docs/integrations/providers/golden.mdx b/docs/docs/integrations/providers/golden.mdx
index 21398a2a5dc..5f1c5c15703 100644
--- a/docs/docs/integrations/providers/golden.mdx
+++ b/docs/docs/integrations/providers/golden.mdx
@@ -20,7 +20,7 @@ There exists a GoldenQueryAPIWrapper utility which wraps this API. To import this utility:
from langchain.utilities.golden_query import GoldenQueryAPIWrapper
```
-For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/golden_query.html).
+For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/golden_query).
### Tool
diff --git a/docs/docs/integrations/providers/google_serper.mdx b/docs/docs/integrations/providers/google_serper.mdx
index 74cf86d8e20..b44065372dd 100644
--- a/docs/docs/integrations/providers/google_serper.mdx
+++ b/docs/docs/integrations/providers/google_serper.mdx
@@ -59,7 +59,7 @@ So the final answer is: El Palmar, Spain
'El Palmar, Spain'
```
-For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/google_serper.html).
+For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/google_serper).
### Tool
diff --git a/docs/docs/integrations/providers/gpt4all.mdx b/docs/docs/integrations/providers/gpt4all.mdx
index 72e5145a34b..0917bff1a30 100644
--- a/docs/docs/integrations/providers/gpt4all.mdx
+++ b/docs/docs/integrations/providers/gpt4all.mdx
@@ -45,4 +45,4 @@ model("Once upon a time, ", callbacks=callbacks)
You can find links to model file downloads in the [pyllamacpp](https://github.com/nomic-ai/pyllamacpp) repository.
-For a more detailed walkthrough of this, see [this notebook](/docs/integrations/llms/gpt4all.html)
+For a more detailed walkthrough of this, see [this notebook](/docs/integrations/llms/gpt4all)
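A minimal sketch of the setup behind the snippet above; the model path is a placeholder for a locally downloaded weights file:

```python
from langchain.llms import GPT4All

# Load a local model file and generate from a prompt.
model = GPT4All(model="./models/gpt4all-model.bin", n_threads=8)
response = model("Once upon a time, ")
```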
diff --git a/docs/docs/integrations/providers/gradient.mdx b/docs/docs/integrations/providers/gradient.mdx
index 0ef07e79de0..d143d595293 100644
--- a/docs/docs/integrations/providers/gradient.mdx
+++ b/docs/docs/integrations/providers/gradient.mdx
@@ -24,4 +24,4 @@ There exists a Gradient Embedding model, which you can access with
```python
from langchain.embeddings import GradientEmbeddings
```
-For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/gradient.html)
+For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/gradient)
diff --git a/docs/docs/integrations/providers/huggingface.mdx b/docs/docs/integrations/providers/huggingface.mdx
index 27fe4d42db7..7fd03a381ea 100644
--- a/docs/docs/integrations/providers/huggingface.mdx
+++ b/docs/docs/integrations/providers/huggingface.mdx
@@ -30,7 +30,7 @@ To use the wrapper for a model hosted on Hugging Face Hub:
```python
from langchain.llms import HuggingFaceHub
```
-For a more detailed walkthrough of the Hugging Face Hub wrapper, see [this notebook](/docs/integrations/llms/huggingface_hub.html)
+For a more detailed walkthrough of the Hugging Face Hub wrapper, see [this notebook](/docs/integrations/llms/huggingface_hub)
### Embeddings
diff --git a/docs/docs/integrations/providers/infino.mdx b/docs/docs/integrations/providers/infino.mdx
index 2fb2cc62e4c..d11c502a377 100644
--- a/docs/docs/integrations/providers/infino.mdx
+++ b/docs/docs/integrations/providers/infino.mdx
@@ -28,7 +28,7 @@ you don't, follow the next steps to start it:
## Using Infino
-See a [usage example of `InfinoCallbackHandler`](/docs/integrations/callbacks/infino.html).
+See a [usage example of `InfinoCallbackHandler`](/docs/integrations/callbacks/infino).
```python
from langchain.callbacks import InfinoCallbackHandler
diff --git a/docs/docs/integrations/providers/jina.mdx b/docs/docs/integrations/providers/jina.mdx
index dec677f6d15..181b57a4abf 100644
--- a/docs/docs/integrations/providers/jina.mdx
+++ b/docs/docs/integrations/providers/jina.mdx
@@ -15,7 +15,7 @@ There exists a Jina Embeddings wrapper, which you can access with
```python
from langchain.embeddings import JinaEmbeddings
```
-For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/jina.html)
+For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/jina)
## Deployment
diff --git a/docs/docs/integrations/providers/lancedb.mdx b/docs/docs/integrations/providers/lancedb.mdx
index 6e5ae741153..1275e690bc3 100644
--- a/docs/docs/integrations/providers/lancedb.mdx
+++ b/docs/docs/integrations/providers/lancedb.mdx
@@ -20,4 +20,4 @@ To import this vectorstore:
from langchain.vectorstores import LanceDB
```
-For a more detailed walkthrough of the LanceDB wrapper, see [this notebook](/docs/integrations/vectorstores/lancedb.html)
+For a more detailed walkthrough of the LanceDB wrapper, see [this notebook](/docs/integrations/vectorstores/lancedb)
diff --git a/docs/docs/integrations/providers/llamacpp.mdx b/docs/docs/integrations/providers/llamacpp.mdx
index a7a2f335ec2..53c29b6ff31 100644
--- a/docs/docs/integrations/providers/llamacpp.mdx
+++ b/docs/docs/integrations/providers/llamacpp.mdx
@@ -15,7 +15,7 @@ There exists a LlamaCpp LLM wrapper, which you can access with
```python
from langchain.llms import LlamaCpp
```
-For a more detailed walkthrough of this, see [this notebook](/docs/integrations/llms/llamacpp.html)
+For a more detailed walkthrough of this, see [this notebook](/docs/integrations/llms/llamacpp)
### Embeddings
@@ -23,4 +23,4 @@ There exists a LlamaCpp Embeddings wrapper, which you can access with
```python
from langchain.embeddings import LlamaCppEmbeddings
```
-For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/llamacpp.html)
+For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/llamacpp)
diff --git a/docs/docs/integrations/providers/marqo.md b/docs/docs/integrations/providers/marqo.md
index d26e08fb13b..3a6e24e35c1 100644
--- a/docs/docs/integrations/providers/marqo.md
+++ b/docs/docs/integrations/providers/marqo.md
@@ -28,4 +28,4 @@ To import this vectorstore:
from langchain.vectorstores import Marqo
```
-For a more detailed walkthrough of the Marqo wrapper and some of its unique features, see [this notebook](/docs/integrations/vectorstores/marqo.html)
+For a more detailed walkthrough of the Marqo wrapper and some of its unique features, see [this notebook](/docs/integrations/vectorstores/marqo)
diff --git a/docs/docs/integrations/providers/milvus.mdx b/docs/docs/integrations/providers/milvus.mdx
index 509cd5294ba..9f963233f5e 100644
--- a/docs/docs/integrations/providers/milvus.mdx
+++ b/docs/docs/integrations/providers/milvus.mdx
@@ -22,4 +22,4 @@ To import this vectorstore:
from langchain.vectorstores import Milvus
```
-For a more detailed walkthrough of the `Miluvs` wrapper, see [this notebook](/docs/integrations/vectorstores/milvus.html)
+For a more detailed walkthrough of the `Milvus` wrapper, see [this notebook](/docs/integrations/vectorstores/milvus)
diff --git a/docs/docs/integrations/providers/minimax.mdx b/docs/docs/integrations/providers/minimax.mdx
index 18fc101c0e8..cbeb74d62d5 100644
--- a/docs/docs/integrations/providers/minimax.mdx
+++ b/docs/docs/integrations/providers/minimax.mdx
@@ -11,7 +11,7 @@ Get a [Minimax group id](https://api.minimax.chat/user-center/basic-information)
## LLM
There exists a Minimax LLM wrapper, which you can access with
-See a [usage example](/docs/modules/model_io/models/llms/integrations/minimax.html).
+See a [usage example](/docs/modules/model_io/models/llms/integrations/minimax).
```python
from langchain.llms import Minimax
@@ -19,7 +19,7 @@ from langchain.llms import Minimax
## Chat Models
-See a [usage example](/docs/modules/model_io/models/chat/integrations/minimax.html)
+See a [usage example](/docs/modules/model_io/models/chat/integrations/minimax)
```python
from langchain.chat_models import MiniMaxChat
diff --git a/docs/docs/integrations/providers/mlflow_ai_gateway.mdx b/docs/docs/integrations/providers/mlflow_ai_gateway.mdx
index 6931a90cf97..3b944092f1c 100644
--- a/docs/docs/integrations/providers/mlflow_ai_gateway.mdx
+++ b/docs/docs/integrations/providers/mlflow_ai_gateway.mdx
@@ -1,9 +1,9 @@
# MLflow AI Gateway
->[The MLflow AI Gateway](https://www.mlflow.org/docs/latest/gateway/index.html) service is a powerful tool designed to streamline the usage and management of various large
+>[The MLflow AI Gateway](https://www.mlflow.org/docs/latest/gateway/index) service is a powerful tool designed to streamline the usage and management of various large
> language model (LLM) providers, such as OpenAI and Anthropic, within an organization. It offers a high-level interface
> that simplifies the interaction with these services by providing a unified endpoint to handle specific LLM related requests.
-> See [the MLflow AI Gateway documentation](https://mlflow.org/docs/latest/gateway/index.html) for more details.
+> See [the MLflow AI Gateway documentation](https://mlflow.org/docs/latest/gateway/index) for more details.
## Installation and Setup
@@ -52,7 +52,7 @@ mlflow gateway start --config-path /path/to/config.yaml
> This module exports multivariate LangChain models in the langchain flavor and univariate LangChain
> models in the pyfunc flavor.
-See the [API documentation and examples](https://www.mlflow.org/docs/latest/python_api/mlflow.langchain.html).
+See the [API documentation and examples](https://www.mlflow.org/docs/latest/python_api/mlflow.langchain).
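A minimal sketch of querying a locally started gateway, assuming a `completions` route was defined in `config.yaml`:

```python
from langchain.llms import MlflowAIGateway

# Route requests through the locally running gateway.
gateway = MlflowAIGateway(
    gateway_uri="http://127.0.0.1:5000",
    route="completions",
    params={"temperature": 0.0},
)
print(gateway("What is MLflow?"))
```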
diff --git a/docs/docs/integrations/providers/mlflow_tracking.ipynb b/docs/docs/integrations/providers/mlflow_tracking.ipynb
index 5d78b429132..0797315466b 100644
--- a/docs/docs/integrations/providers/mlflow_tracking.ipynb
+++ b/docs/docs/integrations/providers/mlflow_tracking.ipynb
@@ -7,7 +7,7 @@
"source": [
"# MLflow\n",
"\n",
- ">[MLflow](https://www.mlflow.org/docs/latest/what-is-mlflow.html) is a versatile, expandable, open-source platform for managing workflows and artifacts across the machine learning lifecycle. It has built-in integrations with many popular ML libraries, but can be used with any library, algorithm, or deployment tool. It is designed to be extensible, so you can write plugins to support new workflows, libraries, and tools.\n",
+ ">[MLflow](https://www.mlflow.org/docs/latest/what-is-mlflow) is a versatile, expandable, open-source platform for managing workflows and artifacts across the machine learning lifecycle. It has built-in integrations with many popular ML libraries, but can be used with any library, algorithm, or deployment tool. It is designed to be extensible, so you can write plugins to support new workflows, libraries, and tools.\n",
"\n",
"This notebook goes over how to track your LangChain experiments into your `MLflow Server`"
]
diff --git a/docs/docs/integrations/providers/momento.mdx b/docs/docs/integrations/providers/momento.mdx
index da21a2296fe..71f857148d2 100644
--- a/docs/docs/integrations/providers/momento.mdx
+++ b/docs/docs/integrations/providers/momento.mdx
@@ -50,10 +50,10 @@ Momento can be used as a distributed memory store for LLMs.
### Chat Message History Memory
-See [this notebook](/docs/integrations/memory/momento_chat_message_history.html) for a walkthrough of how to use Momento as a memory store for chat message history.
+See [this notebook](/docs/integrations/memory/momento_chat_message_history) for a walkthrough of how to use Momento as a memory store for chat message history.
## Vector Store
Momento Vector Index (MVI) can be used as a vector store.
-See [this notebook](/docs/integrations/vectorstores/momento_vector_index.html) for a walkthrough of how to use MVI as a vector store.
+See [this notebook](/docs/integrations/vectorstores/momento_vector_index) for a walkthrough of how to use MVI as a vector store.
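A minimal sketch of the chat-history store, assuming `MOMENTO_AUTH_TOKEN` is set; the session ID and cache name are placeholders:

```python
from datetime import timedelta
from langchain.memory import MomentoChatMessageHistory

# Messages expire after the given TTL.
history = MomentoChatMessageHistory.from_client_params(
    "my-session", "langchain", timedelta(days=1)
)
history.add_user_message("hi!")
```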
diff --git a/docs/docs/integrations/providers/motherduck.mdx b/docs/docs/integrations/providers/motherduck.mdx
index 69d426059e9..05b91b54e22 100644
--- a/docs/docs/integrations/providers/motherduck.mdx
+++ b/docs/docs/integrations/providers/motherduck.mdx
@@ -31,7 +31,7 @@ db = SQLDatabase.from_uri(conn_str)
db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db, verbose=True)
```
-From here, see the [SQL Chain](/docs/use_cases/tabular/sqlite.html) documentation on how to use.
+From here, see the [SQL Chain](/docs/use_cases/tabular/sqlite) documentation on how to use it.
## LLMCache
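The cache setup under this heading pairs the same connection string with a SQLAlchemy-backed cache; a minimal sketch, reusing `conn_str` from the snippet above:

```python
import langchain
from langchain.cache import SQLAlchemyCache
from sqlalchemy import create_engine

# Back the global LLM cache with the Motherduck connection.
eng = create_engine(conn_str)
langchain.llm_cache = SQLAlchemyCache(engine=eng)
```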
diff --git a/docs/docs/integrations/providers/myscale.mdx b/docs/docs/integrations/providers/myscale.mdx
index 27a53b8002d..367b6d36281 100644
--- a/docs/docs/integrations/providers/myscale.mdx
+++ b/docs/docs/integrations/providers/myscale.mdx
@@ -63,4 +63,4 @@ To import this vectorstore:
from langchain.vectorstores import MyScale
```
-For a more detailed walkthrough of the MyScale wrapper, see [this notebook](/docs/integrations/vectorstores/myscale.html)
+For a more detailed walkthrough of the MyScale wrapper, see [this notebook](/docs/integrations/vectorstores/myscale)
diff --git a/docs/docs/integrations/providers/neo4j.mdx b/docs/docs/integrations/providers/neo4j.mdx
index a507422903d..232de5c53f7 100644
--- a/docs/docs/integrations/providers/neo4j.mdx
+++ b/docs/docs/integrations/providers/neo4j.mdx
@@ -29,7 +29,7 @@ To import this vectorstore:
from langchain.vectorstores import Neo4jVector
```
-For a more detailed walkthrough of the Neo4j vector index wrapper, see [documentation](/docs/integrations/vectorstores/neo4jvector.html)
+For a more detailed walkthrough of the Neo4j vector index wrapper, see [documentation](/docs/integrations/vectorstores/neo4jvector)
### GraphCypherQAChain
@@ -41,7 +41,7 @@ from langchain.graphs import Neo4jGraph
from langchain.chains import GraphCypherQAChain
```
-For a more detailed walkthrough of Cypher generating chain, see [documentation](/docs/use_cases/graph/graph_cypher_qa.html)
+For a more detailed walkthrough of Cypher generating chain, see [documentation](/docs/use_cases/graph/graph_cypher_qa)
### Constructing a knowledge graph from text
@@ -55,4 +55,4 @@ from langchain.graphs import Neo4jGraph
from langchain_experimental.graph_transformers.diffbot import DiffbotGraphTransformer
```
-For a more detailed walkthrough generating graphs from text, see [documentation](/docs/use_cases/graph/diffbot_graphtransformer.html)
+For a more detailed walkthrough generating graphs from text, see [documentation](/docs/use_cases/graph/diffbot_graphtransformer)
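A minimal sketch of the Cypher-generating chain described above; the connection details are placeholders for a running Neo4j instance:

```python
from langchain.chat_models import ChatOpenAI
from langchain.graphs import Neo4jGraph
from langchain.chains import GraphCypherQAChain

graph = Neo4jGraph(url="bolt://localhost:7687", username="neo4j", password="password")
# The chain generates Cypher from the question, runs it, and answers from the result.
chain = GraphCypherQAChain.from_llm(ChatOpenAI(temperature=0), graph=graph, verbose=True)
chain.run("Who played in Top Gun?")
```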
diff --git a/docs/docs/integrations/providers/notion.mdx b/docs/docs/integrations/providers/notion.mdx
index 216a88c9f9b..6e16b1233ee 100644
--- a/docs/docs/integrations/providers/notion.mdx
+++ b/docs/docs/integrations/providers/notion.mdx
@@ -12,14 +12,14 @@ All instructions are in examples below.
We have two different loaders: `NotionDirectoryLoader` and `NotionDBLoader`.
-See a [usage example for the NotionDirectoryLoader](/docs/integrations/document_loaders/notion.html).
+See a [usage example for the NotionDirectoryLoader](/docs/integrations/document_loaders/notion).
```python
from langchain.document_loaders import NotionDirectoryLoader
```
-See a [usage example for the NotionDBLoader](/docs/integrations/document_loaders/notiondb.html).
+See a [usage example for the NotionDBLoader](/docs/integrations/document_loaders/notiondb).
```python
diff --git a/docs/docs/integrations/providers/openllm.mdx b/docs/docs/integrations/providers/openllm.mdx
index a6ec980f664..1f24af8ed22 100644
--- a/docs/docs/integrations/providers/openllm.mdx
+++ b/docs/docs/integrations/providers/openllm.mdx
@@ -67,4 +67,4 @@ llm("What is the difference between a duck and a goose? And why there are so man
### Usage
For a more detailed walkthrough of the OpenLLM Wrapper, see the
-[example notebook](/docs/integrations/llms/openllm.html)
+[example notebook](/docs/integrations/llms/openllm)
diff --git a/docs/docs/integrations/providers/opensearch.mdx b/docs/docs/integrations/providers/opensearch.mdx
index 2761548c818..6e428635dc3 100644
--- a/docs/docs/integrations/providers/opensearch.mdx
+++ b/docs/docs/integrations/providers/opensearch.mdx
@@ -18,4 +18,4 @@ To import this vectorstore:
from langchain.vectorstores import OpenSearchVectorSearch
```
-For a more detailed walkthrough of the OpenSearch wrapper, see [this notebook](/docs/integrations/vectorstores/opensearch.html)
+For a more detailed walkthrough of the OpenSearch wrapper, see [this notebook](/docs/integrations/vectorstores/opensearch)
diff --git a/docs/docs/integrations/providers/openweathermap.mdx b/docs/docs/integrations/providers/openweathermap.mdx
index fa346cf2bc6..b38fe5db264 100644
--- a/docs/docs/integrations/providers/openweathermap.mdx
+++ b/docs/docs/integrations/providers/openweathermap.mdx
@@ -29,7 +29,7 @@ There exists an OpenWeatherMapAPIWrapper utility which wraps this API. To import this utility:
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper
```
-For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/openweathermap.html).
+For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/openweathermap).
### Tool
diff --git a/docs/docs/integrations/providers/pgvector.mdx b/docs/docs/integrations/providers/pgvector.mdx
index d632a8959b3..e7cefb5b791 100644
--- a/docs/docs/integrations/providers/pgvector.mdx
+++ b/docs/docs/integrations/providers/pgvector.mdx
@@ -26,4 +26,4 @@ from langchain.vectorstores.pgvector import PGVector
### Usage
-For a more detailed walkthrough of the PGVector Wrapper, see [this notebook](/docs/integrations/vectorstores/pgvector.html)
+For a more detailed walkthrough of the PGVector Wrapper, see [this notebook](/docs/integrations/vectorstores/pgvector)
diff --git a/docs/docs/integrations/providers/pinecone.mdx b/docs/docs/integrations/providers/pinecone.mdx
index 3dd1e55e69d..61a95527609 100644
--- a/docs/docs/integrations/providers/pinecone.mdx
+++ b/docs/docs/integrations/providers/pinecone.mdx
@@ -21,4 +21,4 @@ whether for semantic search or example selection.
from langchain.vectorstores import Pinecone
```
-For a more detailed walkthrough of the Pinecone vectorstore, see [this notebook](/docs/integrations/vectorstores/pinecone.html)
+For a more detailed walkthrough of the Pinecone vectorstore, see [this notebook](/docs/integrations/vectorstores/pinecone)
diff --git a/docs/docs/integrations/providers/promptlayer.mdx b/docs/docs/integrations/providers/promptlayer.mdx
index 1a7b1cb9262..3f2a74ffc06 100644
--- a/docs/docs/integrations/providers/promptlayer.mdx
+++ b/docs/docs/integrations/providers/promptlayer.mdx
@@ -40,10 +40,10 @@ for res in llm_results.generations:
```
You can use the PromptLayer request ID to add a prompt, score, or other metadata to your request. [Read more about it here](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9).
-This LLM is identical to the [OpenAI](/docs/ecosystem/integrations/openai.html) LLM, except that
+This LLM is identical to the [OpenAI](/docs/ecosystem/integrations/openai) LLM, except that
- all your requests will be logged to your PromptLayer account
- you can add `pl_tags` when instantiating to tag your requests on PromptLayer
- you can add `return_pl_id` when instantiating to return a PromptLayer request id to use [while tracking requests](https://magniv.notion.site/Track-4deee1b1f7a34c1680d085f82567dab9).
-PromptLayer also provides native wrappers for [`PromptLayerChatOpenAI`](/docs/integrations/chat/promptlayer_chatopenai.html) and `PromptLayerOpenAIChat`
+PromptLayer also provides native wrappers for [`PromptLayerChatOpenAI`](/docs/integrations/chat/promptlayer_chatopenai) and `PromptLayerOpenAIChat`
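A minimal sketch of the tagging and scoring flow described above, assuming `PROMPTLAYER_API_KEY` and `OPENAI_API_KEY` are set:

```python
import promptlayer
from langchain.llms import PromptLayerOpenAI

llm = PromptLayerOpenAI(pl_tags=["demo"], return_pl_id=True)
llm_results = llm.generate(["Tell me a joke"])
for res in llm_results.generations:
    pl_request_id = res[0].generation_info["pl_request_id"]
    # Attach a score to the logged request on PromptLayer.
    promptlayer.track.score(request_id=pl_request_id, score=100)
```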
diff --git a/docs/docs/integrations/providers/providers/semadb.mdx b/docs/docs/integrations/providers/providers/semadb.mdx
index 4978b89d63b..700f44fab42 100644
--- a/docs/docs/integrations/providers/providers/semadb.mdx
+++ b/docs/docs/integrations/providers/providers/semadb.mdx
@@ -16,4 +16,4 @@ There is a basic wrapper around `SemaDB` collections allowing you to use it as a vectorstore.
from langchain.vectorstores import SemaDB
```
-You can follow a tutorial on how to use the wrapper in [this notebook](/docs/integrations/vectorstores/semadb.html).
\ No newline at end of file
+You can follow a tutorial on how to use the wrapper in [this notebook](/docs/integrations/vectorstores/semadb).
\ No newline at end of file
diff --git a/docs/docs/integrations/providers/psychic.mdx b/docs/docs/integrations/providers/psychic.mdx
index 0bae7e5b21c..c29fe6e3316 100644
--- a/docs/docs/integrations/providers/psychic.mdx
+++ b/docs/docs/integrations/providers/psychic.mdx
@@ -16,7 +16,7 @@ view these connections from the dashboard and retrieve data using the server-sid
1. Create an account in the [dashboard](https://dashboard.psychic.dev/).
2. Use the [react library](https://docs.psychic.dev/sidekick-link) to add the Psychic link modal to your frontend react app. You will use this to connect the SaaS apps.
-3. Once you have created a connection, you can use the `PsychicLoader` by following the [example notebook](/docs/integrations/document_loaders/psychic.html)
+3. Once you have created a connection, you can use the `PsychicLoader` by following the [example notebook](/docs/integrations/document_loaders/psychic)
## Advantages vs Other Document Loaders
diff --git a/docs/docs/integrations/providers/qdrant.mdx b/docs/docs/integrations/providers/qdrant.mdx
index 33dfcb266cb..ace4c34f9d8 100644
--- a/docs/docs/integrations/providers/qdrant.mdx
+++ b/docs/docs/integrations/providers/qdrant.mdx
@@ -24,4 +24,4 @@ To import this vectorstore:
from langchain.vectorstores import Qdrant
```
-For a more detailed walkthrough of the Qdrant wrapper, see [this notebook](/docs/integrations/vectorstores/qdrant.html)
+For a more detailed walkthrough of the Qdrant wrapper, see [this notebook](/docs/integrations/vectorstores/qdrant)
diff --git a/docs/docs/integrations/providers/redis.mdx b/docs/docs/integrations/providers/redis.mdx
index b2995325793..b9e4c675321 100644
--- a/docs/docs/integrations/providers/redis.mdx
+++ b/docs/docs/integrations/providers/redis.mdx
@@ -103,7 +103,7 @@ To import this vectorstore:
from langchain.vectorstores import Redis
```
-For a more detailed walkthrough of the Redis vectorstore wrapper, see [this notebook](/docs/integrations/vectorstores/redis.html).
+For a more detailed walkthrough of the Redis vectorstore wrapper, see [this notebook](/docs/integrations/vectorstores/redis).
### Retriever
@@ -114,7 +114,7 @@ Redis can be used to persist LLM conversations.
#### Vector Store Retriever Memory
-For a more detailed walkthrough of the `VectorStoreRetrieverMemory` wrapper, see [this notebook](/docs/modules/memory/types/vectorstore_retriever_memory.html).
+For a more detailed walkthrough of the `VectorStoreRetrieverMemory` wrapper, see [this notebook](/docs/modules/memory/types/vectorstore_retriever_memory).
#### Chat Message History Memory
-For a detailed example of Redis to cache conversation message history, see [this notebook](/docs/integrations/memory/redis_chat_message_history.html).
+For a detailed example of Redis to cache conversation message history, see [this notebook](/docs/integrations/memory/redis_chat_message_history).
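A minimal sketch of the chat-message-history store; the session ID and URL are placeholders for a running Redis instance:

```python
from langchain.memory import RedisChatMessageHistory

history = RedisChatMessageHistory(session_id="my-session", url="redis://localhost:6379/0")
history.add_user_message("hi!")
history.add_ai_message("hello!")
```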
diff --git a/docs/docs/integrations/providers/runhouse.mdx b/docs/docs/integrations/providers/runhouse.mdx
index 28b6d7eeb38..8039882b0bc 100644
--- a/docs/docs/integrations/providers/runhouse.mdx
+++ b/docs/docs/integrations/providers/runhouse.mdx
@@ -15,7 +15,7 @@ custom LLMs, you can use the `SelfHostedPipeline` parent class.
from langchain.llms import SelfHostedPipeline, SelfHostedHuggingFaceLLM
```
-For a more detailed walkthrough of the Self-hosted LLMs, see [this notebook](/docs/integrations/llms/runhouse.html)
+For a more detailed walkthrough of the Self-hosted LLMs, see [this notebook](/docs/integrations/llms/runhouse)
## Self-hosted Embeddings
There are several ways to use self-hosted embeddings with LangChain via Runhouse.
@@ -26,4 +26,4 @@ the `SelfHostedEmbedding` class.
from langchain.llms import SelfHostedPipeline, SelfHostedHuggingFaceLLM
```
-For a more detailed walkthrough of the Self-hosted Embeddings, see [this notebook](/docs/integrations/text_embedding/self-hosted.html)
+For a more detailed walkthrough of the Self-hosted Embeddings, see [this notebook](/docs/integrations/text_embedding/self-hosted)
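A minimal sketch of the self-hosted setup, assuming a Runhouse account with cloud credentials; the cluster name and instance type are placeholders:

```python
import runhouse as rh
from langchain.llms import SelfHostedHuggingFaceLLM

# Runhouse provisions (or attaches to) the GPU box that will host the model.
gpu = rh.cluster(name="rh-a10x", instance_type="A10G:1", use_spot=False)
llm = SelfHostedHuggingFaceLLM(model_id="gpt2", hardware=gpu)
```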
diff --git a/docs/docs/integrations/providers/serpapi.mdx b/docs/docs/integrations/providers/serpapi.mdx
index e692492c02c..0e31f0e6cf6 100644
--- a/docs/docs/integrations/providers/serpapi.mdx
+++ b/docs/docs/integrations/providers/serpapi.mdx
@@ -17,7 +17,7 @@ There exists a SerpAPI utility which wraps this API. To import this utility:
from langchain.utilities import SerpAPIWrapper
```
-For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/serpapi.html).
+For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/serpapi).
### Tool
diff --git a/docs/docs/integrations/providers/sklearn.mdx b/docs/docs/integrations/providers/sklearn.mdx
index 341bb671d0a..5dedf25391e 100644
--- a/docs/docs/integrations/providers/sklearn.mdx
+++ b/docs/docs/integrations/providers/sklearn.mdx
@@ -19,4 +19,4 @@ To import this vectorstore:
from langchain.vectorstores import SKLearnVectorStore
```
-For a more detailed walkthrough of the SKLearnVectorStore wrapper, see [this notebook](/docs/integrations/vectorstores/sklearn.html).
+For a more detailed walkthrough of the SKLearnVectorStore wrapper, see [this notebook](/docs/integrations/vectorstores/sklearn).
diff --git a/docs/docs/integrations/providers/spacy.mdx b/docs/docs/integrations/providers/spacy.mdx
index ab9b6858985..4ff18381697 100644
--- a/docs/docs/integrations/providers/spacy.mdx
+++ b/docs/docs/integrations/providers/spacy.mdx
@@ -13,7 +13,7 @@ pip install spacy
## Text Splitter
-See a [usage example](/docs/modules/data_connection/document_transformers/text_splitters/split_by_token.html#spacy).
+See a [usage example](/docs/modules/data_connection/document_transformers/text_splitters/split_by_token#spacy).
```python
from langchain.text_splitter import SpacyTextSplitter
diff --git a/docs/docs/integrations/providers/spreedly.mdx b/docs/docs/integrations/providers/spreedly.mdx
index 5790ef2e474..e7996b62242 100644
--- a/docs/docs/integrations/providers/spreedly.mdx
+++ b/docs/docs/integrations/providers/spreedly.mdx
@@ -4,7 +4,7 @@
## Installation and Setup
-See [setup instructions](/docs/integrations/document_loaders/spreedly.html).
+See [setup instructions](/docs/integrations/document_loaders/spreedly).
## Document Loader
diff --git a/docs/docs/integrations/providers/stripe.mdx b/docs/docs/integrations/providers/stripe.mdx
index 923e77cad2f..05cc6d46397 100644
--- a/docs/docs/integrations/providers/stripe.mdx
+++ b/docs/docs/integrations/providers/stripe.mdx
@@ -5,7 +5,7 @@
## Installation and Setup
-See [setup instructions](/docs/integrations/document_loaders/stripe.html).
+See [setup instructions](/docs/integrations/document_loaders/stripe).
## Document Loader
diff --git a/docs/docs/integrations/providers/tair.mdx b/docs/docs/integrations/providers/tair.mdx
index 4bfcd769493..8a0e6ad24cc 100644
--- a/docs/docs/integrations/providers/tair.mdx
+++ b/docs/docs/integrations/providers/tair.mdx
@@ -19,4 +19,4 @@ To import this vectorstore:
from langchain.vectorstores import Tair
```
-For a more detailed walkthrough of the Tair wrapper, see [this notebook](/docs/integrations/vectorstores/tair.html)
+For a more detailed walkthrough of the Tair wrapper, see [this notebook](/docs/integrations/vectorstores/tair)
diff --git a/docs/docs/integrations/providers/telegram.mdx b/docs/docs/integrations/providers/telegram.mdx
index b9a8bec0ea4..25ebd990b5c 100644
--- a/docs/docs/integrations/providers/telegram.mdx
+++ b/docs/docs/integrations/providers/telegram.mdx
@@ -5,7 +5,7 @@
## Installation and Setup
-See [setup instructions](/docs/integrations/document_loaders/telegram.html).
+See [setup instructions](/docs/integrations/document_loaders/telegram).
## Document Loader
diff --git a/docs/docs/integrations/providers/tencentvectordb.mdx b/docs/docs/integrations/providers/tencentvectordb.mdx
index 0ce5f1142ea..e1db644f471 100644
--- a/docs/docs/integrations/providers/tencentvectordb.mdx
+++ b/docs/docs/integrations/providers/tencentvectordb.mdx
@@ -12,4 +12,4 @@ To import this vectorstore:
from langchain.vectorstores import TencentVectorDB
```
-For a more detailed walkthrough of the TencentVectorDB wrapper, see [this notebook](/docs/integrations/vectorstores/tencentvectordb.html)
+For a more detailed walkthrough of the TencentVectorDB wrapper, see [this notebook](/docs/integrations/vectorstores/tencentvectordb)
diff --git a/docs/docs/integrations/providers/trello.mdx b/docs/docs/integrations/providers/trello.mdx
index 99bf2cf4cea..0aecb76cfca 100644
--- a/docs/docs/integrations/providers/trello.mdx
+++ b/docs/docs/integrations/providers/trello.mdx
@@ -10,7 +10,7 @@
pip install py-trello beautifulsoup4
```
-See [setup instructions](/docs/integrations/document_loaders/trello.html).
+See [setup instructions](/docs/integrations/document_loaders/trello).
## Document Loader
diff --git a/docs/docs/integrations/providers/typesense.mdx b/docs/docs/integrations/providers/typesense.mdx
index 62d5581139e..472d2a40fd3 100644
--- a/docs/docs/integrations/providers/typesense.mdx
+++ b/docs/docs/integrations/providers/typesense.mdx
@@ -1,7 +1,7 @@
# Typesense
> [Typesense](https://typesense.org) is an open-source, in-memory search engine that you can either
-> [self-host](https://typesense.org/docs/guide/install-typesense.html#option-2-local-machine-self-hosting) or run
+> [self-host](https://typesense.org/docs/guide/install-typesense#option-2-local-machine-self-hosting) or run
> on [Typesense Cloud](https://cloud.typesense.org/).
> `Typesense` focuses on performance by storing the entire index in RAM (with a backup on disk) and also
> focuses on providing an out-of-the-box developer experience by simplifying available options and setting good defaults.
diff --git a/docs/docs/integrations/providers/upstash.mdx b/docs/docs/integrations/providers/upstash.mdx
index 66f4c3df468..ff39b876497 100644
--- a/docs/docs/integrations/providers/upstash.mdx
+++ b/docs/docs/integrations/providers/upstash.mdx
@@ -39,4 +39,4 @@ langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN))
Upstash Redis can be used to persist LLM conversations.
#### Chat Message History Memory
-An example of Upstash Redis for caching conversation message history can be seen in [this notebook](/docs/integrations/memory/upstash_redis_chat_message_history.html).
+An example of Upstash Redis for caching conversation message history can be seen in [this notebook](/docs/integrations/memory/upstash_redis_chat_message_history).
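A minimal sketch of the chat-history store, reusing the `URL` and `TOKEN` values from the cache snippet above; the session ID is a placeholder:

```python
from langchain.memory import UpstashRedisChatMessageHistory

# Messages expire after the given TTL (seconds).
history = UpstashRedisChatMessageHistory(
    url=URL, token=TOKEN, ttl=3600, session_id="my-session"
)
history.add_user_message("hi!")
```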
diff --git a/docs/docs/integrations/providers/vectara/vectara_chat.ipynb b/docs/docs/integrations/providers/vectara/vectara_chat.ipynb
index 206c4d42323..1f38eb9782b 100644
--- a/docs/docs/integrations/providers/vectara/vectara_chat.ipynb
+++ b/docs/docs/integrations/providers/vectara/vectara_chat.ipynb
@@ -7,7 +7,7 @@
"source": [
"# Chat Over Documents with Vectara\n",
"\n",
- "This notebook is based on the [chat_vector_db](https://github.com/langchain-ai/langchain/blob/master/docs/modules/chains/index_examples/chat_vector_db.html) notebook, but using Vectara as the vector database."
+ "This notebook is based on the [chat_vector_db](https://github.com/langchain-ai/langchain/blob/master/docs/modules/chains/index_examples/chat_vector_db) notebook, but using Vectara as the vector database."
]
},
{
diff --git a/docs/docs/integrations/providers/weaviate.mdx b/docs/docs/integrations/providers/weaviate.mdx
index 1c358ec6c64..9f5bf64cd6c 100644
--- a/docs/docs/integrations/providers/weaviate.mdx
+++ b/docs/docs/integrations/providers/weaviate.mdx
@@ -35,4 +35,4 @@ To import this vectorstore:
from langchain.vectorstores import Weaviate
```
-For a more detailed walkthrough of the Weaviate wrapper, see [this notebook](/docs/integrations/vectorstores/weaviate.html)
+For a more detailed walkthrough of the Weaviate wrapper, see [this notebook](/docs/integrations/vectorstores/weaviate)
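As a quick taste before that walkthrough, a minimal sketch of standing up the store (the local URL and the `by_text=False` flag are illustrative, and `docs` is assumed to be a list of split `Document`s):

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Weaviate

# Index the documents in a running Weaviate instance, then query it.
db = Weaviate.from_documents(
    docs, OpenAIEmbeddings(), weaviate_url="http://localhost:8080", by_text=False
)
results = db.similarity_search("What did the president say?", k=4)
```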
diff --git a/docs/docs/integrations/providers/wolfram_alpha.mdx b/docs/docs/integrations/providers/wolfram_alpha.mdx
index 5c98a52be45..5e25d5e2dbf 100644
--- a/docs/docs/integrations/providers/wolfram_alpha.mdx
+++ b/docs/docs/integrations/providers/wolfram_alpha.mdx
@@ -25,7 +25,7 @@ There exists a WolframAlphaAPIWrapper utility which wraps this API. To import th
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
```
-For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/wolfram_alpha.html).
+For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/wolfram_alpha).
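A minimal sketch of the wrapper in use (assumes a Wolfram Alpha developer App ID, exported here as a placeholder credential):

```python
import os

from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper

os.environ["WOLFRAM_ALPHA_APPID"] = "your-app-id"  # placeholder credential

wolfram = WolframAlphaAPIWrapper()
print(wolfram.run("What is 2x+5 = -3x + 7?"))
```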
### Tool
diff --git a/docs/docs/integrations/providers/xinference.mdx b/docs/docs/integrations/providers/xinference.mdx
index 41a7f44bbc8..0029deef21c 100644
--- a/docs/docs/integrations/providers/xinference.mdx
+++ b/docs/docs/integrations/providers/xinference.mdx
@@ -93,10 +93,10 @@ llm(
### Usage
For more information and detailed examples, refer to the
-[example for xinference LLMs](/docs/integrations/llms/xinference.html)
+[example for xinference LLMs](/docs/integrations/llms/xinference)
### Embeddings
Xinference also supports embedding queries and documents. See
-[example for xinference embeddings](/docs/integrations/text_embedding/xinference.html)
+[example for xinference embeddings](/docs/integrations/text_embedding/xinference)
for a more detailed demo.
\ No newline at end of file
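A minimal sketch of the embedding side (the server URL is the Xinference default; the `model_uid` is a placeholder for a model you have launched with `xinference launch`):

```python
from langchain.embeddings import XinferenceEmbeddings

xinference = XinferenceEmbeddings(
    server_url="http://0.0.0.0:9997",
    model_uid="<uid-returned-by-xinference-launch>",  # placeholder
)
query_vector = xinference.embed_query("What is Xinference?")
doc_vectors = xinference.embed_documents(["Xinference serves open-source models."])
```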
diff --git a/docs/docs/integrations/providers/zilliz.mdx b/docs/docs/integrations/providers/zilliz.mdx
index e37123eb947..b791adeb6c3 100644
--- a/docs/docs/integrations/providers/zilliz.mdx
+++ b/docs/docs/integrations/providers/zilliz.mdx
@@ -19,4 +19,4 @@ whether for semantic search or example selection.
from langchain.vectorstores import Milvus
```
-For a more detailed walkthrough of the Miluvs wrapper, see [this notebook](/docs/integrations/vectorstores/zilliz.html)
+For a more detailed walkthrough of the Milvus wrapper, see [this notebook](/docs/integrations/vectorstores/zilliz)
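A minimal sketch of pointing that wrapper at a Zilliz Cloud cluster (the `connection_args` keys mirror the Milvus integration; the endpoint, key, and `docs` list are placeholders):

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Milvus

vector_db = Milvus.from_documents(
    docs,  # a list of Documents prepared earlier
    OpenAIEmbeddings(),
    connection_args={
        "uri": "https://<your-cluster>.zillizcloud.com",  # placeholder endpoint
        "token": "<api-key>",  # placeholder credential
        "secure": True,
    },
)
```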
diff --git a/docs/docs/integrations/toolkits/openapi_nla.ipynb b/docs/docs/integrations/toolkits/openapi_nla.ipynb
index a731e282d09..09f54b5d5ac 100644
--- a/docs/docs/integrations/toolkits/openapi_nla.ipynb
+++ b/docs/docs/integrations/toolkits/openapi_nla.ipynb
@@ -11,7 +11,7 @@
"\n",
"This notebook demonstrates a sample composition of the `Speak`, `Klarna`, and `Spoonacluar` APIs.\n",
"\n",
- "For a detailed walkthrough of the OpenAPI chains wrapped within the NLAToolkit, see the [OpenAPI Operation Chain](/docs/use_cases/apis/openapi.html) notebook.\n",
+ "For a detailed walkthrough of the OpenAPI chains wrapped within the NLAToolkit, see the [OpenAPI Operation Chain](/docs/use_cases/apis/openapi) notebook.\n",
"\n",
"### First, import dependencies and load the LLM"
]
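A minimal sketch of that composition for a single API (the spec URL shown is an assumption about where Speak hosts its OpenAPI file):

```python
from langchain.agents.agent_toolkits import NLAToolkit
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
# Each OpenAPI operation becomes a natural-language tool the agent can call.
speak_toolkit = NLAToolkit.from_llm_and_url(llm, "https://api.speak.com/openapi.yaml")
tools = speak_toolkit.get_tools()
```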
diff --git a/docs/docs/integrations/toolkits/spark.ipynb b/docs/docs/integrations/toolkits/spark.ipynb
index d55075c2b00..ce39f54a26f 100644
--- a/docs/docs/integrations/toolkits/spark.ipynb
+++ b/docs/docs/integrations/toolkits/spark.ipynb
@@ -379,7 +379,7 @@
"agent.run(\n",
" \"\"\"\n",
"who bought the most expensive ticket?\n",
- "You can find all supported function types in https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/dataframe.html\n",
+ "You can find all supported function types in https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/dataframe\n",
"\"\"\"\n",
")"
]
diff --git a/docs/docs/integrations/tools/apify.ipynb b/docs/docs/integrations/tools/apify.ipynb
index d5cc8571d2d..20656982e1f 100644
--- a/docs/docs/integrations/tools/apify.ipynb
+++ b/docs/docs/integrations/tools/apify.ipynb
@@ -6,7 +6,7 @@
"source": [
"# Apify\n",
"\n",
- "This notebook shows how to use the [Apify integration](/docs/ecosystem/integrations/apify.html) for LangChain.\n",
+ "This notebook shows how to use the [Apify integration](/docs/ecosystem/integrations/apify) for LangChain.\n",
"\n",
"[Apify](https://apify.com) is a cloud platform for web scraping and data extraction,\n",
"which provides an [ecosystem](https://apify.com/store) of more than a thousand\n",
@@ -72,7 +72,7 @@
"source": [
"Then run the Actor, wait for it to finish, and fetch its results from the Apify dataset into a LangChain document loader.\n",
"\n",
- "Note that if you already have some results in an Apify dataset, you can load them directly using `ApifyDatasetLoader`, as shown in [this notebook](/docs/integrations/document_loaders/apify_dataset.html). In that notebook, you'll also find the explanation of the `dataset_mapping_function`, which is used to map fields from the Apify dataset records to LangChain `Document` fields."
+ "Note that if you already have some results in an Apify dataset, you can load them directly using `ApifyDatasetLoader`, as shown in [this notebook](/docs/integrations/document_loaders/apify_dataset). In that notebook, you'll also find the explanation of the `dataset_mapping_function`, which is used to map fields from the Apify dataset records to LangChain `Document` fields."
]
},
{
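For context, a minimal sketch of the wrapper call this section describes (requires the `apify-client` package and an `APIFY_API_TOKEN` environment variable; the start URL and dataset field names are illustrative):

```python
from langchain.schema import Document
from langchain.utilities import ApifyWrapper

apify = ApifyWrapper()
# Run the crawler Actor and map each dataset item onto a LangChain Document.
loader = apify.call_actor(
    actor_id="apify/website-content-crawler",
    run_input={"startUrls": [{"url": "https://python.langchain.com/"}]},
    dataset_mapping_function=lambda item: Document(
        page_content=item["text"] or "", metadata={"source": item["url"]}
    ),
)
docs = loader.load()
```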
diff --git a/docs/docs/integrations/vectorstores/typesense.ipynb b/docs/docs/integrations/vectorstores/typesense.ipynb
index 94655a629c2..7bb0c5caf51 100644
--- a/docs/docs/integrations/vectorstores/typesense.ipynb
+++ b/docs/docs/integrations/vectorstores/typesense.ipynb
@@ -6,7 +6,7 @@
"source": [
"# Typesense\n",
"\n",
- "> [Typesense](https://typesense.org) is an open-source, in-memory search engine, that you can either [self-host](https://typesense.org/docs/guide/install-typesense.html#option-2-local-machine-self-hosting) or run on [Typesense Cloud](https://cloud.typesense.org/).\n",
+ "> [Typesense](https://typesense.org) is an open-source, in-memory search engine, that you can either [self-host](https://typesense.org/docs/guide/install-typesense#option-2-local-machine-self-hosting) or run on [Typesense Cloud](https://cloud.typesense.org/).\n",
">\n",
"> Typesense focuses on performance by storing the entire index in RAM (with a backup on disk) and also focuses on providing an out-of-the-box developer experience by simplifying available options and setting good defaults.\n",
">\n",
diff --git a/docs/docs/modules/agents/how_to/custom_agent_with_tool_retrieval.ipynb b/docs/docs/modules/agents/how_to/custom_agent_with_tool_retrieval.ipynb
index 5471eec006f..8f3a0b4c6f2 100644
--- a/docs/docs/modules/agents/how_to/custom_agent_with_tool_retrieval.ipynb
+++ b/docs/docs/modules/agents/how_to/custom_agent_with_tool_retrieval.ipynb
@@ -7,7 +7,7 @@
"source": [
"# Custom agent with tool retrieval\n",
"\n",
- "This notebook builds off of [this notebook](/docs/modules/agents/how_to/custom_llm_agent.html) and assumes familiarity with how agents work.\n",
+ "This notebook builds off of [this notebook](/docs/modules/agents/how_to/custom_llm_agent) and assumes familiarity with how agents work.\n",
"\n",
"The novel idea introduced in this notebook is the idea of using retrieval to select the set of tools to use to answer an agent query. This is useful when you have many many tools to select from. You cannot put the description of all the tools in the prompt (because of context length issues) so instead you dynamically select the N tools you do want to consider using at run time.\n",
"\n",
diff --git a/docs/docs/modules/agents/how_to/sharedmemory_for_tools.ipynb b/docs/docs/modules/agents/how_to/sharedmemory_for_tools.ipynb
index 2d0d4ed2d26..7c831554da1 100644
--- a/docs/docs/modules/agents/how_to/sharedmemory_for_tools.ipynb
+++ b/docs/docs/modules/agents/how_to/sharedmemory_for_tools.ipynb
@@ -9,8 +9,8 @@
"\n",
"This notebook goes over adding memory to **both** an Agent and its tools. Before going through this notebook, please walk through the following notebooks, as this will build on top of both of them:\n",
"\n",
- "- [Adding memory to an LLM Chain](/docs/modules/memory/integrations/adding_memory.html)\n",
- "- [Custom Agents](/docs/modules/agents/how_to/custom_agent.html)\n",
+ "- [Adding memory to an LLM Chain](/docs/modules/memory/integrations/adding_memory)\n",
+ "- [Custom Agents](/docs/modules/agents/how_to/custom_agent)\n",
"\n",
"We are going to create a custom Agent. The agent has access to a conversation memory, search tool, and a summarization tool. The summarization tool also needs access to the conversation memory."
]
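The key move, sketched below, is sharing one memory object: the agent writes to it, while the summarization tool receives a read-only view (the prompt text and chain wiring are illustrative):

```python
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
from langchain.prompts import PromptTemplate

prompt = PromptTemplate(
    input_variables=["chat_history"],
    template="Summarize the conversation so far:\n\n{chat_history}\n\nSummary:",
)

memory = ConversationBufferMemory(memory_key="chat_history")
readonly = ReadOnlySharedMemory(memory=memory)  # tools can read but not write
summary_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt, memory=readonly)
```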
diff --git a/docs/docs/modules/callbacks/custom_chain.mdx b/docs/docs/modules/callbacks/custom_chain.mdx
index bc64de30412..6ec068eea23 100644
--- a/docs/docs/modules/callbacks/custom_chain.mdx
+++ b/docs/docs/modules/callbacks/custom_chain.mdx
@@ -1,6 +1,6 @@
# Callbacks for custom chains
When you create a custom chain you can easily set it up to use the same callback system as all the built-in chains.
-`_call`, `_generate`, `_run`, and equivalent async methods on Chains / LLMs / Chat Models / Agents / Tools now receive a 2nd argument called `run_manager` which is bound to that run, and contains the logging methods that can be used by that object (i.e. `on_llm_new_token`). This is useful when constructing a custom chain. See this guide for more information on how to [create custom chains and use callbacks inside them](/docs/modules/chains/how_to/custom_chain.html).
+`_call`, `_generate`, `_run`, and equivalent async methods on Chains / LLMs / Chat Models / Agents / Tools now receive a 2nd argument called `run_manager` which is bound to that run and contains the logging methods that can be used by that object (e.g. `on_llm_new_token`). This is useful when constructing a custom chain. See this guide for more information on how to [create custom chains and use callbacks inside them](/docs/modules/chains/how_to/custom_chain).
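A minimal sketch of what that looks like in a custom chain (the chain itself is a toy; the `run_manager` plumbing follows the `Chain` base class):

```python
from typing import Any, Dict, List, Optional

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain

class UppercaseChain(Chain):
    """Toy chain that upper-cases its input."""

    @property
    def input_keys(self) -> List[str]:
        return ["text"]

    @property
    def output_keys(self) -> List[str]:
        return ["uppercase"]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        # run_manager is bound to this run and exposes the logging hooks.
        if run_manager:
            run_manager.on_text("Upper-casing input...")
        return {"uppercase": inputs["text"].upper()}
```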
diff --git a/docs/docs/modules/chains/how_to/async_chain.ipynb b/docs/docs/modules/chains/how_to/async_chain.ipynb
index bd3bfd498e9..32fb65009ff 100644
--- a/docs/docs/modules/chains/how_to/async_chain.ipynb
+++ b/docs/docs/modules/chains/how_to/async_chain.ipynb
@@ -9,7 +9,7 @@
"\n",
"LangChain provides async support for Chains by leveraging the [asyncio](https://docs.python.org/3/library/asyncio.html) library.\n",
"\n",
- "Async methods are currently supported in `LLMChain` (through `arun`, `apredict`, `acall`) and `LLMMathChain` (through `arun` and `acall`), `ChatVectorDBChain`, and [QA chains](/docs/use_cases/question_answering/question_answering.html). Async support for other chains is on the roadmap."
+ "Async methods are currently supported in `LLMChain` (through `arun`, `apredict`, `acall`) and `LLMMathChain` (through `arun` and `acall`), `ChatVectorDBChain`, and [QA chains](/docs/use_cases/question_answering/question_answering). Async support for other chains is on the roadmap."
]
},
{
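A minimal sketch of the concurrency win (the model, prompt, and run count are illustrative):

```python
import asyncio

from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

async def main() -> None:
    llm = OpenAI(temperature=0.9)
    prompt = PromptTemplate(
        input_variables=["product"],
        template="What is a good name for a company that makes {product}?",
    )
    chain = LLMChain(llm=llm, prompt=prompt)
    # Fire several generations concurrently instead of one after another.
    names = await asyncio.gather(*(chain.arun(product="socks") for _ in range(3)))
    print(names)

asyncio.run(main())
```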
diff --git a/docs/docs/modules/chains/how_to/call_methods.ipynb b/docs/docs/modules/chains/how_to/call_methods.ipynb
index 626f0b21b61..4f51ff179a2 100644
--- a/docs/docs/modules/chains/how_to/call_methods.ipynb
+++ b/docs/docs/modules/chains/how_to/call_methods.ipynb
@@ -147,7 +147,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Tips: You can easily integrate a `Chain` object as a `Tool` in your `Agent` via its `run` method. See an example [here](/docs/modules/agents/tools/how_to/custom_tools.html)."
+ "Tips: You can easily integrate a `Chain` object as a `Tool` in your `Agent` via its `run` method. See an example [here](/docs/modules/agents/tools/how_to/custom_tools)."
]
},
{
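A minimal sketch of that pattern (the tool name and description are placeholders, and `chain` is assumed from the cells above):

```python
from langchain.agents import Tool

naming_tool = Tool(
    name="CompanyNamer",
    func=chain.run,  # the chain's run method becomes the tool's callable
    description="Useful for suggesting a company name for a given product.",
)
```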
diff --git a/docs/docs/modules/data_connection/document_loaders/file_directory.mdx b/docs/docs/modules/data_connection/document_loaders/file_directory.mdx
index 12f49278fa2..600a58a5692 100644
--- a/docs/docs/modules/data_connection/document_loaders/file_directory.mdx
+++ b/docs/docs/modules/data_connection/document_loaders/file_directory.mdx
@@ -2,7 +2,7 @@
This covers how to load all documents in a directory.
-Under the hood, by default this uses the [UnstructuredLoader](/docs/integrations/document_loaders/unstructured_file.html).
+Under the hood, by default this uses the [UnstructuredLoader](/docs/integrations/document_loaders/unstructured_file).
```python
from langchain.document_loaders import DirectoryLoader
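# A minimal usage sketch (the path and glob are placeholder assumptions;
# show_progress and loader_cls are optional parameters the loader accepts):
loader = DirectoryLoader("../data", glob="**/*.md", show_progress=True)
docs = loader.load()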
diff --git a/docs/docs/modules/memory/agent_with_memory.ipynb b/docs/docs/modules/memory/agent_with_memory.ipynb
index b0801b9baef..5d889a2ca5a 100644
--- a/docs/docs/modules/memory/agent_with_memory.ipynb
+++ b/docs/docs/modules/memory/agent_with_memory.ipynb
@@ -9,8 +9,8 @@
"\n",
"This notebook goes over adding memory to an Agent. Before going through this notebook, please walkthrough the following notebooks, as this will build on top of both of them:\n",
"\n",
- "- [Memory in LLMChain](/docs/modules/memory/how_to/adding_memory.html)\n",
- "- [Custom Agents](/docs/modules/agents/how_to/custom_agent.html)\n",
+ "- [Memory in LLMChain](/docs/modules/memory/how_to/adding_memory)\n",
+ "- [Custom Agents](/docs/modules/agents/how_to/custom_agent)\n",
"\n",
"In order to add a memory to an agent we are going to perform the following steps:\n",
"\n",
diff --git a/docs/docs/modules/memory/agent_with_memory_in_db.ipynb b/docs/docs/modules/memory/agent_with_memory_in_db.ipynb
index adf48b9e040..b1aae1b92e7 100644
--- a/docs/docs/modules/memory/agent_with_memory_in_db.ipynb
+++ b/docs/docs/modules/memory/agent_with_memory_in_db.ipynb
@@ -9,9 +9,9 @@
"\n",
"This notebook goes over adding memory to an Agent where the memory uses an external message store. Before going through this notebook, please walkthrough the following notebooks, as this will build on top of both of them:\n",
"\n",
- "- [Memory in LLMChain](/docs/modules/memory/how_to/adding_memory.html)\n",
- "- [Custom Agents](/docs/modules/agents/how_to/custom_agent.html)\n",
- "- [Memory in Agent](/docs/modules/memory/how_to/agent_with_memory.html)\n",
+ "- [Memory in LLMChain](/docs/modules/memory/how_to/adding_memory)\n",
+ "- [Custom Agents](/docs/modules/agents/how_to/custom_agent)\n",
+ "- [Memory in Agent](/docs/modules/memory/how_to/agent_with_memory)\n",
"\n",
"In order to add a memory with an external message store to an agent we are going to do the following steps:\n",
"\n",
diff --git a/docs/docs/use_cases/qa_structured/sql.ipynb b/docs/docs/use_cases/qa_structured/sql.ipynb
index 032b4eefb4a..ad1b0974dd8 100644
--- a/docs/docs/use_cases/qa_structured/sql.ipynb
+++ b/docs/docs/use_cases/qa_structured/sql.ipynb
@@ -1115,8 +1115,8 @@
"To learn more about the SQL Agent and how it works we refer to the [SQL Agent Toolkit](/docs/integrations/toolkits/sql_database) documentation.\n",
"\n",
"You can also check Agents for other document types:\n",
- "- [Pandas Agent](/docs/integrations/toolkits/pandas.html)\n",
- "- [CSV Agent](/docs/integrations/toolkits/csv.html)"
+ "- [Pandas Agent](/docs/integrations/toolkits/pandas)\n",
+ "- [CSV Agent](/docs/integrations/toolkits/csv)"
]
},
{
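For a flavor of those agents, a minimal sketch (the CSV file is a placeholder dataset):

```python
import pandas as pd

from langchain.agents.agent_toolkits import create_csv_agent, create_pandas_dataframe_agent
from langchain.llms import OpenAI

df = pd.read_csv("titanic.csv")  # placeholder dataset
pandas_agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df, verbose=True)
csv_agent = create_csv_agent(OpenAI(temperature=0), "titanic.csv", verbose=True)
pandas_agent.run("How many rows are there?")
```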
diff --git a/docs/docs/use_cases/question_answering/vector_db_qa.mdx b/docs/docs/use_cases/question_answering/vector_db_qa.mdx
index a727a01dc33..4ba6b2da0fa 100644
--- a/docs/docs/use_cases/question_answering/vector_db_qa.mdx
+++ b/docs/docs/use_cases/question_answering/vector_db_qa.mdx
@@ -42,7 +42,7 @@ qa.run(query)
## Chain Type
-You can easily specify different chain types to load and use in the RetrievalQA chain. For a more detailed walkthrough of these types, please see [this notebook](/docs/modules/chains/additional/question_answering.html).
+You can easily specify different chain types to load and use in the RetrievalQA chain. For a more detailed walkthrough of these types, please see [this notebook](/docs/modules/chains/additional/question_answering).
There are two ways to load different chain types. First, you can specify the chain type argument in the `from_chain_type` method. This allows you to pass in the name of the chain type you want to use. For example, in the below we change the chain type to `map_reduce`.
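A minimal sketch of that call (assumes `docsearch` is the vectorstore built earlier in this guide):

```python
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI

qa = RetrievalQA.from_chain_type(
    llm=OpenAI(),
    chain_type="map_reduce",  # swap the combine-documents strategy here
    retriever=docsearch.as_retriever(),
)
```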
@@ -65,7 +65,7 @@ qa.run(query)
-The above way allows you to really simply change the chain_type, but it doesn't provide a ton of flexibility over parameters to that chain type. If you want to control those parameters, you can load the chain directly (as you did in [this notebook](/docs/modules/chains/additional/question_answering.html)) and then pass that directly to the RetrievalQA chain with the `combine_documents_chain` parameter. For example:
+The approach above makes it simple to change the chain_type, but it doesn't give you much control over that chain type's parameters. If you want to control those parameters, you can load the chain directly (as you did in [this notebook](/docs/modules/chains/additional/question_answering)) and then pass it to the RetrievalQA chain with the `combine_documents_chain` parameter. For example:
```python
@@ -89,7 +89,7 @@ qa.run(query)
## Custom Prompts
-You can pass in custom prompts to do question answering. These prompts are the same prompts as you can pass into the [base question answering chain](/docs/modules/chains/additional/question_answering.html)
+You can pass in custom prompts to do question answering. These prompts are the same prompts that you can pass into the [base question answering chain](/docs/modules/chains/additional/question_answering)
```python