Mirror of https://github.com/hwchase17/langchain.git (synced 2025-04-29 04:16:02 +00:00)
Commit d081a5400a (parent f203229b51)
@@ -35,7 +35,7 @@
 "- Creating a [Retriever](/docs/concepts/retrievers) to expose specific information to our agent\n",
 "- Using a Search [Tool](/docs/concepts/tools) to look up things online\n",
 "- [`Chat History`](/docs/concepts/chat_history), which allows a chatbot to \"remember\" past interactions and take them into account when responding to follow-up questions. \n",
-"- Debugging and tracing your application using [LangSmith](/docs/concepts/#langsmith)\n",
+"- Debugging and tracing your application using [LangSmith](https://docs.smith.langchain.com/)\n",
 "\n",
 "## Setup\n",
 "\n",
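For context on the "chat history" bullet above, a minimal sketch of what remembering past interactions looks like; the messages and the `chat_model` variable are illustrative, not part of this diff:

```python
# Hedged sketch: prior turns are replayed to the model so a follow-up
# question can be answered in context. `chat_model` stands for any
# LangChain chat model and is assumed here, not defined by this commit.
from langchain_core.messages import AIMessage, HumanMessage

history = [
    HumanMessage(content="Hi, I'm Bob."),
    AIMessage(content="Hello Bob! How can I help you?"),
]
messages = history + [HumanMessage(content="What's my name?")]
# response = chat_model.invoke(messages)
```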
@@ -13,7 +13,7 @@
 "<Prerequisites titlesAndLinks={[\n",
 " [\"Chat models\", \"/docs/concepts/chat_models\"],\n",
 " [\"Few-shot-prompting\", \"/docs/concepts/few-shot-prompting\"],\n",
-" [\"LangSmith\", \"/docs/concepts/#langsmith\"],\n",
+" [\"LangSmith\", \"https://docs.smith.langchain.com/\"],\n",
 "]} />\n",
 "\n",
 "\n",
@@ -23,7 +23,7 @@
 "- [Prompt templates](/docs/concepts/prompt_templates)\n",
 "- [Example selectors](/docs/concepts/example_selectors)\n",
 "- [LLMs](/docs/concepts/text_llms)\n",
-"- [Vectorstores](/docs/concepts/#vector-stores)\n",
+"- [Vectorstores](/docs/concepts/vectorstores)\n",
 "\n",
 ":::\n",
 "\n",
@@ -23,7 +23,7 @@
 "- [Prompt templates](/docs/concepts/prompt_templates)\n",
 "- [Example selectors](/docs/concepts/example_selectors)\n",
 "- [Chat models](/docs/concepts/chat_models)\n",
-"- [Vectorstores](/docs/concepts/#vector-stores)\n",
+"- [Vectorstores](/docs/concepts/vectorstores)\n",
 "\n",
 ":::\n",
 "\n",
@@ -159,7 +159,7 @@ What LangChain calls [LLMs](/docs/concepts/text_llms) are older forms of language

 ### Vector stores

-[Vector stores](/docs/concepts/#vector-stores) are databases that can efficiently store and retrieve embeddings.
+[Vector stores](/docs/concepts/vectorstores) are databases that can efficiently store and retrieve embeddings.

 - [How to: use a vector store to retrieve data](/docs/how_to/vectorstores)

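To make the sentence above concrete, a minimal sketch of storing and searching embedded texts; the in-memory store and deterministic fake embeddings are stand-ins chosen so the snippet runs without an external service:

```python
# Hedged sketch: embed a few texts, store them, then run a similarity search.
# DeterministicFakeEmbedding is a test-only embedding model; swap in a real one.
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore

store = InMemoryVectorStore.from_texts(
    ["LangChain helps build LLM apps.", "Vector stores index embeddings."],
    embedding=DeterministicFakeEmbedding(size=256),
)
results = store.similarity_search("How are embeddings indexed?", k=1)
print(results[0].page_content)
```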
@@ -16,7 +16,7 @@ Retrievers accept a string query as input and return a list of [Documents](https

 For specifics on how to use retrievers, see the [relevant how-to guides here](/docs/how_to/#retrievers).

-Note that all [vector stores](/docs/concepts/#vector-stores) can be [cast to retrievers](/docs/how_to/vectorstore_retriever/).
+Note that all [vector stores](/docs/concepts/vectorstores) can be [cast to retrievers](/docs/how_to/vectorstore_retriever/).
 Refer to the vector store [integration docs](/docs/integrations/vectorstores/) for available vector stores.
 This page lists custom retrievers, implemented via subclassing [BaseRetriever](/docs/how_to/custom_retriever/).

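The "cast to retrievers" line is the key point of this hunk; a small sketch of that cast, with illustrative texts and a fake embedding model:

```python
# Hedged sketch: as_retriever() wraps a vector store in the retriever interface,
# so it can be invoked with a query string and returns a list of Documents.
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore

store = InMemoryVectorStore.from_texts(
    ["Retrievers return documents for a string query."],
    embedding=DeterministicFakeEmbedding(size=256),
)
retriever = store.as_retriever(search_kwargs={"k": 1})
print(retriever.invoke("What do retrievers return?"))
```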
@@ -7,7 +7,7 @@ sidebar_class_name: hidden

 import { CategoryTable, IndexTable } from "@theme/FeatureTables";

-A [vector store](/docs/concepts/#vector-stores) stores [embedded](/docs/concepts/embedding_models) data and performs similarity search.
+A [vector store](/docs/concepts/vectorstores) stores [embedded](/docs/concepts/embedding_models) data and performs similarity search.

 <CategoryTable category="vectorstores" />

@@ -27,7 +27,7 @@
 "\n",
 "- Using [LangChain Expression Language (LCEL)](/docs/concepts/lcel) to chain components together\n",
 "\n",
-"- Debugging and tracing your application using [LangSmith](/docs/concepts/#langsmith)\n",
+"- Debugging and tracing your application using [LangSmith](https://docs.smith.langchain.com/)\n",
 "\n",
 "- Deploying your application with [LangServe](/docs/concepts/#langserve)\n",
 "\n",
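For the LCEL bullet above, a minimal chaining sketch; FakeListChatModel is used only so the chain runs offline and is an assumption, not something this commit touches:

```python
# Hedged sketch of LCEL: prompt, model, and parser composed with `|`.
from langchain_core.language_models import FakeListChatModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_template("Translate to French: {text}")
model = FakeListChatModel(responses=["Bonjour le monde"])
chain = prompt | model | StrOutputParser()
print(chain.invoke({"text": "Hello world"}))
```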
@@ -14,7 +14,7 @@
 "- [Chat Models](/docs/concepts/chat_models)\n",
 "- [Chaining runnables](/docs/how_to/sequence/)\n",
 "- [Embeddings](/docs/concepts/embedding_models)\n",
-"- [Vector stores](/docs/concepts/#vector-stores)\n",
+"- [Vector stores](/docs/concepts/vectorstores)\n",
 "- [Retrieval-augmented generation](/docs/tutorials/rag/)\n",
 "\n",
 ":::\n",
@@ -26,7 +26,7 @@
 "- [Document loaders](/docs/concepts/document_loaders)\n",
 "- [Chat models](/docs/concepts/chat_models)\n",
 "- [Embeddings](/docs/concepts/embedding_models)\n",
-"- [Vector stores](/docs/concepts/#vector-stores)\n",
+"- [Vector stores](/docs/concepts/vectorstores)\n",
 "- [Retrieval-augmented generation](/docs/tutorials/rag/)\n",
 "\n",
 ":::\n",
@@ -117,7 +117,7 @@
 "\n",
 "## Question answering with RAG\n",
 "\n",
-"Next, you'll prepare the loaded documents for later retrieval. Using a [text splitter](/docs/concepts/text_splitters), you'll split your loaded documents into smaller documents that can more easily fit into an LLM's context window, then load them into a [vector store](/docs/concepts/#vector-stores). You can then create a [retriever](/docs/concepts/retrievers) from the vector store for use in our RAG chain:\n",
+"Next, you'll prepare the loaded documents for later retrieval. Using a [text splitter](/docs/concepts/text_splitters), you'll split your loaded documents into smaller documents that can more easily fit into an LLM's context window, then load them into a [vector store](/docs/concepts/vectorstores). You can then create a [retriever](/docs/concepts/retrievers) from the vector store for use in our RAG chain:\n",
 "\n",
 "import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
 "\n",
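A rough sketch of the split-store-retrieve flow that paragraph describes; the sample document, chunk sizes, and fake embeddings are placeholders, not taken from the tutorial:

```python
# Hedged sketch: split documents, embed them into a vector store,
# then expose a retriever for use in a RAG chain.
from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_text_splitters import RecursiveCharacterTextSplitter

docs = [Document(page_content="A long page about LangSmith. " * 200)]
splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
splits = splitter.split_documents(docs)
vector_store = InMemoryVectorStore.from_documents(
    splits, embedding=DeterministicFakeEmbedding(size=256)
)
retriever = vector_store.as_retriever()
```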
@@ -24,7 +24,7 @@
 "- [Chat history](/docs/concepts/chat_history)\n",
 "- [Chat models](/docs/concepts/chat_models)\n",
 "- [Embeddings](/docs/concepts/embedding_models)\n",
-"- [Vector stores](/docs/concepts/#vector-stores)\n",
+"- [Vector stores](/docs/concepts/vectorstores)\n",
 "- [Retrieval-augmented generation](/docs/tutorials/rag/)\n",
 "- [Tools](/docs/concepts/tools)\n",
 "- [Agents](/docs/concepts/agents)\n",
@@ -24,7 +24,7 @@
 "- [Document loaders](/docs/concepts/document_loaders)\n",
 "- [Chat models](/docs/concepts/chat_models)\n",
 "- [Embeddings](/docs/concepts/embedding_models)\n",
-"- [Vector stores](/docs/concepts/#vector-stores)\n",
+"- [Vector stores](/docs/concepts/vectorstores)\n",
 "- [Retrieval](/docs/concepts/retrieval)\n",
 "\n",
 ":::\n",
@@ -41,7 +41,7 @@
 "### Indexing\n",
 "1. **Load**: First we need to load our data. This is done with [Document Loaders](/docs/concepts/document_loaders).\n",
 "2. **Split**: [Text splitters](/docs/concepts/text_splitters) break large `Documents` into smaller chunks. This is useful both for indexing data and for passing it in to a model, since large chunks are harder to search over and won't fit in a model's finite context window.\n",
-"3. **Store**: We need somewhere to store and index our splits, so that they can later be searched over. This is often done using a [VectorStore](/docs/concepts/#vector-stores) and [Embeddings](/docs/concepts/embedding_models) model.\n",
+"3. **Store**: We need somewhere to store and index our splits, so that they can later be searched over. This is often done using a [VectorStore](/docs/concepts/vectorstores) and [Embeddings](/docs/concepts/embedding_models) model.\n",
 "\n",
 "\n",
 "\n",
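The Load step in the list above is the one not illustrated elsewhere on this page; a minimal loader sketch, with an illustrative file path:

```python
# Hedged sketch of the Load step: a document loader turns a source file into
# Document objects that the Split and Store steps then consume.
from langchain_community.document_loaders import TextLoader

docs = TextLoader("state_of_the_union.txt").load()  # path is illustrative
print(len(docs), docs[0].metadata)
```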
@@ -3,47 +3,59 @@ import multiprocessing
 import re
 import sys
 from pathlib import Path
+from typing import Optional

+# List of 4-tuples (integration_name, display_name, concept_page, how_to_fragment)
+INTEGRATION_INFO = [
+    ("chat", "Chat model", "chat_models", "chat-models"),
+    ("llms", "LLM", "text_llms", "llms"),
+    ("text_embedding", "Embedding model", "embedding_models", "embedding-models"),
+    ("document_loaders", "Document loader", "document_loaders", "document-loaders"),
+    ("vectorstores", "Vector store", "vectorstores", "vector-stores"),
+    ("retrievers", "Retriever", "retrievers", "retrievers"),
+    ("tools", "Tool", "tools", "tools"),
+    # stores is a special case because there are no key-value store how-tos yet
+    # this is a placeholder for when we do have them
+    # for now the related links section will only contain the conceptual guide.
+    ("stores", "Key-value store", "key_value_stores", "key-value-stores"),
+]

-def _generate_related_links_section(integration_type: str, notebook_name: str):
-    concept_display_name = None
-    concept_heading = None
-    if integration_type == "chat":
-        concept_display_name = "Chat model"
-        concept_heading = "chat-models"
-    elif integration_type == "llms":
-        concept_display_name = "LLM"
-        concept_heading = "llms"
-    elif integration_type == "text_embedding":
-        concept_display_name = "Embedding model"
-        concept_heading = "embedding-models"
-    elif integration_type == "document_loaders":
-        concept_display_name = "Document loader"
-        concept_heading = "document-loaders"
-    elif integration_type == "vectorstores":
-        concept_display_name = "Vector store"
-        concept_heading = "vector-stores"
-    elif integration_type == "retrievers":
-        concept_display_name = "Retriever"
-        concept_heading = "retrievers"
-    elif integration_type == "tools":
-        concept_display_name = "Tool"
-        concept_heading = "tools"
-    elif integration_type == "stores":
-        concept_display_name = "Key-value store"
-        concept_heading = "key-value-stores"
-        # Special case because there are no key-value store how-tos yet
-        return f"""## Related
+# Create a dictionary with key being the first element (integration_name) and value being the rest of the tuple
+INTEGRATION_INFO_DICT = {
+    integration_name: rest for integration_name, *rest in INTEGRATION_INFO
+}

-- [{concept_display_name} conceptual guide](/docs/concepts/#{concept_heading})
+RELATED_LINKS_SECTION = """## Related
+- {concept_display_name} [conceptual guide](/docs/concepts/{concept_page})
+- {concept_display_name} [how-to guides](/docs/how_to/#{how_to_fragment})
 """
-    else:

+RELATED_LINKS_WITHOUT_HOW_TO_SECTION = """## Related
+- {concept_display_name} [conceptual guide](/docs/concepts/{concept_page})
+"""


+def _generate_related_links_section(
+    integration_type: str, notebook_name: str
+) -> Optional[str]:
+    if integration_type not in INTEGRATION_INFO_DICT:
         return None
-    return f"""## Related
+    concept_display_name, concept_page, how_to_fragment = INTEGRATION_INFO_DICT[
+        integration_type
+    ]

-- [{concept_display_name} conceptual guide](/docs/concepts/#{concept_heading})
-- {concept_display_name} [how-to guides](/docs/how_to/#{concept_heading})
-"""
+    # Special case because there are no key-value store how-tos yet
+    if integration_type == "stores":
+        return RELATED_LINKS_WITHOUT_HOW_TO_SECTION.format(
+            concept_display_name=concept_display_name,
+            concept_page=concept_page,
+        )

+    return RELATED_LINKS_SECTION.format(
+        concept_display_name=concept_display_name,
+        concept_page=concept_page,
+        how_to_fragment=how_to_fragment,
+    )


 def _process_path(doc_path: Path):
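As a sanity check on the refactor, an illustrative call to the new helper; the template's exact whitespace is reconstructed from the diff above, so treat the printed output as approximate:

```python
# Illustrative usage of the refactored helper (not part of the commit).
print(_generate_related_links_section("chat", "some_notebook.ipynb"))
# ## Related
# - Chat model [conceptual guide](/docs/concepts/chat_models)
# - Chat model [how-to guides](/docs/how_to/#chat-models)

print(_generate_related_links_section("not_a_known_type", "some_notebook.ipynb"))
# None
```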