diff --git a/docs/modules/indexes/text_splitters.rst b/docs/modules/indexes/text_splitters.rst index 9b8b66fb21b..a8e037a3c04 100644 --- a/docs/modules/indexes/text_splitters.rst +++ b/docs/modules/indexes/text_splitters.rst @@ -33,10 +33,8 @@ For an introduction to the default text splitter and generic functionality see: Usage examples for the text splitters: - `Character <./text_splitters/examples/character_text_splitter.html>`_ -- `LaTeX <./text_splitters/examples/latex.html>`_ -- `Markdown <./text_splitters/examples/markdown.html>`_ +- `Code (including HTML, Markdown, Latex, Python, etc) <./text_splitters/examples/code_splitter.html>`_ - `NLTK <./text_splitters/examples/nltk.html>`_ -- `Python code <./text_splitters/examples/python.html>`_ - `Recursive Character <./text_splitters/examples/recursive_text_splitter.html>`_ - `spaCy <./text_splitters/examples/spacy.html>`_ - `tiktoken (OpenAI) <./text_splitters/examples/tiktoken_splitter.html>`_ @@ -49,10 +47,8 @@ Usage examples for the text splitters: :hidden: ./text_splitters/examples/character_text_splitter.ipynb - ./text_splitters/examples/latex.ipynb - ./text_splitters/examples/markdown.ipynb + ./text_splitters/examples/code_splitter.ipynb ./text_splitters/examples/nltk.ipynb - ./text_splitters/examples/python.ipynb ./text_splitters/examples/recursive_text_splitter.ipynb ./text_splitters/examples/spacy.ipynb ./text_splitters/examples/tiktoken_splitter.ipynb diff --git a/docs/modules/indexes/text_splitters/examples/code_splitter.ipynb b/docs/modules/indexes/text_splitters/examples/code_splitter.ipynb index c769dd4a549..674159f6c2d 100644 --- a/docs/modules/indexes/text_splitters/examples/code_splitter.ipynb +++ b/docs/modules/indexes/text_splitters/examples/code_splitter.ipynb @@ -1,7 +1,6 @@ { "cells": [ { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -12,64 +11,94 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "from langchain.text_splitter import (\n", - " CodeTextSplitter,\n", + " RecursiveCharacterTextSplitter,\n", " Language,\n", ")" ] }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Choose a language to use" - ] - }, { "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "python_splitter = CodeTextSplitter(\n", - " language=Language.PYTHON, chunk_size=16, chunk_overlap=0\n", - ")\n", - "js_splitter = CodeTextSplitter(\n", - " language=Language.JS, chunk_size=16, chunk_overlap=0\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Split the code" - ] - }, - { - "cell_type": "code", - "execution_count": 8, + "execution_count": 2, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[Document(page_content='def', metadata={}),\n", - " Document(page_content='hello_world():', metadata={}),\n", - " Document(page_content='print(\"Hello,', metadata={}),\n", - " Document(page_content='World!\")', metadata={}),\n", - " Document(page_content='# Call the', metadata={}),\n", - " Document(page_content='function', metadata={}),\n", - " Document(page_content='hello_world()', metadata={})]" + "['cpp',\n", + " 'go',\n", + " 'java',\n", + " 'js',\n", + " 'php',\n", + " 'proto',\n", + " 'python',\n", + " 'rst',\n", + " 'ruby',\n", + " 'rust',\n", + " 'scala',\n", + " 'swift',\n", + " 'markdown',\n", + " 'latex',\n", + " 'html']" ] }, - "execution_count": 8, + "execution_count": 2, + "metadata": {}, + 
"output_type": "execute_result" + } + ], + "source": [ + "# Full list of support languages\n", + "[e.value for e in Language]" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['\\nclass ', '\\ndef ', '\\n\\tdef ', '\\n\\n', '\\n', ' ', '']" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# You can also see the separators used for a given language\n", + "RecursiveCharacterTextSplitter.get_separators_for_language(Language.PYTHON)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Python\n", + "\n", + "Here's an example using the PythonTextSplitter" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[Document(page_content='def hello_world():\\n print(\"Hello, World!\")', metadata={}),\n", + " Document(page_content='# Call the function\\nhello_world()', metadata={})]" + ] + }, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } @@ -82,31 +111,34 @@ "# Call the function\n", "hello_world()\n", "\"\"\"\n", - "\n", + "python_splitter = RecursiveCharacterTextSplitter.from_language(\n", + " language=Language.PYTHON, chunk_size=50, chunk_overlap=0\n", + ")\n", "python_docs = python_splitter.create_documents([PYTHON_CODE])\n", "python_docs" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## JS\n", + "Here's an example using the JS text splitter" + ] + }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[Document(page_content='function', metadata={}),\n", - " Document(page_content='helloWorld() {', metadata={}),\n", - " Document(page_content='console.log(\"He', metadata={}),\n", - " Document(page_content='llo,', metadata={}),\n", - " Document(page_content='World!\");', metadata={}),\n", - " Document(page_content='}', metadata={}),\n", - " Document(page_content='// Call the', metadata={}),\n", - " Document(page_content='function', metadata={}),\n", - " Document(page_content='helloWorld();', metadata={})]" + "[Document(page_content='function helloWorld() {\\n console.log(\"Hello, World!\");\\n}', metadata={}),\n", + " Document(page_content='// Call the function\\nhelloWorld();', metadata={})]" ] }, - "execution_count": 9, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -121,10 +153,234 @@ "helloWorld();\n", "\"\"\"\n", "\n", + "js_splitter = RecursiveCharacterTextSplitter.from_language(\n", + " language=Language.JS, chunk_size=60, chunk_overlap=0\n", + ")\n", "js_docs = js_splitter.create_documents([JS_CODE])\n", "js_docs" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Markdown\n", + "\n", + "Here's an example using the Markdown text splitter." 
+  ]
+ },
+ {
+  "cell_type": "code",
+  "execution_count": 7,
+  "metadata": {},
+  "outputs": [],
+  "source": [
+   "markdown_text = \"\"\"\n",
+   "# 🦜️🔗 LangChain\n",
+   "\n",
+   "⚡ Building applications with LLMs through composability ⚡\n",
+   "\n",
+   "## Quick Install\n",
+   "\n",
+   "```bash\n",
+   "# Hopefully this code block isn't split\n",
+   "pip install langchain\n",
+   "```\n",
+   "\n",
+   "As an open source project in a rapidly developing field, we are extremely open to contributions.\n",
+   "\"\"\"\n"
+  ]
+ },
+ {
+  "cell_type": "code",
+  "execution_count": 8,
+  "metadata": {},
+  "outputs": [
+   {
+    "data": {
+     "text/plain": [
+      "[Document(page_content='# 🦜️🔗 LangChain', metadata={}),\n",
+      " Document(page_content='⚡ Building applications with LLMs through composability ⚡', metadata={}),\n",
+      " Document(page_content='## Quick Install', metadata={}),\n",
+      " Document(page_content=\"```bash\\n# Hopefully this code block isn't split\", metadata={}),\n",
+      " Document(page_content='pip install langchain', metadata={}),\n",
+      " Document(page_content='```', metadata={}),\n",
+      " Document(page_content='As an open source project in a rapidly developing field, we', metadata={}),\n",
+      " Document(page_content='are extremely open to contributions.', metadata={})]"
+     ]
+    },
+    "execution_count": 8,
+    "metadata": {},
+    "output_type": "execute_result"
+   }
+  ],
+  "source": [
+   "md_splitter = RecursiveCharacterTextSplitter.from_language(\n",
+   "    language=Language.MARKDOWN, chunk_size=60, chunk_overlap=0\n",
+   ")\n",
+   "md_docs = md_splitter.create_documents([markdown_text])\n",
+   "md_docs"
+  ]
+ },
+ {
+  "cell_type": "markdown",
+  "metadata": {},
+  "source": [
+   "## LaTeX\n",
+   "\n",
+   "Here's an example using LaTeX text"
+  ]
+ },
+ {
+  "cell_type": "code",
+  "execution_count": 9,
+  "metadata": {},
+  "outputs": [],
+  "source": [
+   "latex_text = r\"\"\"\n",
+   "\\documentclass{article}\n",
+   "\n",
+   "\\begin{document}\n",
+   "\n",
+   "\\maketitle\n",
+   "\n",
+   "\\section{Introduction}\n",
+   "Large language models (LLMs) are a type of machine learning model that can be trained on vast amounts of text data to generate human-like language. In recent years, LLMs have made significant advances in a variety of natural language processing tasks, including language translation, text generation, and sentiment analysis.\n",
+   "\n",
+   "\\subsection{History of LLMs}\n",
+   "The earliest LLMs were developed in the 1980s and 1990s, but they were limited by the amount of data that could be processed and the computational power available at the time. In the past decade, however, advances in hardware and software have made it possible to train LLMs on massive datasets, leading to significant improvements in performance.\n",
+   "\n",
+   "\\subsection{Applications of LLMs}\n",
+   "LLMs have many applications in industry, including chatbots, content creation, and virtual assistants. They can also be used in academia for research in linguistics, psychology, and computational linguistics.\n",
+   "\n",
+   "\\end{document}\n",
+   "\"\"\""
+  ]
+ },
+ {
+  "cell_type": "code",
+  "execution_count": 10,
+  "metadata": {},
+  "outputs": [
+   {
+    "data": {
+     "text/plain": [
+      "[Document(page_content='\\\\documentclass{article}\\n\\n\\\\begin{document}\\n\\n\\\\maketitle', metadata={}),\n",
+      " Document(page_content='\\\\section{Introduction}', metadata={}),\n",
+      " Document(page_content='Large language models (LLMs) are a type of machine learning', metadata={}),\n",
+      " Document(page_content='model that can be trained on vast amounts of text data to', metadata={}),\n",
+      " Document(page_content='generate human-like language. In recent years, LLMs have', metadata={}),\n",
+      " Document(page_content='made significant advances in a variety of natural language', metadata={}),\n",
+      " Document(page_content='processing tasks, including language translation, text', metadata={}),\n",
+      " Document(page_content='generation, and sentiment analysis.', metadata={}),\n",
+      " Document(page_content='\\\\subsection{History of LLMs}', metadata={}),\n",
+      " Document(page_content='The earliest LLMs were developed in the 1980s and 1990s,', metadata={}),\n",
+      " Document(page_content='but they were limited by the amount of data that could be', metadata={}),\n",
+      " Document(page_content='processed and the computational power available at the', metadata={}),\n",
+      " Document(page_content='time. In the past decade, however, advances in hardware and', metadata={}),\n",
+      " Document(page_content='software have made it possible to train LLMs on massive', metadata={}),\n",
+      " Document(page_content='datasets, leading to significant improvements in', metadata={}),\n",
+      " Document(page_content='performance.', metadata={}),\n",
+      " Document(page_content='\\\\subsection{Applications of LLMs}', metadata={}),\n",
+      " Document(page_content='LLMs have many applications in industry, including', metadata={}),\n",
+      " Document(page_content='chatbots, content creation, and virtual assistants. They', metadata={}),\n",
+      " Document(page_content='can also be used in academia for research in linguistics,', metadata={}),\n",
+      " Document(page_content='psychology, and computational linguistics.', metadata={}),\n",
+      " Document(page_content='\\\\end{document}', metadata={})]"
+     ]
+    },
+    "execution_count": 10,
+    "metadata": {},
+    "output_type": "execute_result"
+   }
+  ],
+  "source": [
+   "latex_splitter = RecursiveCharacterTextSplitter.from_language(\n",
+   "    language=Language.LATEX, chunk_size=60, chunk_overlap=0\n",
+   ")\n",
+   "latex_docs = latex_splitter.create_documents([latex_text])\n",
+   "latex_docs"
+  ]
+ },
+ {
+  "cell_type": "markdown",
+  "metadata": {},
+  "source": [
+   "## HTML\n",
+   "\n",
+   "Here's an example using an HTML text splitter"
+  ]
+ },
+ {
+  "cell_type": "code",
+  "execution_count": 11,
+  "metadata": {},
+  "outputs": [],
+  "source": [
+   "html_text = \"\"\"\n",
\n", + "ā” Building applications with LLMs through composability ā”
\n", + "ā” Building applications with LLMs through', metadata={}),\n", + " Document(page_content='composability ā”
', metadata={}),\n", + " Document(page_content='ā” Building applications with LLMs through composability ā”
\n", - "ā” Building applications with LLMs through composability ā”
\\n
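+ {
+  "cell_type": "markdown",
+  "metadata": {},
+  "source": [
+   "The same splitters can be pointed at source code read from disk. The cell below is only a sketch: `example.py` is a placeholder path and the chunk sizes are arbitrary, so adjust both to your own files."
+  ]
+ },
+ {
+  "cell_type": "code",
+  "execution_count": null,
+  "metadata": {},
+  "outputs": [],
+  "source": [
+   "# A minimal sketch of splitting a source file loaded from disk.\n",
+   "# \"example.py\" is a hypothetical path used purely for illustration.\n",
+   "with open(\"example.py\") as f:\n",
+   "    python_source = f.read()\n",
+   "\n",
+   "file_splitter = RecursiveCharacterTextSplitter.from_language(\n",
+   "    language=Language.PYTHON, chunk_size=200, chunk_overlap=20\n",
+   ")\n",
+   "# Attach the file name as metadata so downstream retrieval can cite it\n",
+   "file_docs = file_splitter.create_documents(\n",
+   "    [python_source], metadatas=[{\"source\": \"example.py\"}]\n",
+   ")\n",
+   "len(file_docs)"
+  ]
+ },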