diff --git a/docs/docs/modules/data_connection/document_transformers/HTML_header_metadata.ipynb b/docs/docs/modules/data_connection/document_transformers/HTML_header_metadata.ipynb
index 067d313cf05..d0a0e73b478 100644
--- a/docs/docs/modules/data_connection/document_transformers/HTML_header_metadata.ipynb
+++ b/docs/docs/modules/data_connection/document_transformers/HTML_header_metadata.ipynb
@@ -10,7 +10,7 @@
    }
   },
   "source": [
-    "# HTMLHeaderTextSplitter\n",
+    "# Split by HTML header\n",
    "## Description and motivation\n",
    "Similar in concept to the `MarkdownHeaderTextSplitter`, the `HTMLHeaderTextSplitter` is a \"structure-aware\" chunker that splits text at the element level and adds metadata for each header \"relevant\" to any given chunk. It can return chunks element by element or combine elements with the same metadata, with the objectives of (a) keeping related text grouped (more or less) semantically and (b) preserving context-rich information encoded in document structures. It can be used with other text splitters as part of a chunking pipeline.\n",
    "\n",
diff --git a/docs/docs/modules/data_connection/document_transformers/HTML_section_aware_splitter.ipynb b/docs/docs/modules/data_connection/document_transformers/HTML_section_aware_splitter.ipynb
new file mode 100644
index 00000000000..39e03404ba5
--- /dev/null
+++ b/docs/docs/modules/data_connection/document_transformers/HTML_section_aware_splitter.ipynb
@@ -0,0 +1,173 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "c95fcd15cd52c944",
+   "metadata": {
+    "collapsed": false,
+    "jupyter": {
+     "outputs_hidden": false
+    }
+   },
+   "source": [
+    "# Split by HTML section\n",
+    "## Description and motivation\n",
+    "Similar in concept to the [HTMLHeaderTextSplitter](/docs/modules/data_connection/document_transformers/HTML_header_metadata), the `HTMLSectionSplitter` is a \"structure-aware\" chunker that splits text at the element level and adds metadata for each header \"relevant\" to any given chunk. It can return chunks element by element or combine elements with the same metadata, with the objectives of (a) keeping related text grouped (more or less) semantically and (b) preserving context-rich information encoded in document structures. It can be used with other text splitters as part of a chunking pipeline. Internally, it uses the `RecursiveCharacterTextSplitter` when a section is larger than the chunk size, and it can also use the font size of the text to decide whether a piece of text starts a section, based on a font-size threshold.\n",
+    "\n",
+    "Use `xslt_path` to provide a path to an XSLT file that transforms the HTML so that sections can be detected from the provided tags. By default, the `converting_to_header.xslt` file bundled in the package's `xsl` directory is used. The transformation rewrites the HTML into a format/layout in which sections are easier to detect: for example, `span` elements can be converted to header tags based on their font size, so that they are recognized as section headers.\n",
+    "\n",
+    "## Usage examples\n",
+    "#### 1) With an HTML string:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "initial_id",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2023-10-02T18:57:49.208965400Z",
+     "start_time": "2023-10-02T18:57:48.899756Z"
+    },
+    "collapsed": false,
+    "jupyter": {
+     "outputs_hidden": false
+    }
+   },
+   "outputs": [],
+   "source": [
+    "from langchain_text_splitters import HTMLSectionSplitter\n",
+    "\n",
+    "html_string = \"\"\"\n",
+    "<!DOCTYPE html>\n",
+    "<html>\n",
+    "<body>\n",
\n", + "

Foo

\n", + "

Some intro text about Foo.

\n", + "
\n", + "

Bar main section

\n", + "

Some intro text about Bar.

\n", + "

Bar subsection 1

\n", + "

Some text about the first subtopic of Bar.

\n", + "

Bar subsection 2

\n", + "

Some text about the second subtopic of Bar.

\n", + "
\n", + "
\n", + "

Baz

\n", + "

Some text about Baz

\n", + "
\n", + "
\n", + "

Some concluding text about Foo

\n", + "
\n", + " \n", + " \n", + "\"\"\"\n", + "\n", + "headers_to_split_on = [(\"h1\", \"Header 1\"), (\"h2\", \"Header 2\")]\n", + "\n", + "html_splitter = HTMLSectionSplitter(headers_to_split_on=headers_to_split_on)\n", + "html_header_splits = html_splitter.split_text(html_string)\n", + "html_header_splits" + ] + }, + { + "cell_type": "markdown", + "id": "e29b4aade2a0070c", + "metadata": { + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "source": [ + "#### 2) Pipelined to another splitter, with html loaded from a html string content:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6ada8ea093ea0475", + "metadata": { + "ExecuteTime": { + "end_time": "2023-10-02T18:57:51.016141300Z", + "start_time": "2023-10-02T18:57:50.647495400Z" + }, + "collapsed": false, + "jupyter": { + "outputs_hidden": false + } + }, + "outputs": [], + "source": [ + "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "\n", + "html_string = \"\"\"\n", + " \n", + " \n", + " \n", + "
\n", + "

Foo

\n", + "

Some intro text about Foo.

\n", + "
\n", + "

Bar main section

\n", + "

Some intro text about Bar.

\n", + "

Bar subsection 1

\n", + "

Some text about the first subtopic of Bar.

\n", + "

Bar subsection 2

\n", + "

Some text about the second subtopic of Bar.

\n", + "
\n", + "
\n", + "

Baz

\n", + "

Some text about Baz

\n", + "
\n", + "
\n", + "

Some concluding text about Foo

\n", + "
\n", + " \n", + " \n", + "\"\"\"\n", + "\n", + "headers_to_split_on = [\n", + " (\"h1\", \"Header 1\"),\n", + " (\"h2\", \"Header 2\"),\n", + " (\"h3\", \"Header 3\"),\n", + " (\"h4\", \"Header 4\"),\n", + "]\n", + "\n", + "html_splitter = HTMLSectionSplitter(headers_to_split_on=headers_to_split_on)\n", + "\n", + "html_header_splits = html_splitter.split_text(html_string)\n", + "\n", + "chunk_size = 500\n", + "chunk_overlap = 30\n", + "text_splitter = RecursiveCharacterTextSplitter(\n", + " chunk_size=chunk_size, chunk_overlap=chunk_overlap\n", + ")\n", + "\n", + "# Split\n", + "splits = text_splitter.split_documents(html_header_splits)\n", + "splits" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/libs/text-splitters/langchain_text_splitters/__init__.py b/libs/text-splitters/langchain_text_splitters/__init__.py index 2f74b35251b..df147d7c667 100644 --- a/libs/text-splitters/langchain_text_splitters/__init__.py +++ b/libs/text-splitters/langchain_text_splitters/__init__.py @@ -30,7 +30,11 @@ from langchain_text_splitters.character import ( CharacterTextSplitter, RecursiveCharacterTextSplitter, ) -from langchain_text_splitters.html import ElementType, HTMLHeaderTextSplitter +from langchain_text_splitters.html import ( + ElementType, + HTMLHeaderTextSplitter, + HTMLSectionSplitter, +) from langchain_text_splitters.json import RecursiveJsonSplitter from langchain_text_splitters.konlpy import KonlpyTextSplitter from langchain_text_splitters.latex import LatexTextSplitter @@ -65,6 +69,7 @@ __all__ = [ "HeaderType", "LineType", "HTMLHeaderTextSplitter", + "HTMLSectionSplitter", "MarkdownHeaderTextSplitter", "MarkdownTextSplitter", "CharacterTextSplitter", diff --git a/libs/text-splitters/langchain_text_splitters/html.py b/libs/text-splitters/langchain_text_splitters/html.py index 0b48f7cdd64..6ad27314c02 100644 --- a/libs/text-splitters/langchain_text_splitters/html.py +++ b/libs/text-splitters/langchain_text_splitters/html.py @@ -1,12 +1,16 @@ from __future__ import annotations +import copy +import os import pathlib from io import BytesIO, StringIO -from typing import Any, Dict, List, Tuple, TypedDict +from typing import Any, Dict, Iterable, List, Optional, Tuple, TypedDict, cast import requests from langchain_core.documents import Document +from langchain_text_splitters.character import RecursiveCharacterTextSplitter + class ElementType(TypedDict): """Element type as typed dict.""" @@ -158,3 +162,157 @@ class HTMLHeaderTextSplitter: Document(page_content=chunk["content"], metadata=chunk["metadata"]) for chunk in elements ] + + +class HTMLSectionSplitter: + """ + Splitting HTML files based on specified tag and font sizes. + Requires lxml package. + """ + + def __init__( + self, + headers_to_split_on: List[Tuple[str, str]], + xslt_path: str = "xsl/converting_to_header.xslt", + **kwargs: Any, + ) -> None: + """Create a new HTMLSectionSplitter. + + Args: + headers_to_split_on: list of tuples of headers we want to track mapped to + (arbitrary) keys for metadata. Allowed header values: h1, h2, h3, h4, + h5, h6 e.g. [("h1", "Header 1"), ("h2", "Header 2"]. 
+    """
+
+    def __init__(
+        self,
+        headers_to_split_on: List[Tuple[str, str]],
+        xslt_path: str = "xsl/converting_to_header.xslt",
+        **kwargs: Any,
+    ) -> None:
+        """Create a new HTMLSectionSplitter.
+
+        Args:
+            headers_to_split_on: list of tuples of headers we want to track mapped to
+                (arbitrary) keys for metadata. Allowed header values: h1, h2, h3, h4,
+                h5, h6, e.g. [("h1", "Header 1"), ("h2", "Header 2")].
+            xslt_path: path to an XSLT file used to transform the document,
+                resolved relative to this package unless absolute. Needed for
+                HTML content that uses a different format or layout.
+        """
+        self.headers_to_split_on = dict(headers_to_split_on)
+        self.xslt_path = xslt_path
+        self.kwargs = kwargs
+
+    def split_documents(self, documents: Iterable[Document]) -> List[Document]:
+        """Split documents."""
+        texts, metadatas = [], []
+        for doc in documents:
+            texts.append(doc.page_content)
+            metadatas.append(doc.metadata)
+        results = self.create_documents(texts, metadatas=metadatas)
+
+        text_splitter = RecursiveCharacterTextSplitter(**self.kwargs)
+
+        return text_splitter.split_documents(results)
+
+    def split_text(self, text: str) -> List[Document]:
+        """Split an HTML text string.
+
+        Args:
+            text: HTML text
+        """
+        return self.split_text_from_file(StringIO(text))
+
+    def create_documents(
+        self, texts: List[str], metadatas: Optional[List[dict]] = None
+    ) -> List[Document]:
+        """Create documents from a list of texts."""
+        _metadatas = metadatas or [{}] * len(texts)
+        documents = []
+        for i, text in enumerate(texts):
+            for chunk in self.split_text(text):
+                metadata = copy.deepcopy(_metadatas[i])
+
+                for key in chunk.metadata.keys():
+                    if chunk.metadata[key] == "#TITLE#":
+                        # attribute content that precedes the first header to
+                        # the source document's title, when one is available
+                        chunk.metadata[key] = metadata.get("Title", "#TITLE#")
+                metadata = {**metadata, **chunk.metadata}
+                new_doc = Document(page_content=chunk.page_content, metadata=metadata)
+                documents.append(new_doc)
+        return documents
+
+    def split_html_by_headers(
+        self, html_doc: str
+    ) -> Dict[str, Dict[str, Optional[str]]]:
+        """Split an HTML document into sections keyed by header text."""
+        try:
+            from bs4 import BeautifulSoup, PageElement  # type: ignore[import-untyped]
+        except ImportError as e:
+            raise ImportError(
+                "Unable to import BeautifulSoup/PageElement, "
+                "please install with `pip install bs4`."
+            ) from e
+
+        soup = BeautifulSoup(html_doc, "html.parser")
+        headers = list(self.headers_to_split_on.keys())
+        sections: Dict[str, Dict[str, Optional[str]]] = {}
+
+        headers = soup.find_all(["body"] + headers)
+
+        for i, header in enumerate(headers):
+            header_element: PageElement = header
+            if i == 0:
+                # everything before the first header is collected under a
+                # placeholder title
+                current_header = "#TITLE#"
+                current_header_tag = "h1"
+                section_content: List[str] = []
+            else:
+                current_header = header_element.text.strip()
+                current_header_tag = header_element.name
+                section_content = []
+            for element in header_element.next_elements:
+                if i + 1 < len(headers) and element == headers[i + 1]:
+                    break
+                if isinstance(element, str):
+                    section_content.append(element)
+            content = " ".join(section_content).strip()
+
+            if content != "":
+                sections[current_header] = {
+                    "content": content,
+                    "tag_name": current_header_tag,
+                }
+
+        return sections
+
+    def convert_possible_tags_to_header(self, html_content: str) -> str:
+        """Convert section-like tags (e.g. large-font spans) to header tags."""
+        if self.xslt_path is None:
+            return html_content
+
+        try:
+            from lxml import etree
+        except ImportError as e:
+            raise ImportError(
+                "Unable to import lxml, please install with `pip install lxml`."
+            ) from e
+        # use lxml library to parse html document and return xml ElementTree
+        parser = etree.HTMLParser()
+        tree = etree.parse(StringIO(html_content), parser)
+
+        # document transformation for "structure-aware" chunking is handled with
+        # xsl. this is needed for HTML files that use different font sizes and
+        # layouts, where section titles are not marked up as header tags.
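+        # for example, the bundled stylesheet can rewrite a tag such as
+        #     <span style="font-size: 22px">Introduction</span>
+        # into a header element so that it is detected as a section boundary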
+        # check whether self.xslt_path is a relative or an absolute path;
+        # a relative path is resolved against this package's directory
+        if not os.path.isabs(self.xslt_path):
+            xslt_path = pathlib.Path(__file__).parent / self.xslt_path
+        else:
+            xslt_path = pathlib.Path(self.xslt_path)
+
+        xslt_tree = etree.parse(xslt_path)
+        transform = etree.XSLT(xslt_tree)
+        result = transform(tree)
+        return str(result)
+
+    def split_text_from_file(self, file: Any) -> List[Document]:
+        """Split an HTML file.
+
+        Args:
+            file: HTML file
+        """
+        file_content = file.getvalue()
+        file_content = self.convert_possible_tags_to_header(file_content)
+        sections = self.split_html_by_headers(file_content)
+
+        return [
+            Document(
+                cast(str, sections[section_key]["content"]),
+                metadata={
+                    self.headers_to_split_on[
+                        str(sections[section_key]["tag_name"])
+                    ]: section_key
+                },
+            )
+            for section_key in sections.keys()
+        ]
diff --git a/libs/text-splitters/langchain_text_splitters/xsl/converting_to_header.xslt b/libs/text-splitters/langchain_text_splitters/xsl/converting_to_header.xslt
new file mode 100644
index 00000000000..620e13f54b1
--- /dev/null
+++ b/libs/text-splitters/langchain_text_splitters/xsl/converting_to_header.xslt
@@ -0,0 +1,29 @@
+ + + + + + + + + + + + + + + + 

+ +

+
+ + + + + + +
+
+
\ No newline at end of file
diff --git a/libs/text-splitters/poetry.lock b/libs/text-splitters/poetry.lock
index b7c624c6697..62aa6978e38 100644
--- a/libs/text-splitters/poetry.lock
+++ b/libs/text-splitters/poetry.lock
@@ -1334,7 +1334,7 @@ files = [
 
 [[package]]
 name = "langchain-core"
-version = "0.1.34"
+version = "0.1.36"
 description = "Building applications with LLMs through composability"
 optional = false
 python-versions = ">=3.8.1,<4.0"
@@ -3772,9 +3772,9 @@ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.link
 testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"]
 
 [extras]
-extended-testing = ["lxml"]
+extended-testing = ["beautifulsoup4", "lxml"]
 
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<4.0"
-content-hash = "fe806285e128aea892ae5bad012f7fe7ecb7dc85ba2261c2eb5788d3c529ce2e"
+content-hash = "859cbee6d81c50d2c7dfc5367e76a45659324171f87512667df314825e12f843"
diff --git a/libs/text-splitters/pyproject.toml b/libs/text-splitters/pyproject.toml
index 90ef5b08d6b..9a0fabd97c0 100644
--- a/libs/text-splitters/pyproject.toml
+++ b/libs/text-splitters/pyproject.toml
@@ -12,6 +12,7 @@ repository = "https://github.com/langchain-ai/langchain"
 python = ">=3.8.1,<4.0"
 langchain-core = "^0.1.28"
 lxml = {version = ">=4.9.3,<6.0", optional = true}
+beautifulsoup4 = {version = "^4.12.3", optional = true}
 
 [tool.poetry.group.lint]
 optional = true
@@ -59,7 +60,7 @@ dependencies = {}
 
 [tool.poetry.extras]
 extended_testing = [
-    "lxml",
+    "lxml", "beautifulsoup4"
 ]
 
 [tool.ruff.lint]
@@ -74,7 +75,7 @@ select = [
 disallow_untyped_defs = "True"
 
 [[tool.mypy.overrides]]
-module = ["transformers", "sentence_transformers", "nltk.tokenize", "konlpy.tag"]
+module = ["transformers", "sentence_transformers", "nltk.tokenize", "konlpy.tag", "bs4"]
 ignore_missing_imports = "True"
 
 [tool.coverage.run]
diff --git a/libs/text-splitters/tests/unit_tests/test_text_splitters.py b/libs/text-splitters/tests/unit_tests/test_text_splitters.py
index 825fc4397f1..d59f06678b5 100644
--- a/libs/text-splitters/tests/unit_tests/test_text_splitters.py
+++ b/libs/text-splitters/tests/unit_tests/test_text_splitters.py
@@ -17,7 +17,7 @@ from langchain_text_splitters import (
 )
 from langchain_text_splitters.base import split_text_on_tokens
 from langchain_text_splitters.character import CharacterTextSplitter
-from langchain_text_splitters.html import HTMLHeaderTextSplitter
+from langchain_text_splitters.html import HTMLHeaderTextSplitter, HTMLSectionSplitter
 from langchain_text_splitters.json import RecursiveJsonSplitter
 from langchain_text_splitters.markdown import MarkdownHeaderTextSplitter
 from langchain_text_splitters.python import PythonCodeTextSplitter
@@ -1340,6 +1340,162 @@ def test_split_text_on_tokens() -> None:
     assert output == expected_output
 
 
+@pytest.mark.requires("lxml")
+@pytest.mark.requires("bs4")
+def test_section_aware_happy_path_splitting_based_on_header_1_2() -> None:
+    # arrange
+    html_string = """
+    <html>
+    <body>
+        <div>
+            <h1>Foo</h1>
+            <p>Some intro text about Foo.</p>
+            <div>
+                <h2>Bar main section</h2>
+                <p>Some intro text about Bar.</p>
+                <h3>Bar subsection 1</h3>
+                <p>Some text about the first subtopic of Bar.</p>
+                <h3>Bar subsection 2</h3>
+                <p>Some text about the second subtopic of Bar.</p>
+            </div>
+            <div>
+                <h2>Baz</h2>
+                <p>Some text about Baz</p>
+            </div>
+            <br>
+            <p>Some concluding text about Foo</p>
+        </div>
+    </body>
+    </html>
+    """
+
+    sec_splitter = HTMLSectionSplitter(
+        headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")]
+    )
+
+    docs = sec_splitter.split_text(html_string)
+
+    assert len(docs) == 3
+    assert docs[0].metadata["Header 1"] == "Foo"
+    assert docs[0].page_content == "Foo \n Some intro text about Foo."
+
+    assert docs[1].page_content == (
+        "Bar main section \n Some intro text about Bar. \n "
+        "Bar subsection 1 \n Some text about the first subtopic of Bar. \n "
+        "Bar subsection 2 \n Some text about the second subtopic of Bar."
+    )
+    assert docs[1].metadata["Header 2"] == "Bar main section"
+
+    # the concluding <p> has no later header of its own, so it is folded into
+    # the last detected section ("Baz")
+    assert (
+        docs[2].page_content
+        == "Baz \n Some text about Baz \n \n \n Some concluding text about Foo"
+    )
+    assert docs[2].metadata["Header 2"] == "Baz"
+
+
+@pytest.mark.requires("lxml")
+@pytest.mark.requires("bs4")
+def test_happy_path_splitting_based_on_header_with_font_size() -> None:
+    # arrange
+    html_string = """
+    <html>
+    <body>
+        <div>
+            <span style="font-size: 22px">Foo</span>
+            <p>Some intro text about Foo.</p>
+            <div>
+                <h2>Bar main section</h2>
+                <p>Some intro text about Bar.</p>
+                <h3>Bar subsection 1</h3>
+                <p>Some text about the first subtopic of Bar.</p>
+                <h3>Bar subsection 2</h3>
+                <p>Some text about the second subtopic of Bar.</p>
+            </div>
+            <div>
+                <h2>Baz</h2>
+                <p>Some text about Baz</p>
+            </div>
+            <br>
+            <p>Some concluding text about Foo</p>
+        </div>
+    </body>
+    </html>
+    """
+
+    sec_splitter = HTMLSectionSplitter(
+        headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")]
+    )
+
+    docs = sec_splitter.split_text(html_string)
+
+    assert len(docs) == 3
+    assert docs[0].page_content == "Foo \n Some intro text about Foo."
+    assert docs[0].metadata["Header 1"] == "Foo"
+
+    assert docs[1].page_content == (
+        "Bar main section \n Some intro text about Bar. \n "
+        "Bar subsection 1 \n Some text about the first subtopic of Bar. \n "
+        "Bar subsection 2 \n Some text about the second subtopic of Bar."
+    )
+    assert docs[1].metadata["Header 2"] == "Bar main section"
+
+    assert docs[2].page_content == (
+        "Baz \n Some text about Baz \n \n \n Some concluding text about Foo"
+    )
+    assert docs[2].metadata["Header 2"] == "Baz"
+
+
+@pytest.mark.requires("lxml")
+@pytest.mark.requires("bs4")
+def test_happy_path_splitting_based_on_header_with_whitespace_chars() -> None:
+    # arrange
+    html_string = """
+    <html>
+    <body>
+        <div>
+            <span style="font-size: 22px">\nFoo </span>
+            <p>Some intro text about Foo.</p>
+            <div>
+                <h2>Bar main section</h2>
+                <p>Some intro text about Bar.</p>
+                <h3>Bar subsection 1</h3>
+                <p>Some text about the first subtopic of Bar.</p>
+                <h3>Bar subsection 2</h3>
+                <p>Some text about the second subtopic of Bar.</p>
+            </div>
+            <div>
+                <h2>Baz</h2>
+                <p>Some text about Baz</p>
+            </div>
+            <br>
+            <p>Some concluding text about Foo</p>
+        </div>
+    </body>
+    </html>
+    """
+
+    sec_splitter = HTMLSectionSplitter(
+        headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")]
+    )
+
+    docs = sec_splitter.split_text(html_string)
+
+    assert len(docs) == 3
+    assert docs[0].page_content == "Foo \n Some intro text about Foo."
+    assert docs[0].metadata["Header 1"] == "Foo"
+
+    assert docs[1].page_content == (
+        "Bar main section \n Some intro text about Bar. \n "
+        "Bar subsection 1 \n Some text about the first subtopic of Bar. \n "
+        "Bar subsection 2 \n Some text about the second subtopic of Bar."
+    )
+    assert docs[1].metadata["Header 2"] == "Bar main section"
+
+    assert docs[2].page_content == (
+        "Baz \n Some text about Baz \n \n \n Some concluding text about Foo"
+    )
+    assert docs[2].metadata["Header 2"] == "Baz"
+
+
 def test_split_json() -> None:
     """Test json text splitter"""
     max_chunk = 800