text_splitters: Add HTMLSemanticPreservingSplitter (#25911)
**Description:** Current HTML splitters rely on a secondary pass with `RecursiveCharacterTextSplitter` to further chunk the document into manageable pieces. The issue with this is that it fails to maintain important structures such as tables and lists within the HTML. This implementation of an HTML splitter lets the user define a maximum chunk size, HTML elements to preserve in full, options to preserve `<a>` href links in the output, and custom handlers. The core splitting begins with headers, similar to `HTMLHeaderTextSplitter`. If a section exceeds `max_chunk_size`, further recursive splitting is triggered. During this splitting, elements listed for preservation are excluded from the splitting process. This can cause chunks to be slightly larger than the max size, depending on the preserved length; however, all contextual relevance of the preserved item remains intact.

**Custom Handlers:** Some companies, such as Atlassian, use custom HTML elements that are not parsed by default with `BeautifulSoup`. Custom handlers allow a user to provide a function to be run whenever a specific HTML tag is encountered. This lets the user preserve and gather information within custom HTML tags that `bs4` would otherwise miss during extraction.

**Dependencies:** Users will need to install `bs4` in their project to utilise this class.

I have also added `how_to` docs and unit tests, which require `bs4` to run; otherwise they will be skipped.

Flowchart of process:

---------

Co-authored-by: Bagatur <baskaryan@gmail.com>
Co-authored-by: Chester Curme <chester.curme@gmail.com>
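A minimal usage sketch, assembled from the class docstring and the unit tests in this diff (the `html_string` input below is illustrative, not part of the PR; requires `bs4` and this version of `langchain-text-splitters`):

```python
from langchain_text_splitters.html import HTMLSemanticPreservingSplitter

# Illustrative input: one header section containing a link and a table.
html_string = """
<h1>Section 1</h1>
<p>See <a href="http://example.com">example.com</a> for details.</p>
<table>
    <tr><td>Row 1</td></tr>
    <tr><td>Row 2</td></tr>
</table>
"""

splitter = HTMLSemanticPreservingSplitter(
    headers_to_split_on=[("h1", "Header 1")],  # header text becomes metadata
    elements_to_preserve=["table"],            # never split inside <table>
    preserve_links=True,                       # <a> tags become [text](url)
    max_chunk_size=50,                         # deliberately small
)

# Each Document carries {"Header 1": "Section 1"} metadata; the table is
# re-inserted whole, so a chunk may exceed max_chunk_size.
for doc in splitter.split_text(html_string):
    print(doc.metadata, doc.page_content)
```

The same pattern extends to `custom_handlers`, as shown in the class docstring in the diff below.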
@@ -33,6 +33,7 @@ from langchain_text_splitters.html import (
     ElementType,
     HTMLHeaderTextSplitter,
     HTMLSectionSplitter,
+    HTMLSemanticPreservingSplitter,
 )
 from langchain_text_splitters.json import RecursiveJsonSplitter
 from langchain_text_splitters.konlpy import KonlpyTextSplitter
@@ -70,6 +71,7 @@ __all__ = [
     "LineType",
     "HTMLHeaderTextSplitter",
     "HTMLSectionSplitter",
+    "HTMLSemanticPreservingSplitter",
    "MarkdownHeaderTextSplitter",
     "MarkdownTextSplitter",
     "CharacterTextSplitter",
@@ -2,11 +2,24 @@ from __future__ import annotations
 
 import copy
 import pathlib
 import re
 from io import BytesIO, StringIO
-from typing import Any, Dict, Iterable, List, Optional, Tuple, TypedDict, cast
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Iterable,
+    List,
+    Optional,
+    Sequence,
+    Tuple,
+    TypedDict,
+    cast,
+)
 
 import requests
-from langchain_core.documents import Document
+from langchain_core._api import beta
+from langchain_core.documents import BaseDocumentTransformer, Document
 
 from langchain_text_splitters.character import RecursiveCharacterTextSplitter
@@ -350,3 +363,484 @@ class HTMLSectionSplitter:
             )
             for section in sections
         ]
+
+
+@beta()
+class HTMLSemanticPreservingSplitter(BaseDocumentTransformer):
+    """Split HTML content preserving semantic structure.
+
+    Splits HTML content by headers into generalized chunks, preserving semantic
+    structure. If chunks exceed the maximum chunk size, it uses
+    RecursiveCharacterTextSplitter for further splitting.
+
+    The splitter preserves full HTML elements (e.g., <table>, <ul>) and converts
+    links to Markdown-like links. It can also preserve images, videos, and audio
+    elements by converting them into Markdown format. Note that some chunks may
+    exceed the maximum size to maintain semantic integrity.
+
+    .. versionadded:: 0.3.5
+
+    Args:
+        headers_to_split_on (List[Tuple[str, str]]): HTML headers (e.g., "h1", "h2")
+            that define content sections.
+        max_chunk_size (int): Maximum size for each chunk, with allowance for
+            exceeding this limit to preserve semantics.
+        chunk_overlap (int): Number of characters to overlap between chunks to ensure
+            contextual continuity.
+        separators (List[str]): Delimiters used by RecursiveCharacterTextSplitter for
+            further splitting.
+        elements_to_preserve (List[str]): HTML tags (e.g., <table>, <ul>) to remain
+            intact during splitting.
+        preserve_links (bool): Converts <a> tags to Markdown links ([text](url)).
+        preserve_images (bool): Converts <img> tags to Markdown images
+            (![image:src](src)).
+        preserve_videos (bool): Converts <video> tags to Markdown
+            video links (![video:src](src)).
+        preserve_audio (bool): Converts <audio> tags to Markdown
+            audio links (![audio:src](src)).
+        custom_handlers (Dict[str, Callable[[Any], str]]): Optional custom handlers for
+            specific HTML tags, allowing tailored extraction or processing.
+        stopword_removal (bool): Optionally remove stopwords from the text.
+        stopword_lang (str): The language of stopwords to remove.
+        normalize_text (bool): Optionally normalize text
+            (e.g., lowercasing, removing punctuation).
+        external_metadata (Optional[Dict[str, str]]): Additional metadata to attach to
+            the Document objects.
+        allowlist_tags (Optional[List[str]]): Only these tags will be retained in
+            the HTML.
+        denylist_tags (Optional[List[str]]): These tags will be removed from the HTML.
+        preserve_parent_metadata (bool): Whether to pass through parent document
+            metadata to split documents when calling
+            ``transform_documents/atransform_documents()``.
+
+    Example:
+        .. code-block:: python
+
+            from langchain_text_splitters.html import HTMLSemanticPreservingSplitter
+
+            def custom_iframe_extractor(iframe_tag):
+                ```
+                Custom handler function to extract the 'src' attribute from an <iframe> tag.
+                Converts the iframe to a Markdown-like link: [iframe:<src>](src).
+
+                Args:
+                    iframe_tag (bs4.element.Tag): The <iframe> tag to be processed.
+
+                Returns:
+                    str: A formatted string representing the iframe in Markdown-like format.
+                ```
+                iframe_src = iframe_tag.get('src', '')
+                return f"[iframe:{iframe_src}]({iframe_src})"
+
+            text_splitter = HTMLSemanticPreservingSplitter(
+                headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")],
+                max_chunk_size=500,
+                preserve_links=True,
+                preserve_images=True,
+                custom_handlers={"iframe": custom_iframe_extractor}
+            )
+    """  # noqa: E501, D214
+
+    def __init__(
+        self,
+        headers_to_split_on: List[Tuple[str, str]],
+        *,
+        max_chunk_size: int = 1000,
+        chunk_overlap: int = 0,
+        separators: Optional[List[str]] = None,
+        elements_to_preserve: Optional[List[str]] = None,
+        preserve_links: bool = False,
+        preserve_images: bool = False,
+        preserve_videos: bool = False,
+        preserve_audio: bool = False,
+        custom_handlers: Optional[Dict[str, Callable[[Any], str]]] = None,
+        stopword_removal: bool = False,
+        stopword_lang: str = "english",
+        normalize_text: bool = False,
+        external_metadata: Optional[Dict[str, str]] = None,
+        allowlist_tags: Optional[List[str]] = None,
+        denylist_tags: Optional[List[str]] = None,
+        preserve_parent_metadata: bool = False,
+    ):
+        """Initialize splitter."""
+        try:
+            from bs4 import BeautifulSoup, Tag
+
+            self._BeautifulSoup = BeautifulSoup
+            self._Tag = Tag
+        except ImportError:
+            raise ImportError(
+                "Could not import BeautifulSoup. "
+                "Please install it with 'pip install bs4'."
+            )
+
+        self._headers_to_split_on = sorted(headers_to_split_on)
+        self._max_chunk_size = max_chunk_size
+        self._elements_to_preserve = elements_to_preserve or []
+        self._preserve_links = preserve_links
+        self._preserve_images = preserve_images
+        self._preserve_videos = preserve_videos
+        self._preserve_audio = preserve_audio
+        self._custom_handlers = custom_handlers or {}
+        self._stopword_removal = stopword_removal
+        self._stopword_lang = stopword_lang
+        self._normalize_text = normalize_text
+        self._external_metadata = external_metadata or {}
+        self._allowlist_tags = allowlist_tags
+        self._preserve_parent_metadata = preserve_parent_metadata
+        if allowlist_tags:
+            self._allowlist_tags = list(
+                set(allowlist_tags + [header[0] for header in headers_to_split_on])
+            )
+        self._denylist_tags = denylist_tags
+        if denylist_tags:
+            self._denylist_tags = [
+                tag
+                for tag in denylist_tags
+                if tag not in [header[0] for header in headers_to_split_on]
+            ]
+        if separators:
+            self._recursive_splitter = RecursiveCharacterTextSplitter(
+                separators=separators,
+                chunk_size=max_chunk_size,
+                chunk_overlap=chunk_overlap,
+            )
+        else:
+            self._recursive_splitter = RecursiveCharacterTextSplitter(
+                chunk_size=max_chunk_size, chunk_overlap=chunk_overlap
+            )
+
+        if self._stopword_removal:
+            try:
+                import nltk  # type: ignore
+                from nltk.corpus import stopwords  # type: ignore
+
+                nltk.download("stopwords")
+                self._stopwords = set(stopwords.words(self._stopword_lang))
+            except ImportError:
+                raise ImportError(
+                    "Could not import nltk. Please install it with 'pip install nltk'."
+                )
+
+    def split_text(self, text: str) -> List[Document]:
+        """Splits the provided HTML text into smaller chunks based on the configuration.
+
+        Args:
+            text (str): The HTML content to be split.
+
+        Returns:
+            List[Document]: A list of Document objects containing the split content.
+        """
+        soup = self._BeautifulSoup(text, "html.parser")
+
+        self._process_media(soup)
+
+        if self._preserve_links:
+            self._process_links(soup)
+
+        if self._allowlist_tags or self._denylist_tags:
+            self._filter_tags(soup)
+
+        return self._process_html(soup)
+
+    def transform_documents(
+        self, documents: Sequence[Document], **kwargs: Any
+    ) -> List[Document]:
+        """Transform sequence of documents by splitting them."""
+        transformed = []
+        for doc in documents:
+            splits = self.split_text(doc.page_content)
+            if self._preserve_parent_metadata:
+                splits = [
+                    Document(
+                        page_content=split_doc.page_content,
+                        metadata={**doc.metadata, **split_doc.metadata},
+                    )
+                    for split_doc in splits
+                ]
+            transformed.extend(splits)
+        return transformed
+
+    def _process_media(self, soup: Any) -> None:
+        """Processes the media elements.
+
+        Process elements in the HTML content by wrapping them in a <media-wrapper> tag
+        and converting them to Markdown format.
+
+        Args:
+            soup (Any): Parsed HTML content using BeautifulSoup.
+        """
+        if self._preserve_images:
+            for img_tag in soup.find_all("img"):
+                img_src = img_tag.get("src", "")
+                markdown_img = f"![image:{img_src}]({img_src})"
+                wrapper = soup.new_tag("media-wrapper")
+                wrapper.string = markdown_img
+                img_tag.replace_with(wrapper)
+
+        if self._preserve_videos:
+            for video_tag in soup.find_all("video"):
+                video_src = video_tag.get("src", "")
+                markdown_video = f"![video:{video_src}]({video_src})"
+                wrapper = soup.new_tag("media-wrapper")
+                wrapper.string = markdown_video
+                video_tag.replace_with(wrapper)
+
+        if self._preserve_audio:
+            for audio_tag in soup.find_all("audio"):
+                audio_src = audio_tag.get("src", "")
+                markdown_audio = f"![audio:{audio_src}]({audio_src})"
+                wrapper = soup.new_tag("media-wrapper")
+                wrapper.string = markdown_audio
+                audio_tag.replace_with(wrapper)
+
+    def _process_links(self, soup: Any) -> None:
+        """Processes the links in the HTML content.
+
+        Args:
+            soup (Any): Parsed HTML content using BeautifulSoup.
+        """
+        for a_tag in soup.find_all("a"):
+            a_href = a_tag.get("href", "")
+            a_text = a_tag.get_text(strip=True)
+            markdown_link = f"[{a_text}]({a_href})"
+            wrapper = soup.new_tag("link-wrapper")
+            wrapper.string = markdown_link
+            a_tag.replace_with(markdown_link)
+
+    def _filter_tags(self, soup: Any) -> None:
+        """Filters the HTML content based on the allowlist and denylist tags.
+
+        Args:
+            soup (Any): Parsed HTML content using BeautifulSoup.
+        """
+        if self._allowlist_tags:
+            for tag in soup.find_all(True):
+                if tag.name not in self._allowlist_tags:
+                    tag.decompose()
+
+        if self._denylist_tags:
+            for tag in soup.find_all(self._denylist_tags):
+                tag.decompose()
+
+    def _normalize_and_clean_text(self, text: str) -> str:
+        """Normalizes the text by removing extra spaces and newlines.
+
+        Args:
+            text (str): The text to be normalized.
+
+        Returns:
+            str: The normalized text.
+        """
+        if self._normalize_text:
+            text = text.lower()
+            text = re.sub(r"[^\w\s]", "", text)
+            text = re.sub(r"\s+", " ", text).strip()
+
+        if self._stopword_removal:
+            text = " ".join(
+                [word for word in text.split() if word not in self._stopwords]
+            )
+
+        return text
+
+    def _process_html(self, soup: Any) -> List[Document]:
+        """Processes the HTML content using BeautifulSoup and splits it using headers.
+
+        Args:
+            soup (Any): Parsed HTML content using BeautifulSoup.
+
+        Returns:
+            List[Document]: A list of Document objects containing the split content.
+        """
+        documents: List[Document] = []
+        current_headers: Dict[str, str] = {}
+        current_content: List[str] = []
+        preserved_elements: Dict[str, str] = {}
+        placeholder_count: int = 0
+
+        def _get_element_text(element: Any) -> str:
+            """Recursively extracts and processes the text of an element.
+
+            Applies custom handlers where applicable, and ensures correct spacing.
+
+            Args:
+                element (Any): The HTML element to process.
+
+            Returns:
+                str: The processed text of the element.
+            """
+            if element.name in self._custom_handlers:
+                return self._custom_handlers[element.name](element)
+
+            text = ""
+
+            if element.name is not None:
+                for child in element.children:
+                    child_text = _get_element_text(child).strip()
+                    if text and child_text:
+                        text += " "
+                    text += child_text
+            elif element.string:
+                text += element.string
+
+            return self._normalize_and_clean_text(text)
+
+        elements = soup.find_all(recursive=False)
+
+        def _process_element(
+            element: List[Any],
+            documents: List[Document],
+            current_headers: Dict[str, str],
+            current_content: List[str],
+            preserved_elements: Dict[str, str],
+            placeholder_count: int,
+        ) -> Tuple[List[Document], Dict[str, str], List[str], Dict[str, str], int]:
+            for elem in element:
+                if elem.name.lower() in ["html", "body", "div"]:
+                    children = elem.find_all(recursive=False)
+                    (
+                        documents,
+                        current_headers,
+                        current_content,
+                        preserved_elements,
+                        placeholder_count,
+                    ) = _process_element(
+                        children,
+                        documents,
+                        current_headers,
+                        current_content,
+                        preserved_elements,
+                        placeholder_count,
+                    )
+                    continue
+
+                if elem.name in [h[0] for h in self._headers_to_split_on]:
+                    if current_content:
+                        documents.extend(
+                            self._create_documents(
+                                current_headers,
+                                " ".join(current_content),
+                                preserved_elements,
+                            )
+                        )
+                        current_content.clear()
+                        preserved_elements.clear()
+                    header_name = elem.get_text(strip=True)
+                    current_headers = {
+                        dict(self._headers_to_split_on)[elem.name]: header_name
+                    }
+                elif elem.name in self._elements_to_preserve:
+                    placeholder = f"PRESERVED_{placeholder_count}"
+                    preserved_elements[placeholder] = _get_element_text(elem)
+                    current_content.append(placeholder)
+                    placeholder_count += 1
+                else:
+                    content = _get_element_text(elem)
+                    if content:
+                        current_content.append(content)
+
+            return (
+                documents,
+                current_headers,
+                current_content,
+                preserved_elements,
+                placeholder_count,
+            )
+
+        # Process the elements
+        (
+            documents,
+            current_headers,
+            current_content,
+            preserved_elements,
+            placeholder_count,
+        ) = _process_element(
+            elements,
+            documents,
+            current_headers,
+            current_content,
+            preserved_elements,
+            placeholder_count,
+        )
+
+        # Handle any remaining content
+        if current_content:
+            documents.extend(
+                self._create_documents(
+                    current_headers, " ".join(current_content), preserved_elements
+                )
+            )
+
+        return documents
+
+    def _create_documents(
+        self, headers: dict, content: str, preserved_elements: dict
+    ) -> List[Document]:
+        """Creates Document objects from the provided headers, content, and elements.
+
+        Args:
+            headers (dict): The headers to attach as metadata to the Document.
+            content (str): The content of the Document.
+            preserved_elements (dict): Preserved elements to be reinserted
+                into the content.
+
+        Returns:
+            List[Document]: A list of Document objects.
+        """
+        content = re.sub(r"\s+", " ", content).strip()
+
+        metadata = {**headers, **self._external_metadata}
+
+        if len(content) <= self._max_chunk_size:
+            page_content = self._reinsert_preserved_elements(
+                content, preserved_elements
+            )
+            return [Document(page_content=page_content, metadata=metadata)]
+        else:
+            return self._further_split_chunk(content, metadata, preserved_elements)
+
+    def _further_split_chunk(
+        self, content: str, metadata: dict, preserved_elements: dict
+    ) -> List[Document]:
+        """Further splits the content into smaller chunks.
+
+        Args:
+            content (str): The content to be split.
+            metadata (dict): Metadata to attach to each chunk.
+            preserved_elements (dict): Preserved elements
+                to be reinserted into each chunk.
+
+        Returns:
+            List[Document]: A list of Document objects containing the split content.
+        """
+        splits = self._recursive_splitter.split_text(content)
+        result = []
+
+        for split in splits:
+            split_with_preserved = self._reinsert_preserved_elements(
+                split, preserved_elements
+            )
+            if split_with_preserved.strip():
+                result.append(
+                    Document(
+                        page_content=split_with_preserved.strip(), metadata=metadata
+                    )
+                )
+
+        return result
+
+    def _reinsert_preserved_elements(
+        self, content: str, preserved_elements: dict
+    ) -> str:
+        """Reinserts preserved elements into their original positions in the content.
+
+        Args:
+            content (str): The content where placeholders need to be replaced.
+            preserved_elements (dict): Preserved elements to be reinserted.
+
+        Returns:
+            str: The content with placeholders replaced by preserved elements.
+        """
+        for placeholder, preserved_content in preserved_elements.items():
+            content = content.replace(placeholder, preserved_content.strip())
+        return content
@@ -17,7 +17,11 @@ from langchain_text_splitters import (
 )
 from langchain_text_splitters.base import split_text_on_tokens
 from langchain_text_splitters.character import CharacterTextSplitter
-from langchain_text_splitters.html import HTMLHeaderTextSplitter, HTMLSectionSplitter
+from langchain_text_splitters.html import (
+    HTMLHeaderTextSplitter,
+    HTMLSectionSplitter,
+    HTMLSemanticPreservingSplitter,
+)
 from langchain_text_splitters.json import RecursiveJsonSplitter
 from langchain_text_splitters.markdown import (
     ExperimentalMarkdownSyntaxTextSplitter,
@@ -2452,3 +2456,360 @@ $csvContent | ForEach-Object {
     "$csvContent | ForEach-Object {\n $_.ProcessName\n}",
     "# End of script",
 ]
+
+
+def custom_iframe_extractor(iframe_tag: Any) -> str:
+    iframe_src = iframe_tag.get("src", "")
+    return f"[iframe:{iframe_src}]({iframe_src})"
+
+
+@pytest.mark.requires("bs4")
+def test_html_splitter_with_custom_extractor() -> None:
+    """Test HTML splitting with a custom extractor."""
+    html_content = """
+    <h1>Section 1</h1>
+    <p>This is an iframe:</p>
+    <iframe src="http://example.com"></iframe>
+    """
+    splitter = HTMLSemanticPreservingSplitter(
+        headers_to_split_on=[("h1", "Header 1")],
+        custom_handlers={"iframe": custom_iframe_extractor},
+        max_chunk_size=1000,
+    )
+    documents = splitter.split_text(html_content)
+
+    expected = [
+        Document(
+            page_content="This is an iframe: [iframe:http://example.com](http://example.com)",
+            metadata={"Header 1": "Section 1"},
+        ),
+    ]
+
+    assert documents == expected
+
+
+@pytest.mark.requires("bs4")
+def test_html_splitter_with_href_links() -> None:
+    """Test HTML splitting with href links."""
+    html_content = """
+    <h1>Section 1</h1>
+    <p>This is a link to <a href="http://example.com">example.com</a></p>
+    """
+    splitter = HTMLSemanticPreservingSplitter(
+        headers_to_split_on=[("h1", "Header 1")],
+        preserve_links=True,
+        max_chunk_size=1000,
+    )
+    documents = splitter.split_text(html_content)
+
+    expected = [
+        Document(
+            page_content="This is a link to [example.com](http://example.com)",
+            metadata={"Header 1": "Section 1"},
+        ),
+    ]
+
+    assert documents == expected
+
+
+@pytest.mark.requires("bs4")
+def test_html_splitter_with_nested_elements() -> None:
+    """Test HTML splitting with nested elements."""
+    html_content = """
+    <h1>Main Section</h1>
+    <div>
+        <p>Some text here.</p>
+        <div>
+            <p>Nested content.</p>
+        </div>
+    </div>
+    """
+    splitter = HTMLSemanticPreservingSplitter(
+        headers_to_split_on=[("h1", "Header 1")], max_chunk_size=1000
+    )
+    documents = splitter.split_text(html_content)
+
+    expected = [
+        Document(
+            page_content="Some text here. Nested content.",
+            metadata={"Header 1": "Main Section"},
+        ),
+    ]
+
+    assert documents == expected
+
+
+@pytest.mark.requires("bs4")
+def test_html_splitter_with_preserved_elements() -> None:
+    """Test HTML splitting with preserved elements like <table>, <ul> with low chunk
+    size."""
+    html_content = """
+    <h1>Section 1</h1>
+    <table>
+        <tr><td>Row 1</td></tr>
+        <tr><td>Row 2</td></tr>
+    </table>
+    <ul>
+        <li>Item 1</li>
+        <li>Item 2</li>
+    </ul>
+    """
+    splitter = HTMLSemanticPreservingSplitter(
+        headers_to_split_on=[("h1", "Header 1")],
+        elements_to_preserve=["table", "ul"],
+        max_chunk_size=50,  # Deliberately low to test preservation
+    )
+    documents = splitter.split_text(html_content)
+
+    expected = [
+        Document(
+            page_content="Row 1 Row 2 Item 1 Item 2",
+            metadata={"Header 1": "Section 1"},
+        ),
+    ]
+
+    assert documents == expected  # Shouldn't split the table or ul
+
+
+@pytest.mark.requires("bs4")
+def test_html_splitter_with_no_further_splits() -> None:
+    """Test HTML splitting that requires no further splits beyond sections."""
+    html_content = """
+    <h1>Section 1</h1>
+    <p>Some content here.</p>
+    <h1>Section 2</h1>
+    <p>More content here.</p>
+    """
+    splitter = HTMLSemanticPreservingSplitter(
+        headers_to_split_on=[("h1", "Header 1")], max_chunk_size=1000
+    )
+    documents = splitter.split_text(html_content)
+
+    expected = [
+        Document(page_content="Some content here.", metadata={"Header 1": "Section 1"}),
+        Document(page_content="More content here.", metadata={"Header 1": "Section 2"}),
+    ]
+
+    assert documents == expected  # No further splits, just sections
+
+
+@pytest.mark.requires("bs4")
+def test_html_splitter_with_small_chunk_size() -> None:
+    """Test HTML splitting with a very small chunk size to validate chunking."""
+    html_content = """
+    <h1>Section 1</h1>
+    <p>This is some long text that should be split into multiple chunks due to the
+    small chunk size.</p>
+    """
+    splitter = HTMLSemanticPreservingSplitter(
+        headers_to_split_on=[("h1", "Header 1")], max_chunk_size=20, chunk_overlap=5
+    )
+    documents = splitter.split_text(html_content)
+
+    expected = [
+        Document(page_content="This is some long", metadata={"Header 1": "Section 1"}),
+        Document(page_content="long text that", metadata={"Header 1": "Section 1"}),
+        Document(page_content="that should be", metadata={"Header 1": "Section 1"}),
+        Document(page_content="be split into", metadata={"Header 1": "Section 1"}),
+        Document(page_content="into multiple", metadata={"Header 1": "Section 1"}),
+        Document(page_content="chunks due to the", metadata={"Header 1": "Section 1"}),
+        Document(page_content="the small chunk", metadata={"Header 1": "Section 1"}),
+        Document(page_content="size.", metadata={"Header 1": "Section 1"}),
+    ]
+
+    assert documents == expected  # Should split into multiple chunks
+
+
+@pytest.mark.requires("bs4")
+def test_html_splitter_with_denylist_tags() -> None:
+    """Test HTML splitting with denylist tag filtering."""
+    html_content = """
+    <h1>Section 1</h1>
+    <p>This paragraph should be kept.</p>
+    <span>This span should be removed.</span>
+    """
+    splitter = HTMLSemanticPreservingSplitter(
+        headers_to_split_on=[("h1", "Header 1")],
+        denylist_tags=["span"],
+        max_chunk_size=1000,
+    )
+    documents = splitter.split_text(html_content)
+
+    expected = [
+        Document(
+            page_content="This paragraph should be kept.",
+            metadata={"Header 1": "Section 1"},
+        ),
+    ]
+
+    assert documents == expected
+
+
+@pytest.mark.requires("bs4")
+def test_html_splitter_with_external_metadata() -> None:
+    """Test HTML splitting with external metadata integration."""
+    html_content = """
+    <h1>Section 1</h1>
+    <p>This is some content.</p>
+    """
+    splitter = HTMLSemanticPreservingSplitter(
+        headers_to_split_on=[("h1", "Header 1")],
+        external_metadata={"source": "example.com"},
+        max_chunk_size=1000,
+    )
+    documents = splitter.split_text(html_content)
+
+    expected = [
+        Document(
+            page_content="This is some content.",
+            metadata={"Header 1": "Section 1", "source": "example.com"},
+        ),
+    ]
+
+    assert documents == expected
+
+
+@pytest.mark.requires("bs4")
+def test_html_splitter_with_text_normalization() -> None:
+    """Test HTML splitting with text normalization."""
+    html_content = """
+    <h1>Section 1</h1>
+    <p>This is some TEXT that should be normalized!</p>
+    """
+    splitter = HTMLSemanticPreservingSplitter(
+        headers_to_split_on=[("h1", "Header 1")],
+        normalize_text=True,
+        max_chunk_size=1000,
+    )
+    documents = splitter.split_text(html_content)
+
+    expected = [
+        Document(
+            page_content="this is some text that should be normalized",
+            metadata={"Header 1": "Section 1"},
+        ),
+    ]
+
+    assert documents == expected
+
+
+@pytest.mark.requires("bs4")
+def test_html_splitter_with_allowlist_tags() -> None:
+    """Test HTML splitting with allowlist tag filtering."""
+    html_content = """
+    <h1>Section 1</h1>
+    <p>This paragraph should be kept.</p>
+    <span>This span should be kept.</span>
+    <div>This div should be removed.</div>
+    """
+    splitter = HTMLSemanticPreservingSplitter(
+        headers_to_split_on=[("h1", "Header 1")],
+        allowlist_tags=["p", "span"],
+        max_chunk_size=1000,
+    )
+    documents = splitter.split_text(html_content)
+
+    expected = [
+        Document(
+            page_content="This paragraph should be kept. This span should be kept.",
+            metadata={"Header 1": "Section 1"},
+        ),
+    ]
+
+    assert documents == expected
+
+
+@pytest.mark.requires("bs4")
+def test_html_splitter_with_mixed_preserve_and_filter() -> None:
+    """Test HTML splitting with both preserved elements and denylist tags."""
+    html_content = """
+    <h1>Section 1</h1>
+    <table>
+        <tr>
+            <td>Keep this table</td>
+            <td>Cell contents kept, span removed
+                <span>This span should be removed.</span>
+            </td>
+        </tr>
+    </table>
+    <p>This paragraph should be kept.</p>
+    <span>This span should be removed.</span>
+    """
+    splitter = HTMLSemanticPreservingSplitter(
+        headers_to_split_on=[("h1", "Header 1")],
+        elements_to_preserve=["table"],
+        denylist_tags=["span"],
+        max_chunk_size=1000,
+    )
+    documents = splitter.split_text(html_content)
+
+    expected = [
+        Document(
+            page_content="Keep this table Cell contents kept, span removed"
+            " This paragraph should be kept.",
+            metadata={"Header 1": "Section 1"},
+        ),
+    ]
+
+    assert documents == expected
+
+
+@pytest.mark.requires("bs4")
+def test_html_splitter_with_no_headers() -> None:
+    """Test HTML splitting when there are no headers to split on."""
+    html_content = """
+    <p>This is content without any headers.</p>
+    <p>It should still produce a valid document.</p>
+    """
+    splitter = HTMLSemanticPreservingSplitter(
+        headers_to_split_on=[],
+        max_chunk_size=1000,
+    )
+    documents = splitter.split_text(html_content)
+
+    expected = [
+        Document(
+            page_content="This is content without any headers. It should still produce"
+            " a valid document.",
+            metadata={},
+        ),
+    ]
+
+    assert documents == expected
+
+
+@pytest.mark.requires("bs4")
+def test_html_splitter_with_media_preservation() -> None:
+    """Test HTML splitting with media elements preserved and converted to
+    Markdown-like links."""
+    html_content = """
+    <h1>Section 1</h1>
+    <p>This is an image:</p>
+    <img src="http://example.com/image.png" />
+    <p>This is a video:</p>
+    <video src="http://example.com/video.mp4"></video>
+    <p>This is audio:</p>
+    <audio src="http://example.com/audio.mp3"></audio>
+    """
+    splitter = HTMLSemanticPreservingSplitter(
+        headers_to_split_on=[("h1", "Header 1")],
+        preserve_images=True,
+        preserve_videos=True,
+        preserve_audio=True,
+        max_chunk_size=1000,
+    )
+    documents = splitter.split_text(html_content)
+
+    expected = [
+        Document(
+            page_content="This is an image: ![image:http://example.com/image.png]"
+            "(http://example.com/image.png) "
+            "This is a video: ![video:http://example.com/video.mp4]"
+            "(http://example.com/video.mp4) "
+            "This is audio: ![audio:http://example.com/audio.mp3]"
+            "(http://example.com/audio.mp3)",
+            metadata={"Header 1": "Section 1"},
+        ),
+    ]
+
+    assert documents == expected