",
]
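# MarkdownHeaderTextSplitter joins the lines of a section with " \n", which is
# why the expected page_content values below read like "Hi this is Jim \nHi this is Joe".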
def test_md_header_text_splitter_1() -> None:
"""Test markdown splitter by header: Case 1."""
markdown_document = (
"# Foo\n\n"
" ## Bar\n\n"
"Hi this is Jim\n\n"
"Hi this is Joe\n\n"
" ## Baz\n\n"
" Hi this is Molly"
)
headers_to_split_on = [
("#", "Header 1"),
("##", "Header 2"),
]
markdown_splitter = MarkdownHeaderTextSplitter(
headers_to_split_on=headers_to_split_on,
)
output = markdown_splitter.split_text(markdown_document)
expected_output = [
Document(
page_content="Hi this is Jim \nHi this is Joe",
metadata={"Header 1": "Foo", "Header 2": "Bar"},
),
Document(
page_content="Hi this is Molly",
metadata={"Header 1": "Foo", "Header 2": "Baz"},
),
]
assert output == expected_output
def test_md_header_text_splitter_2() -> None:
"""Test markdown splitter by header: Case 2."""
markdown_document = (
"# Foo\n\n"
" ## Bar\n\n"
"Hi this is Jim\n\n"
"Hi this is Joe\n\n"
" ### Boo \n\n"
" Hi this is Lance \n\n"
" ## Baz\n\n"
" Hi this is Molly"
)
headers_to_split_on = [
("#", "Header 1"),
("##", "Header 2"),
("###", "Header 3"),
]
markdown_splitter = MarkdownHeaderTextSplitter(
headers_to_split_on=headers_to_split_on,
)
output = markdown_splitter.split_text(markdown_document)
expected_output = [
Document(
page_content="Hi this is Jim \nHi this is Joe",
metadata={"Header 1": "Foo", "Header 2": "Bar"},
),
Document(
page_content="Hi this is Lance",
metadata={"Header 1": "Foo", "Header 2": "Bar", "Header 3": "Boo"},
),
Document(
page_content="Hi this is Molly",
metadata={"Header 1": "Foo", "Header 2": "Baz"},
),
]
assert output == expected_output
def test_md_header_text_splitter_3() -> None:
"""Test markdown splitter by header: Case 3."""
markdown_document = (
"# Foo\n\n"
" ## Bar\n\n"
"Hi this is Jim\n\n"
"Hi this is Joe\n\n"
" ### Boo \n\n"
" Hi this is Lance \n\n"
" #### Bim \n\n"
" Hi this is John \n\n"
" ## Baz\n\n"
" Hi this is Molly"
)
headers_to_split_on = [
("#", "Header 1"),
("##", "Header 2"),
("###", "Header 3"),
("####", "Header 4"),
]
markdown_splitter = MarkdownHeaderTextSplitter(
headers_to_split_on=headers_to_split_on,
)
output = markdown_splitter.split_text(markdown_document)
expected_output = [
Document(
page_content="Hi this is Jim \nHi this is Joe",
metadata={"Header 1": "Foo", "Header 2": "Bar"},
),
Document(
page_content="Hi this is Lance",
metadata={"Header 1": "Foo", "Header 2": "Bar", "Header 3": "Boo"},
),
Document(
page_content="Hi this is John",
metadata={
"Header 1": "Foo",
"Header 2": "Bar",
"Header 3": "Boo",
"Header 4": "Bim",
},
),
Document(
page_content="Hi this is Molly",
metadata={"Header 1": "Foo", "Header 2": "Baz"},
),
]
assert output == expected_output
def test_md_header_text_splitter_preserve_headers_1() -> None:
"""Test markdown splitter by header: Preserve Headers."""
markdown_document = (
"# Foo\n\n"
" ## Bat\n\n"
"Hi this is Jim\n\n"
"Hi Joe\n\n"
"## Baz\n\n"
"# Bar\n\n"
"This is Alice\n\n"
"This is Bob"
)
headers_to_split_on = [
("#", "Header 1"),
]
markdown_splitter = MarkdownHeaderTextSplitter(
headers_to_split_on=headers_to_split_on,
strip_headers=False,
)
output = markdown_splitter.split_text(markdown_document)
expected_output = [
Document(
page_content="# Foo \n## Bat \nHi this is Jim \nHi Joe \n## Baz",
metadata={"Header 1": "Foo"},
),
Document(
page_content="# Bar \nThis is Alice \nThis is Bob",
metadata={"Header 1": "Bar"},
),
]
assert output == expected_output
def test_md_header_text_splitter_preserve_headers_2() -> None:
"""Test markdown splitter by header: Preserve Headers."""
markdown_document = (
"# Foo\n\n"
" ## Bar\n\n"
"Hi this is Jim\n\n"
"Hi this is Joe\n\n"
"### Boo \n\n"
"Hi this is Lance\n\n"
"## Baz\n\n"
"Hi this is Molly\n"
" ## Buz\n"
"# Bop"
)
headers_to_split_on = [
("#", "Header 1"),
("##", "Header 2"),
("###", "Header 3"),
]
markdown_splitter = MarkdownHeaderTextSplitter(
headers_to_split_on=headers_to_split_on,
strip_headers=False,
)
output = markdown_splitter.split_text(markdown_document)
expected_output = [
Document(
page_content="# Foo \n## Bar \nHi this is Jim \nHi this is Joe",
metadata={"Header 1": "Foo", "Header 2": "Bar"},
),
Document(
page_content="### Boo \nHi this is Lance",
metadata={"Header 1": "Foo", "Header 2": "Bar", "Header 3": "Boo"},
),
Document(
page_content="## Baz \nHi this is Molly",
metadata={"Header 1": "Foo", "Header 2": "Baz"},
),
Document(
page_content="## Buz",
metadata={"Header 1": "Foo", "Header 2": "Buz"},
),
Document(page_content="# Bop", metadata={"Header 1": "Bop"}),
]
assert output == expected_output
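# The fenced code block tests below check that "#" lines inside a fence are not
# treated as headers; both backtick and tilde fences are covered.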
@pytest.mark.parametrize("fence", [("```"), ("~~~")])
def test_md_header_text_splitter_fenced_code_block(fence: str) -> None:
"""Test markdown splitter by header: Fenced code block."""
markdown_document = (
f"# This is a Header\n\n{fence}\nfoo()\n# Not a header\nbar()\n{fence}"
)
headers_to_split_on = [
("#", "Header 1"),
("##", "Header 2"),
]
markdown_splitter = MarkdownHeaderTextSplitter(
headers_to_split_on=headers_to_split_on,
)
output = markdown_splitter.split_text(markdown_document)
expected_output = [
Document(
page_content=f"{fence}\nfoo()\n# Not a header\nbar()\n{fence}",
metadata={"Header 1": "This is a Header"},
),
]
assert output == expected_output
@pytest.mark.parametrize(("fence", "other_fence"), [("```", "~~~"), ("~~~", "```")])
def test_md_header_text_splitter_fenced_code_block_interleaved(
fence: str, other_fence: str
) -> None:
"""Test markdown splitter by header: Interleaved fenced code block."""
markdown_document = (
"# This is a Header\n\n"
f"{fence}\n"
"foo\n"
"# Not a header\n"
f"{other_fence}\n"
"# Not a header\n"
f"{fence}"
)
headers_to_split_on = [
("#", "Header 1"),
("##", "Header 2"),
]
markdown_splitter = MarkdownHeaderTextSplitter(
headers_to_split_on=headers_to_split_on,
)
output = markdown_splitter.split_text(markdown_document)
expected_output = [
Document(
page_content=(
f"{fence}\nfoo\n# Not a header\n{other_fence}\n# Not a header\n{fence}"
),
metadata={"Header 1": "This is a Header"},
),
]
assert output == expected_output
@pytest.mark.parametrize("characters", ["\ufeff"])
def test_md_header_text_splitter_with_invisible_characters(characters: str) -> None:
"""Test markdown splitter by header: Fenced code block."""
markdown_document = f"{characters}# Foo\n\nfoo()\n{characters}## Bar\n\nbar()"
headers_to_split_on = [
("#", "Header 1"),
("##", "Header 2"),
]
markdown_splitter = MarkdownHeaderTextSplitter(
headers_to_split_on=headers_to_split_on,
)
output = markdown_splitter.split_text(markdown_document)
expected_output = [
Document(
page_content="foo()",
metadata={"Header 1": "Foo"},
),
Document(
page_content="bar()",
metadata={"Header 1": "Foo", "Header 2": "Bar"},
),
]
assert output == expected_output
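# Shared fixture for the ExperimentalMarkdownSyntaxTextSplitter tests: nested
# headers, a repeated header level, a fenced Python code block, and a horizontal
# rule ("----") that should start a new chunk under the same header metadata.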
EXPERIMENTAL_MARKDOWN_DOCUMENT = (
"# My Header 1\n"
"Content for header 1\n"
"## Header 2\n"
"Content for header 2\n"
"### Header 3\n"
"Content for header 3\n"
"## Header 2 Again\n"
"This should be tagged with Header 1 and Header 2 Again\n"
"```python\n"
"def func_definition():\n"
" print('Keep the whitespace consistent')\n"
"```\n"
"# Header 1 again\n"
"We should also split on the horizontal line\n"
"----\n"
"This will be a new doc but with the same header metadata\n\n"
"And it includes a new paragraph"
)
def test_experimental_markdown_syntax_text_splitter() -> None:
"""Test experimental markdown syntax splitter."""
markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter()
output = markdown_splitter.split_text(EXPERIMENTAL_MARKDOWN_DOCUMENT)
expected_output = [
Document(
page_content="Content for header 1\n",
metadata={"Header 1": "My Header 1"},
),
Document(
page_content="Content for header 2\n",
metadata={"Header 1": "My Header 1", "Header 2": "Header 2"},
),
Document(
page_content="Content for header 3\n",
metadata={
"Header 1": "My Header 1",
"Header 2": "Header 2",
"Header 3": "Header 3",
},
),
Document(
page_content="This should be tagged with Header 1 and Header 2 Again\n",
metadata={"Header 1": "My Header 1", "Header 2": "Header 2 Again"},
),
Document(
page_content=(
"```python\ndef func_definition():\n "
"print('Keep the whitespace consistent')\n```\n"
),
metadata={
"Code": "python",
"Header 1": "My Header 1",
"Header 2": "Header 2 Again",
},
),
Document(
page_content="We should also split on the horizontal line\n",
metadata={"Header 1": "Header 1 again"},
),
Document(
page_content=(
"This will be a new doc but with the same header metadata\n\n"
"And it includes a new paragraph"
),
metadata={"Header 1": "Header 1 again"},
),
]
assert output == expected_output
def test_experimental_markdown_syntax_text_splitter_header_configuration() -> None:
"""Test experimental markdown syntax splitter."""
headers_to_split_on = [("#", "Encabezamiento 1")]
markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter(
headers_to_split_on=headers_to_split_on
)
output = markdown_splitter.split_text(EXPERIMENTAL_MARKDOWN_DOCUMENT)
expected_output = [
Document(
page_content=(
"Content for header 1\n"
"## Header 2\n"
"Content for header 2\n"
"### Header 3\n"
"Content for header 3\n"
"## Header 2 Again\n"
"This should be tagged with Header 1 and Header 2 Again\n"
),
metadata={"Encabezamiento 1": "My Header 1"},
),
Document(
page_content=(
"```python\ndef func_definition():\n "
"print('Keep the whitespace consistent')\n```\n"
),
metadata={"Code": "python", "Encabezamiento 1": "My Header 1"},
),
Document(
page_content="We should also split on the horizontal line\n",
metadata={"Encabezamiento 1": "Header 1 again"},
),
Document(
page_content=(
"This will be a new doc but with the same header metadata\n\n"
"And it includes a new paragraph"
),
metadata={"Encabezamiento 1": "Header 1 again"},
),
]
assert output == expected_output
def test_experimental_markdown_syntax_text_splitter_with_headers() -> None:
"""Test experimental markdown syntax splitter."""
markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter(strip_headers=False)
output = markdown_splitter.split_text(EXPERIMENTAL_MARKDOWN_DOCUMENT)
expected_output = [
Document(
page_content="# My Header 1\nContent for header 1\n",
metadata={"Header 1": "My Header 1"},
),
Document(
page_content="## Header 2\nContent for header 2\n",
metadata={"Header 1": "My Header 1", "Header 2": "Header 2"},
),
Document(
page_content="### Header 3\nContent for header 3\n",
metadata={
"Header 1": "My Header 1",
"Header 2": "Header 2",
"Header 3": "Header 3",
},
),
Document(
page_content=(
"## Header 2 Again\n"
"This should be tagged with Header 1 and Header 2 Again\n"
),
metadata={"Header 1": "My Header 1", "Header 2": "Header 2 Again"},
),
Document(
page_content=(
"```python\ndef func_definition():\n "
"print('Keep the whitespace consistent')\n```\n"
),
metadata={
"Code": "python",
"Header 1": "My Header 1",
"Header 2": "Header 2 Again",
},
),
Document(
page_content=(
"# Header 1 again\nWe should also split on the horizontal line\n"
),
metadata={"Header 1": "Header 1 again"},
),
Document(
page_content=(
"This will be a new doc but with the same header metadata\n\n"
"And it includes a new paragraph"
),
metadata={"Header 1": "Header 1 again"},
),
]
assert output == expected_output
def test_experimental_markdown_syntax_text_splitter_split_lines() -> None:
"""Test experimental markdown syntax splitter."""
markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter(return_each_line=True)
output = markdown_splitter.split_text(EXPERIMENTAL_MARKDOWN_DOCUMENT)
expected_output = [
Document(
page_content="Content for header 1", metadata={"Header 1": "My Header 1"}
),
Document(
page_content="Content for header 2",
metadata={"Header 1": "My Header 1", "Header 2": "Header 2"},
),
Document(
page_content="Content for header 3",
metadata={
"Header 1": "My Header 1",
"Header 2": "Header 2",
"Header 3": "Header 3",
},
),
Document(
page_content="This should be tagged with Header 1 and Header 2 Again",
metadata={"Header 1": "My Header 1", "Header 2": "Header 2 Again"},
),
Document(
page_content="```python",
metadata={
"Code": "python",
"Header 1": "My Header 1",
"Header 2": "Header 2 Again",
},
),
Document(
page_content="def func_definition():",
metadata={
"Code": "python",
"Header 1": "My Header 1",
"Header 2": "Header 2 Again",
},
),
Document(
page_content=" print('Keep the whitespace consistent')",
metadata={
"Code": "python",
"Header 1": "My Header 1",
"Header 2": "Header 2 Again",
},
),
Document(
page_content="```",
metadata={
"Code": "python",
"Header 1": "My Header 1",
"Header 2": "Header 2 Again",
},
),
Document(
page_content="We should also split on the horizontal line",
metadata={"Header 1": "Header 1 again"},
),
Document(
page_content="This will be a new doc but with the same header metadata",
metadata={"Header 1": "Header 1 again"},
),
Document(
page_content="And it includes a new paragraph",
metadata={"Header 1": "Header 1 again"},
),
]
assert output == expected_output
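# Two similar documents used to check that consecutive split_text() calls on the
# same splitter instance do not leak headers or content between files.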
EXPERIMENTAL_MARKDOWN_DOCUMENTS = [
(
"# My Header 1 From Document 1\n"
"Content for header 1 from Document 1\n"
"## Header 2 From Document 1\n"
"Content for header 2 from Document 1\n"
"```python\n"
"def func_definition():\n"
" print('Keep the whitespace consistent')\n"
"```\n"
"# Header 1 again From Document 1\n"
"We should also split on the horizontal line\n"
"----\n"
"This will be a new doc but with the same header metadata\n\n"
"And it includes a new paragraph"
),
(
"# My Header 1 From Document 2\n"
"Content for header 1 from Document 2\n"
"## Header 2 From Document 2\n"
"Content for header 2 from Document 2\n"
"```python\n"
"def func_definition():\n"
" print('Keep the whitespace consistent')\n"
"```\n"
"# Header 1 again From Document 2\n"
"We should also split on the horizontal line\n"
"----\n"
"This will be a new doc but with the same header metadata\n\n"
"And it includes a new paragraph"
),
]
def test_experimental_markdown_syntax_text_splitter_on_multi_files() -> None:
"""Test experimental markdown syntax splitter split
on default called consecutively on two files."""
markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter()
output = []
for experimental_markdown_document in EXPERIMENTAL_MARKDOWN_DOCUMENTS:
output += markdown_splitter.split_text(experimental_markdown_document)
expected_output = [
Document(
page_content="Content for header 1 from Document 1\n",
metadata={"Header 1": "My Header 1 From Document 1"},
),
Document(
page_content="Content for header 2 from Document 1\n",
metadata={
"Header 1": "My Header 1 From Document 1",
"Header 2": "Header 2 From Document 1",
},
),
Document(
page_content=(
"```python\ndef func_definition():\n "
"print('Keep the whitespace consistent')\n```\n"
),
metadata={
"Code": "python",
"Header 1": "My Header 1 From Document 1",
"Header 2": "Header 2 From Document 1",
},
),
Document(
page_content="We should also split on the horizontal line\n",
metadata={"Header 1": "Header 1 again From Document 1"},
),
Document(
page_content=(
"This will be a new doc but with the same header metadata\n\n"
"And it includes a new paragraph"
),
metadata={"Header 1": "Header 1 again From Document 1"},
),
Document(
page_content="Content for header 1 from Document 2\n",
metadata={"Header 1": "My Header 1 From Document 2"},
),
Document(
page_content="Content for header 2 from Document 2\n",
metadata={
"Header 1": "My Header 1 From Document 2",
"Header 2": "Header 2 From Document 2",
},
),
Document(
page_content=(
"```python\ndef func_definition():\n "
"print('Keep the whitespace consistent')\n```\n"
),
metadata={
"Code": "python",
"Header 1": "My Header 1 From Document 2",
"Header 2": "Header 2 From Document 2",
},
),
Document(
page_content="We should also split on the horizontal line\n",
metadata={"Header 1": "Header 1 again From Document 2"},
),
Document(
page_content=(
"This will be a new doc but with the same header metadata\n\n"
"And it includes a new paragraph"
),
metadata={"Header 1": "Header 1 again From Document 2"},
),
]
assert output == expected_output
def test_experimental_markdown_syntax_text_splitter_split_lines_on_multi_files() -> (
None
):
"""Test experimental markdown syntax splitter split
on each line called consecutively on two files."""
markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter(return_each_line=True)
output = []
for experimental_markdown_document in EXPERIMENTAL_MARKDOWN_DOCUMENTS:
output += markdown_splitter.split_text(experimental_markdown_document)
expected_output = [
Document(
page_content="Content for header 1 from Document 1",
metadata={"Header 1": "My Header 1 From Document 1"},
),
Document(
page_content="Content for header 2 from Document 1",
metadata={
"Header 1": "My Header 1 From Document 1",
"Header 2": "Header 2 From Document 1",
},
),
Document(
page_content="```python",
metadata={
"Code": "python",
"Header 1": "My Header 1 From Document 1",
"Header 2": "Header 2 From Document 1",
},
),
Document(
page_content="def func_definition():",
metadata={
"Code": "python",
"Header 1": "My Header 1 From Document 1",
"Header 2": "Header 2 From Document 1",
},
),
Document(
page_content=" print('Keep the whitespace consistent')",
metadata={
"Code": "python",
"Header 1": "My Header 1 From Document 1",
"Header 2": "Header 2 From Document 1",
},
),
Document(
page_content="```",
metadata={
"Code": "python",
"Header 1": "My Header 1 From Document 1",
"Header 2": "Header 2 From Document 1",
},
),
Document(
page_content="We should also split on the horizontal line",
metadata={"Header 1": "Header 1 again From Document 1"},
),
Document(
page_content="This will be a new doc but with the same header metadata",
metadata={"Header 1": "Header 1 again From Document 1"},
),
Document(
page_content="And it includes a new paragraph",
metadata={"Header 1": "Header 1 again From Document 1"},
),
Document(
page_content="Content for header 1 from Document 2",
metadata={"Header 1": "My Header 1 From Document 2"},
),
Document(
page_content="Content for header 2 from Document 2",
metadata={
"Header 1": "My Header 1 From Document 2",
"Header 2": "Header 2 From Document 2",
},
),
Document(
page_content="```python",
metadata={
"Code": "python",
"Header 1": "My Header 1 From Document 2",
"Header 2": "Header 2 From Document 2",
},
),
Document(
page_content="def func_definition():",
metadata={
"Code": "python",
"Header 1": "My Header 1 From Document 2",
"Header 2": "Header 2 From Document 2",
},
),
Document(
page_content=" print('Keep the whitespace consistent')",
metadata={
"Code": "python",
"Header 1": "My Header 1 From Document 2",
"Header 2": "Header 2 From Document 2",
},
),
Document(
page_content="```",
metadata={
"Code": "python",
"Header 1": "My Header 1 From Document 2",
"Header 2": "Header 2 From Document 2",
},
),
Document(
page_content="We should also split on the horizontal line",
metadata={"Header 1": "Header 1 again From Document 2"},
),
Document(
page_content="This will be a new doc but with the same header metadata",
metadata={"Header 1": "Header 1 again From Document 2"},
),
Document(
page_content="And it includes a new paragraph",
metadata={"Header 1": "Header 1 again From Document 2"},
),
]
assert output == expected_output
def test_experimental_markdown_syntax_text_splitter_with_header_on_multi_files() -> (
None
):
"""Test experimental markdown splitter
by header called consecutively on two files"""
markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter(strip_headers=False)
output = []
for experimental_markdown_document in EXPERIMENTAL_MARKDOWN_DOCUMENTS:
output += markdown_splitter.split_text(experimental_markdown_document)
expected_output = [
Document(
page_content="# My Header 1 From Document 1\n"
"Content for header 1 from Document 1\n",
metadata={"Header 1": "My Header 1 From Document 1"},
),
Document(
page_content="## Header 2 From Document 1\n"
"Content for header 2 from Document 1\n",
metadata={
"Header 1": "My Header 1 From Document 1",
"Header 2": "Header 2 From Document 1",
},
),
Document(
page_content=(
"```python\ndef func_definition():\n "
"print('Keep the whitespace consistent')\n```\n"
),
metadata={
"Code": "python",
"Header 1": "My Header 1 From Document 1",
"Header 2": "Header 2 From Document 1",
},
),
Document(
page_content="# Header 1 again From Document 1\n"
"We should also split on the horizontal line\n",
metadata={"Header 1": "Header 1 again From Document 1"},
),
Document(
page_content=(
"This will be a new doc but with the same header metadata\n\n"
"And it includes a new paragraph"
),
metadata={"Header 1": "Header 1 again From Document 1"},
),
Document(
page_content="# My Header 1 From Document 2\n"
"Content for header 1 from Document 2\n",
metadata={"Header 1": "My Header 1 From Document 2"},
),
Document(
page_content="## Header 2 From Document 2\n"
"Content for header 2 from Document 2\n",
metadata={
"Header 1": "My Header 1 From Document 2",
"Header 2": "Header 2 From Document 2",
},
),
Document(
page_content=(
"```python\ndef func_definition():\n "
"print('Keep the whitespace consistent')\n```\n"
),
metadata={
"Code": "python",
"Header 1": "My Header 1 From Document 2",
"Header 2": "Header 2 From Document 2",
},
),
Document(
page_content="# Header 1 again From Document 2\n"
"We should also split on the horizontal line\n",
metadata={"Header 1": "Header 1 again From Document 2"},
),
Document(
page_content=(
"This will be a new doc but with the same header metadata\n\n"
"And it includes a new paragraph"
),
metadata={"Header 1": "Header 1 again From Document 2"},
),
]
assert output == expected_output
def test_experimental_markdown_syntax_text_splitter_header_config_on_multi_files() -> (
None
):
"""Test experimental markdown splitter
by header configuration called consecutively on two files"""
headers_to_split_on = [("#", "Encabezamiento 1")]
markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter(
headers_to_split_on=headers_to_split_on
)
output = []
for experimental_markdown_document in EXPERIMENTAL_MARKDOWN_DOCUMENTS:
output += markdown_splitter.split_text(experimental_markdown_document)
expected_output = [
Document(
page_content="Content for header 1 from Document 1\n"
"## Header 2 From Document 1\n"
"Content for header 2 from Document 1\n",
metadata={"Encabezamiento 1": "My Header 1 From Document 1"},
),
Document(
page_content=(
"```python\ndef func_definition():\n "
"print('Keep the whitespace consistent')\n```\n"
),
metadata={
"Code": "python",
"Encabezamiento 1": "My Header 1 From Document 1",
},
),
Document(
page_content="We should also split on the horizontal line\n",
metadata={"Encabezamiento 1": "Header 1 again From Document 1"},
),
Document(
page_content=(
"This will be a new doc but with the same header metadata\n\n"
"And it includes a new paragraph"
),
metadata={"Encabezamiento 1": "Header 1 again From Document 1"},
),
Document(
page_content="Content for header 1 from Document 2\n"
"## Header 2 From Document 2\n"
"Content for header 2 from Document 2\n",
metadata={"Encabezamiento 1": "My Header 1 From Document 2"},
),
Document(
page_content=(
"```python\ndef func_definition():\n "
"print('Keep the whitespace consistent')\n```\n"
),
metadata={
"Code": "python",
"Encabezamiento 1": "My Header 1 From Document 2",
},
),
Document(
page_content="We should also split on the horizontal line\n",
metadata={"Encabezamiento 1": "Header 1 again From Document 2"},
),
Document(
page_content=(
"This will be a new doc but with the same header metadata\n\n"
"And it includes a new paragraph"
),
metadata={"Encabezamiento 1": "Header 1 again From Document 2"},
),
]
assert output == expected_output
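# The language-specific splitter tests below use the module-level CHUNK_SIZE
# (deliberately small) so that RecursiveCharacterTextSplitter.from_language
# splits on each language's own separators.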
def test_solidity_code_splitter() -> None:
splitter = RecursiveCharacterTextSplitter.from_language(
Language.SOL, chunk_size=CHUNK_SIZE, chunk_overlap=0
)
code = """pragma solidity ^0.8.20;
contract HelloWorld {
function add(uint a, uint b) pure public returns(uint) {
return a + b;
}
}
"""
chunks = splitter.split_text(code)
assert chunks == [
"pragma solidity",
"^0.8.20;",
"contract",
"HelloWorld {",
"function",
"add(uint a,",
"uint b) pure",
"public",
"returns(uint) {",
"return a",
"+ b;",
"}\n }",
]
def test_lua_code_splitter() -> None:
splitter = RecursiveCharacterTextSplitter.from_language(
Language.LUA, chunk_size=CHUNK_SIZE, chunk_overlap=0
)
code = """
local variable = 10
function add(a, b)
return a + b
end
if variable > 5 then
for i=1, variable do
while i < variable do
repeat
print(i)
i = i + 1
until i >= variable
end
end
end
"""
chunks = splitter.split_text(code)
assert chunks == [
"local variable",
"= 10",
"function add(a,",
"b)",
"return a +",
"b",
"end",
"if variable > 5",
"then",
"for i=1,",
"variable do",
"while i",
"< variable do",
"repeat",
"print(i)",
"i = i + 1",
"until i >=",
"variable",
"end",
"end\nend",
]
def test_haskell_code_splitter() -> None:
splitter = RecursiveCharacterTextSplitter.from_language(
Language.HASKELL, chunk_size=CHUNK_SIZE, chunk_overlap=0
)
code = """
main :: IO ()
main = do
putStrLn "Hello, World!"
-- Some sample functions
add :: Int -> Int -> Int
add x y = x + y
"""
# Adjusted expected chunks to account for indentation and newlines
expected_chunks = [
"main ::",
"IO ()",
"main = do",
"putStrLn",
'"Hello, World!"',
"--",
"Some sample",
"functions",
"add :: Int ->",
"Int -> Int",
"add x y = x",
"+ y",
]
chunks = splitter.split_text(code)
assert chunks == expected_chunks
@pytest.fixture
@pytest.mark.requires("bs4")
def html_header_splitter_splitter_factory() -> Callable[
[list[tuple[str, str]]], HTMLHeaderTextSplitter
]:
"""
Fixture to create an HTMLHeaderTextSplitter instance with given headers.
This factory allows dynamic creation of splitters with different headers.
"""
def _create_splitter(
headers_to_split_on: list[tuple[str, str]],
) -> HTMLHeaderTextSplitter:
return HTMLHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
return _create_splitter
@pytest.mark.parametrize(
("headers_to_split_on", "html_input", "expected_documents", "test_case"),
[
(
# Test Case 1: Nested headers with h1, h2, and h3
[("h1", "Header 1"), ("h2", "Header 2"), ("h3", "Header 3")],
"""
<html>
<body>
<h1>Main Title</h1>
<h2>Subsection</h2>
<p>Details of subsection.</p>
<h3>Sub-subsection</h3>
<p>More details.</p>
<h1>Another Main Title</h1>
<p>Content under another main title.</p>
</body>
</html>
""",
[
Document(
page_content="Main Title", metadata={"Header 1": "Main Title"}
),
Document(
page_content="Subsection",
metadata={"Header 1": "Main Title", "Header 2": "Subsection"},
),
Document(
page_content="Details of subsection.",
metadata={"Header 1": "Main Title", "Header 2": "Subsection"},
),
Document(
page_content="Sub-subsection",
metadata={
"Header 1": "Main Title",
"Header 2": "Subsection",
"Header 3": "Sub-subsection",
},
),
Document(
page_content="More details.",
metadata={
"Header 1": "Main Title",
"Header 2": "Subsection",
"Header 3": "Sub-subsection",
},
),
Document(
page_content="Another Main Title",
metadata={"Header 1": "Another Main Title"},
),
Document(
page_content="Content under another main title.",
metadata={"Header 1": "Another Main Title"},
),
],
"Nested headers with h1, h2, and h3",
),
(
# Test Case 3: No headers
[("h1", "Header 1")],
"""
Paragraph one.
Paragraph two.
Paragraph three.
""",
[
Document(
page_content="Paragraph one. \nParagraph two. \nParagraph three.",
metadata={},
)
],
"No headers present",
),
(
# Test Case 4: Multiple headers of the same level
[("h1", "Header 1")],
"""
Chapter 1
Content of chapter 1.
Chapter 2
Content of chapter 2.
Chapter 3
Content of chapter 3.
""",
[
Document(page_content="Chapter 1", metadata={"Header 1": "Chapter 1"}),
Document(
page_content="Content of chapter 1.",
metadata={"Header 1": "Chapter 1"},
),
Document(page_content="Chapter 2", metadata={"Header 1": "Chapter 2"}),
Document(
page_content="Content of chapter 2.",
metadata={"Header 1": "Chapter 2"},
),
Document(page_content="Chapter 3", metadata={"Header 1": "Chapter 3"}),
Document(
page_content="Content of chapter 3.",
metadata={"Header 1": "Chapter 3"},
),
],
"Multiple headers of the same level",
),
(
# Test Case 5: Headers with no content
[("h1", "Header 1"), ("h2", "Header 2")],
"""
Header 1
Header 2
Header 3
""",
[
Document(page_content="Header 1", metadata={"Header 1": "Header 1"}),
Document(
page_content="Header 2",
metadata={"Header 1": "Header 1", "Header 2": "Header 2"},
),
Document(page_content="Header 3", metadata={"Header 1": "Header 3"}),
],
"Headers with no associated content",
),
],
)
@pytest.mark.requires("bs4")
def test_html_header_text_splitter(
html_header_splitter_splitter_factory: Any,
headers_to_split_on: list[tuple[str, str]],
html_input: str,
expected_documents: list[Document],
test_case: str,
) -> None:
"""
Test the HTML header text splitter.
Args:
html_header_splitter_splitter_factory (Any): Factory function to create
the HTML header splitter.
headers_to_split_on (List[Tuple[str, str]]): List of headers to split on.
html_input (str): The HTML input string to be split.
expected_documents (List[Document]): List of expected Document objects.
test_case (str): Description of the test case.
Raises:
AssertionError: If the number of documents or their content/metadata
does not match the expected values.
"""
splitter = html_header_splitter_splitter_factory(
headers_to_split_on=headers_to_split_on
)
docs = splitter.split_text(html_input)
assert len(docs) == len(expected_documents), (
f"Test Case '{test_case}' Failed: Number of documents mismatch. "
f"Expected {len(expected_documents)}, got {len(docs)}."
)
for idx, (doc, expected) in enumerate(zip(docs, expected_documents), start=1):
assert doc.page_content == expected.page_content, (
f"Test Case '{test_case}' Failed at Document {idx}: "
f"Content mismatch.\nExpected: {expected.page_content}"
"\nGot: {doc.page_content}"
)
assert doc.metadata == expected.metadata, (
f"Test Case '{test_case}' Failed at Document {idx}: "
f"Metadata mismatch.\nExpected: {expected.metadata}\nGot: {doc.metadata}"
)
@pytest.mark.parametrize(
("headers_to_split_on", "html_content", "expected_output", "test_case"),
[
(
# Test Case A: Split on h1 and h2 with h3 in content
[("h1", "Header 1"), ("h2", "Header 2"), ("h3", "Header 3")],
"""
Foo
Some intro text about Foo.
Bar main section
Some intro text about Bar.
Bar subsection 1
Some text about the first subtopic of Bar.
Bar subsection 2
Some text about the second subtopic of Bar.
Baz
Some text about Baz
Some concluding text about Foo
""",
[
Document(metadata={"Header 1": "Foo"}, page_content="Foo"),
Document(
metadata={"Header 1": "Foo"},
page_content="Some intro text about Foo.",
),
Document(
metadata={"Header 1": "Foo", "Header 2": "Bar main section"},
page_content="Bar main section",
),
Document(
metadata={"Header 1": "Foo", "Header 2": "Bar main section"},
page_content="Some intro text about Bar.",
),
Document(
metadata={
"Header 1": "Foo",
"Header 2": "Bar main section",
"Header 3": "Bar subsection 1",
},
page_content="Bar subsection 1",
),
Document(
metadata={
"Header 1": "Foo",
"Header 2": "Bar main section",
"Header 3": "Bar subsection 1",
},
page_content="Some text about the first subtopic of Bar.",
),
Document(
metadata={
"Header 1": "Foo",
"Header 2": "Bar main section",
"Header 3": "Bar subsection 2",
},
page_content="Bar subsection 2",
),
Document(
metadata={
"Header 1": "Foo",
"Header 2": "Bar main section",
"Header 3": "Bar subsection 2",
},
page_content="Some text about the second subtopic of Bar.",
),
Document(
metadata={"Header 1": "Foo", "Header 2": "Baz"}, page_content="Baz"
),
Document(
metadata={"Header 1": "Foo"},
page_content=(
"Some text about Baz \nSome concluding text about Foo"
),
),
],
"Test Case A: Split on h1, h2, and h3 with nested headers",
),
(
# Test Case B: Split on h1 only without any headers
[("h1", "Header 1")],
"""
Paragraph one.
Paragraph two.
Paragraph three.
""",
[
Document(
metadata={},
page_content="Paragraph one. \nParagraph two. \nParagraph three.",
)
],
"Test Case B: Split on h1 only without any headers",
),
],
)
@pytest.mark.requires("bs4")
def test_additional_html_header_text_splitter(
html_header_splitter_splitter_factory: Any,
headers_to_split_on: list[tuple[str, str]],
html_content: str,
expected_output: list[Document],
test_case: str,
) -> None:
"""
Test the HTML header text splitter.
Args:
html_header_splitter_splitter_factory (Any): Factory function to create
the HTML header splitter.
headers_to_split_on (List[Tuple[str, str]]): List of headers to split on.
html_content (str): HTML content to be split.
expected_output (List[Document]): Expected list of Document objects.
test_case (str): Description of the test case.
Raises:
AssertionError: If the number of documents or their content/metadata
does not match the expected output.
"""
splitter = html_header_splitter_splitter_factory(
headers_to_split_on=headers_to_split_on
)
docs = splitter.split_text(html_content)
assert len(docs) == len(expected_output), (
f"{test_case} Failed: Number of documents mismatch. "
f"Expected {len(expected_output)}, got {len(docs)}."
)
for idx, (doc, expected) in enumerate(zip(docs, expected_output), start=1):
assert doc.page_content == expected.page_content, (
f"{test_case} Failed at Document {idx}: "
f"Content mismatch.\nExpected: {expected.page_content}\n"
"Got: {doc.page_content}"
)
assert doc.metadata == expected.metadata, (
f"{test_case} Failed at Document {idx}: "
f"Metadata mismatch.\nExpected: {expected.metadata}\nGot: {doc.metadata}"
)
@pytest.mark.parametrize(
("headers_to_split_on", "html_content", "expected_output", "test_case"),
[
(
# Test Case C: Split on h1, h2, and h3 with no headers present
[("h1", "Header 1"), ("h2", "Header 2"), ("h3", "Header 3")],
"""
Just some random text without headers.
More text here.
""",
[
Document(
page_content="Just some random text without headers."
" \nMore text here.",
metadata={},
)
],
"Test Case C: Split on h1, h2, and h3 without any headers",
)
],
)
@pytest.mark.requires("bs4")
def test_html_no_headers_with_multiple_splitters(
html_header_splitter_splitter_factory: Any,
headers_to_split_on: list[tuple[str, str]],
html_content: str,
expected_output: list[Document],
test_case: str,
) -> None:
"""
Test HTML content splitting without headers using multiple splitters.
Args:
html_header_splitter_splitter_factory (Any): Factory to create the
HTML header splitter.
headers_to_split_on (List[Tuple[str, str]]): List of headers to split on.
html_content (str): HTML content to be split.
expected_output (List[Document]): Expected list of Document objects
after splitting.
test_case (str): Description of the test case.
Raises:
AssertionError: If the number of documents or their content/metadata
does not match the expected output.
"""
splitter = html_header_splitter_splitter_factory(
headers_to_split_on=headers_to_split_on
)
docs = splitter.split_text(html_content)
assert len(docs) == len(expected_output), (
f"{test_case} Failed: Number of documents mismatch. "
f"Expected {len(expected_output)}, got {len(docs)}."
)
for idx, (doc, expected) in enumerate(zip(docs, expected_output), start=1):
assert doc.page_content == expected.page_content, (
f"{test_case} Failed at Document {idx}: "
f"Content mismatch.\nExpected: {expected.page_content}\n"
"Got: {doc.page_content}"
)
assert doc.metadata == expected.metadata, (
f"{test_case} Failed at Document {idx}: "
f"Metadata mismatch.\nExpected: {expected.metadata}\nGot: {doc.metadata}"
)
def test_split_text_on_tokens() -> None:
"""Test splitting by tokens per chunk."""
text = "foo bar baz 123"
tokenizer = Tokenizer(
chunk_overlap=3,
tokens_per_chunk=7,
decode=(lambda it: "".join(chr(i) for i in it)),
encode=(lambda it: [ord(c) for c in it]),
)
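# Each character is treated as one token, so chunks are 7-character windows that
# advance by tokens_per_chunk - chunk_overlap = 4 characters.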
output = split_text_on_tokens(text=text, tokenizer=tokenizer)
expected_output = ["foo bar", "bar baz", "baz 123"]
assert output == expected_output
@pytest.mark.requires("bs4")
@pytest.mark.requires("lxml")
def test_section_aware_happy_path_splitting_based_on_header_1_2() -> None:
# arrange
html_string = """
Foo
Some intro text about Foo.
Bar main section
Some intro text about Bar.
Bar subsection 1
Some text about the first subtopic of Bar.
Bar subsection 2
Some text about the second subtopic of Bar.
Baz
Some text about Baz
Some concluding text about Foo
"""
sec_splitter = HTMLSectionSplitter(
headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")]
)
docs = sec_splitter.split_text(html_string)
assert len(docs) == 3
assert docs[0].metadata["Header 1"] == "Foo"
assert docs[0].page_content == "Foo \n Some intro text about Foo."
assert docs[1].page_content == (
"Bar main section \n Some intro text about Bar. \n "
"Bar subsection 1 \n Some text about the first subtopic of Bar. \n "
"Bar subsection 2 \n Some text about the second subtopic of Bar."
)
assert docs[1].metadata["Header 2"] == "Bar main section"
assert (
docs[2].page_content
== "Baz \n Some text about Baz \n \n \n Some concluding text about Foo"
)
assert docs[2].metadata["Header 2"] == "Baz"
@pytest.mark.requires("bs4")
@pytest.mark.requires("lxml")
def test_happy_path_splitting_based_on_header_with_font_size() -> None:
# arrange
html_string = """
Foo
Some intro text about Foo.
Bar main section
Some intro text about Bar.
Bar subsection 1
Some text about the first subtopic of Bar.
Bar subsection 2
Some text about the second subtopic of Bar.
Baz
Some text about Baz
Some concluding text about Foo
"""
sec_splitter = HTMLSectionSplitter(
headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")]
)
docs = sec_splitter.split_text(html_string)
assert len(docs) == 3
assert docs[0].page_content == "Foo \n Some intro text about Foo."
assert docs[0].metadata["Header 1"] == "Foo"
assert docs[1].page_content == (
"Bar main section \n Some intro text about Bar. \n "
"Bar subsection 1 \n Some text about the first subtopic of Bar. \n "
"Bar subsection 2 \n Some text about the second subtopic of Bar."
)
assert docs[1].metadata["Header 2"] == "Bar main section"
assert docs[2].page_content == (
"Baz \n Some text about Baz \n \n \n Some concluding text about Foo"
)
assert docs[2].metadata["Header 2"] == "Baz"
@pytest.mark.requires("bs4")
@pytest.mark.requires("lxml")
def test_happy_path_splitting_based_on_header_with_whitespace_chars() -> None:
# arrange
html_string = """
\nFoo
Some intro text about Foo.
Bar main section
Some intro text about Bar.
Bar subsection 1
Some text about the first subtopic of Bar.
Bar subsection 2
Some text about the second subtopic of Bar.
Baz
Some text about Baz
Some concluding text about Foo
"""
sec_splitter = HTMLSectionSplitter(
headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")]
)
docs = sec_splitter.split_text(html_string)
assert len(docs) == 3
assert docs[0].page_content == "Foo \n Some intro text about Foo."
assert docs[0].metadata["Header 1"] == "Foo"
assert docs[1].page_content == (
"Bar main section \n Some intro text about Bar. \n "
"Bar subsection 1 \n Some text about the first subtopic of Bar. \n "
"Bar subsection 2 \n Some text about the second subtopic of Bar."
)
assert docs[1].metadata["Header 2"] == "Bar main section"
assert docs[2].page_content == (
"Baz \n Some text about Baz \n \n \n Some concluding text about Foo"
)
assert docs[2].metadata["Header 2"] == "Baz"
@pytest.mark.requires("bs4")
@pytest.mark.requires("lxml")
def test_happy_path_splitting_with_duplicate_header_tag() -> None:
# arrange
html_string = """
Foo
Some intro text about Foo.
Bar main section
Some intro text about Bar.
Bar subsection 1
Some text about the first subtopic of Bar.
Bar subsection 2
Some text about the second subtopic of Bar.
Foo
Some text about Baz
Foo
Some concluding text about Foo
"""
sec_splitter = HTMLSectionSplitter(
headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")]
)
docs = sec_splitter.split_text(html_string)
assert len(docs) == 4
assert docs[0].page_content == "Foo \n Some intro text about Foo."
assert docs[0].metadata["Header 1"] == "Foo"
assert docs[1].page_content == (
"Bar main section \n Some intro text about Bar. \n "
"Bar subsection 1 \n Some text about the first subtopic of Bar. \n "
"Bar subsection 2 \n Some text about the second subtopic of Bar."
)
assert docs[1].metadata["Header 2"] == "Bar main section"
assert docs[2].page_content == "Foo \n Some text about Baz"
assert docs[2].metadata["Header 2"] == "Foo"
assert docs[3].page_content == "Foo \n \n Some concluding text about Foo"
assert docs[3].metadata["Header 1"] == "Foo"
def test_split_json() -> None:
"""Test json text splitter"""
max_chunk = 800
splitter = RecursiveJsonSplitter(max_chunk_size=max_chunk)
def random_val() -> str:
return "".join(random.choices(string.ascii_letters, k=random.randint(4, 12)))
test_data: Any = {
"val0": random_val(),
"val1": {f"val1{i}": random_val() for i in range(100)},
}
test_data["val1"]["val16"] = {f"val16{i}": random_val() for i in range(100)}
# uses create_docs and split_text
docs = splitter.create_documents(texts=[test_data])
output = [len(doc.page_content) < max_chunk * 1.05 for doc in docs]
expected_output = [True for _ in docs]
assert output == expected_output
def test_split_json_with_lists() -> None:
"""Test json text splitter with list conversion"""
max_chunk = 800
splitter = RecursiveJsonSplitter(max_chunk_size=max_chunk)
def random_val() -> str:
return "".join(random.choices(string.ascii_letters, k=random.randint(4, 12)))
test_data: Any = {
"val0": random_val(),
"val1": {f"val1{i}": random_val() for i in range(100)},
}
test_data["val1"]["val16"] = {f"val16{i}": random_val() for i in range(100)}
test_data_list: Any = {"testPreprocessing": [test_data]}
# test text splitter
texts = splitter.split_text(json_data=test_data)
texts_list = splitter.split_text(json_data=test_data_list, convert_lists=True)
assert len(texts_list) >= len(texts)
def test_split_json_many_calls() -> None:
x = {"a": 1, "b": 2}
y = {"c": 3, "d": 4}
splitter = RecursiveJsonSplitter()
chunk0 = splitter.split_json(x)
assert chunk0 == [{"a": 1, "b": 2}]
chunk1 = splitter.split_json(y)
assert chunk1 == [{"c": 3, "d": 4}]
# chunk0 must not have been mutated by the later split_json(y) call
assert chunk0 == [{"a": 1, "b": 2}]
chunk0_output = [{"a": 1, "b": 2}]
chunk1_output = [{"c": 3, "d": 4}]
assert chunk0 == chunk0_output
assert chunk1 == chunk1_output
def test_powershell_code_splitter_short_code() -> None:
splitter = RecursiveCharacterTextSplitter.from_language(
Language.POWERSHELL, chunk_size=60, chunk_overlap=0
)
code = """
# Check if a file exists
$filePath = "C:\\temp\\file.txt"
if (Test-Path $filePath) {
# File exists
} else {
# File does not exist
}
"""
chunks = splitter.split_text(code)
assert chunks == [
'# Check if a file exists\n$filePath = "C:\\temp\\file.txt"',
"if (Test-Path $filePath) {\n # File exists\n} else {",
"# File does not exist\n}",
]
def test_powershell_code_splitter_longer_code() -> None:
splitter = RecursiveCharacterTextSplitter.from_language(
Language.POWERSHELL, chunk_size=60, chunk_overlap=0
)
code = """
# Get a list of all processes and export to CSV
$processes = Get-Process
$processes | Export-Csv -Path "C:\\temp\\processes.csv" -NoTypeInformation
# Read the CSV file and display its content
$csvContent = Import-Csv -Path "C:\\temp\\processes.csv"
$csvContent | ForEach-Object {
$_.ProcessName
}
# End of script
"""
chunks = splitter.split_text(code)
assert chunks == [
"# Get a list of all processes and export to CSV",
"$processes = Get-Process",
'$processes | Export-Csv -Path "C:\\temp\\processes.csv"',
"-NoTypeInformation",
"# Read the CSV file and display its content",
'$csvContent = Import-Csv -Path "C:\\temp\\processes.csv"',
"$csvContent | ForEach-Object {\n $_.ProcessName\n}",
"# End of script",
]
FAKE_VISUALBASIC6_TEXT = """
Option Explicit
Public Function SumTwoIntegers(ByVal a As Integer, ByVal b As Integer) As Integer
SumTwoIntegers = a + b
End Function
Public Sub Main()
Dim i As Integer
Dim limit As Integer
i = 0
limit = 50
While i < limit
i = SumTwoIntegers(i, 1)
If i = limit \\ 2 Then
MsgBox "Halfway there! i = " & i
End If
Wend
MsgBox "Done! Final value of i: " & i
End Sub
"""
def test_visualbasic6_code_splitter() -> None:
splitter = RecursiveCharacterTextSplitter.from_language(
Language.VISUALBASIC6,
chunk_size=CHUNK_SIZE,
chunk_overlap=0,
)
chunks = splitter.split_text(FAKE_VISUALBASIC6_TEXT)
assert chunks == [
"Option Explicit",
"Public Function",
"SumTwoIntegers(",
"ByVal",
"a As Integer,",
"ByVal b As",
"Integer) As",
"Integer",
"SumTwoIntegers",
"= a + b",
"End Function",
"Public Sub",
"Main()",
"Dim i As",
"Integer",
"Dim limit",
"As Integer",
"i = 0",
"limit = 50",
"While i <",
"limit",
"i =",
"SumTwoIntegers(",
"i,",
"1)",
"If i =",
"limit \\ 2 Then",
'MsgBox "Halfway',
'there! i = " &',
"i",
"End If",
"Wend",
"MsgBox",
'"Done! Final',
'value of i: " &',
"i",
"End Sub",
]
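# Custom handler for HTMLSemanticPreservingSplitter: renders an <iframe> tag as a
# Markdown-style link built from its src attribute.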
def custom_iframe_extractor(iframe_tag: Any) -> str:
iframe_src = iframe_tag.get("src", "")
return f"[iframe:{iframe_src}]({iframe_src})"
@pytest.mark.requires("bs4")
def test_html_splitter_with_custom_extractor() -> None:
"""Test HTML splitting with a custom extractor."""
html_content = """
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
preserve_links=True,
max_chunk_size=1000,
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="This is a link to [example.com](http://example.com)",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected
@pytest.mark.requires("bs4")
def test_html_splitter_with_nested_elements() -> None:
"""Test HTML splitting with nested elements."""
html_content = """
Main Section
Some text here.
Nested content.
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")], max_chunk_size=1000
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="Some text here. Nested content.",
metadata={"Header 1": "Main Section"},
),
]
assert documents == expected
@pytest.mark.requires("bs4")
def test_html_splitter_with_preserved_elements() -> None:
"""Test HTML splitting with preserved elements like
,
with low chunk
size."""
html_content = """
Section 1
Row 1
Row 2
Item 1
Item 2
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
elements_to_preserve=["table", "ul"],
max_chunk_size=50, # Deliberately low to test preservation
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="Row 1 Row 2 Item 1 Item 2",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected # Shouldn't split the table or ul
@pytest.mark.requires("bs4")
def test_html_splitter_with_no_further_splits() -> None:
"""Test HTML splitting that requires no further splits beyond sections."""
html_content = """
Section 1
Some content here.
Section 2
More content here.
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")], max_chunk_size=1000
)
documents = splitter.split_text(html_content)
expected = [
Document(page_content="Some content here.", metadata={"Header 1": "Section 1"}),
Document(page_content="More content here.", metadata={"Header 1": "Section 2"}),
]
assert documents == expected # No further splits, just sections
@pytest.mark.requires("bs4")
def test_html_splitter_with_small_chunk_size() -> None:
"""Test HTML splitting with a very small chunk size to validate chunking."""
html_content = """
Section 1
This is some long text that should be split into multiple chunks due to the
small chunk size.
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")], max_chunk_size=20, chunk_overlap=5
)
documents = splitter.split_text(html_content)
expected = [
Document(page_content="This is some long", metadata={"Header 1": "Section 1"}),
Document(page_content="long text that", metadata={"Header 1": "Section 1"}),
Document(page_content="that should be", metadata={"Header 1": "Section 1"}),
Document(page_content="be split into", metadata={"Header 1": "Section 1"}),
Document(page_content="into multiple", metadata={"Header 1": "Section 1"}),
Document(page_content="chunks due to the", metadata={"Header 1": "Section 1"}),
Document(page_content="the small chunk", metadata={"Header 1": "Section 1"}),
Document(page_content="size.", metadata={"Header 1": "Section 1"}),
]
assert documents == expected # Should split into multiple chunks
@pytest.mark.requires("bs4")
def test_html_splitter_with_denylist_tags() -> None:
"""Test HTML splitting with denylist tag filtering."""
html_content = """
Section 1
This paragraph should be kept.
This span should be removed.
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
denylist_tags=["span"],
max_chunk_size=1000,
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="This paragraph should be kept.",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected
@pytest.mark.requires("bs4")
def test_html_splitter_with_external_metadata() -> None:
"""Test HTML splitting with external metadata integration."""
html_content = """
Section 1
This is some content.
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
external_metadata={"source": "example.com"},
max_chunk_size=1000,
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="This is some content.",
metadata={"Header 1": "Section 1", "source": "example.com"},
),
]
assert documents == expected
@pytest.mark.requires("bs4")
def test_html_splitter_with_text_normalization() -> None:
"""Test HTML splitting with text normalization."""
html_content = """
Section 1
This is some TEXT that should be normalized!
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
normalize_text=True,
max_chunk_size=1000,
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="this is some text that should be normalized",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected
@pytest.mark.requires("bs4")
def test_html_splitter_with_allowlist_tags() -> None:
"""Test HTML splitting with allowlist tag filtering."""
html_content = """
Section 1
This paragraph should be kept.
This span should be kept.
This div should be removed.
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
allowlist_tags=["p", "span"],
max_chunk_size=1000,
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="This paragraph should be kept. This span should be kept.",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected
@pytest.mark.requires("bs4")
def test_html_splitter_with_mixed_preserve_and_filter() -> None:
"""Test HTML splitting with both preserved elements and denylist tags."""
html_content = """
Section 1
Keep this table
Cell contents kept, span removed
This span should be removed.
This paragraph should be kept.
This span should be removed.
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
elements_to_preserve=["table"],
denylist_tags=["span"],
max_chunk_size=1000,
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="Keep this table Cell contents kept, span removed"
" This paragraph should be kept.",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected
@pytest.mark.requires("bs4")
def test_html_splitter_with_no_headers() -> None:
"""Test HTML splitting when there are no headers to split on."""
html_content = """
This is content without any headers.
It should still produce a valid document.
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[],
max_chunk_size=1000,
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="This is content without any headers. It should still produce"
" a valid document.",
metadata={},
),
]
assert documents == expected
@pytest.mark.requires("bs4")
def test_html_splitter_with_media_preservation() -> None:
"""Test HTML splitting with media elements preserved and converted to Markdown-like
links."""
html_content = """
Section 1
This is an image:
This is a video:
This is audio:
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
preserve_images=True,
preserve_videos=True,
preserve_audio=True,
max_chunk_size=1000,
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="This is an image: ![image:http://example.com/image.png]"
"(http://example.com/image.png) "
"This is a video: ![video:http://example.com/video.mp4]"
"(http://example.com/video.mp4) "
"This is audio: ![audio:http://example.com/audio.mp3]"
"(http://example.com/audio.mp3)",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected
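# The keep_separator tests below pin down where the ". " separator lands:
# True, "start", and the default keep it at the start of the following chunk,
# "end" keeps it at the end of the preceding chunk, and False drops it.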
@pytest.mark.requires("bs4")
def test_html_splitter_keep_separator_true() -> None:
"""Test HTML splitting with keep_separator=True"""
html_content = """
Section 1
This is some text. This is some other text.
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
max_chunk_size=10,
separators=[". "],
keep_separator=True,
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="This is some text",
metadata={"Header 1": "Section 1"},
),
Document(
page_content=". This is some other text.",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected
@pytest.mark.requires("bs4")
def test_html_splitter_keep_separator_false() -> None:
"""Test HTML splitting with keep_separator=False"""
html_content = """
Section 1
This is some text. This is some other text.
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
max_chunk_size=10,
separators=[". "],
keep_separator=False,
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="This is some text",
metadata={"Header 1": "Section 1"},
),
Document(
page_content="This is some other text.",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected
@pytest.mark.requires("bs4")
def test_html_splitter_keep_separator_start() -> None:
"""Test HTML splitting with keep_separator="start" """
html_content = """
Section 1
This is some text. This is some other text.
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
max_chunk_size=10,
separators=[". "],
keep_separator="start",
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="This is some text",
metadata={"Header 1": "Section 1"},
),
Document(
page_content=". This is some other text.",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected
@pytest.mark.requires("bs4")
def test_html_splitter_keep_separator_end() -> None:
"""Test HTML splitting with keep_separator="end" """
html_content = """
Section 1
This is some text. This is some other text.
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
max_chunk_size=10,
separators=[". "],
keep_separator="end",
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="This is some text.",
metadata={"Header 1": "Section 1"},
),
Document(
page_content="This is some other text.",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected
@pytest.mark.requires("bs4")
def test_html_splitter_keep_separator_default() -> None:
"""Test HTML splitting with keep_separator not set"""
html_content = """
Section 1
This is some text. This is some other text.
"""
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")], max_chunk_size=10, separators=[". "]
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="This is some text",
metadata={"Header 1": "Section 1"},
),
Document(
page_content=". This is some other text.",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected
def test_character_text_splitter_discard_regex_separator_on_merge() -> None:
"""Test that regex lookahead separator is not re-inserted when merging."""
text = "SCE191 First chunk. SCE103 Second chunk."
splitter = CharacterTextSplitter(
separator=r"(?=SCE\d{3})",
is_separator_regex=True,
chunk_size=200,
chunk_overlap=0,
keep_separator=False,
)
output = splitter.split_text(text)
assert output == ["SCE191 First chunk. SCE103 Second chunk."]
@pytest.mark.parametrize(
("separator", "is_regex", "text", "chunk_size", "expected"),
[
# 1) regex lookaround & split happens
# "abcmiddef" split by "(?<=mid)" → ["abcmid","def"], chunk_size=5 keeps both
(r"(?<=mid)", True, "abcmiddef", 5, ["abcmid", "def"]),
# 2) regex lookaround & no split
# chunk_size=100 merges back into ["abcmiddef"]
(r"(?<=mid)", True, "abcmiddef", 100, ["abcmiddef"]),
# 3) literal separator & split happens
# split on "mid" → ["abc","def"], chunk_size=3 keeps both
("mid", False, "abcmiddef", 3, ["abc", "def"]),
# 4) literal separator & no split
# chunk_size=100 merges back into ["abcmiddef"]
("mid", False, "abcmiddef", 100, ["abcmiddef"]),
],
)
def test_character_text_splitter_chunk_size_effect(
separator: str,
*,
is_regex: bool,
text: str,
chunk_size: int,
expected: list[str],
) -> None:
splitter = CharacterTextSplitter(
separator=separator,
is_separator_regex=is_regex,
chunk_size=chunk_size,
chunk_overlap=0,
keep_separator=False,
)
assert splitter.split_text(text) == expected