langchain/libs/text-splitters/langchain_text_splitters/nltk.py

"""NLTK text splitter."""
from __future__ import annotations
from typing import Any
from typing_extensions import override
from langchain_text_splitters.base import TextSplitter
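# Soft import: the module should import cleanly without nltk installed;
# NLTKTextSplitter.__init__ raises ImportError when nltk is actually needed.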
try:
    import nltk

    _HAS_NLTK = True
except ImportError:
    _HAS_NLTK = False


class NLTKTextSplitter(TextSplitter):
    """Splitting text using the NLTK package."""

    def __init__(
        self,
        separator: str = "\n\n",
        language: str = "english",
        *,
        use_span_tokenize: bool = False,
        **kwargs: Any,
    ) -> None:
        """Initialize the NLTK splitter.

        Args:
            separator: The separator to use when combining splits.
            language: The language to use.
            use_span_tokenize: Whether to use `span_tokenize` instead of
                `sent_tokenize`.

        Raises:
            ImportError: If NLTK is not installed.
            ValueError: If `use_span_tokenize` is `True` and separator is not `''`.
        """
        super().__init__(**kwargs)
        self._separator = separator
        self._language = language
        self._use_span_tokenize = use_span_tokenize
        if self._use_span_tokenize and self._separator:
            msg = "When use_span_tokenize is True, separator should be ''"
            raise ValueError(msg)
        if not _HAS_NLTK:
            msg = "NLTK is not installed, please install it with `pip install nltk`."
            raise ImportError(msg)
        if self._use_span_tokenize:
            self._tokenizer = nltk.tokenize._get_punkt_tokenizer(self._language)  # noqa: SLF001
        else:
            self._tokenizer = nltk.tokenize.sent_tokenize

    @override
    def split_text(self, text: str) -> list[str]:
        # First we naively split the large input into a bunch of smaller ones.
        if self._use_span_tokenize:
            spans = list(self._tokenizer.span_tokenize(text))
            splits = []
            for i, (start, end) in enumerate(spans):
                if i > 0:
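                    # Re-attach the whitespace between the previous span and
                    # this one, so joining the splits with '' reproduces the
                    # original text exactly.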
                    prev_end = spans[i - 1][1]
                    sentence = text[prev_end:start] + text[start:end]
                else:
                    sentence = text[start:end]
                splits.append(sentence)
        else:
            splits = self._tokenizer(text, language=self._language)
        return self._merge_splits(splits, self._separator)
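

A minimal usage sketch (not part of the file above). It assumes `nltk` is
installed and the Punkt sentence model is available (e.g. via
nltk.download("punkt_tab") on recent NLTK versions); `chunk_size` and
`chunk_overlap` are keyword arguments inherited from TextSplitter.

from langchain_text_splitters import NLTKTextSplitter

splitter = NLTKTextSplitter(chunk_size=120, chunk_overlap=0)
chunks = splitter.split_text(
    "NLTK finds the sentence boundaries. Whole sentences are packed into "
    "each chunk up to chunk_size. Later sentences spill into new chunks."
)
print(chunks)  # a list of strings, each at most ~chunk_size characters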
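
Why `use_span_tokenize=True` requires `separator=''`: every split after the
first carries the whitespace that preceded it, so concatenating the splits
reproduces the input verbatim and character offsets stay exact. A sketch of
that round-trip, assuming `chunk_overlap=0` and `strip_whitespace=False`
(both inherited TextSplitter options) so that merging does not drop or
duplicate any characters:

from langchain_text_splitters import NLTKTextSplitter

text = "One short sentence.  Two spaces before this one.\nA newline here."
splitter = NLTKTextSplitter(
    separator="",
    use_span_tokenize=True,
    chunk_size=40,
    chunk_overlap=0,
    strip_whitespace=False,
)
chunks = splitter.split_text(text)
# Should hold: span mode preserves inter-sentence whitespace.
assert "".join(chunks) == text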