mirror of
https://github.com/hwchase17/langchain.git
synced 2025-09-09 06:53:59 +00:00
text-splitters[minor], langchain[minor], community[patch], templates, docs: langchain-text-splitters 0.0.1 (#18346)
This commit is contained in:
31
libs/text-splitters/langchain_text_splitters/nltk.py
Normal file
31
libs/text-splitters/langchain_text_splitters/nltk.py
Normal file
@@ -0,0 +1,31 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, List
|
||||
|
||||
from langchain_text_splitters.base import TextSplitter
|
||||
|
||||
|
||||
class NLTKTextSplitter(TextSplitter):
    """Splitting text using NLTK package."""

    def __init__(
        self, separator: str = "\n\n", language: str = "english", **kwargs: Any
    ) -> None:
        """Initialize the NLTK splitter.

        Args:
            separator: String placed between sentences when splits are merged
                back into chunks.
            language: Language name passed through to
                ``nltk.tokenize.sent_tokenize``.
            **kwargs: Additional keyword arguments forwarded to ``TextSplitter``.

        Raises:
            ImportError: If the ``nltk`` package is not installed.
        """
        super().__init__(**kwargs)
        try:
            # Imported lazily so the package is only required when this
            # splitter is actually instantiated.
            from nltk.tokenize import sent_tokenize

            self._tokenizer = sent_tokenize
        except ImportError as e:
            # Explicitly chain the original exception (PEP 3134) so the
            # underlying failing import remains visible in the traceback.
            raise ImportError(
                "NLTK is not installed, please install it with `pip install nltk`."
            ) from e
        self._separator = separator
        self._language = language

    def split_text(self, text: str) -> List[str]:
        """Split incoming text and return chunks."""
        # First we naively split the large input into a bunch of smaller ones.
        splits = self._tokenizer(text, language=self._language)
        return self._merge_splits(splits, self._separator)
|
Reference in New Issue
Block a user