mirror of
https://github.com/hwchase17/langchain.git
synced 2025-09-17 07:26:16 +00:00
feat(core): add sanitize_for_postgres
utility to fix PostgreSQL NUL byte DataError (#32157)
This PR fixes the PostgreSQL NUL byte issue that causes `psycopg.DataError` when inserting documents containing `\x00` bytes into PostgreSQL-based vector stores. ## Problem PostgreSQL text fields cannot contain NUL (0x00) bytes. When documents with such characters are processed by PGVector or langchain-postgres implementations, they fail with: ``` (psycopg.DataError) PostgreSQL text fields cannot contain NUL (0x00) bytes ``` This commonly occurs when processing PDFs, documents from various loaders, or text extracted by libraries like unstructured that may contain embedded NUL bytes. ## Solution Added `sanitize_for_postgres()` utility function to `langchain_core.utils.strings` that removes or replaces NUL bytes from text content. ### Key Features - **Simple API**: `sanitize_for_postgres(text, replacement="")` - **Configurable**: Replace NUL bytes with empty string (default) or space for readability - **Comprehensive**: Handles all problematic examples from the original issue - **Well-tested**: Complete unit tests with real-world examples - **Backward compatible**: No breaking changes, purely additive ### Usage Example ```python from langchain_core.utils import sanitize_for_postgres from langchain_core.documents import Document # Before: This would fail with DataError problematic_content = "Getting\x00Started with embeddings" # After: Clean the content before database insertion clean_content = sanitize_for_postgres(problematic_content) # Result: "GettingStarted with embeddings" # Or preserve readability with spaces readable_content = sanitize_for_postgres(problematic_content, " ") # Result: "Getting Started with embeddings" # Use in Document processing doc = Document(page_content=clean_content, metadata={...}) ``` ### Integration Pattern PostgreSQL vector store implementations should sanitize content before insertion: ```python def add_documents(self, documents: List[Document]) -> List[str]: # Sanitize documents before insertion sanitized_docs = [] for doc in 
documents: sanitized_content = sanitize_for_postgres(doc.page_content, " ") sanitized_doc = Document( page_content=sanitized_content, metadata=doc.metadata, id=doc.id ) sanitized_docs.append(sanitized_doc) return self._insert_documents_to_db(sanitized_docs) ``` ## Changes Made - Added `sanitize_for_postgres()` function in `langchain_core/utils/strings.py` - Updated `langchain_core/utils/__init__.py` to export the new function - Added comprehensive unit tests in `tests/unit_tests/utils/test_strings.py` - Validated against all examples from the original issue report ## Testing All tests pass, including: - Basic NUL byte removal and replacement - Multiple consecutive NUL bytes - Empty string handling - Real examples from the GitHub issue - Backward compatibility with existing string utilities This utility enables PostgreSQL integrations in both langchain-community and langchain-postgres packages to handle documents with NUL bytes reliably. Fixes #26033. <!-- START COPILOT CODING AGENT TIPS --> --- 💬 Share your feedback on Copilot coding agent for the chance to win a $200 gift card! Click [here](https://survey.alchemer.com/s3/8343779/Copilot-Coding-agent) to start the survey. --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: mdrxy <61371264+mdrxy@users.noreply.github.com> Co-authored-by: Mason Daugherty <github@mdrxy.com>
This commit is contained in:
@@ -27,6 +27,7 @@ EXPECTED_ALL = [
     "pre_init",
     "from_env",
     "secret_from_env",
+    "sanitize_for_postgres",
 ]
49  libs/core/tests/unit_tests/utils/test_strings.py  (new file)
@@ -0,0 +1,49 @@
|
||||
"""Test string utilities."""
|
||||
|
||||
from langchain_core.utils.strings import (
|
||||
comma_list,
|
||||
sanitize_for_postgres,
|
||||
stringify_dict,
|
||||
stringify_value,
|
||||
)
|
||||
|
||||
|
||||
def test_sanitize_for_postgres() -> None:
    """Test sanitizing text for PostgreSQL compatibility."""
    nul_text = "Hello\x00world\x00test"

    # By default, NUL bytes are removed outright.
    assert sanitize_for_postgres(nul_text) == "Helloworldtest"

    # A replacement character can be substituted for each NUL byte.
    assert sanitize_for_postgres(nul_text, " ") == "Hello world test"

    # Text that contains no NUL bytes passes through unchanged.
    assert sanitize_for_postgres("Hello world") == "Hello world"

    # The empty string is handled gracefully.
    assert sanitize_for_postgres("") == ""

    # Each NUL byte in a consecutive run is processed individually.
    many_nuls = "Hello\x00\x00\x00world"
    assert sanitize_for_postgres(many_nuls) == "Helloworld"
    assert sanitize_for_postgres(many_nuls, "-") == "Hello---world"
||||
def test_existing_string_functions() -> None:
    """Test existing string functions still work."""
    # comma_list joins items of any type with ", ".
    assert comma_list([1, 2, 3]) == "1, 2, 3"
    assert comma_list(["a", "b", "c"]) == "a, b, c"

    # stringify_value converts scalars to their string form.
    assert stringify_value("hello") == "hello"
    assert stringify_value(42) == "42"

    # stringify_dict renders each entry as "key: value".
    sample = {"key": "value", "number": 123}
    rendered = stringify_dict(sample)
    assert "key: value" in rendered
    assert "number: 123" in rendered
Reference in New Issue
Block a user