Nathan Lenas 2024-11-28 15:02:58 +01:00 committed by GitHub
commit 494bb9eea9
6 changed files with 136 additions and 24 deletions

View File

@@ -40,7 +40,12 @@ class BaseIngestComponent(abc.ABC):
         self.transformations = transformations

     @abc.abstractmethod
-    def ingest(self, file_name: str, file_data: Path) -> list[Document]:
+    def ingest(
+        self,
+        file_name: str,
+        file_data: Path,
+        file_metadata: dict[str, Any] | None = None,
+    ) -> list[Document]:
         pass

     @abc.abstractmethod
@@ -117,9 +122,16 @@ class SimpleIngestComponent(BaseIngestComponentWithIndex):
     ) -> None:
         super().__init__(storage_context, embed_model, transformations, *args, **kwargs)

-    def ingest(self, file_name: str, file_data: Path) -> list[Document]:
+    def ingest(
+        self,
+        file_name: str,
+        file_data: Path,
+        file_metadata: dict[str, Any] | None = None,
+    ) -> list[Document]:
         logger.info("Ingesting file_name=%s", file_name)
-        documents = IngestionHelper.transform_file_into_documents(file_name, file_data)
+        documents = IngestionHelper.transform_file_into_documents(
+            file_name, file_data, file_metadata
+        )
         logger.info(
             "Transformed file=%s into count=%s documents", file_name, len(documents)
         )
@@ -175,9 +187,16 @@ class BatchIngestComponent(BaseIngestComponentWithIndex):
             processes=self.count_workers
         )

-    def ingest(self, file_name: str, file_data: Path) -> list[Document]:
+    def ingest(
+        self,
+        file_name: str,
+        file_data: Path,
+        file_metadata: dict[str, Any] | None = None,
+    ) -> list[Document]:
         logger.info("Ingesting file_name=%s", file_name)
-        documents = IngestionHelper.transform_file_into_documents(file_name, file_data)
+        documents = IngestionHelper.transform_file_into_documents(
+            file_name, file_data, file_metadata
+        )
         logger.info(
             "Transformed file=%s into count=%s documents", file_name, len(documents)
         )
@@ -185,6 +204,7 @@ class BatchIngestComponent(BaseIngestComponentWithIndex):
         return self._save_docs(documents)

     def bulk_ingest(self, files: list[tuple[str, Path]]) -> list[Document]:
         documents = list(
             itertools.chain.from_iterable(
                 self._file_to_documents_work_pool.starmap(
@@ -257,12 +277,18 @@ class ParallelizedIngestComponent(BaseIngestComponentWithIndex):
             processes=self.count_workers
         )

-    def ingest(self, file_name: str, file_data: Path) -> list[Document]:
+    def ingest(
+        self,
+        file_name: str,
+        file_data: Path,
+        file_metadata: dict[str, Any] | None = None,
+    ) -> list[Document]:
         logger.info("Ingesting file_name=%s", file_name)
         # Running in a single (1) process to release the current
         # thread, and take a dedicated CPU core for computation
         documents = self._file_to_documents_work_pool.apply(
-            IngestionHelper.transform_file_into_documents, (file_name, file_data)
+            IngestionHelper.transform_file_into_documents,
+            (file_name, file_data, file_metadata),
         )
         logger.info(
             "Transformed file=%s into count=%s documents", file_name, len(documents)
@@ -271,9 +297,9 @@ class ParallelizedIngestComponent(BaseIngestComponentWithIndex):
         return self._save_docs(documents)

     def bulk_ingest(self, files: list[tuple[str, Path]]) -> list[Document]:
         # Lightweight threads, used for parallelize the
         # underlying IO calls made in the ingestion
         documents = list(
             itertools.chain.from_iterable(
                 self._ingest_work_pool.starmap(self.ingest, files)
@@ -459,8 +485,15 @@ class PipelineIngestComponent(BaseIngestComponentWithIndex):
         self.node_q.put(("flush", None, None, None))
         self.node_q.join()

-    def ingest(self, file_name: str, file_data: Path) -> list[Document]:
-        documents = IngestionHelper.transform_file_into_documents(file_name, file_data)
+    def ingest(
+        self,
+        file_name: str,
+        file_data: Path,
+        file_metadata: dict[str, Any] | None = None,
+    ) -> list[Document]:
+        documents = IngestionHelper.transform_file_into_documents(
+            file_name, file_data, file_metadata
+        )
         self.doc_q.put(("process", file_name, documents))
         self._flush()
         return documents
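All five ingest components change in lockstep: ingest() gains an optional file_metadata mapping, defaulting to None, and threads it through to IngestionHelper.transform_file_into_documents. A hypothetical call site, sketched under the assumption that an ingest_component instance is already wired up (names and values below are illustrative, not part of this diff):

    from pathlib import Path

    # Any BaseIngestComponent subclass now accepts the optional mapping;
    # omitting file_metadata preserves the pre-change behavior.
    docs = ingest_component.ingest(
        "report.pdf",
        Path("/tmp/report.pdf"),
        file_metadata={"department": "finance", "year": "2024"},
    )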

View File

@@ -1,5 +1,6 @@
 import logging
 from pathlib import Path
+from typing import Any

 from llama_index.core.readers import StringIterableReader
 from llama_index.core.readers.base import BaseReader
@@ -69,11 +70,13 @@ class IngestionHelper:

     @staticmethod
     def transform_file_into_documents(
-        file_name: str, file_data: Path
+        file_name: str, file_data: Path, file_metadata: dict[str, Any] | None = None
     ) -> list[Document]:
         documents = IngestionHelper._load_file_to_documents(file_name, file_data)
         for document in documents:
+            document.metadata.update(file_metadata or {})
             document.metadata["file_name"] = file_name
         IngestionHelper._exclude_metadata(documents)
         return documents
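Note the merge order in transform_file_into_documents: the caller's metadata is applied with update() first, and the reserved file_name key is written afterwards, so a caller-supplied file_name can never mask the real file name. A minimal sketch of that semantics, with illustrative values:

    # Mirrors the loop body above: update() first, then the reserved key wins.
    file_metadata = {"title": "Avatar", "file_name": "spoofed.txt"}
    doc_metadata: dict[str, str] = {}
    doc_metadata.update(file_metadata or {})
    doc_metadata["file_name"] = "real.txt"
    assert doc_metadata == {"title": "Avatar", "file_name": "real.txt"}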

View File

@@ -1,6 +1,7 @@
-from typing import Literal
+import json
+from typing import Any, Literal

-from fastapi import APIRouter, Depends, HTTPException, Request, UploadFile
+from fastapi import APIRouter, Depends, Form, HTTPException, Request, UploadFile
 from pydantic import BaseModel, Field

 from private_gpt.server.ingest.ingest_service import IngestService
@@ -20,6 +21,16 @@ class IngestTextBody(BaseModel):
             "Chinese martial arts."
         ]
     )
+    metadata: dict[str, Any] = Field(
+        None,
+        examples=[
+            {
+                "title": "Avatar: The Last Airbender",
+                "author": "Michael Dante DiMartino, Bryan Konietzko",
+                "year": "2005",
+            }
+        ],
+    )


 class IngestResponse(BaseModel):
@@ -38,9 +49,16 @@ def ingest(request: Request, file: UploadFile) -> IngestResponse:


 @ingest_router.post("/ingest/file", tags=["Ingestion"])
-def ingest_file(request: Request, file: UploadFile) -> IngestResponse:
+def ingest_file(
+    request: Request, file: UploadFile, metadata: str = Form(None)
+) -> IngestResponse:
     """Ingests and processes a file, storing its chunks to be used as context.

+    metadata: Optional metadata to be associated with the file.
+    You do not have to specify this field if not needed.
+    The metadata needs to be in JSON format.
+    e.g. {"title": "Avatar: The Last Airbender", "year": "2005"}
+
     The context obtained from files is later used in
     `/chat/completions`, `/completions`, and `/chunks` APIs.
@@ -57,7 +75,11 @@ def ingest_file(request: Request, file: UploadFile) -> IngestResponse:
     service = request.state.injector.get(IngestService)
     if file.filename is None:
         raise HTTPException(400, "No file name provided")
-    ingested_documents = service.ingest_bin_data(file.filename, file.file)
+    metadata_dict = None if metadata is None else json.loads(metadata)
+    ingested_documents = service.ingest_bin_data(
+        file.filename, file.file, metadata_dict
+    )
     return IngestResponse(object="list", model="private-gpt", data=ingested_documents)
@@ -73,11 +95,12 @@ def ingest_text(request: Request, body: IngestTextBody) -> IngestResponse:
     extracted Metadata (which is later used to improve context retrieval). That ID
     can be used to filter the context used to create responses in
     `/chat/completions`, `/completions`, and `/chunks` APIs.
     """
     service = request.state.injector.get(IngestService)
     if len(body.file_name) == 0:
         raise HTTPException(400, "No file name provided")
-    ingested_documents = service.ingest_text(body.file_name, body.text)
+    ingested_documents = service.ingest_text(body.file_name, body.text, body.metadata)
     return IngestResponse(object="list", model="private-gpt", data=ingested_documents)

View File

@@ -48,7 +48,12 @@ class IngestService:
             settings=settings(),
         )

-    def _ingest_data(self, file_name: str, file_data: AnyStr) -> list[IngestedDoc]:
+    def _ingest_data(
+        self,
+        file_name: str,
+        file_data: AnyStr,
+        file_metadata: dict[str, str] | None = None,
+    ) -> list[IngestedDoc]:
         logger.debug("Got file data of size=%s to ingest", len(file_data))
         # llama-index mainly supports reading from files, so
         # we have to create a tmp file to read for it to work
@@ -60,27 +65,37 @@ class IngestService:
                 path_to_tmp.write_bytes(file_data)
             else:
                 path_to_tmp.write_text(str(file_data))
-            return self.ingest_file(file_name, path_to_tmp)
+            return self.ingest_file(file_name, path_to_tmp, file_metadata)
         finally:
             tmp.close()
             path_to_tmp.unlink()

-    def ingest_file(self, file_name: str, file_data: Path) -> list[IngestedDoc]:
+    def ingest_file(
+        self,
+        file_name: str,
+        file_data: Path,
+        file_metadata: dict[str, str] | None = None,
+    ) -> list[IngestedDoc]:
         logger.info("Ingesting file_name=%s", file_name)
-        documents = self.ingest_component.ingest(file_name, file_data)
+        documents = self.ingest_component.ingest(file_name, file_data, file_metadata)
         logger.info("Finished ingestion file_name=%s", file_name)
         return [IngestedDoc.from_document(document) for document in documents]

-    def ingest_text(self, file_name: str, text: str) -> list[IngestedDoc]:
+    def ingest_text(
+        self, file_name: str, text: str, metadata: dict[str, str] | None = None
+    ) -> list[IngestedDoc]:
         logger.debug("Ingesting text data with file_name=%s", file_name)
-        return self._ingest_data(file_name, text)
+        return self._ingest_data(file_name, text, metadata)

     def ingest_bin_data(
-        self, file_name: str, raw_file_data: BinaryIO
+        self,
+        file_name: str,
+        raw_file_data: BinaryIO,
+        file_metadata: dict[str, str] | None = None,
     ) -> list[IngestedDoc]:
         logger.debug("Ingesting binary data with file_name=%s", file_name)
         file_data = raw_file_data.read()
-        return self._ingest_data(file_name, file_data)
+        return self._ingest_data(file_name, file_data, file_metadata)

     def bulk_ingest(self, files: list[tuple[str, Path]]) -> list[IngestedDoc]:
         logger.info("Ingesting file_names=%s", [f[0] for f in files])

View File

@@ -1,4 +1,6 @@
+import json
 from pathlib import Path
+from typing import Any

 import pytest
 from fastapi.testclient import TestClient
@@ -18,6 +20,20 @@ class IngestHelper:
         ingest_result = IngestResponse.model_validate(response.json())
         return ingest_result

+    def ingest_file_with_metadata(
+        self, path: Path, metadata: dict[str, Any]
+    ) -> IngestResponse:
+        files = {
+            "file": (path.name, path.open("rb")),
+            "metadata": (None, json.dumps(metadata)),
+        }
+
+        response = self.test_client.post("/v1/ingest/file", files=files)
+
+        assert response.status_code == 200
+        ingest_result = IngestResponse.model_validate(response.json())
+        return ingest_result
+

 @pytest.fixture
 def ingest_helper(test_client: TestClient) -> IngestHelper:

View File

@@ -44,3 +44,25 @@ def test_ingest_plain_text(test_client: TestClient) -> None:
     assert response.status_code == 200
     ingest_result = IngestResponse.model_validate(response.json())
     assert len(ingest_result.data) == 1
+
+
+def test_ingest_text_with_metadata(test_client: TestClient):
+    response = test_client.post(
+        "/v1/ingest/text",
+        json={"file_name": "file_name", "text": "text", "metadata": {"foo": "bar"}},
+    )
+    assert response.status_code == 200
+    ingest_result = IngestResponse.model_validate(response.json())
+    assert len(ingest_result.data) == 1
+
+    assert ingest_result.data[0].doc_metadata == {
+        "file_name": "file_name",
+        "foo": "bar",
+    }
+
+
+def test_ingest_accepts_txt_files_with_metadata(ingest_helper: IngestHelper) -> None:
+    path = Path(__file__).parents[0] / "test.txt"
+    ingest_result = ingest_helper.ingest_file_with_metadata(path, {"foo": "bar"})
+    assert len(ingest_result.data) == 1
+    assert ingest_result.data[0].doc_metadata == {"file_name": "test.txt", "foo": "bar"}