From 34d48d7b4d7f7c3a31fa954adc507992ea9df9a6 Mon Sep 17 00:00:00 2001
From: imartinez
Date: Thu, 29 Feb 2024 14:50:47 +0100
Subject: [PATCH] Format fixes

---
 private_gpt/components/ingest/ingest_helper.py         | 11 +++++------
 private_gpt/components/llm/llm_component.py            |  3 +--
 .../components/vector_store/vector_store_component.py  |  6 ++++--
 private_gpt/launcher.py                                |  2 +-
 private_gpt/server/chat/chat_service.py                |  1 +
 5 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/private_gpt/components/ingest/ingest_helper.py b/private_gpt/components/ingest/ingest_helper.py
index 15920c3b..9178e783 100644
--- a/private_gpt/components/ingest/ingest_helper.py
+++ b/private_gpt/components/ingest/ingest_helper.py
@@ -1,6 +1,5 @@
 import logging
 from pathlib import Path
-from typing import Dict, Type
 
 from llama_index.core.readers import StringIterableReader
 from llama_index.core.readers.base import BaseReader
@@ -11,7 +10,7 @@ logger = logging.getLogger(__name__)
 
 
 # Inspired by the `llama_index.core.readers.file.base` module
-def _try_loading_included_file_formats() -> Dict[str, Type[BaseReader]]:
+def _try_loading_included_file_formats() -> dict[str, type[BaseReader]]:
     try:
         from llama_index.readers.file.docs import DocxReader, HWPReader, PDFReader
         from llama_index.readers.file.epub import EpubReader
@@ -19,13 +18,13 @@ def _try_loading_included_file_formats() -> Dict[str, Type[BaseReader]]:
         from llama_index.readers.file.ipynb import IPYNBReader
         from llama_index.readers.file.markdown import MarkdownReader
         from llama_index.readers.file.mbox import MboxReader
-        from llama_index.readers.file.tabular import PandasCSVReader
         from llama_index.readers.file.slides import PptxReader
+        from llama_index.readers.file.tabular import PandasCSVReader
         from llama_index.readers.file.video_audio import VideoAudioReader
-    except ImportError:
-        raise ImportError("`llama-index-readers-file` package not found")
+    except ImportError as e:
+        raise ImportError("`llama-index-readers-file` package not found") from e
 
-    default_file_reader_cls: Dict[str, Type[BaseReader]] = {
+    default_file_reader_cls: dict[str, type[BaseReader]] = {
         ".hwp": HWPReader,
         ".pdf": PDFReader,
         ".docx": DocxReader,
diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py
index d9d5eceb..5956182d 100644
--- a/private_gpt/components/llm/llm_component.py
+++ b/private_gpt/components/llm/llm_component.py
@@ -2,15 +2,14 @@ import logging
 
 from injector import inject, singleton
 from llama_index.core.llms import LLM, MockLLM
-from llama_index.core.utils import set_global_tokenizer
 from llama_index.core.settings import Settings as LlamaIndexSettings
+from llama_index.core.utils import set_global_tokenizer
 from transformers import AutoTokenizer  # type: ignore
 
 from private_gpt.components.llm.prompt_helper import get_prompt_style
 from private_gpt.paths import models_cache_path, models_path
 from private_gpt.settings.settings import Settings
 
-
 
 logger = logging.getLogger(__name__)
 
diff --git a/private_gpt/components/vector_store/vector_store_component.py b/private_gpt/components/vector_store/vector_store_component.py
index ca60b4b4..b5541d65 100644
--- a/private_gpt/components/vector_store/vector_store_component.py
+++ b/private_gpt/components/vector_store/vector_store_component.py
@@ -60,12 +60,14 @@ class VectorStoreComponent:
         match settings.vectorstore.database:
             case "chroma":
                 try:
-                    from private_gpt.components.vector_store.batched_chroma import \
-                        BatchedChromaVectorStore
                     import chromadb  # type: ignore
                     from chromadb.config import (  # type: ignore
                         Settings as ChromaSettings,
                     )
+
+                    from private_gpt.components.vector_store.batched_chroma import (
+                        BatchedChromaVectorStore,
+                    )
                 except ImportError as e:
                     raise ImportError(
                         "ChromaDB dependencies not found, install with `poetry install --extras chroma`"
diff --git a/private_gpt/launcher.py b/private_gpt/launcher.py
index 1be2b838..5cce8c72 100644
--- a/private_gpt/launcher.py
+++ b/private_gpt/launcher.py
@@ -6,6 +6,7 @@ from fastapi.middleware.cors import CORSMiddleware
 from injector import Injector
 from llama_index.core.callbacks import CallbackManager
 from llama_index.core.callbacks.global_handlers import create_global_handler
+from llama_index.core.settings import Settings as LlamaIndexSettings
 
 from private_gpt.server.chat.chat_router import chat_router
 from private_gpt.server.chunks.chunks_router import chunks_router
@@ -14,7 +15,6 @@ from private_gpt.server.embeddings.embeddings_router import embeddings_router
 from private_gpt.server.health.health_router import health_router
 from private_gpt.server.ingest.ingest_router import ingest_router
 from private_gpt.settings.settings import Settings
-from llama_index.core.settings import Settings as LlamaIndexSettings
 
 logger = logging.getLogger(__name__)
 
diff --git a/private_gpt/server/chat/chat_service.py b/private_gpt/server/chat/chat_service.py
index 6b94f352..4fcd30cb 100644
--- a/private_gpt/server/chat/chat_service.py
+++ b/private_gpt/server/chat/chat_service.py
@@ -105,6 +105,7 @@ class ChatService:
             # TODO ContextChatEngine is still not migrated by LlamaIndex to accept
             # llm directly, so we are passing legacy ServiceContext until it is fixed.
             from llama_index.core import ServiceContext
+
             return ContextChatEngine.from_defaults(
                 system_prompt=system_prompt,
                 retriever=vector_index_retriever,