Format fixes

This commit is contained in:
imartinez 2024-02-29 14:50:47 +01:00
parent 8c390812ff
commit 34d48d7b4d
5 changed files with 12 additions and 11 deletions

View File

@@ -1,6 +1,5 @@
import logging
from pathlib import Path
from typing import Dict, Type
from llama_index.core.readers import StringIterableReader
from llama_index.core.readers.base import BaseReader
@@ -11,7 +10,7 @@ logger = logging.getLogger(__name__)
# Inspired by the `llama_index.core.readers.file.base` module
def _try_loading_included_file_formats() -> Dict[str, Type[BaseReader]]:
def _try_loading_included_file_formats() -> dict[str, type[BaseReader]]:
try:
from llama_index.readers.file.docs import DocxReader, HWPReader, PDFReader
from llama_index.readers.file.epub import EpubReader
@@ -19,13 +18,13 @@ def _try_loading_included_file_formats() -> Dict[str, Type[BaseReader]]:
from llama_index.readers.file.ipynb import IPYNBReader
from llama_index.readers.file.markdown import MarkdownReader
from llama_index.readers.file.mbox import MboxReader
from llama_index.readers.file.tabular import PandasCSVReader
from llama_index.readers.file.slides import PptxReader
from llama_index.readers.file.tabular import PandasCSVReader
from llama_index.readers.file.video_audio import VideoAudioReader
except ImportError:
raise ImportError("`llama-index-readers-file` package not found")
except ImportError as e:
raise ImportError("`llama-index-readers-file` package not found") from e
default_file_reader_cls: Dict[str, Type[BaseReader]] = {
default_file_reader_cls: dict[str, type[BaseReader]] = {
".hwp": HWPReader,
".pdf": PDFReader,
".docx": DocxReader,

View File

@@ -2,15 +2,14 @@ import logging
from injector import inject, singleton
from llama_index.core.llms import LLM, MockLLM
from llama_index.core.utils import set_global_tokenizer
from llama_index.core.settings import Settings as LlamaIndexSettings
from llama_index.core.utils import set_global_tokenizer
from transformers import AutoTokenizer # type: ignore
from private_gpt.components.llm.prompt_helper import get_prompt_style
from private_gpt.paths import models_cache_path, models_path
from private_gpt.settings.settings import Settings
logger = logging.getLogger(__name__)

View File

@@ -60,12 +60,14 @@ class VectorStoreComponent:
case "chroma":
try:
from private_gpt.components.vector_store.batched_chroma import \
BatchedChromaVectorStore
import chromadb # type: ignore
from chromadb.config import ( # type: ignore
Settings as ChromaSettings,
)
from private_gpt.components.vector_store.batched_chroma import (
BatchedChromaVectorStore,
)
except ImportError as e:
raise ImportError(
"ChromaDB dependencies not found, install with `poetry install --extras chroma`"

View File

@@ -6,6 +6,7 @@ from fastapi.middleware.cors import CORSMiddleware
from injector import Injector
from llama_index.core.callbacks import CallbackManager
from llama_index.core.callbacks.global_handlers import create_global_handler
from llama_index.core.settings import Settings as LlamaIndexSettings
from private_gpt.server.chat.chat_router import chat_router
from private_gpt.server.chunks.chunks_router import chunks_router
@@ -14,7 +15,6 @@ from private_gpt.server.embeddings.embeddings_router import embeddings_router
from private_gpt.server.health.health_router import health_router
from private_gpt.server.ingest.ingest_router import ingest_router
from private_gpt.settings.settings import Settings
from llama_index.core.settings import Settings as LlamaIndexSettings
logger = logging.getLogger(__name__)

View File

@@ -105,6 +105,7 @@ class ChatService:
# TODO ContextChatEngine is still not migrated by LlamaIndex to accept
# llm directly, so we are passing legacy ServiceContext until it is fixed.
from llama_index.core import ServiceContext
return ContextChatEngine.from_defaults(
system_prompt=system_prompt,
retriever=vector_index_retriever,