fix: Remove global state (#1216)

* Remove all global settings state

* chore: remove autogenerated class

* chore: cleanup

* chore: merge conflicts
This commit is contained in:
Pablo Orgaz
2023-11-12 22:20:36 +01:00
committed by GitHub
parent f394ca61bb
commit 022bd718e3
24 changed files with 286 additions and 190 deletions

View File

@@ -2,13 +2,13 @@ import argparse
import logging
from pathlib import Path
-from private_gpt.di import root_injector
+from private_gpt.di import global_injector
from private_gpt.server.ingest.ingest_service import IngestService
from private_gpt.server.ingest.ingest_watcher import IngestWatcher
logger = logging.getLogger(__name__)
-ingest_service = root_injector.get(IngestService)
+ingest_service = global_injector.get(IngestService)
parser = argparse.ArgumentParser(prog="ingest_folder.py")
parser.add_argument("folder", help="Folder to ingest")

View File

@@ -9,9 +9,9 @@ from private_gpt.settings.settings import settings
os.makedirs(models_path, exist_ok=True)
embedding_path = models_path / "embedding"
-print(f"Downloading embedding {settings.local.embedding_hf_model_name}")
+print(f"Downloading embedding {settings().local.embedding_hf_model_name}")
snapshot_download(
-repo_id=settings.local.embedding_hf_model_name,
+repo_id=settings().local.embedding_hf_model_name,
cache_dir=models_cache_path,
local_dir=embedding_path,
)
@@ -20,8 +20,8 @@ print("Downloading models for local execution...")
# Download LLM and create a symlink to the model file
hf_hub_download(
-repo_id=settings.local.llm_hf_repo_id,
-filename=settings.local.llm_hf_model_file,
+repo_id=settings().local.llm_hf_repo_id,
+filename=settings().local.llm_hf_model_file,
cache_dir=models_cache_path,
local_dir=models_path,
)