Merge branch 'feature/upgrade-llamaindex' into feature/tensorrt-support

imartinez 2024-03-01 10:01:00 +01:00
commit 3aaf4d682b
3 changed files with 23 additions and 6 deletions

@@ -62,6 +62,23 @@ Where `<extra>` can be any of the following:
These are just some examples of recommended setups. You can mix and match the different options to fit your needs.
You'll find more information in the Manual section of the documentation.
> **Important for Windows**: In the examples below showing how to run PrivateGPT with `make run`, the `PGPT_PROFILES` env var is set inline following Unix command-line syntax (this works on macOS and Linux).
If you are using Windows, you'll need to set the env var in a different way, for example:
```powershell
# PowerShell
$env:PGPT_PROFILES="ollama"
make run
```
or
```cmd
# CMD
set PGPT_PROFILES=ollama
make run
```
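For reference, the equivalent Unix-style inline invocation (a minimal sketch using the same `ollama` profile as above) is:
```bash
# Bash / Zsh (macOS, Linux)
PGPT_PROFILES=ollama make run
```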
### Local, Ollama-powered setup
The easiest way to run PrivateGPT fully locally is to depend on Ollama for the LLM. Ollama provides a local LLM runtime that is easy to install and use.
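In practice, depending on Ollama means having a model available for it to serve before starting PrivateGPT. A rough sketch, where the model name `mistral` is an illustrative assumption rather than the profile's guaranteed default:
```bash
# Assumption: Ollama is already installed; pull a model for it to serve locally
ollama pull mistral
```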

@@ -87,7 +87,7 @@ class LLMComponent:
     from llama_index.llms.openai_like import OpenAILike  # type: ignore
 except ImportError as e:
     raise ImportError(
-        "OpenAILike dependencies not found, install with `poetry install --extras llms-openailike`"
+        "OpenAILike dependencies not found, install with `poetry install --extras llms-openai-like`"
     ) from e
 openai_settings = settings.openai
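
Since the extras name in this error message must match the one declared in `pyproject.toml`, the corrected message now points users at an install command along these lines (assuming a Poetry-managed checkout):
```bash
# Install the optional OpenAILike dependencies named by the fixed message
poetry install --extras llms-openai-like
```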

@@ -19,19 +19,19 @@ os.makedirs(models_path, exist_ok=True)
 # Download Embedding model
 embedding_path = models_path / "embedding"
-print(f"Downloading embedding {settings().local.embedding_hf_model_name}")
+print(f"Downloading embedding {settings().huggingface.embedding_hf_model_name}")
 snapshot_download(
-    repo_id=settings().local.embedding_hf_model_name,
+    repo_id=settings().huggingface.embedding_hf_model_name,
     cache_dir=models_cache_path,
     local_dir=embedding_path,
 )
 print("Embedding model downloaded!")
 # Download LLM and create a symlink to the model file
-print(f"Downloading LLM {settings().local.llm_hf_model_file}")
+print(f"Downloading LLM {settings().llamacpp.llm_hf_model_file}")
 hf_hub_download(
-    repo_id=settings().local.llm_hf_repo_id,
-    filename=settings().local.llm_hf_model_file,
+    repo_id=settings().llamacpp.llm_hf_repo_id,
+    filename=settings().llamacpp.llm_hf_model_file,
     cache_dir=models_cache_path,
     local_dir=models_path,
     resume_download=resume_download,
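
For context on what the renamed settings feed, here is a self-contained sketch of the two `huggingface_hub` calls this script drives; the repo and file names are illustrative assumptions, not the project's configured defaults:
```python
from pathlib import Path

from huggingface_hub import hf_hub_download, snapshot_download

models_path = Path("models")
models_cache_path = models_path / "cache"

# snapshot_download fetches an entire embedding model repo into a local directory.
snapshot_download(
    repo_id="BAAI/bge-small-en-v1.5",  # assumption: stand-in for embedding_hf_model_name
    cache_dir=models_cache_path,
    local_dir=models_path / "embedding",
)

# hf_hub_download fetches a single file, e.g. a GGUF weights file for llama.cpp.
hf_hub_download(
    repo_id="TheBloke/Mistral-7B-Instruct-v0.2-GGUF",  # assumption: stand-in for llm_hf_repo_id
    filename="mistral-7b-instruct-v0.2.Q4_K_M.gguf",   # assumption: stand-in for llm_hf_model_file
    cache_dir=models_cache_path,
    local_dir=models_path,
    resume_download=True,  # continue partially-downloaded files instead of restarting
)
```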