From 8fad1966f078f15036d27dd68ab65f64a7a4d6fe Mon Sep 17 00:00:00 2001 From: imartinez Date: Fri, 1 Mar 2024 09:15:43 +0100 Subject: [PATCH 1/3] Update setup script to point to the new settings --- scripts/setup | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/setup b/scripts/setup index e08516a2..3e02e641 100755 --- a/scripts/setup +++ b/scripts/setup @@ -19,19 +19,19 @@ os.makedirs(models_path, exist_ok=True) # Download Embedding model embedding_path = models_path / "embedding" -print(f"Downloading embedding {settings().local.embedding_hf_model_name}") +print(f"Downloading embedding {settings().huggingface.embedding_hf_model_name}") snapshot_download( - repo_id=settings().local.embedding_hf_model_name, + repo_id=settings().huggingface.embedding_hf_model_name, cache_dir=models_cache_path, local_dir=embedding_path, ) print("Embedding model downloaded!") # Download LLM and create a symlink to the model file -print(f"Downloading LLM {settings().local.llm_hf_model_file}") +print(f"Downloading LLM {settings().llamacpp.llm_hf_model_file}") hf_hub_download( - repo_id=settings().local.llm_hf_repo_id, - filename=settings().local.llm_hf_model_file, + repo_id=settings().llamacpp.llm_hf_repo_id, + filename=settings().llamacpp.llm_hf_model_file, cache_dir=models_cache_path, local_dir=models_path, resume_download=resume_download, From e1456c13fe36b1a6d290dd50a7094878dbbd87a7 Mon Sep 17 00:00:00 2001 From: imartinez Date: Fri, 1 Mar 2024 09:31:08 +0100 Subject: [PATCH 2/3] Windows note for setting env vars --- fern/docs/pages/installation/installation.mdx | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/fern/docs/pages/installation/installation.mdx b/fern/docs/pages/installation/installation.mdx index 4ff8b2ee..1d297562 100644 --- a/fern/docs/pages/installation/installation.mdx +++ b/fern/docs/pages/installation/installation.mdx @@ -61,6 +61,23 @@ Where `` can be any of the following: There are just some examples of 
recommended setups. You can mix and match the different options to fit your needs. You'll find more information in the Manual section of the documentation. +> **Important for Windows**: In the examples below on how to run PrivateGPT with `make run`, `PGPT_PROFILES` env var is being set inline following Unix command line syntax (works on MacOS and Linux). +If you are using Windows, you'll need to set the env var in a different way, for example: + +```powershell +# Powershell +$env:PGPT_PROFILES="ollama" +make run +``` + +or + +```cmd +# CMD +set PGPT_PROFILES=ollama +make run +``` + ### Local, Ollama-powered setup The easiest way to run PrivateGPT fully locally is to depend on Ollama for the LLM. Ollama provides a local LLM that is easy to install and use. From 274c3863127e8911428ffd8521f9103efc2fad62 Mon Sep 17 00:00:00 2001 From: imartinez Date: Fri, 1 Mar 2024 09:33:11 +0100 Subject: [PATCH 3/3] Fix error comment for openailike --- private_gpt/components/llm/llm_component.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py index 232d5b28..b553e2d9 100644 --- a/private_gpt/components/llm/llm_component.py +++ b/private_gpt/components/llm/llm_component.py @@ -87,7 +87,7 @@ class LLMComponent: from llama_index.llms.openai_like import OpenAILike # type: ignore except ImportError as e: raise ImportError( - "OpenAILike dependencies not found, install with `poetry install --extras llms-openailike`" + "OpenAILike dependencies not found, install with `poetry install --extras llms-openai-like`" ) from e openai_settings = settings.openai