diff --git a/fern/docs/pages/installation/installation.mdx b/fern/docs/pages/installation/installation.mdx
index 94ad82d8..b41c1e62 100644
--- a/fern/docs/pages/installation/installation.mdx
+++ b/fern/docs/pages/installation/installation.mdx
@@ -62,6 +62,23 @@ Where `<extra>` can be any of the following:
 These are just some examples of recommended setups. You can mix and match the different options to fit your needs.
 You'll find more information in the Manual section of the documentation.
 
+> **Important for Windows**: In the examples below on how to run PrivateGPT with `make run`, the `PGPT_PROFILES` env var is set inline following Unix command-line syntax (works on macOS and Linux).
+If you are using Windows, you'll need to set the env var in a different way, for example:
+
+```powershell
+# PowerShell
+$env:PGPT_PROFILES="ollama"
+make run
+```
+
+or
+
+```cmd
+# CMD
+set PGPT_PROFILES=ollama
+make run
+```
+
 ### Local, Ollama-powered setup
 
 The easiest way to run PrivateGPT fully locally is to depend on Ollama for the LLM. Ollama provides a local LLM that is easy to install and use.
diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py
index 767767b9..6b747982 100644
--- a/private_gpt/components/llm/llm_component.py
+++ b/private_gpt/components/llm/llm_component.py
@@ -87,7 +87,7 @@ class LLMComponent:
                     from llama_index.llms.openai_like import OpenAILike  # type: ignore
                 except ImportError as e:
                     raise ImportError(
-                        "OpenAILike dependencies not found, install with `poetry install --extras llms-openailike`"
+                        "OpenAILike dependencies not found, install with `poetry install --extras llms-openai-like`"
                     ) from e
 
                 openai_settings = settings.openai
diff --git a/scripts/setup b/scripts/setup
index e08516a2..3e02e641 100755
--- a/scripts/setup
+++ b/scripts/setup
@@ -19,19 +19,19 @@ os.makedirs(models_path, exist_ok=True)
 
 # Download Embedding model
 embedding_path = models_path / "embedding"
-print(f"Downloading embedding {settings().local.embedding_hf_model_name}")
+print(f"Downloading embedding {settings().huggingface.embedding_hf_model_name}")
 snapshot_download(
-    repo_id=settings().local.embedding_hf_model_name,
+    repo_id=settings().huggingface.embedding_hf_model_name,
     cache_dir=models_cache_path,
     local_dir=embedding_path,
 )
 print("Embedding model downloaded!")
 
 # Download LLM and create a symlink to the model file
-print(f"Downloading LLM {settings().local.llm_hf_model_file}")
+print(f"Downloading LLM {settings().llamacpp.llm_hf_model_file}")
 hf_hub_download(
-    repo_id=settings().local.llm_hf_repo_id,
-    filename=settings().local.llm_hf_model_file,
+    repo_id=settings().llamacpp.llm_hf_repo_id,
+    filename=settings().llamacpp.llm_hf_model_file,
     cache_dir=models_cache_path,
     local_dir=models_path,
     resume_download=resume_download,
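
For context on the `scripts/setup` hunk: the model lookups move from the old `local` settings section to `huggingface` (embedding model name) and `llamacpp` (LLM repo id and GGUF filename). Below is a minimal sketch of a sanity check for the renamed fields, assuming the `private_gpt.settings.settings` import path that `scripts/setup` already uses; the script name `check_settings.py` is hypothetical and not part of this patch:

```python
#!/usr/bin/env python3
# check_settings.py -- hypothetical helper, not part of this patch.
# Confirms the renamed settings sections resolve before scripts/setup
# tries to download anything.
from private_gpt.settings.settings import settings

s = settings()
# Embedding model now lives under the `huggingface` section:
print("embedding model:", s.huggingface.embedding_hf_model_name)
# LLM repo id and GGUF filename now live under `llamacpp`:
print("llm repo:", s.llamacpp.llm_hf_repo_id)
print("llm file:", s.llamacpp.llm_hf_model_file)
```

Run from the repo root (e.g. `poetry run python check_settings.py`); an `AttributeError` here would suggest the active profile still defines these values under the old `local` section.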