Mirror of https://github.com/imartinez/privateGPT.git (synced 2025-10-09 16:03:26 +00:00)

Comparing commits: 0.1.3 ... feat/docke (9 commits)
- 23704d23ad
- 1e82ac9d6f
- ca2b8da69c
- f09f6dd255
- 1c665f7900
- 1d4c14d7a3
- dae0727a1b
- 6674b46fea
- e44a7f5773
16  .docker/router.yml  (new file)

@@ -0,0 +1,16 @@

```yaml
http:
  services:
    ollama:
      loadBalancer:
        healthCheck:
          interval: 5s
          path: /
        servers:
          - url: http://ollama-cpu:11434
          - url: http://ollama-cuda:11434
          - url: http://host.docker.internal:11434

  routers:
    ollama-router:
      rule: "PathPrefix(`/`)"
      service: ollama
```
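Once the full compose stack (defined later in this diff) is up, this router can be exercised from the host. A minimal check, assuming the `11435:11434` port mapping docker-compose gives the Traefik service and Ollama's standard `/api/tags` endpoint:

```sh
# Reach whichever Ollama backend Traefik currently considers healthy
curl -s http://localhost:11435/api/tags
# Inspect the load balancer via Traefik's insecure API (published on 8081)
curl -s http://localhost:8081/api/http/services
```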
45  .github/workflows/docker.yml  (vendored, deleted)

@@ -1,45 +0,0 @@

```yaml
name: docker

on:
  release:
    types: [ published ]
  workflow_dispatch:

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Log in to the Container registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=sha
      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          file: Dockerfile.external
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
```
83  .github/workflows/generate-release.yml  (vendored, new file)

@@ -0,0 +1,83 @@

```yaml
name: generate-release

on:
  release:
    types: [ published ]
  workflow_dispatch:

env:
  REGISTRY: docker.io
  IMAGE_NAME: ${{ github.repository }}
  platforms: linux/amd64,linux/arm64
  DEFAULT_TYPE: "ollama"

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest

    strategy:
      matrix:
        type: [ llamacpp-cpu, ollama ]

    permissions:
      contents: read
      packages: write

    outputs:
      version: ${{ steps.version.outputs.version }}

    steps:
      - name: Free Disk Space (Ubuntu)
        uses: jlumbroso/free-disk-space@main
        with:
          tool-cache: false
          android: true
          dotnet: true
          haskell: true
          large-packages: true
          docker-images: false
          swap-storage: true

      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=semver,pattern={{version}},enable=${{ matrix.type == env.DEFAULT_TYPE }}
            type=semver,pattern={{version}}-${{ matrix.type }}
            type=semver,pattern={{major}}.{{minor}},enable=${{ matrix.type == env.DEFAULT_TYPE }}
            type=semver,pattern={{major}}.{{minor}}-${{ matrix.type }}
            type=raw,value=latest,enable=${{ matrix.type == env.DEFAULT_TYPE }}
            type=sha
          flavor: |
            latest=false

      - name: Build and push Docker image
        uses: docker/build-push-action@v6
        with:
          context: .
          file: Dockerfile.${{ matrix.type }}
          platforms: ${{ env.platforms }}
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}

      - name: Version output
        id: version
        run: echo "version=${{ steps.meta.outputs.version }}" >> "$GITHUB_OUTPUT"
```
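For a v0.6.1 release, this tag template should come out to image references like the ones below. A sketch, assuming the Docker Hub repository resolves to `zylonai/private-gpt` (the default image name used by docker-compose later in this diff):

```sh
# ollama is DEFAULT_TYPE, so it also receives the unsuffixed and `latest` tags
docker pull zylonai/private-gpt:0.6.1          # type=semver {{version}}
docker pull zylonai/private-gpt:0.6.1-ollama   # type=semver {{version}}-<matrix.type>
docker pull zylonai/private-gpt:0.6            # type=semver {{major}}.{{minor}}
docker pull zylonai/private-gpt:latest         # type=raw,value=latest
# the llamacpp-cpu flavor only receives suffixed tags
docker pull zylonai/private-gpt:0.6.1-llamacpp-cpu
```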
48  CHANGELOG.md

@@ -1,5 +1,53 @@

```markdown
# Changelog

## [0.6.1](https://github.com/zylon-ai/private-gpt/compare/v0.6.0...v0.6.1) (2024-08-05)

### Bug Fixes

* add built image from DockerHub ([#2042](https://github.com/zylon-ai/private-gpt/issues/2042)) ([f09f6dd](https://github.com/zylon-ai/private-gpt/commit/f09f6dd2553077d4566dbe6b48a450e05c2f049e))
* Adding azopenai to model list ([#2035](https://github.com/zylon-ai/private-gpt/issues/2035)) ([1c665f7](https://github.com/zylon-ai/private-gpt/commit/1c665f7900658144f62814b51f6e3434a6d7377f))
* **deploy:** generate docker release when new version is released ([#2038](https://github.com/zylon-ai/private-gpt/issues/2038)) ([1d4c14d](https://github.com/zylon-ai/private-gpt/commit/1d4c14d7a3c383c874b323d934be01afbaca899e))
* **deploy:** improve Docker-Compose and quickstart on Docker ([#2037](https://github.com/zylon-ai/private-gpt/issues/2037)) ([dae0727](https://github.com/zylon-ai/private-gpt/commit/dae0727a1b4abd35d2b0851fe30e0a4ed67e0fbb))

## [0.6.0](https://github.com/zylon-ai/private-gpt/compare/v0.5.0...v0.6.0) (2024-08-02)

### Features

* bump dependencies ([#1987](https://github.com/zylon-ai/private-gpt/issues/1987)) ([b687dc8](https://github.com/zylon-ai/private-gpt/commit/b687dc852413404c52d26dcb94536351a63b169d))
* **docs:** add privategpt-ts sdk ([#1924](https://github.com/zylon-ai/private-gpt/issues/1924)) ([d13029a](https://github.com/zylon-ai/private-gpt/commit/d13029a046f6e19e8ee65bef3acd96365c738df2))
* **docs:** Fix setup docu ([#1926](https://github.com/zylon-ai/private-gpt/issues/1926)) ([067a5f1](https://github.com/zylon-ai/private-gpt/commit/067a5f144ca6e605c99d7dbe9ca7d8207ac8808d))
* **docs:** update doc for ipex-llm ([#1968](https://github.com/zylon-ai/private-gpt/issues/1968)) ([19a7c06](https://github.com/zylon-ai/private-gpt/commit/19a7c065ef7f42b37f289dd28ac945f7afc0e73a))
* **docs:** update documentation and fix preview-docs ([#2000](https://github.com/zylon-ai/private-gpt/issues/2000)) ([4523a30](https://github.com/zylon-ai/private-gpt/commit/4523a30c8f004aac7a7ae224671e2c45ec0cb973))
* **llm:** add progress bar when ollama is pulling models ([#2031](https://github.com/zylon-ai/private-gpt/issues/2031)) ([cf61bf7](https://github.com/zylon-ai/private-gpt/commit/cf61bf780f8d122e4057d002abf03563bb45614a))
* **llm:** autopull ollama models ([#2019](https://github.com/zylon-ai/private-gpt/issues/2019)) ([20bad17](https://github.com/zylon-ai/private-gpt/commit/20bad17c9857809158e689e9671402136c1e3d84))
* **llm:** Support for Google Gemini LLMs and Embeddings ([#1965](https://github.com/zylon-ai/private-gpt/issues/1965)) ([fc13368](https://github.com/zylon-ai/private-gpt/commit/fc13368bc72d1f4c27644677431420ed77731c03))
* make llama3.1 as default ([#2022](https://github.com/zylon-ai/private-gpt/issues/2022)) ([9027d69](https://github.com/zylon-ai/private-gpt/commit/9027d695c11fbb01e62424b855665de71d513417))
* prompt_style applied to all LLMs + extra LLM params. ([#1835](https://github.com/zylon-ai/private-gpt/issues/1835)) ([e21bf20](https://github.com/zylon-ai/private-gpt/commit/e21bf20c10938b24711d9f2c765997f44d7e02a9))
* **recipe:** add our first recipe `Summarize` ([#2028](https://github.com/zylon-ai/private-gpt/issues/2028)) ([8119842](https://github.com/zylon-ai/private-gpt/commit/8119842ae6f1f5ecfaf42b06fa0d1ffec675def4))
* **vectordb:** Milvus vector db Integration ([#1996](https://github.com/zylon-ai/private-gpt/issues/1996)) ([43cc31f](https://github.com/zylon-ai/private-gpt/commit/43cc31f74015f8d8fcbf7a8ea7d7d9ecc66cf8c9))
* **vectorstore:** Add clickhouse support as vectore store ([#1883](https://github.com/zylon-ai/private-gpt/issues/1883)) ([2612928](https://github.com/zylon-ai/private-gpt/commit/26129288394c7483e6fc0496a11dc35679528cc1))

### Bug Fixes

* "no such group" error in Dockerfile, added docx2txt and cryptography deps ([#1841](https://github.com/zylon-ai/private-gpt/issues/1841)) ([947e737](https://github.com/zylon-ai/private-gpt/commit/947e737f300adf621d2261d527192f36f3387f8e))
* **config:** make tokenizer optional and include a troubleshooting doc ([#1998](https://github.com/zylon-ai/private-gpt/issues/1998)) ([01b7ccd](https://github.com/zylon-ai/private-gpt/commit/01b7ccd0648be032846647c9a184925d3682f612))
* **docs:** Fix concepts.mdx referencing to installation page ([#1779](https://github.com/zylon-ai/private-gpt/issues/1779)) ([dde0224](https://github.com/zylon-ai/private-gpt/commit/dde02245bcd51a7ede7b6789c82ae217cac53d92))
* **docs:** Update installation.mdx ([#1866](https://github.com/zylon-ai/private-gpt/issues/1866)) ([c1802e7](https://github.com/zylon-ai/private-gpt/commit/c1802e7cf0e56a2603213ec3b6a4af8fadb8a17a))
* ffmpy dependency ([#2020](https://github.com/zylon-ai/private-gpt/issues/2020)) ([dabf556](https://github.com/zylon-ai/private-gpt/commit/dabf556dae9cb00fe0262270e5138d982585682e))
* light mode ([#2025](https://github.com/zylon-ai/private-gpt/issues/2025)) ([1020cd5](https://github.com/zylon-ai/private-gpt/commit/1020cd53288af71a17882781f392512568f1b846))
* **LLM:** mistral ignoring assistant messages ([#1954](https://github.com/zylon-ai/private-gpt/issues/1954)) ([c7212ac](https://github.com/zylon-ai/private-gpt/commit/c7212ac7cc891f9e3c713cc206ae9807c5dfdeb6))
* **llm:** special tokens and leading space ([#1831](https://github.com/zylon-ai/private-gpt/issues/1831)) ([347be64](https://github.com/zylon-ai/private-gpt/commit/347be643f7929c56382a77c3f45f0867605e0e0a))
* make embedding_api_base match api_base when on docker ([#1859](https://github.com/zylon-ai/private-gpt/issues/1859)) ([2a432bf](https://github.com/zylon-ai/private-gpt/commit/2a432bf9c5582a94eb4052b1e80cabdb118d298e))
* nomic embeddings ([#2030](https://github.com/zylon-ai/private-gpt/issues/2030)) ([5465958](https://github.com/zylon-ai/private-gpt/commit/54659588b5b109a3dd17cca835e275240464d275))
* prevent to ingest local files (by default) ([#2010](https://github.com/zylon-ai/private-gpt/issues/2010)) ([e54a8fe](https://github.com/zylon-ai/private-gpt/commit/e54a8fe0433252808d0a60f6a08a43c9f5a42f3b))
* Replacing unsafe `eval()` with `json.loads()` ([#1890](https://github.com/zylon-ai/private-gpt/issues/1890)) ([9d0d614](https://github.com/zylon-ai/private-gpt/commit/9d0d614706581a8bfa57db45f62f84ab23d26f15))
* **settings:** enable cors by default so it will work when using ts sdk (spa) ([#1925](https://github.com/zylon-ai/private-gpt/issues/1925)) ([966af47](https://github.com/zylon-ai/private-gpt/commit/966af4771dbe5cf3fdf554b5fdf8f732407859c4))
* **ui:** gradio bug fixes ([#2021](https://github.com/zylon-ai/private-gpt/issues/2021)) ([d4375d0](https://github.com/zylon-ai/private-gpt/commit/d4375d078f18ba53562fd71651159f997fff865f))
* unify embedding models ([#2027](https://github.com/zylon-ai/private-gpt/issues/2027)) ([40638a1](https://github.com/zylon-ai/private-gpt/commit/40638a18a5713d60fec8fe52796dcce66d88258c))

## [0.5.0](https://github.com/zylon-ai/private-gpt/compare/v0.4.0...v0.5.0) (2024-04-02)
```
84  Dockerfile.local-cuda  (new file)

@@ -0,0 +1,84 @@

```dockerfile
FROM nvidia/cuda:12.5.1-cudnn-devel-ubuntu22.04 as base

# For tzdata
ENV DEBIAN_FRONTEND="noninteractive" TZ="Etc/UTC"

RUN apt-get update && apt-get upgrade -y \
    && apt-get install -y git build-essential \
    python3 python3-pip python3.11-venv gcc wget \
    ocl-icd-opencl-dev opencl-headers clinfo \
    libclblast-dev libopenblas-dev \
    && mkdir -p /etc/OpenCL/vendors && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd \
    && ln -sf /usr/bin/python3.11 /usr/bin/python3 \
    && python3 --version

# Install poetry
RUN pip install pipx
RUN python3 -m pipx ensurepath
RUN pipx install poetry==1.8.3
ENV PATH="/root/.local/bin:$PATH"
ENV PATH=".venv/bin/:$PATH"

# Dependencies to build llama-cpp
RUN apt update && apt install -y \
    libopenblas-dev\
    ninja-build\
    build-essential\
    pkg-config\
    wget

# https://python-poetry.org/docs/configuration/#virtualenvsin-project
ENV POETRY_VIRTUALENVS_IN_PROJECT=true

FROM base as dependencies
WORKDIR /home/worker/app
COPY pyproject.toml poetry.lock ./

ARG POETRY_EXTRAS="ui embeddings-huggingface llms-llama-cpp vector-stores-qdrant"
RUN poetry install --no-root --extras "${POETRY_EXTRAS}"

# Enable GPU support
ENV CUDA_DOCKER_ARCH=all
ENV GGML_CUDA=1
ENV TOKENIZERS_PARALLELISM=true
RUN CMAKE_ARGS="-DGGML_CUDA=on" \
    poetry run pip install \
    --force-reinstall \
    --no-cache-dir \
    --verbose \
    llama-cpp-python==0.2.84 \
    numpy==1.26.0

FROM base as app

ENV PYTHONUNBUFFERED=1
ENV PORT=8080
ENV APP_ENV=prod
ENV PYTHONPATH="$PYTHONPATH:/home/worker/app/private_gpt/"
EXPOSE 8080

# Prepare a non-root user
# More info about how to configure UIDs and GIDs in Docker:
# https://github.com/systemd/systemd/blob/main/docs/UIDS-GIDS.md

# Define the User ID (UID) for the non-root user
# UID 100 is chosen to avoid conflicts with existing system users
ARG UID=1000

# Define the Group ID (GID) for the non-root user
# GID 65534 is often used for the 'nogroup' or 'nobody' group
ARG GID=65534

RUN adduser --system --gid ${GID} --uid ${UID} --home /home/worker worker
WORKDIR /home/worker/app

RUN chown worker /home/worker/app
RUN mkdir local_data && chown worker local_data
RUN mkdir models && chown worker models
COPY --chown=worker --from=dependencies /home/worker/app/.venv/ .venv
COPY --chown=worker private_gpt/ private_gpt
COPY --chown=worker *.yaml ./
COPY --chown=worker scripts/ scripts

USER worker
ENTRYPOINT python -m private_gpt
```
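This image can also be built and run outside of compose. A sketch, where the tag name is arbitrary and `--gpus all` assumes the NVIDIA Container Toolkit is installed; note the ENTRYPOINT expects models to already be present under `models/`:

```sh
# Build the CUDA image from the repository root
docker build -f Dockerfile.local-cuda -t private-gpt:local-cuda .
# Run with GPU access, publishing the port the image EXPOSEs (8080)
docker run --rm --gpus all -p 8080:8080 \
  -v "$PWD/models:/home/worker/app/models" private-gpt:local-cuda
```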
docker-compose.yaml

```diff
@@ -1,19 +1,122 @@
 services:
-  private-gpt:
+
+  #-----------------------------------
+  #---- Private-GPT services ---------
+  #-----------------------------------
+
+  # Private-GPT service for the Ollama CPU and GPU modes
+  # This service builds from an external Dockerfile and runs the Ollama mode.
+  private-gpt-ollama:
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.1}-ollama
     build:
-      dockerfile: Dockerfile.external
+      context: .
+      dockerfile: Dockerfile.ollama
     volumes:
       - ./local_data/:/home/worker/app/local_data
     ports:
-      - 8001:8001
+      - "8001:8001"
     environment:
       PORT: 8001
       PGPT_PROFILES: docker
       PGPT_MODE: ollama
+      PGPT_EMBED_MODE: ollama
-  ollama:
-    image: ollama/ollama:latest
+      PGPT_OLLAMA_API_BASE: http://ollama:11434
+      HF_TOKEN: ${HF_TOKEN:-}
+    profiles:
+      - ""
+      - ollama-cpu
+      - ollama-cuda
+      - ollama-api
+
+  # Private-GPT service for the local mode
+  # This service builds from a local Dockerfile and runs the application in local mode.
+  private-gpt-llamacpp-cpu:
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.1}-llamacpp-cpu
+    build:
+      context: .
+      dockerfile: Dockerfile.llamacpp-cpu
+    volumes:
+      - ./local_data/:/home/worker/app/local_data
+      - ./models/:/home/worker/app/models
+    entrypoint: sh -c ".venv/bin/python scripts/setup && .venv/bin/python -m private_gpt"
     ports:
-      - 11434:11434
+      - "8001:8001"
+    environment:
+      PORT: 8001
+      PGPT_PROFILES: local
+      HF_TOKEN: ${HF_TOKEN}
+    profiles:
+      - llamacpp-cpu
+
+  # Private-GPT service for the local mode (with CUDA support)
+  # This service builds from a local Dockerfile and runs the application in local mode.
+  private-gpt-llamacpp-cuda:
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.1}-llamacpp-cuda
+    build:
+      context: .
+      dockerfile: Dockerfile.llamacpp-cuda
+    volumes:
+      - ./local_data/:/home/worker/app/local_data
+      - ./models/:/home/worker/app/models
+    entrypoint: sh -c ".venv/bin/python scripts/setup && .venv/bin/python -m private_gpt"
+    ports:
+      - "8001:8001"
+    environment:
+      PORT: 8001
+      PGPT_PROFILES: local
+      HF_TOKEN: ${HF_TOKEN}
+    profiles:
+      - llamacpp-cuda
+
+  #-----------------------------------
+  #---- Ollama services --------------
+  #-----------------------------------
+
+  # Traefik reverse proxy for the Ollama service
+  # This will route requests to the Ollama service based on the profile.
+  ollama:
+    image: traefik:v2.10
+    ports:
+      - "11435:11434"
+      - "8081:8080"
+    command:
+      - "--providers.file.filename=/etc/router.yml"
+      - "--log.level=ERROR"
+      - "--api.insecure=true"
+      - "--providers.docker=true"
+      - "--providers.docker.exposedbydefault=false"
+      - "--entrypoints.web.address=:11434"
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+      - ./.docker/router.yml:/etc/router.yml:ro
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    profiles:
+      - ""
+      - ollama-cpu
+      - ollama-cuda
+      - ollama-api
+
+  # Ollama service for the CPU mode
+  ollama-cpu:
+    image: ollama/ollama:latest
     volumes:
       - ./models:/root/.ollama
+    profiles:
+      - ""
+      - ollama
+
+  # Ollama service for the CUDA mode
+  ollama-cuda:
+    image: ollama/ollama:latest
+    volumes:
+      - ./models:/root/.ollama
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [gpu]
+    profiles:
+      - ollama-cuda
```
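Because every service here is gated behind profiles (the empty-string profile marks the defaults), it is worth checking which containers a given invocation would actually start. A sketch using the standard `docker compose config` command:

```sh
# Services started with no profile flag (the empty-profile defaults)
docker compose config --services
# Services started for the CUDA setup (swaps in the GPU-reserving ollama-cuda)
docker compose --profile ollama-cuda config --services
```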
fern/docs.yml

```diff
@@ -10,6 +10,9 @@ tabs:
   overview:
     display-name: Overview
     icon: "fa-solid fa-home"
+  quickstart:
+    display-name: Quickstart
+    icon: "fa-solid fa-rocket"
   installation:
     display-name: Installation
     icon: "fa-solid fa-download"
@@ -32,6 +35,12 @@ navigation:
         contents:
           - page: Introduction
             path: ./docs/pages/overview/welcome.mdx
+  - tab: quickstart
+    layout:
+      - section: Getting started
+        contents:
+          - page: Quickstart
+            path: ./docs/pages/quickstart/quickstart.mdx
   # How to install PrivateGPT, with FAQ and troubleshooting
   - tab: installation
     layout:
```
120  fern/docs/pages/quickstart/quickstart.mdx  (new file)

@@ -0,0 +1,120 @@

````mdx
This guide provides a quick start for running different profiles of PrivateGPT using Docker Compose.
The profiles cater to various environments, including Ollama setups (CPU, CUDA, MacOS), and a fully local setup.

By default, Docker Compose will download pre-built images from a remote registry when starting the services. However, you have the option to build the images locally if needed. Details on building the Docker images locally are provided at the end of this guide.

If you want to run PrivateGPT locally without Docker, refer to the [Local Installation Guide](/installation).

## Prerequisites
- **Docker and Docker Compose:** Ensure both are installed on your system.
  [Installation Guide for Docker](https://docs.docker.com/get-docker/), [Installation Guide for Docker Compose](https://docs.docker.com/compose/install/).
- **Clone PrivateGPT Repository:** Clone the PrivateGPT repository to your machine and navigate to the directory:
  ```sh
  git clone https://github.com/zylon-ai/private-gpt.git
  cd private-gpt
  ```

## Setups

### Ollama Setups (Recommended)

#### 1. Default/Ollama CPU

**Description:**
This profile runs the Ollama service using CPU resources. It is the standard configuration for running Ollama-based Private-GPT services without GPU acceleration.

**Run:**
To start the services using pre-built images, run:
```sh
docker-compose up
```
or with a specific profile:
```sh
docker-compose --profile ollama-cpu up
```

#### 2. Ollama Nvidia CUDA

**Description:**
This profile leverages GPU acceleration with CUDA support, suitable for computationally intensive tasks that benefit from GPU resources.

**Requirements:**
Ensure that your system has compatible GPU hardware and the necessary NVIDIA drivers installed. The installation process is detailed [here](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html).

**Run:**
To start the services with CUDA support using pre-built images, run:
```sh
docker-compose --profile ollama-cuda up
```

#### 3. Ollama External API

**Description:**
This profile is designed for running PrivateGPT using Ollama installed on the host machine. This setup is particularly useful for MacOS users, as Docker does not yet support Metal GPU.

**Requirements:**
Install Ollama on your machine by following the instructions at [ollama.ai](https://ollama.ai/).

**Run:**
To start the Ollama service, use:
```sh
OLLAMA_HOST=0.0.0.0 ollama serve
```
To start the services with the host configuration using pre-built images, run:
```sh
docker-compose --profile ollama-api up
```

### Fully Local Setups

#### 1. LlamaCPP CPU

**Description:**
This profile runs the Private-GPT services locally using `llama-cpp` and Hugging Face models.

**Requirements:**
A **Hugging Face Token (HF_TOKEN)** is required for accessing Hugging Face models. Obtain your token following [this guide](/installation/getting-started/troubleshooting#downloading-gated-and-private-models).

**Run:**
Start the services with your Hugging Face token using pre-built images:
```sh
HF_TOKEN=<your_hf_token> docker-compose --profile llamacpp-cpu up
```
Replace `<your_hf_token>` with your actual Hugging Face token.

#### 2. LlamaCPP CUDA

**Description:**
This profile runs the Private-GPT services locally with CUDA acceleration, using `llama-cpp` and Hugging Face models.

**Requirements:**
A **Hugging Face Token (HF_TOKEN)** is required for accessing Hugging Face models. Obtain your token following [this guide](/installation/getting-started/troubleshooting#downloading-gated-and-private-models).

**Run:**
Start the services with your Hugging Face token using pre-built images:
```sh
HF_TOKEN=<your_hf_token> docker-compose --profile llamacpp-cuda up
```
Replace `<your_hf_token>` with your actual Hugging Face token.

## Building Locally

If you prefer to build Docker images locally, which is useful when making changes to the codebase or the Dockerfiles, follow these steps:

### Building Locally
To build the Docker images locally, navigate to the cloned repository directory and run:
```sh
docker-compose build
```
This command compiles the necessary Docker images based on the current codebase and Dockerfile configurations.

### Forcing a Rebuild with --build
If you have made changes and need to ensure these changes are reflected in the Docker images, you can force a rebuild before starting the services:
```sh
docker-compose up --build
```
or with a specific profile:
```sh
docker-compose --profile <profile_name> up --build
```
Replace `<profile_name>` with the desired profile.
````
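Whichever profile is chosen, a quick smoke test once the stack is up. A sketch, assuming the `8001:8001` mapping from docker-compose and PrivateGPT's `/health` endpoint:

```sh
# The Gradio UI is served at http://localhost:8001
# The API should report healthy once the service has finished starting
curl -s http://localhost:8001/health
```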
private_gpt/ui/ui.py

```diff
@@ -519,6 +519,7 @@ class PrivateGptUi:
             "llamacpp": config_settings.llamacpp.llm_hf_model_file,
             "openai": config_settings.openai.model,
             "openailike": config_settings.openai.model,
+            "azopenai": config_settings.azopenai.llm_model,
             "sagemaker": config_settings.sagemaker.llm_endpoint_name,
             "mock": llm_mode,
             "ollama": config_settings.ollama.llm_model,
```
pyproject.toml

```diff
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "private-gpt"
-version = "0.5.0"
+version = "0.6.0"
 description = "Private GPT"
 authors = ["Zylon <hi@zylon.ai>"]
```
version.txt

```diff
@@ -1 +1 @@
-0.5.0
+0.6.1
```