diff --git a/Dockerfile.llamacpp-cpu b/Dockerfile.llamacpp-cpu
index fce9915a..feab1659 100644
--- a/Dockerfile.llamacpp-cpu
+++ b/Dockerfile.llamacpp-cpu
@@ -1,6 +1,6 @@
 ### IMPORTANT: THIS IMAGE CAN ONLY BE RUN ON LINUX DOCKER
 ### You will run into a segfault on macOS
-FROM python:3.11.6-slim-bookworm as base
+FROM python:3.11.6-slim-bookworm AS base
 
 # Install poetry
 RUN pip install pipx
@@ -20,14 +20,14 @@ RUN apt update && apt install -y \
 # https://python-poetry.org/docs/configuration/#virtualenvsin-project
 ENV POETRY_VIRTUALENVS_IN_PROJECT=true
 
-FROM base as dependencies
+FROM base AS dependencies
 WORKDIR /home/worker/app
 COPY pyproject.toml poetry.lock ./
 
 ARG POETRY_EXTRAS="ui embeddings-huggingface llms-llama-cpp vector-stores-qdrant"
 RUN poetry install --no-root --extras "${POETRY_EXTRAS}"
 
-FROM base as app
+FROM base AS app
 
 ENV PYTHONUNBUFFERED=1
 ENV PORT=8080
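
The lowercase `as` keywords are uppercased here (and in Dockerfile.ollama below) to match `FROM`, which addresses BuildKit's FromAsCasing build check warning about mismatched keyword casing. The keyword itself is case-insensitive, so behavior is unchanged; the stage declaration simply becomes:

    FROM python:3.11.6-slim-bookworm AS base
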
diff --git a/Dockerfile.ollama b/Dockerfile.ollama
index 5a8b73ac..83fd1297 100644
--- a/Dockerfile.ollama
+++ b/Dockerfile.ollama
@@ -1,4 +1,4 @@
-FROM python:3.11.6-slim-bookworm as base
+FROM python:3.11.6-slim-bookworm AS base
 
 # Install poetry
 RUN pip install pipx
@@ -10,14 +10,14 @@ ENV PATH=".venv/bin/:$PATH"
 # https://python-poetry.org/docs/configuration/#virtualenvsin-project
 ENV POETRY_VIRTUALENVS_IN_PROJECT=true
 
-FROM base as dependencies
+FROM base AS dependencies
 WORKDIR /home/worker/app
 COPY pyproject.toml poetry.lock ./
 
 ARG POETRY_EXTRAS="ui vector-stores-qdrant llms-ollama embeddings-ollama"
 RUN poetry install --no-root --extras "${POETRY_EXTRAS}"
 
-FROM base as app
+FROM base AS app
 ENV PYTHONUNBUFFERED=1
 ENV PORT=8080
 ENV APP_ENV=prod
diff --git a/docker-compose.yaml b/docker-compose.yaml
index c2ef0f6d..2d6dff2e 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -8,11 +8,12 @@ services:
   # This service builds from an external Dockerfile and runs the Ollama mode.
   private-gpt-ollama:
     image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-ollama  # x-release-please-version
+    user: root
     build:
       context: .
       dockerfile: Dockerfile.ollama
     volumes:
-      - ./local_data/:/home/worker/app/local_data
+      - ./local_data:/home/worker/app/local_data
     ports:
       - "8001:8001"
     environment:
@@ -27,11 +28,14 @@ services:
       - ollama-cpu
       - ollama-cuda
       - ollama-api
+    depends_on:
+      - ollama
 
   # Private-GPT service for the local mode
   # This service builds from a local Dockerfile and runs the application in local mode.
   private-gpt-llamacpp-cpu:
     image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-llamacpp-cpu # x-release-please-version
+    user: root
     build:
       context: .
       dockerfile: Dockerfile.llamacpp-cpu
@@ -44,7 +48,7 @@ services:
     environment:
       PORT: 8001
       PGPT_PROFILES: local
-      HF_TOKEN: ${HF_TOKEN}
+      HF_TOKEN: ${HF_TOKEN:-}
     profiles:
       - llamacpp-cpu
 
@@ -57,7 +61,7 @@ services:
   ollama:
     image: traefik:v2.10
     ports:
-      - "8081:8080"
+      - "11434:11434"
     command:
       - "--providers.file.filename=/etc/router.yml"
       - "--log.level=ERROR"
@@ -98,4 +102,4 @@ services:
               count: 1
               capabilities: [gpu]
     profiles:
-      - ollama-cuda
\ No newline at end of file
+      - ollama-cuda
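
Notes on the compose changes, with a hedged smoke test (commands are illustrative, not part of this diff, and assume the traefik router.yml forwards requests to the selected Ollama backend):

    # Bring up the Ollama mode; the new depends_on makes the ollama
    # proxy service start before private-gpt-ollama.
    docker compose --profile ollama-cpu up --build

    # The proxy is now published on Ollama's standard port 11434
    # (instead of 8081), so host clients can reach the API directly:
    curl http://localhost:11434/api/tags

`HF_TOKEN: ${HF_TOKEN:-}` interpolates to an empty string instead of emitting a "variable is not set" warning when HF_TOKEN is undefined; export it before using the llamacpp-cpu profile if a Hugging Face token is actually required. The `user: root` additions presumably sidestep permission errors on the ./local_data bind mount when the host directory is not owned by the image's worker user.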