Merge branch 'main' into itsliamdowd/main

# Conflicts:
#	poetry.lock
Javier Martinez 2024-09-11 16:36:46 +02:00
commit 277f81081f
20 changed files with 429 additions and 112 deletions

.docker/router.yml Normal file
@@ -0,0 +1,16 @@
http:
  services:
    ollama:
      loadBalancer:
        healthCheck:
          interval: 5s
          path: /
        servers:
          - url: http://ollama-cpu:11434
          - url: http://ollama-cuda:11434
          - url: http://host.docker.internal:11434

  routers:
    ollama-router:
      rule: "PathPrefix(`/`)"
      service: ollama
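As a quick sanity check once the compose stack below is running, Traefik's API (enabled by `--api.insecure=true` and published on port 8081 in docker-compose.yaml) can report per-server health for this load balancer; a file-provider service like the one above is exposed under the name `ollama@file`. A minimal sketch, assuming default ports:

```sh
# Lists the ollama service, including the health of each server
# probed by the healthCheck above.
curl -s http://localhost:8081/api/http/services/ollama@file
```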

.github/release_please/.release-please-config.json Normal file
@@ -0,0 +1,19 @@
{
  "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json",
  "release-type": "simple",
  "version-file": "version.txt",
  "extra-files": [
    {
      "type": "toml",
      "path": "pyproject.toml",
      "jsonpath": "$.tool.poetry.version"
    },
    {
      "type": "generic",
      "path": "docker-compose.yaml"
    }
  ],
  "packages": {
    ".": {}
  }
}

.github/release_please/.release-please-manifest.json Normal file
@@ -0,0 +1,3 @@
{
  ".": "0.6.2"
}
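For context: the `toml` updater above rewrites `tool.poetry.version` in pyproject.toml, while the `generic` updater only touches lines carrying an `x-release-please-version` marker comment (visible in the docker-compose.yaml diff below). A quick way to see which lines release-please will bump, assuming you run it from the repository root:

```sh
# Every matching line is rewritten with the new version on release.
grep -n "x-release-please-version" docker-compose.yaml
```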

.github/workflows/docker.yml Deleted file
@@ -1,45 +0,0 @@
name: docker

on:
  release:
    types: [ published ]
  workflow_dispatch:

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Log in to the Container registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=sha
      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          file: Dockerfile.external
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}

.github/workflows/generate-release.yml vendored Normal file
@@ -0,0 +1,83 @@
name: generate-release

on:
  release:
    types: [ published ]
  workflow_dispatch:

env:
  REGISTRY: docker.io
  IMAGE_NAME: zylonai/private-gpt
  platforms: linux/amd64,linux/arm64
  DEFAULT_TYPE: "ollama"

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        type: [ llamacpp-cpu, ollama ]
    permissions:
      contents: read
      packages: write
    outputs:
      version: ${{ steps.version.outputs.version }}
    steps:
      - name: Free Disk Space (Ubuntu)
        uses: jlumbroso/free-disk-space@main
        with:
          tool-cache: false
          android: true
          dotnet: true
          haskell: true
          large-packages: true
          docker-images: false
          swap-storage: true
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=semver,pattern={{version}},enable=${{ matrix.type == env.DEFAULT_TYPE }}
            type=semver,pattern={{version}}-${{ matrix.type }}
            type=semver,pattern={{major}}.{{minor}},enable=${{ matrix.type == env.DEFAULT_TYPE }}
            type=semver,pattern={{major}}.{{minor}}-${{ matrix.type }}
            type=raw,value=latest,enable=${{ matrix.type == env.DEFAULT_TYPE }}
            type=sha
          flavor: |
            latest=false
      - name: Build and push Docker image
        uses: docker/build-push-action@v6
        with:
          context: .
          file: Dockerfile.${{ matrix.type }}
          platforms: ${{ env.platforms }}
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
      - name: Version output
        id: version
        run: echo "version=${{ steps.meta.outputs.version }}" >> "$GITHUB_OUTPUT"

.github/workflows/release-please.yml

@@ -13,7 +13,8 @@ jobs:
  release-please:
    runs-on: ubuntu-latest
    steps:
-      - uses: google-github-actions/release-please-action@v3
+      - uses: google-github-actions/release-please-action@v4
+        id: release
        with:
-          release-type: simple
-          version-file: version.txt
+          config-file: .github/release_please/.release-please-config.json
+          manifest-file: .github/release_please/.release-please-manifest.json

.github/workflows/tests.yml

@@ -14,7 +14,7 @@ jobs:
  setup:
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
      - uses: ./.github/workflows/actions/install_dependencies

  checks:
@@ -28,7 +28,7 @@ jobs:
      - ruff
      - mypy
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
      - uses: ./.github/workflows/actions/install_dependencies
      - name: run ${{ matrix.quality-command }}
        run: make ${{ matrix.quality-command }}
@@ -38,7 +38,7 @@ jobs:
    runs-on: ubuntu-latest
    name: test
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
      - uses: ./.github/workflows/actions/install_dependencies
      - name: run test
        run: make test-coverage

CHANGELOG.md

@@ -1,5 +1,25 @@
# Changelog

+## [0.6.2](https://github.com/zylon-ai/private-gpt/compare/v0.6.1...v0.6.2) (2024-08-08)
+
+### Bug Fixes
+
+* add numpy issue to troubleshooting ([#2048](https://github.com/zylon-ai/private-gpt/issues/2048)) ([4ca6d0c](https://github.com/zylon-ai/private-gpt/commit/4ca6d0cb556be7a598f7d3e3b00d2a29214ee1e8))
+* auto-update version ([#2052](https://github.com/zylon-ai/private-gpt/issues/2052)) ([7fefe40](https://github.com/zylon-ai/private-gpt/commit/7fefe408b4267684c6e3c1a43c5dc2b73ec61fe4))
+* publish image name ([#2043](https://github.com/zylon-ai/private-gpt/issues/2043)) ([b1acf9d](https://github.com/zylon-ai/private-gpt/commit/b1acf9dc2cbca2047cd0087f13254ff5cda6e570))
+* update matplotlib to 3.9.1-post1 to fix win install ([b16abbe](https://github.com/zylon-ai/private-gpt/commit/b16abbefe49527ac038d235659854b98345d5387))
+
+## [0.6.1](https://github.com/zylon-ai/private-gpt/compare/v0.6.0...v0.6.1) (2024-08-05)
+
+### Bug Fixes
+
+* add built image from DockerHub ([#2042](https://github.com/zylon-ai/private-gpt/issues/2042)) ([f09f6dd](https://github.com/zylon-ai/private-gpt/commit/f09f6dd2553077d4566dbe6b48a450e05c2f049e))
+* Adding azopenai to model list ([#2035](https://github.com/zylon-ai/private-gpt/issues/2035)) ([1c665f7](https://github.com/zylon-ai/private-gpt/commit/1c665f7900658144f62814b51f6e3434a6d7377f))
+* **deploy:** generate docker release when new version is released ([#2038](https://github.com/zylon-ai/private-gpt/issues/2038)) ([1d4c14d](https://github.com/zylon-ai/private-gpt/commit/1d4c14d7a3c383c874b323d934be01afbaca899e))
+* **deploy:** improve Docker-Compose and quickstart on Docker ([#2037](https://github.com/zylon-ai/private-gpt/issues/2037)) ([dae0727](https://github.com/zylon-ai/private-gpt/commit/dae0727a1b4abd35d2b0851fe30e0a4ed67e0fbb))
+
## [0.6.0](https://github.com/zylon-ai/private-gpt/compare/v0.5.0...v0.6.0) (2024-08-02)

docker-compose.yaml
@@ -1,19 +1,101 @@
services:
-  private-gpt:
+  #-----------------------------------
+  #---- Private-GPT services ---------
+  #-----------------------------------
+
+  # Private-GPT service for the Ollama CPU and GPU modes
+  # This service builds from an external Dockerfile and runs the Ollama mode.
+  private-gpt-ollama:
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-ollama # x-release-please-version
    build:
-      dockerfile: Dockerfile.external
+      context: .
+      dockerfile: Dockerfile.ollama
    volumes:
      - ./local_data/:/home/worker/app/local_data
    ports:
-      - 8001:8001
+      - "8001:8001"
    environment:
      PORT: 8001
      PGPT_PROFILES: docker
      PGPT_MODE: ollama
      PGPT_EMBED_MODE: ollama
-  ollama:
-    image: ollama/ollama:latest
+      PGPT_OLLAMA_API_BASE: http://ollama:11434
+      HF_TOKEN: ${HF_TOKEN:-}
+    profiles:
+      - ""
+      - ollama-cpu
+      - ollama-cuda
+      - ollama-api
+
+  # Private-GPT service for the local mode
+  # This service builds from a local Dockerfile and runs the application in local mode.
+  private-gpt-llamacpp-cpu:
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.2}-llamacpp-cpu # x-release-please-version
+    build:
+      context: .
+      dockerfile: Dockerfile.llamacpp-cpu
+    volumes:
+      - ./local_data/:/home/worker/app/local_data
+      - ./models/:/home/worker/app/models
+    entrypoint: sh -c ".venv/bin/python scripts/setup && .venv/bin/python -m private_gpt"
    ports:
-      - 11434:11434
+      - "8001:8001"
+    environment:
+      PORT: 8001
+      PGPT_PROFILES: local
+      HF_TOKEN: ${HF_TOKEN}
+    profiles:
+      - llamacpp-cpu
+
+  #-----------------------------------
+  #---- Ollama services --------------
+  #-----------------------------------
+
+  # Traefik reverse proxy for the Ollama service
+  # This will route requests to the Ollama service based on the profile.
+  ollama:
+    image: traefik:v2.10
+    ports:
+      - "8081:8080"
+    command:
+      - "--providers.file.filename=/etc/router.yml"
+      - "--log.level=ERROR"
+      - "--api.insecure=true"
+      - "--providers.docker=true"
+      - "--providers.docker.exposedbydefault=false"
+      - "--entrypoints.web.address=:11434"
    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+      - ./.docker/router.yml:/etc/router.yml:ro
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    profiles:
+      - ""
+      - ollama-cpu
+      - ollama-cuda
+      - ollama-api
+
+  # Ollama service for the CPU mode
+  ollama-cpu:
+    image: ollama/ollama:latest
+    volumes:
      - ./models:/root/.ollama
+    profiles:
+      - ""
+      - ollama-cpu
+
+  # Ollama service for the CUDA mode
+  ollama-cuda:
+    image: ollama/ollama:latest
+    volumes:
+      - ./models:/root/.ollama
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [gpu]
+    profiles:
+      - ollama-cuda
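To verify that the CUDA device reservation actually reaches the container, one option (assuming the NVIDIA Container Toolkit is installed on the host) is to run `nvidia-smi` inside the `ollama-cuda` service:

```sh
docker-compose --profile ollama-cuda up -d
# Should list the single reserved GPU.
docker-compose --profile ollama-cuda exec ollama-cuda nvidia-smi
```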

fern/docs.yml
@@ -10,6 +10,9 @@ tabs:
  overview:
    display-name: Overview
    icon: "fa-solid fa-home"
+  quickstart:
+    display-name: Quickstart
+    icon: "fa-solid fa-rocket"
  installation:
    display-name: Installation
    icon: "fa-solid fa-download"
@@ -32,6 +35,12 @@ navigation:
      contents:
        - page: Introduction
          path: ./docs/pages/overview/welcome.mdx
+  - tab: quickstart
+    layout:
+      - section: Getting started
+        contents:
+          - page: Quickstart
+            path: ./docs/pages/quickstart/quickstart.mdx
  # How to install PrivateGPT, with FAQ and troubleshooting
  - tab: installation
    layout:
fern/docs/pages/installation/installation.mdx
@@ -307,11 +307,12 @@ If you have all required dependencies properly configured, running the
following PowerShell command should succeed.

```powershell
-$env:CMAKE_ARGS='-DLLAMA_CUBLAS=on'; poetry run pip install --force-reinstall --no-cache-dir llama-cpp-python
+$env:CMAKE_ARGS='-DLLAMA_CUBLAS=on'; poetry run pip install --force-reinstall --no-cache-dir llama-cpp-python numpy==1.26.0
```

If your installation was correct, you should see a message similar to the following the next
-time you start the server: `BLAS = 1`.
+time you start the server: `BLAS = 1`. If you run into issues, refer to the
+[troubleshooting](/installation/getting-started/troubleshooting#building-llama-cpp-with-nvidia-gpu-support) section.

```console
llama_new_context_with_model: total VRAM used: 4857.93 MB (model: 4095.05 MB, context: 762.87 MB)
@@ -339,11 +340,12 @@ Some tips:
After that, running the following command in the repository will install llama.cpp with GPU support:

```bash
-CMAKE_ARGS='-DLLAMA_CUBLAS=on' poetry run pip install --force-reinstall --no-cache-dir llama-cpp-python
+CMAKE_ARGS='-DLLAMA_CUBLAS=on' poetry run pip install --force-reinstall --no-cache-dir llama-cpp-python numpy==1.26.0
```

If your installation was correct, you should see a message similar to the following the next
-time you start the server: `BLAS = 1`.
+time you start the server: `BLAS = 1`. If you run into issues, refer to the
+[troubleshooting](/installation/getting-started/troubleshooting#building-llama-cpp-with-nvidia-gpu-support) section.

```
llama_new_context_with_model: total VRAM used: 4857.93 MB (model: 4095.05 MB, context: 762.87 MB)

fern/docs/pages/installation/troubleshooting.mdx
@@ -46,4 +46,19 @@ huggingface:
  embedding:
    embed_dim: 384
```
</Callout>
+
+# Building Llama-cpp with NVIDIA GPU support
+
+## Out-of-memory error
+
+If you encounter an out-of-memory error while running `llama-cpp` with CUDA, try the following steps to resolve the issue:
+1. **Set the following environment variable:**
+```bash
+TOKENIZERS_PARALLELISM=true
+```
+2. **Run PrivateGPT:**
+```bash
+poetry run python -m private_gpt
+```
+Thanks to [MarioRossiGithub](https://github.com/MarioRossiGithub) for providing this solution.
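Equivalently, the two steps can be collapsed into a single shell invocation:

```sh
# Set the variable only for this run, then start PrivateGPT.
TOKENIZERS_PARALLELISM=true poetry run python -m private_gpt
```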

fern/docs/pages/quickstart/quickstart.mdx Normal file
@@ -0,0 +1,105 @@
This guide provides a quick start for running different profiles of PrivateGPT using Docker Compose.
The profiles cater to various environments, including Ollama setups (CPU, CUDA, macOS) and a fully local setup.

By default, Docker Compose will download pre-built images from a remote registry when starting the services. However, you have the option to build the images locally if needed. Details on building the Docker images locally are provided at the end of this guide.

If you want to run PrivateGPT locally without Docker, refer to the [Local Installation Guide](/installation).
## Prerequisites
- **Docker and Docker Compose:** Ensure both are installed on your system.
[Installation Guide for Docker](https://docs.docker.com/get-docker/), [Installation Guide for Docker Compose](https://docs.docker.com/compose/install/).
- **Clone PrivateGPT Repository:** Clone the PrivateGPT repository to your machine and navigate to the directory:
```sh
git clone https://github.com/zylon-ai/private-gpt.git
cd private-gpt
```
## Setups
### Ollama Setups (Recommended)
#### 1. Default/Ollama CPU
**Description:**
This profile runs the Ollama service using CPU resources. It is the standard configuration for running Ollama-based Private-GPT services without GPU acceleration.
**Run:**
To start the services using pre-built images, run:
```sh
docker-compose up
```
or with a specific profile:
```sh
docker-compose --profile ollama-cpu up
```
#### 2. Ollama Nvidia CUDA
**Description:**
This profile leverages GPU acceleration with CUDA support, suitable for computationally intensive tasks that benefit from GPU resources.
**Requirements:**
Ensure that your system has compatible GPU hardware and the necessary NVIDIA drivers installed. The installation process is detailed [here](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html).
**Run:**
To start the services with CUDA support using pre-built images, run:
```sh
docker-compose --profile ollama-cuda up
```
#### 3. Ollama External API
**Description:**
This profile is designed for running PrivateGPT using Ollama installed on the host machine. This setup is particularly useful for macOS users, as Docker does not yet support Metal GPU acceleration.
**Requirements:**
Install Ollama on your machine by following the instructions at [ollama.ai](https://ollama.ai/).
**Run:**
To start the Ollama service, use:
```sh
OLLAMA_HOST=0.0.0.0 ollama serve
```
To start the services with the host configuration using pre-built images, run:
```sh
docker-compose --profile ollama-api up
```
### Fully Local Setups
#### 1. LlamaCPP CPU
**Description:**
This profile runs the Private-GPT services locally using `llama-cpp` and Hugging Face models.
**Requirements:**
A **Hugging Face Token (HF_TOKEN)** is required for accessing Hugging Face models. Obtain your token following [this guide](/installation/getting-started/troubleshooting#downloading-gated-and-private-models).
**Run:**
Start the services with your Hugging Face token using pre-built images:
```sh
HF_TOKEN=<your_hf_token> docker-compose --profile llamacpp-cpu up
```
Replace `<your_hf_token>` with your actual Hugging Face token.
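As an alternative to passing the token inline, Docker Compose also reads variables from a `.env` file in the project root, which keeps the token out of your shell history (a sketch; `<your_hf_token>` is still your own token):

```sh
echo "HF_TOKEN=<your_hf_token>" >> .env
docker-compose --profile llamacpp-cpu up
```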
## Building Locally
If you prefer to build the Docker images locally, which is useful when making changes to the codebase or the Dockerfiles, follow these steps:
### Building the Images
To build the Docker images locally, navigate to the cloned repository directory and run:
```sh
docker-compose build
```
This command builds the Docker images from the current codebase and Dockerfile configurations.
### Forcing a Rebuild with --build
If you have made changes and need to ensure these changes are reflected in the Docker images, you can force a rebuild before starting the services:
```sh
docker-compose up --build
```
or with a specific profile:
```sh
docker-compose --profile <profile_name> up --build
```
Replace `<profile_name>` with the desired profile.
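If only one image changed, you can also rebuild a single service rather than everything, using the service names from docker-compose.yaml (for example, the default Ollama-mode image):

```sh
# Rebuilds just the private-gpt-ollama image defined in docker-compose.yaml.
docker-compose build private-gpt-ollama
```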

poetry.lock generated
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "aiofiles"
@@ -1248,18 +1248,14 @@ standard = ["fastapi", "uvicorn[standard] (>=0.15.0)"]

[[package]]
name = "ffmpy"
-version = "0.3.2"
-description = "A simple Python wrapper for ffmpeg"
-optional = true
-python-versions = "*"
-files = []
-develop = false
-
-[package.source]
-type = "git"
-url = "https://github.com/EuDs63/ffmpy.git"
-reference = "333a19ee4d21f32537c0508aa1942ef1aa7afe24"
-resolved_reference = "333a19ee4d21f32537c0508aa1942ef1aa7afe24"
+version = "0.4.0"
+description = "A simple Python wrapper for FFmpeg"
+optional = false
+python-versions = "<4.0.0,>=3.8.1"
+files = [
+    {file = "ffmpy-0.4.0-py3-none-any.whl", hash = "sha256:39c0f20c5b465e7f8d29a5191f3a7d7675a8c546d9d985de8921151cd9b59e14"},
+    {file = "ffmpy-0.4.0.tar.gz", hash = "sha256:131b57794e802ad555f579007497f7a3d0cab0583d37496c685b8acae4837b1d"},
+]
[[package]]
name = "filelock"
@@ -2927,35 +2923,40 @@ tests = ["pytest", "pytz", "simplejson"]

[[package]]
name = "matplotlib"
-version = "3.9.1"
+version = "3.9.1.post1"
description = "Python plotting package"
optional = true
python-versions = ">=3.9"
files = [
{file = "matplotlib-3.9.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7ccd6270066feb9a9d8e0705aa027f1ff39f354c72a87efe8fa07632f30fc6bb"},
{file = "matplotlib-3.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:591d3a88903a30a6d23b040c1e44d1afdd0d778758d07110eb7596f811f31842"},
{file = "matplotlib-3.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd2a59ff4b83d33bca3b5ec58203cc65985367812cb8c257f3e101632be86d92"},
{file = "matplotlib-3.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fc001516ffcf1a221beb51198b194d9230199d6842c540108e4ce109ac05cc0"},
{file = "matplotlib-3.9.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:83c6a792f1465d174c86d06f3ae85a8fe36e6f5964633ae8106312ec0921fdf5"},
{file = "matplotlib-3.9.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:b3fce58971b465e01b5c538f9d44915640c20ec5ff31346e963c9e1cd66fa812"},
{file = "matplotlib-3.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a973c53ad0668c53e0ed76b27d2eeeae8799836fd0d0caaa4ecc66bf4e6676c0"},
{file = "matplotlib-3.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82cd5acf8f3ef43f7532c2f230249720f5dc5dd40ecafaf1c60ac8200d46d7eb"},
{file = "matplotlib-3.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab38a4f3772523179b2f772103d8030215b318fef6360cb40558f585bf3d017f"},
{file = "matplotlib-3.9.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2315837485ca6188a4b632c5199900e28d33b481eb083663f6a44cfc8987ded3"},
{file = "matplotlib-3.9.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:565d572efea2b94f264dd86ef27919515aa6d629252a169b42ce5f570db7f37b"},
{file = "matplotlib-3.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6d397fd8ccc64af2ec0af1f0efc3bacd745ebfb9d507f3f552e8adb689ed730a"},
{file = "matplotlib-3.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26040c8f5121cd1ad712abffcd4b5222a8aec3a0fe40bc8542c94331deb8780d"},
{file = "matplotlib-3.9.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d12cb1837cffaac087ad6b44399d5e22b78c729de3cdae4629e252067b705e2b"},
{file = "matplotlib-3.9.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0e835c6988edc3d2d08794f73c323cc62483e13df0194719ecb0723b564e0b5c"},
{file = "matplotlib-3.9.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:0c584210c755ae921283d21d01f03a49ef46d1afa184134dd0f95b0202ee6f03"},
{file = "matplotlib-3.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:11fed08f34fa682c2b792942f8902e7aefeed400da71f9e5816bea40a7ce28fe"},
{file = "matplotlib-3.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0000354e32efcfd86bda75729716b92f5c2edd5b947200be9881f0a671565c33"},
{file = "matplotlib-3.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4db17fea0ae3aceb8e9ac69c7e3051bae0b3d083bfec932240f9bf5d0197a049"},
{file = "matplotlib-3.9.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:208cbce658b72bf6a8e675058fbbf59f67814057ae78165d8a2f87c45b48d0ff"},
{file = "matplotlib-3.9.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3fda72d4d472e2ccd1be0e9ccb6bf0d2eaf635e7f8f51d737ed7e465ac020cb3"},
{file = "matplotlib-3.9.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:84b3ba8429935a444f1fdc80ed930babbe06725bcf09fbeb5c8757a2cd74af04"},
{file = "matplotlib-3.9.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b918770bf3e07845408716e5bbda17eadfc3fcbd9307dc67f37d6cf834bb3d98"},
{file = "matplotlib-3.9.1.tar.gz", hash = "sha256:de06b19b8db95dd33d0dc17c926c7c9ebed9f572074b6fac4f65068a6814d010"},
{file = "matplotlib-3.9.1.post1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3779ad3e8b72df22b8a622c5796bbcfabfa0069b835412e3c1dec8ee3de92d0c"},
{file = "matplotlib-3.9.1.post1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ec400340f8628e8e2260d679078d4e9b478699f386e5cc8094e80a1cb0039c7c"},
{file = "matplotlib-3.9.1.post1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82c18791b8862ea095081f745b81f896b011c5a5091678fb33204fef641476af"},
{file = "matplotlib-3.9.1.post1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:621a628389c09a6b9f609a238af8e66acecece1cfa12febc5fe4195114ba7446"},
{file = "matplotlib-3.9.1.post1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9a54734ca761ebb27cd4f0b6c2ede696ab6861052d7d7e7b8f7a6782665115f5"},
{file = "matplotlib-3.9.1.post1-cp310-cp310-win_amd64.whl", hash = "sha256:0721f93db92311bb514e446842e2b21c004541dcca0281afa495053e017c5458"},
{file = "matplotlib-3.9.1.post1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:b08b46058fe2a31ecb81ef6aa3611f41d871f6a8280e9057cb4016cb3d8e894a"},
{file = "matplotlib-3.9.1.post1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:22b344e84fcc574f561b5731f89a7625db8ef80cdbb0026a8ea855a33e3429d1"},
{file = "matplotlib-3.9.1.post1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b49fee26d64aefa9f061b575f0f7b5fc4663e51f87375c7239efa3d30d908fa"},
{file = "matplotlib-3.9.1.post1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89eb7e89e2b57856533c5c98f018aa3254fa3789fcd86d5f80077b9034a54c9a"},
{file = "matplotlib-3.9.1.post1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c06e742bade41fda6176d4c9c78c9ea016e176cd338e62a1686384cb1eb8de41"},
{file = "matplotlib-3.9.1.post1-cp311-cp311-win_amd64.whl", hash = "sha256:c44edab5b849e0fc1f1c9d6e13eaa35ef65925f7be45be891d9784709ad95561"},
{file = "matplotlib-3.9.1.post1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:bf28b09986aee06393e808e661c3466be9c21eff443c9bc881bce04bfbb0c500"},
{file = "matplotlib-3.9.1.post1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:92aeb8c439d4831510d8b9d5e39f31c16c7f37873879767c26b147cef61e54cd"},
{file = "matplotlib-3.9.1.post1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f15798b0691b45c80d3320358a88ce5a9d6f518b28575b3ea3ed31b4bd95d009"},
{file = "matplotlib-3.9.1.post1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d59fc6096da7b9c1df275f9afc3fef5cbf634c21df9e5f844cba3dd8deb1847d"},
{file = "matplotlib-3.9.1.post1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ab986817a32a70ce22302438691e7df4c6ee4a844d47289db9d583d873491e0b"},
{file = "matplotlib-3.9.1.post1-cp312-cp312-win_amd64.whl", hash = "sha256:0d78e7d2d86c4472da105d39aba9b754ed3dfeaeaa4ac7206b82706e0a5362fa"},
{file = "matplotlib-3.9.1.post1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bd07eba6431b4dc9253cce6374a28c415e1d3a7dc9f8aba028ea7592f06fe172"},
{file = "matplotlib-3.9.1.post1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ca230cc4482010d646827bd2c6d140c98c361e769ae7d954ebf6fff2a226f5b1"},
{file = "matplotlib-3.9.1.post1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ace27c0fdeded399cbc43f22ffa76e0f0752358f5b33106ec7197534df08725a"},
{file = "matplotlib-3.9.1.post1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a4f3aeb7ba14c497dc6f021a076c48c2e5fbdf3da1e7264a5d649683e284a2f"},
{file = "matplotlib-3.9.1.post1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:23f96fbd4ff4cfa9b8a6b685a65e7eb3c2ced724a8d965995ec5c9c2b1f7daf5"},
{file = "matplotlib-3.9.1.post1-cp39-cp39-win_amd64.whl", hash = "sha256:2808b95452b4ffa14bfb7c7edffc5350743c31bda495f0d63d10fdd9bc69e895"},
{file = "matplotlib-3.9.1.post1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ffc91239f73b4179dec256b01299d46d0ffa9d27d98494bc1476a651b7821cbe"},
{file = "matplotlib-3.9.1.post1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f965ebca9fd4feaaca45937c4849d92b70653057497181100fcd1e18161e5f29"},
{file = "matplotlib-3.9.1.post1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:801ee9323fd7b2da0d405aebbf98d1da77ea430bbbbbec6834c0b3af15e5db44"},
{file = "matplotlib-3.9.1.post1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:50113e9b43ceb285739f35d43db36aa752fb8154325b35d134ff6e177452f9ec"},
{file = "matplotlib-3.9.1.post1.tar.gz", hash = "sha256:c91e585c65092c975a44dc9d4239ba8c594ba3c193d7c478b6d178c4ef61f406"},
]
[package.dependencies]
@@ -3018,13 +3019,13 @@ files = [

[[package]]
name = "mistralai"
-version = "1.0.1"
+version = "1.0.3"
description = "Python Client SDK for the Mistral AI API."
optional = true
python-versions = "<4.0,>=3.8"
files = [
-    {file = "mistralai-1.0.1-py3-none-any.whl", hash = "sha256:5e5fc28122e11aec0ce37781b6419963e31cd7caccddf89f54eac1ece81f063f"},
-    {file = "mistralai-1.0.1.tar.gz", hash = "sha256:f6b055d21dd56e174e5023371295c35945d0f7b282486457d6a71ff47c703fe8"},
+    {file = "mistralai-1.0.3-py3-none-any.whl", hash = "sha256:64af7c9192e64dc66b2da6d1c4d54a1324a881c21665a2f93d6b35d9de9f87c8"},
+    {file = "mistralai-1.0.3.tar.gz", hash = "sha256:84f1a217666c76fec9d477ae266399b813c3ac32a4a348d2ecd5fe1c039b0667"},
]

[package.dependencies]
@@ -3035,7 +3036,7 @@ python-dateutil = ">=2.9.0.post0,<3.0.0"
typing-inspect = ">=0.9.0,<0.10.0"

[package.extras]
-gcp = ["google-auth (>=2.31.0,<3.0.0)", "requests (>=2.32.3,<3.0.0)"]
+gcp = ["google-auth (==2.27.0)", "requests (>=2.32.3,<3.0.0)"]
[[package]]
name = "mmh3"
@@ -3896,8 +3897,6 @@ files = [
    {file = "orjson-3.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:960db0e31c4e52fa0fc3ecbaea5b2d3b58f379e32a95ae6b0ebeaa25b93dfd34"},
    {file = "orjson-3.10.6-cp312-none-win32.whl", hash = "sha256:a6ea7afb5b30b2317e0bee03c8d34c8181bc5a36f2afd4d0952f378972c4efd5"},
    {file = "orjson-3.10.6-cp312-none-win_amd64.whl", hash = "sha256:874ce88264b7e655dde4aeaacdc8fd772a7962faadfb41abe63e2a4861abc3dc"},
-    {file = "orjson-3.10.6-cp313-none-win32.whl", hash = "sha256:efdf2c5cde290ae6b83095f03119bdc00303d7a03b42b16c54517baa3c4ca3d0"},
-    {file = "orjson-3.10.6-cp313-none-win_amd64.whl", hash = "sha256:8e190fe7888e2e4392f52cafb9626113ba135ef53aacc65cd13109eb9746c43e"},
    {file = "orjson-3.10.6-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:66680eae4c4e7fc193d91cfc1353ad6d01b4801ae9b5314f17e11ba55e934183"},
    {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caff75b425db5ef8e8f23af93c80f072f97b4fb3afd4af44482905c9f588da28"},
    {file = "orjson-3.10.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3722fddb821b6036fd2a3c814f6bd9b57a89dc6337b9924ecd614ebce3271394"},
@@ -6738,4 +6737,4 @@ vector-stores-qdrant = ["llama-index-vector-stores-qdrant"]

[metadata]
lock-version = "2.0"
python-versions = ">=3.11,<3.12"
-content-hash = "3de5e86444ceee26b22bcecf8dd547d7c5ef2a14f62a1192a6125a4700e74640"
+content-hash = "119ebc4623e38c745677ca78dee1133087cce3b04ac210bc7a774e6c5cce26c6"

private_gpt/settings/settings.py

@@ -104,6 +104,7 @@ class DataSettings(BaseModel):
        "It will be treated as an absolute path if it starts with /"
    )

+
class LLMSettings(BaseModel):
    mode: Literal[
        "llamacpp",
@@ -196,7 +197,14 @@ class HuggingFaceSettings(BaseModel):

class EmbeddingSettings(BaseModel):
    mode: Literal[
-        "huggingface", "openai", "azopenai", "sagemaker", "ollama", "mock", "gemini", "mistralai"
+        "huggingface",
+        "openai",
+        "azopenai",
+        "sagemaker",
+        "ollama",
+        "mock",
+        "gemini",
+        "mistralai",
    ]
    ingest_mode: Literal["simple", "batch", "parallel", "pipeline"] = Field(
        "simple",

private_gpt/ui/ui.py

@@ -519,6 +519,7 @@ class PrivateGptUi:
            "llamacpp": config_settings.llamacpp.llm_hf_model_file,
            "openai": config_settings.openai.model,
            "openailike": config_settings.openai.model,
+            "azopenai": config_settings.azopenai.llm_model,
            "sagemaker": config_settings.sagemaker.llm_endpoint_name,
            "mock": llm_mode,
            "ollama": config_settings.ollama.llm_model,

pyproject.toml

@@ -1,6 +1,6 @@
[tool.poetry]
name = "private-gpt"
-version = "0.6.0"
+version = "0.6.2"
description = "Private GPT"
authors = ["Zylon <hi@zylon.ai>"]
@@ -57,8 +57,7 @@ sentence-transformers = {version ="^3.0.1", optional = true}
# Optional UI
gradio = {version ="^4.37.2", optional = true}
-# Fix: https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/16289#issuecomment-2255106490
-ffmpy = {git = "https://github.com/EuDs63/ffmpy.git", rev = "333a19ee4d21f32537c0508aa1942ef1aa7afe24", optional = true}
+ffmpy = "0.4.0"
# Optional Google Gemini dependency
google-generativeai = {version ="^0.5.4", optional = true}

version.txt

@@ -1 +1 @@
-0.6.0
+0.6.2