mirror of https://github.com/imartinez/privateGPT.git
synced 2025-04-28 03:32:18 +00:00

Added embedding model option for Fireworks
Added documentation for Fireworks

This commit is contained in:
parent 519c48b70b
commit 9c3590e555
@ -3,45 +3,63 @@ It is important that you review the [Main Concepts](../concepts) section to unde

## Base requirements to run PrivateGPT

### 1. Clone the PrivateGPT Repository

Clone the repository and navigate to it:

```bash
git clone https://github.com/zylon-ai/private-gpt
cd private-gpt
```

### 2. Install Python 3.11

If you do not have Python 3.11 installed, install it using a Python version manager like `pyenv`. Earlier Python versions are not supported.

#### macOS/Linux

Install and set Python 3.11 using [pyenv](https://github.com/pyenv/pyenv):

```bash
pyenv install 3.11
pyenv local 3.11
```

#### Windows

Install and set Python 3.11 using [pyenv-win](https://github.com/pyenv-win/pyenv-win):

```bash
pyenv install 3.11
pyenv local 3.11
```
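
Either way, a quick sanity check confirms the expected interpreter is active (the output shown is illustrative):

```bash
python --version   # expect: Python 3.11.x
```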

### 3. Install `Poetry`

Install [Poetry](https://python-poetry.org/docs/#installing-with-the-official-installer) for dependency management, following the instructions on the official Poetry website.

<Callout intent="warning">
A bug exists in Poetry versions 1.7.0 and earlier. We strongly recommend upgrading to a tested version. To upgrade Poetry to the latest tested version, run `poetry self update 1.8.3` after installing it.
</Callout>
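
After installing, a quick check confirms the upgrade took effect:

```bash
poetry --version   # expect 1.8.3 after the self update
```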

### 4. Optional: Install `make`

To run various scripts, you need to install `make`. Follow the instructions for your operating system:

#### macOS

(Using Homebrew):

```bash
brew install make
```

#### Windows

(Using Chocolatey):

```bash
choco install make
```

@ -53,6 +71,7 @@ PrivateGPT allows customization of the setup, from fully local to cloud-based, b

```bash
poetry install --extras "<extra1> <extra2>..."
```

Where `<extra>` can be any of the options described below.
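
For example, the fully local Ollama setup described later in this guide combines one extra from each category:

```bash
poetry install --extras "ui llms-ollama embeddings-ollama vector-stores-qdrant"
```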

### Available Modules

@ -61,46 +80,49 @@ You need to choose one option per category (LLM, Embeddings, Vector Stores, UI).

#### LLM

| **Option** | **Description**                                                        | **Extra**        |
| ---------- | ---------------------------------------------------------------------- | ---------------- |
| **ollama** | Adds support for Ollama LLM, requires Ollama running locally           | llms-ollama      |
| llama-cpp  | Adds support for local LLM using LlamaCPP                              | llms-llama-cpp   |
| sagemaker  | Adds support for Amazon Sagemaker LLM, requires Sagemaker endpoints    | llms-sagemaker   |
| openai     | Adds support for OpenAI LLM, requires OpenAI API key                   | llms-openai      |
| openailike | Adds support for 3rd party LLM providers compatible with OpenAI's API  | llms-openai-like |
| azopenai   | Adds support for Azure OpenAI LLM, requires Azure endpoints            | llms-azopenai    |
| gemini     | Adds support for Gemini LLM, requires Gemini API key                   | llms-gemini      |

#### Embeddings

| **Option**  | **Description**                                                             | **Extra**              |
| ----------- | --------------------------------------------------------------------------- | ---------------------- |
| **ollama**  | Adds support for Ollama Embeddings, requires Ollama running locally         | embeddings-ollama      |
| huggingface | Adds support for local Embeddings using HuggingFace                         | embeddings-huggingface |
| openai      | Adds support for OpenAI Embeddings, requires OpenAI API key                 | embeddings-openai      |
| sagemaker   | Adds support for Amazon Sagemaker Embeddings, requires Sagemaker endpoints  | embeddings-sagemaker   |
| azopenai    | Adds support for Azure OpenAI Embeddings, requires Azure endpoints          | embeddings-azopenai    |
| gemini      | Adds support for Gemini Embeddings, requires Gemini API key                 | embeddings-gemini      |

#### Vector Stores

| **Option** | **Description**                           | **Extra**                |
| ---------- | ----------------------------------------- | ------------------------ |
| **qdrant** | Adds support for Qdrant vector store      | vector-stores-qdrant     |
| milvus     | Adds support for Milvus vector store      | vector-stores-milvus     |
| chroma     | Adds support for Chroma DB vector store   | vector-stores-chroma     |
| postgres   | Adds support for Postgres vector store    | vector-stores-postgres   |
| clickhouse | Adds support for Clickhouse vector store  | vector-stores-clickhouse |

#### UI

| **Option** | **Description**                  | **Extra** |
| ---------- | -------------------------------- | --------- |
| Gradio     | Adds support for UI using Gradio | ui        |

<Callout intent="warning">
A working **Gradio UI client** is provided to test the API, together with a set of useful tools such as a bulk model download script, an ingestion script, a documents folder watch, etc. Please refer to the [UI alternatives](/manual/user-interface/alternatives) page for more UI alternatives.
</Callout>

## Recommended Setups

@ -109,7 +131,7 @@ There are just some examples of recommended setups. You can mix and match the di
You'll find more information in the Manual section of the documentation.

> **Important for Windows**: In the examples below on how to run PrivateGPT with `make run`, the `PGPT_PROFILES` env var is being set inline following Unix command line syntax (works on MacOS and Linux).
> If you are using Windows, you'll need to set the env var in a different way, for example:

```powershell
# Powershell
@ -136,6 +158,7 @@ Go to [ollama.ai](https://ollama.ai/) and follow the instructions to install Oll

After the installation, make sure the Ollama desktop app is closed.

Now, start the Ollama service (it will start a local inference server, serving both the LLM and the Embeddings):

```bash
ollama serve
```

@ -152,6 +175,7 @@ ollama pull nomic-embed-text

```

Once done, on a different terminal, you can install PrivateGPT with the following command:

```bash
poetry install --extras "ui llms-ollama embeddings-ollama vector-stores-qdrant"
```
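
Once installed, you can run PrivateGPT with the Ollama profile (assuming the `ollama` profile that ships with the repo, mirroring the Fireworks example later in this guide):

```bash
PGPT_PROFILES=ollama make run
```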
@ -175,6 +199,7 @@ You need to have access to sagemaker inference endpoints for the LLM and / or th

Edit the `settings-sagemaker.yaml` file to include the correct Sagemaker endpoints.

Then, install PrivateGPT with the following command:

```bash
poetry install --extras "ui llms-sagemaker embeddings-sagemaker vector-stores-qdrant"
```
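
Once installed, you can run PrivateGPT with the SageMaker profile (assuming a `sagemaker` profile matching the `settings-sagemaker.yaml` file above):

```bash
PGPT_PROFILES=sagemaker make run
```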
@ -198,6 +223,7 @@ You need an OPENAI API key to run this setup.

Edit the `settings-openai.yaml` file to include the correct API key. Never commit it! It's a secret! As an alternative to editing `settings-openai.yaml`, you can just set the env var `OPENAI_API_KEY`.
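
For instance, to set the variable for the current shell session (the key value is a placeholder):

```bash
export OPENAI_API_KEY="<your-api-key>"
```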
Then, install PrivateGPT with the following command:

```bash
poetry install --extras "ui llms-openai embeddings-openai vector-stores-qdrant"
```
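
Once installed, you can run PrivateGPT with the OpenAI profile (assuming an `openai` profile matching the `settings-openai.yaml` file above):

```bash
PGPT_PROFILES=openai make run
```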
@ -221,6 +247,7 @@ You need to have access to Azure OpenAI inference endpoints for the LLM and / or

Edit the `settings-azopenai.yaml` file to include the correct Azure OpenAI endpoints.

Then, install PrivateGPT with the following command:

```bash
poetry install --extras "ui llms-azopenai embeddings-azopenai vector-stores-qdrant"
```
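
Once installed, you can run PrivateGPT with the Azure OpenAI profile (assuming an `azopenai` profile matching the `settings-azopenai.yaml` file referenced below):

```bash
PGPT_PROFILES=azopenai make run
```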
@ -235,6 +262,30 @@ PrivateGPT will use the already existing `settings-azopenai.yaml` settings file,

The UI will be available at http://localhost:8001

### Non-Private, Fireworks-powered test setup

If you want to test PrivateGPT with Fireworks' LLM and Embeddings (taking into account that your data is going to Fireworks!), you can use the setup below.

You need a Fireworks API key to run this setup.

Edit the `settings-fireworks.yaml` file to include the correct API key. Never commit it! It's a secret! As an alternative to editing `settings-fireworks.yaml`, you can just set the env var `FIREWORKS_API_KEY`.
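
As with OpenAI, you can set the variable for the current shell session instead (the key value is a placeholder):

```bash
export FIREWORKS_API_KEY="<your-api-key>"
```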
Then, install PrivateGPT with the following command:

```bash
poetry install --extras "ui llms-fireworks embeddings-fireworks vector-stores-qdrant embeddings-openai"
```

Once installed, you can run PrivateGPT:

```bash
PGPT_PROFILES=fireworks make run
```

PrivateGPT will use the already existing `settings-fireworks.yaml` settings file, which is already configured to use the Fireworks LLM and Embeddings endpoints, and Qdrant.

The UI will be available at http://localhost:8001

### Local, Llama-CPP powered setup

If you want to run PrivateGPT fully locally without relying on Ollama, you can run the following command:
@ -244,6 +295,7 @@ poetry install --extras "ui llms-llama-cpp embeddings-huggingface vector-stores-

```

In order for local LLM and embeddings to work, you need to download the models to the `models` folder. You can do so by running the `setup` script:

```bash
poetry run python scripts/setup
```
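
With the models downloaded, you can then run PrivateGPT locally (assuming a `local` profile configured for LlamaCPP and HuggingFace, in line with the extras installed above):

```bash
PGPT_PROFILES=local make run
```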
@ -277,6 +329,7 @@ To do that, you need to install `llama.cpp` python's binding `llama-cpp-python`

that activate `METAL`: you have to pass `-DLLAMA_METAL=on` to the CMake command that `pip` runs for you (see below).

In other words, one should simply run:

```bash
CMAKE_ARGS="-DLLAMA_METAL=on" pip install --force-reinstall --no-cache-dir llama-cpp-python
```

@ -285,9 +338,10 @@ The above command will force the re-installation of `llama-cpp-python` with `MET

`llama.cpp` locally with your `METAL` libraries (shipped by default with your macOS).

More information is available in the documentation of the libraries themselves:

- [llama-cpp-python](https://github.com/abetlen/llama-cpp-python#installation-with-hardware-acceleration)
- [llama-cpp-python's documentation](https://llama-cpp-python.readthedocs.io/en/latest/#installation-with-hardware-acceleration)
- [llama.cpp](https://github.com/ggerganov/llama.cpp#build)

##### Llama-CPP Windows NVIDIA GPU support

@ -297,11 +351,11 @@ dependencies.

Some tips to get it working with an NVIDIA card and CUDA (tested on Windows 10 with CUDA 11.5, RTX 3070):

- Install the latest VS2022 (and build tools): https://visualstudio.microsoft.com/vs/community/
- Install the CUDA toolkit: https://developer.nvidia.com/cuda-downloads
- Verify your installation is correct by running `nvcc --version` and `nvidia-smi`; ensure your CUDA version is up to date and your GPU is detected.
- [Optional] Install CMake to troubleshoot building issues by compiling llama.cpp directly: https://cmake.org/download/

If you have all required dependencies properly configured, running the following PowerShell command should succeed.
@ -332,9 +386,9 @@ dependencies.

Some tips:

- Make sure you have an up-to-date C++ compiler
- Install the CUDA toolkit: https://developer.nvidia.com/cuda-downloads
- Verify your installation is correct by running `nvcc --version` and `nvidia-smi`; ensure your CUDA version is up to date and your GPU is detected.

After that, running the following command in the repository will install llama.cpp with GPU support:
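
A sketch of such a command (the `-DLLAMA_CUBLAS=on` CMake flag is an assumption based on llama-cpp-python's CUDA build instructions of the time; check the docs for your version):

```bash
CMAKE_ARGS="-DLLAMA_CUBLAS=on" poetry run pip install --force-reinstall --no-cache-dir llama-cpp-python
```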
@ -356,13 +410,17 @@ AVX = 1 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 |

Linux GPU support is done through ROCm.
Some tips:

- Install ROCm from the [quick-start install guide](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/tutorial/quick-start.html)
- [Install PyTorch for ROCm](https://rocm.docs.amd.com/projects/radeon/en/latest/docs/install/install-pytorch.html)

```bash
wget https://repo.radeon.com/rocm/manylinux/rocm-rel-6.0/torch-2.1.1%2Brocm6.0-cp311-cp311-linux_x86_64.whl
poetry run pip install --force-reinstall --no-cache-dir torch-2.1.1+rocm6.0-cp311-cp311-linux_x86_64.whl
```

- Install bitsandbytes for ROCm

```bash
PYTORCH_ROCM_ARCH=gfx900,gfx906,gfx908,gfx90a,gfx1030,gfx1100,gfx1101,gfx940,gfx941,gfx942
BITSANDBYTES_VERSION=62353b0200b8557026c176e74ac48b84b953a854
@ -374,6 +432,7 @@ pip install . --extra-index-url https://download.pytorch.org/whl/nightly
```


After that, running the following command in the repository will install llama.cpp with GPU support:

```bash
LLAMA_CPP_PYTHON_VERSION=0.2.56
DAMDGPU_TARGETS=gfx900;gfx906;gfx908;gfx90a;gfx1030;gfx1100;gfx1101;gfx940;gfx941;gfx942
@ -391,15 +450,15 @@ AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI =

Execution of LLMs locally still has a lot of sharp edges, especially when running on non-Linux platforms.
You might encounter several issues:

- Performance: RAM or VRAM usage is very high; your computer might experience slowdowns or even crashes.
- GPU Virtualization on Windows and OSX: Simply not possible with Docker Desktop; you have to run the server directly on the host.
- Building errors: Some of PrivateGPT's dependencies need to build native code, and they might fail on some platforms. Most likely you are missing some dev tools on your machine (updated C++ compiler, CUDA is not on PATH, etc.).

If you encounter any of these issues, please open an issue and we'll try to help.

One of the first reflexes to adopt is: get more information.
If, during your installation, something does not go as planned, retry in _verbose_ mode, and see what goes wrong.

For example, when installing packages with `pip install`, you can add the option `-vvv` to show the details of the installation.
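
For instance (the package name here is purely illustrative):

```bash
pip install -vvv llama-cpp-python
```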
@ -414,8 +473,8 @@ To install a C++ compiler on Windows 10/11, follow these steps:

1. Install Visual Studio 2022.
2. Make sure the following components are selected:
   - Universal Windows Platform development
   - C++ CMake tools for Windows
3. Download the MinGW installer from the [MinGW website](https://sourceforge.net/projects/mingw/).
4. Run the installer and select the `gcc` component.


poetry.lock (generated): 49 changes
@ -1303,6 +1303,20 @@ django = ["dj-database-url", "dj-email-url", "django-cache-url"]

lint = ["flake8 (==4.0.1)", "flake8-bugbear (==21.9.2)", "mypy (==0.910)", "pre-commit (>=2.4,<3.0)"]
tests = ["dj-database-url", "dj-email-url", "django-cache-url", "pytest"]

[[package]]
name = "eval-type-backport"
version = "0.2.0"
description = "Like `typing._eval_type`, but lets older Python versions use newer typing features."
optional = true
python-versions = ">=3.8"
files = [
    {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"},
    {file = "eval_type_backport-0.2.0.tar.gz", hash = "sha256:68796cfbc7371ebf923f03bdf7bef415f3ec098aeced24e054b253a0e78f7b37"},
]

[package.extras]
tests = ["pytest"]

[[package]]
name = "fastapi"
version = "0.111.1"
@ -3247,32 +3261,33 @@ description = "An experimental Python binding of the Rust MiniJinja template eng

optional = true
python-versions = ">=3.8"
files = [
    {file = "minijinja-2.2.0-cp38-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:e4154fcf72e81be01c2733b770e6cb3e584851cb2fa73c58e347b04967d3d7c0"},
    {file = "minijinja-2.2.0-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b05e0070c08b550fa9a09ff9c051f47424674332dd56cc54b997dd602887907"},
    {file = "minijinja-2.2.0-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:360ea4a93fdf1fe327f3e70eed20ecb29f324ca28fae177de0605dcc29869300"},
    {file = "minijinja-2.2.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9cad5ccb021ef25b6a271158f4d6636474edb08cd1dd49355aac6b68a48aebb"},
    {file = "minijinja-2.2.0-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7a85c67c519b413fc4892854782927e1244a24cbbb3a3cb0ac5e57d9fdb1868c"},
    {file = "minijinja-2.2.0-cp38-abi3-win32.whl", hash = "sha256:e431a2467dd6e1bcb7c511e9fbad012b02c6e5453acdd9fbd4c4af0d34a3d1c5"},
    {file = "minijinja-2.2.0-cp38-abi3-win_amd64.whl", hash = "sha256:d4df7e4a09be4249c8243207fa89e6f4d22b853c2b565a99f48e478a30713822"},
    {file = "minijinja-2.2.0.tar.gz", hash = "sha256:4411052c7a60f8d56468cc6d17d45d72be3d5e89e9578a04f8336cc56601523c"},
]

[[package]]
name = "mistralai"
version = "1.1.0"
description = "Python Client SDK for the Mistral AI API."
optional = true
python-versions = "<4.0,>=3.8"
files = [
    {file = "mistralai-1.1.0-py3-none-any.whl", hash = "sha256:eea0938975195f331d0ded12d14e3c982f09f1b68210200ed4ff0c6b9b22d0fb"},
    {file = "mistralai-1.1.0.tar.gz", hash = "sha256:9d1fe778e0e8c6ddab714e6a64c6096bd39cfe119ff38ceb5019d8e089df08ba"},
]

[package.dependencies]
eval-type-backport = ">=0.2.0,<0.3.0"
httpx = ">=0.27.0,<0.28.0"
jsonpath-python = ">=1.0.6,<2.0.0"
pydantic = ">=2.9.0,<3.0.0"
python-dateutil = "2.8.2"
typing-inspect = ">=0.9.0,<0.10.0"

[package.extras]
@ -5005,13 +5020,13 @@ testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtuale

[[package]]
name = "python-dateutil"
version = "2.8.2"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
files = [
    {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
    {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
]

[package.dependencies]
@ -7081,4 +7096,4 @@ vector-stores-qdrant = ["llama-index-vector-stores-qdrant"]

[metadata]
lock-version = "2.0"
python-versions = ">=3.11,<3.12"
content-hash = "df2024950dd0e9c55ae48020af1a71510cf878d696d2aa076e65cd7ee30b08fe"

@ -74,11 +74,12 @@ class EmbeddingComponent:

```python
                raise ImportError(
                    "FireworksEmbedding dependencies not found, install with `poetry install --extras embeddings-fireworks`"
                ) from e

            api_key = settings.fireworks.embedding_api_key or settings.fireworks.api_key
            # Read the embedding model from the Fireworks settings (the diff read
            # settings.openai.embedding_model here, which looks like a copy-paste slip).
            model = settings.fireworks.embedding_model
            self.embedding_model = FireworksEmbedding(
                api_key=api_key,
                model=model,
            )
        case "ollama":
            try:
```

@ -275,15 +275,15 @@ class FireWorksSettings(BaseModel):

```python
        "accounts/fireworks/models/llama-v3p1-70b-instruct",
        description="FireWorks Model to use. Example: 'accounts/fireworks/models/llama-v3p1-70b-instruct'.",
    )
    embedding_api_base: str = Field(
        None,
        description="Base URL of FIREWORKS API. Example: 'https://api.fireworks.ai/inference/v1'.",
    )
    embedding_api_key: str
    embedding_model: str = Field(
        "nomic-ai/nomic-embed-text-v1.5",
        description="FIREWORKS embedding Model to use. Example: 'nomic-ai/nomic-embed-text-v1.5'.",
    )


class GeminiSettings(BaseModel):
    api_key: str
```