mirror of https://github.com/csunny/DB-GPT.git
More docker compose improvements (#268)
For me, GPU 0 is a 3080 and GPU 1 is a 4090, while others may only have one GPU, so let's make GPU 0 the default. I also found a better way to build the containers, and added a cloudflared tunnel container.
commit 7d7452ac95
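With GPU 0 as the committed default, a multi-GPU host can still point the stack at another card without editing the tracked file. A minimal sketch, assuming Compose's standard docker-compose.override.yml merge behavior; the override file below is hypothetical and not part of this commit, and the device index depends on your host:

# docker-compose.override.yml (hypothetical, local only)
services:
  llmserver:
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ['1']   # pick whichever GPU index fits your host
              capabilities: [gpu]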
.dockerignore
@@ -1 +1,2 @@
 models/
+plugins/
.gitignore (vendored, 4 changes)
@@ -28,7 +28,9 @@ sdist/
 var/
 wheels/
-models/*
+models/
+plugins/
+

 pip-wheel-metadata/
 share/python-wheels/
 *.egg-info/
Dockerfile
@@ -1,23 +1,23 @@
 FROM nvidia/cuda:11.8.0-devel-ubuntu22.04

+WORKDIR /app
+
 RUN apt-get update && apt-get install -y \
     git \
     python3 \
     pip

-
-WORKDIR /app
-
-COPY . /app
-

 # upgrade pip
 RUN pip3 install --upgrade pip

+COPY ./requirements.txt /app/requirements.txt
+
 RUN pip install --no-cache-dir -r requirements.txt

 RUN python3 -m spacy download zh_core_web_sm

-EXPOSE 7860
-
-CMD ["python3", "pilot/server/webserver.py"]
+COPY . /app
+
+EXPOSE 7860
+EXPOSE 8000
Dockerfile-llmserver (deleted)
@@ -1,21 +0,0 @@
-FROM nvidia/cuda:11.8.0-devel-ubuntu22.04
-
-RUN apt-get update && apt-get install -y \
-    git \
-    python3 \
-    pip
-
-
-WORKDIR /app
-
-COPY . /app
-
-
-# upgrade pip
-RUN pip3 install --upgrade pip
-
-RUN pip install --no-cache-dir -r requirements.txt
-
-EXPOSE 8000
-
-CMD ["python3", "pilot/server/llmserver.py"]
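Deleting this file works because docker-compose.yml (below) now builds both services from the single Dockerfile and injects the role-specific entrypoint via command:, so the two builds share every cached layer; copying requirements.txt before the full source also lets the dependency layers survive code edits. A condensed sketch of the pattern, assembled from the compose hunks below rather than the full file:

# Sketch only: both services share one image, differing only in command.
services:
  webserver:
    build:
      context: .
      dockerfile: Dockerfile
    command: python3 pilot/server/webserver.py
  llmserver:
    build:
      context: .
      dockerfile: Dockerfile
    command: python3 pilot/server/llmserver.py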
docker-compose.yml
@@ -16,11 +16,16 @@ services:
   webserver:
     build:
       context: .
-      dockerfile: Dockerfile-webserver
+      dockerfile: Dockerfile
+    command: python3 pilot/server/webserver.py
     environment:
       - MODEL_SERVER=http://llmserver:8000
       - LOCAL_DB_HOST=db
       - WEB_SERVER_PORT=7860
+      - ALLOWLISTED_PLUGINS=db_dashboard
     depends_on:
       - db
       - llmserver
+    volumes:
+      - ./models:/app/models
+      - ./plugins:/app/plugins
@@ -28,16 +33,19 @@ services:
     env_file:
       - .env.template
     ports:
-      - 7860:7860
+      - 7860:7860/tcp
     expose:
-      - 7860
+      - 7860/tcp
     restart: unless-stopped
   llmserver:
     build:
       context: .
-      dockerfile: Dockerfile-llmserver
+      dockerfile: Dockerfile
+    command: python3 pilot/server/llmserver.py
     environment:
       - LOCAL_DB_HOST=db
+    depends_on:
+      - db
     volumes:
       - ./models:/app/models
     env_file:
@@ -50,9 +58,17 @@ services:
       reservations:
         devices:
           - driver: nvidia
-            device_ids: ['1']
+            device_ids: ['0']
             capabilities: [gpu]

-
+  tunnel:
+    image: cloudflare/cloudflared:latest
+    container_name: cloudflared-tunnel
+    restart: unless-stopped
+    environment:
+      - TUNNEL_URL=http://webserver:7860
+    command: tunnel --no-autoupdate
+    depends_on:
+      - webserver

 volumes:
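As configured above, cloudflared starts a TryCloudflare quick tunnel: cloudflared maps TUNNEL_* environment variables onto its flags, so TUNNEL_URL=http://webserver:7860 is equivalent to passing tunnel --url, and the randomly generated *.trycloudflare.com URL shows up in the tunnel container's logs. For a stable hostname you would switch to a named tunnel; a hypothetical variant (the token comes from the Cloudflare Zero Trust dashboard and is not part of this commit):

# Hypothetical named-tunnel variant; replaces the quick-tunnel service above.
services:
  tunnel:
    image: cloudflare/cloudflared:latest
    restart: unless-stopped
    environment:
      - TUNNEL_TOKEN=<your-tunnel-token>   # placeholder, supplied by Cloudflare
    command: tunnel --no-autoupdate run
    depends_on:
      - webserver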