name: Build on Schedule

on:
  schedule:
    # run at 00:00 UTC every day
    - cron: '0 0 * * *'
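  # allow manual runs from the Actions tab as well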
  workflow_dispatch:

jobs:
  build:
    name: Build and Test Colossal-AI
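    # run only in the upstream repository, not in forks or mirrors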
    if: github.repository == 'hpcaitech/ColossalAI'
    runs-on: [self-hosted, 8-gpu]
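    # the project's CUDA-enabled PyTorch image; all GPUs are exposed and the
    # CIFAR-10 dataset is mounted in from the host for the unit tests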
    container:
      image: hpcaitech/pytorch-cuda:1.12.0-11.3.0
      options: --gpus all --rm -v /data/scratch/cifar-10:/data/scratch/cifar-10
    timeout-minutes: 40
    steps:
      - name: Check GPU Availability # ensure all GPUs have enough memory
        id: check-avai
        run: |
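          # probe all 8 GPUs and mark the machine unavailable if any card
          # already has more than 10000 MiB (~10 GB) of memory in use,
          # presumably so a busy shared runner is skipped rather than OOMing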
          avai=true
          for i in $(seq 0 7);
          do
            gpu_used=$(nvidia-smi -i $i --query-gpu=memory.used --format=csv,noheader,nounits)
            [ "$gpu_used" -gt "10000" ] && avai=false
          done

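          # expose the result as a step output so later steps can gate on it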
echo "GPU is available: $avai"
|
|
echo "avai=$avai" >> $GITHUB_OUTPUT
|
|
|
|
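      # fetch hpcaitech/TensorNVMe (Colossal-AI's NVMe offloading backend)
      # into a subdirectory so it can be built from source below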
      - uses: actions/checkout@v2
        if: steps.check-avai.outputs.avai == 'true'
        with:
          repository: hpcaitech/TensorNVMe
          ssh-key: ${{ secrets.SSH_KEY_FOR_CI }}
          path: TensorNVMe

      - name: Install tensornvme
        if: steps.check-avai.outputs.avai == 'true'
        run: |
          cd TensorNVMe
          conda install -y cmake
          pip install -r requirements.txt
          pip install -v .

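      # check out the Colossal-AI repository itself into the workspace root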
      - uses: actions/checkout@v2
        if: steps.check-avai.outputs.avai == 'true'
        with:
          ssh-key: ${{ secrets.SSH_KEY_FOR_CI }}

      - name: Install Colossal-AI
        if: steps.check-avai.outputs.avai == 'true'
        run: |
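          # restore previously compiled CUDA extensions from the runner-local
          # cache, if it is non-empty, so the build below can reuse them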
          [ ! -z "$(ls -A /github/home/cuda_ext_cache/)" ] && cp -r /github/home/cuda_ext_cache/* /__w/ColossalAI/ColossalAI/
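          # CUDA_EXT=1 compiles the CUDA kernel extensions during installation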
          CUDA_EXT=1 pip install -v -e .
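          # save the freshly built extensions back to the cache for the next run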
          cp -r /__w/ColossalAI/ColossalAI/build /github/home/cuda_ext_cache/
          pip install -r requirements/requirements-test.txt

      - name: Unit Testing
        if: steps.check-avai.outputs.avai == 'true'
        run: |
          PYTHONPATH=$PWD pytest tests
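        # DATA points the tests at the mounted CIFAR-10 dataset; LD_LIBRARY_PATH
        # picks up the TensorNVMe and NVIDIA runtime libraries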
        env:
          DATA: /data/scratch/cifar-10
          LD_LIBRARY_PATH: /github/home/.tensornvme/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64

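      # on failure, send a link to this run to the team's Lark (Feishu) webhook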
      - name: Notify Lark
        id: message-preparation
        if: ${{ failure() }}
        run: |
          url=$SERVER_URL/$REPO/actions/runs/$RUN_ID
          msg="Scheduled Build and Test failed on 8 GPUs, please visit $url for details"
          echo "$msg"
          python .github/workflows/scripts/send_message_to_lark.py -m "$msg" -u $WEBHOOK_URL
        env:
          SERVER_URL: ${{ github.server_url }}
          REPO: ${{ github.repository }}
          RUN_ID: ${{ github.run_id }}
          WEBHOOK_URL: ${{ secrets.LARK_NOTIFICATION_WEBHOOK_URL }}