Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-04-27 11:31:58 +00:00)
[CI] Remove triton version for compatibility bug; update req torch >=2.2 (#6018)
* remove triton version
* remove torch 2.2
* remove torch 2.1
* debug
* remove 2.1 build tests
* require torch >=2.2

Co-authored-by: Edenzzzz <wtan45@wisc.edu>
parent 17904cb5bf
commit d383449fc4
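The changes below move the CI container images from hpcaitech/pytorch-cuda:2.1.0-12.1.0 to 2.2.2-12.1.0 and raise the supported PyTorch floor to 2.2, keeping 2.4.0 as the tested upper bound. As a minimal illustrative sketch of what that range means at runtime (not part of this commit; it assumes the third-party packaging library and an installed torch distribution), a version guard could look like:

    # Illustrative sketch only -- not part of this commit.
    # Verifies the installed torch falls inside the range declared in
    # requirements.txt after this change: torch>=2.2.0,<=2.4.0.
    from importlib.metadata import version

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    SUPPORTED_TORCH = SpecifierSet(">=2.2.0,<=2.4.0")

    installed = Version(version("torch"))
    if installed not in SUPPORTED_TORCH:
        raise RuntimeError(
            f"torch {installed} is outside the supported range {SUPPORTED_TORCH}"
        )

Setting up an environment by hand with pip install "torch>=2.2.0,<=2.4.0" satisfies the same specifier that requirements.txt declares below.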
@@ -1,4 +1,3 @@
-2.1.0-12.1.0
 2.2.2-12.1.0
 2.3.0-12.1.0
 2.4.0-12.4.1
.github/workflows/build_on_pr.yml

@@ -89,7 +89,7 @@ jobs:
     if: needs.detect.outputs.anyLibraryFileChanged == 'true'
     runs-on: [self-hosted, gpu]
     container:
-      image: hpcaitech/pytorch-cuda:2.1.0-12.1.0
+      image: hpcaitech/pytorch-cuda:2.2.2-12.1.0
       options: --gpus all --rm -v /dev/shm -v /data/scratch:/data/scratch
     timeout-minutes: 90
     defaults:
.github/workflows/build_on_schedule.yml

@@ -12,7 +12,7 @@ jobs:
     if: github.repository == 'hpcaitech/ColossalAI'
     runs-on: [self-hosted, gpu]
     container:
-      image: hpcaitech/pytorch-cuda:2.1.0-12.1.0
+      image: hpcaitech/pytorch-cuda:2.2.2-12.1.0
       options: --gpus all --rm -v /dev/shm -v /data/scratch/:/data/scratch/
     timeout-minutes: 90
     steps:
.github/workflows/doc_test_on_pr.yml

@@ -56,7 +56,7 @@ jobs:
     needs: detect-changed-doc
     runs-on: [self-hosted, gpu]
     container:
-      image: hpcaitech/pytorch-cuda:2.1.0-12.1.0
+      image: hpcaitech/pytorch-cuda:2.2.2-12.1.0
       options: --gpus all --rm
     timeout-minutes: 30
     defaults:
.github/workflows/doc_test_on_schedule.yml

@@ -12,7 +12,7 @@ jobs:
     name: Test the changed Doc
     runs-on: [self-hosted, gpu]
     container:
-      image: hpcaitech/pytorch-cuda:2.1.0-12.1.0
+      image: hpcaitech/pytorch-cuda:2.2.2-12.1.0
       options: --gpus all --rm
     timeout-minutes: 60
     steps:
@@ -45,7 +45,7 @@ jobs:
       fail-fast: false
       matrix: ${{fromJson(needs.manual_check_matrix_preparation.outputs.matrix)}}
     container:
-      image: hpcaitech/pytorch-cuda:2.1.0-12.1.0
+      image: hpcaitech/pytorch-cuda:2.2.2-12.1.0
       options: --gpus all --rm -v /data/scratch/examples-data:/data/ -v /dev/shm
     timeout-minutes: 15
     steps:
.github/workflows/example_check_on_pr.yml

@@ -90,7 +90,7 @@ jobs:
       fail-fast: false
       matrix: ${{fromJson(needs.detect-changed-example.outputs.matrix)}}
     container:
-      image: hpcaitech/pytorch-cuda:2.1.0-12.1.0
+      image: hpcaitech/pytorch-cuda:2.2.2-12.1.0
       options: --gpus all --rm -v /data/scratch/examples-data:/data/ -v /dev/shm
     timeout-minutes: 30
     concurrency:
@@ -34,7 +34,7 @@ jobs:
       fail-fast: false
       matrix: ${{fromJson(needs.matrix_preparation.outputs.matrix)}}
     container:
-      image: hpcaitech/pytorch-cuda:2.1.0-12.1.0
+      image: hpcaitech/pytorch-cuda:2.2.2-12.1.0
       options: --gpus all --rm -v /data/scratch/examples-data:/data/ -v /dev/shm
     timeout-minutes: 30
     steps:
.github/workflows/run_chatgpt_examples.yml

@@ -19,7 +19,7 @@ jobs:
       github.event.pull_request.base.repo.full_name == 'hpcaitech/ColossalAI'
     runs-on: [self-hosted, gpu]
     container:
-      image: hpcaitech/pytorch-cuda:2.1.0-12.1.0
+      image: hpcaitech/pytorch-cuda:2.2.2-12.1.0
       options: --gpus all --rm -v /data/scratch/examples-data:/data/scratch/examples-data --shm-size=10.24gb
     timeout-minutes: 60
     defaults:
.github/workflows/run_chatgpt_unit_tests.yml

@@ -19,7 +19,7 @@ jobs:
       github.event.pull_request.base.repo.full_name == 'hpcaitech/ColossalAI'
     runs-on: [self-hosted, gpu]
     container:
-      image: hpcaitech/pytorch-cuda:2.1.0-12.1.0
+      image: hpcaitech/pytorch-cuda:2.2.2-12.1.0
       options: --gpus all --rm -v /data/scratch/examples-data:/data/scratch/examples-data
     timeout-minutes: 30
     defaults:
@@ -19,7 +19,7 @@ jobs:
       github.event.pull_request.base.repo.full_name == 'hpcaitech/ColossalAI'
     runs-on: [self-hosted, gpu]
     container:
-      image: hpcaitech/pytorch-cuda:2.1.0-12.1.0
+      image: hpcaitech/pytorch-cuda:2.2.2-12.1.0
       volumes:
         - /data/scratch/test_data_colossalqa:/data/scratch/test_data_colossalqa
         - /data/scratch/llama-tiny:/data/scratch/llama-tiny
@@ -420,7 +420,7 @@ Please visit our [documentation](https://www.colossalai.org/) and [examples](htt
 ## Installation

 Requirements:
-- PyTorch >= 2.1
+- PyTorch >= 2.2
 - Python >= 3.7
 - CUDA >= 11.0
 - [NVIDIA GPU Compute Capability](https://developer.nvidia.com/cuda-gpus) >= 7.0 (V100/RTX20 and higher)
@@ -8,7 +8,7 @@ click
 fabric
 contexttimer
 ninja
-torch>=2.1.0,<=2.4.0
+torch>=2.2.0,<=2.4.0
 safetensors
 einops
 pydantic