Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-04-28)
Latest commit: [legacy] remove outdated codes of pipeline (#4692)
* [legacy] remove cli of benchmark and update optim (#4690)
* [legacy] remove cli of benchmark and update optim
* [doc] fix cli doc test
* [legacy] fix engine clip grad norm
* [legacy] remove outdated colo tensor (#4694)
* [legacy] remove outdated colo tensor
* [test] fix test import
* [legacy] move outdated zero to legacy (#4696)
* [legacy] clean up utils (#4700)
* [legacy] clean up utils
* [example] update examples
* [legacy] clean up amp
* [legacy] fix amp module
* [legacy] clean up gpc (#4742)
* [legacy] clean up context
* [legacy] clean core, constants and global vars
* [legacy] refactor initialize
* [example] fix examples ci
* [example] fix examples ci
* [legacy] fix tests
* [example] fix gpt example
* [example] fix examples ci
* [devops] fix ci installation
* [example] fix examples ci
17 lines · 321 B · Python
from colossalai.legacy.amp import AMP_TYPE

# hyperparameters
# BATCH_SIZE is per GPU
# global batch size = BATCH_SIZE x data parallel size
BATCH_SIZE = 512
LEARNING_RATE = 3e-3
WEIGHT_DECAY = 0.3
NUM_EPOCHS = 2
WARMUP_EPOCHS = 1

# model config
NUM_CLASSES = 10

fp16 = dict(mode=AMP_TYPE.NAIVE)
clip_grad_norm = 1.0
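As the comment notes, BATCH_SIZE is the per-GPU batch size, so with a data-parallel size of 8 the effective global batch size is 512 x 8 = 4096. For context, the sketch below shows how a config module like this is typically consumed: the launcher loads the file, exposes its top-level names through the global context, and initialize() applies the fp16 (naive AMP) and clip_grad_norm settings. This is a minimal sketch, assuming the legacy entry points (colossalai.legacy.launch_from_torch, colossalai.legacy.initialize, colossalai.legacy.core.global_context) mirror the pre-refactor colossalai.* API of the same names after the move described in the commit above; the model and data here are placeholders, not part of the example.

# Hedged sketch: the colossalai.legacy entry points below are assumed to
# mirror the old colossalai.* API after the legacy refactor; the model
# and dummy data are placeholders for illustration only.
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

import colossalai.legacy as legacy
from colossalai.legacy.core import global_context as gpc

# Launch with this file as the config; its top-level names
# (BATCH_SIZE, fp16, clip_grad_norm, ...) land in gpc.config.
legacy.launch_from_torch(config='./config.py')

model = nn.Linear(3 * 32 * 32, gpc.config.NUM_CLASSES)  # placeholder model
optimizer = torch.optim.AdamW(model.parameters(),
                              lr=gpc.config.LEARNING_RATE,
                              weight_decay=gpc.config.WEIGHT_DECAY)
criterion = nn.CrossEntropyLoss()

# Dummy CIFAR-10-shaped data, flattened to match the placeholder model.
dataset = TensorDataset(torch.randn(1024, 3 * 32 * 32),
                        torch.randint(0, gpc.config.NUM_CLASSES, (1024,)))
train_loader = DataLoader(dataset, batch_size=gpc.config.BATCH_SIZE)

# initialize() reads the fp16 and clip_grad_norm entries from the config
# and returns an engine wrapping the model and optimizer accordingly.
engine, train_loader, _, _ = legacy.initialize(model, optimizer,
                                               criterion, train_loader)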