mirror of https://github.com/hpcaitech/ColossalAI.git
synced 2025-10-02 07:39:24 +00:00
[misc] update pre-commit and run all files (#4752)
* [misc] update pre-commit
* [misc] run pre-commit
* [misc] remove useless configuration files
* [misc] ignore cuda for clang-format
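Almost all of the churn in the diff below is mechanical: single quotes become double quotes, aligned continuation lines are either collapsed onto one line or exploded with one argument per line plus a trailing comma, and a couple of unused names are dropped. As a minimal sketch of the convention the hooks enforce (a hypothetical snippet, not code from this commit; the pattern is consistent with a Black-style formatter):

def scale(value, factor=2):
    return value * factor

# Before the hooks run (aligned continuation, single quotes):
# result = scale(value=21,
#                factor=2)
# print('result:', result)

# After (double quotes; the call fits within the line limit, so it collapses):
result = scale(value=21, factor=2)
print("result:", result)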
@@ -12,7 +12,6 @@ from colossalai.kernel import LayerNorm
 from colossalai.legacy.amp import AMP_TYPE
 from colossalai.legacy.context.parallel_mode import ParallelMode
 from colossalai.legacy.core import global_context as gpc
-from colossalai.legacy.engine.schedule import PipelineSchedule
 from colossalai.legacy.utils import is_using_pp
 from colossalai.logging import get_dist_logger
 from colossalai.nn.optimizer import FusedAdam
@@ -31,7 +30,7 @@ def process_batch_data(batch_data):
 
 def parse_args():
     parser = argparse.ArgumentParser()
-    parser.add_argument('-s', '--synthetic', action="store_true", help="whether use synthetic data")
+    parser.add_argument("-s", "--synthetic", action="store_true", help="whether use synthetic data")
     return parser.parse_args()
 
 
@@ -48,37 +47,39 @@ def main():
 
 def main():
     # initialize
-    args = parse_args()
-    colossalai.launch_from_torch(config='./config.py', seed=1234, backend='nccl')
+    parse_args()
+    colossalai.launch_from_torch(config="./config.py", seed=1234, backend="nccl")
 
     logger = get_dist_logger()
 
     # build synthetic dataloader
     BATCH_SIZE_PER_GPUS = gpc.config.GLOBAL_BATCH_SIZE // gpc.get_world_size(ParallelMode.DATA)
     VOCAB_SIZE = 30528
-    trainloader = DummyDataloader(batch_size=BATCH_SIZE_PER_GPUS,
-                                  vocab_size=VOCAB_SIZE,
-                                  seq_length=gpc.config.SEQ_LENGTH)
-    validloader = DummyDataloader(batch_size=BATCH_SIZE_PER_GPUS,
-                                  vocab_size=VOCAB_SIZE,
-                                  seq_length=gpc.config.SEQ_LENGTH)
+    trainloader = DummyDataloader(
+        batch_size=BATCH_SIZE_PER_GPUS, vocab_size=VOCAB_SIZE, seq_length=gpc.config.SEQ_LENGTH
+    )
+    validloader = DummyDataloader(
+        batch_size=BATCH_SIZE_PER_GPUS, vocab_size=VOCAB_SIZE, seq_length=gpc.config.SEQ_LENGTH
+    )
 
     logger.info("Dataloaders are built", ranks=[0])
 
     # build model
-    if hasattr(gpc.config, 'fp16') and gpc.config.fp16.get('mode') == AMP_TYPE.NAIVE:
+    if hasattr(gpc.config, "fp16") and gpc.config.fp16.get("mode") == AMP_TYPE.NAIVE:
         is_naive_fp16 = True
     else:
         is_naive_fp16 = False
 
     use_pipeline = is_using_pp()
-    kwargs = dict(vocab_size=VOCAB_SIZE,
-                  hidden_size=gpc.config.HIDDEN_SIZE,
-                  max_sequence_length=gpc.config.SEQ_LENGTH,
-                  num_attention_heads=gpc.config.NUM_ATTENTION_HEADS,
-                  convert_fp16_to_fp32_in_softmax=True,
-                  is_naive_fp16=is_naive_fp16,
-                  add_binary_head=gpc.config.ADD_BINARY_HEAD)
+    kwargs = dict(
+        vocab_size=VOCAB_SIZE,
+        hidden_size=gpc.config.HIDDEN_SIZE,
+        max_sequence_length=gpc.config.SEQ_LENGTH,
+        num_attention_heads=gpc.config.NUM_ATTENTION_HEADS,
+        convert_fp16_to_fp32_in_softmax=True,
+        is_naive_fp16=is_naive_fp16,
+        add_binary_head=gpc.config.ADD_BINARY_HEAD,
+    )
 
     if use_pipeline:
         model = build_pipeline_bert(num_layers=gpc.config.DEPTH, num_chunks=1, **kwargs)
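For context, DummyDataloader comes from the example's data/dummy_dataloader.py and simply feeds random token ids at a fixed sequence length so the schedule can be exercised without a real corpus. A minimal sketch of that idea (class name and yielded fields are assumed here, not read from the repo):

import torch


class SyntheticLoader:
    # Hypothetical stand-in for the example's DummyDataloader: endless
    # random (tokens, mask) batches shaped (batch_size, seq_length).
    def __init__(self, batch_size, vocab_size, seq_length):
        self.batch_size = batch_size
        self.vocab_size = vocab_size
        self.seq_length = seq_length

    def __iter__(self):
        return self

    def __next__(self):
        tokens = torch.randint(0, self.vocab_size, (self.batch_size, self.seq_length))
        mask = torch.ones_like(tokens)
        return tokens, mask


loader = SyntheticLoader(batch_size=4, vocab_size=30528, seq_length=512)
tokens, mask = next(iter(loader))
print(tokens.shape)  # torch.Size([4, 512])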
@@ -99,35 +100,39 @@ def main():
     logger.info("Criterion is built", ranks=[0])
 
     # layernorm and bias has no weight decay
-    weight_decay_params = {'params': []}
-    no_weight_decay_params = {'params': [], 'weight_decay': 0.0}
+    weight_decay_params = {"params": []}
+    no_weight_decay_params = {"params": [], "weight_decay": 0.0}
     for module_ in model.modules():
         if isinstance(module_, LayerNorm):
-            no_weight_decay_params['params'].extend([p for p in list(module_._parameters.values()) if p is not None])
+            no_weight_decay_params["params"].extend([p for p in list(module_._parameters.values()) if p is not None])
         else:
-            weight_decay_params['params'].extend(
-                [p for n, p in list(module_._parameters.items()) if p is not None and n != 'bias'])
-            no_weight_decay_params['params'].extend(
-                [p for n, p in list(module_._parameters.items()) if p is not None and n == 'bias'])
+            weight_decay_params["params"].extend(
+                [p for n, p in list(module_._parameters.items()) if p is not None and n != "bias"]
+            )
+            no_weight_decay_params["params"].extend(
+                [p for n, p in list(module_._parameters.items()) if p is not None and n == "bias"]
+            )
 
     logger.info(
         f"without weight decay param: {len(no_weight_decay_params['params'])}, with weight decay param: {len(weight_decay_params['params'])}"
     )
     # optimizer
-    optimizer = FusedAdam((weight_decay_params, no_weight_decay_params),
-                          lr=gpc.config.LR,
-                          weight_decay=gpc.config.WEIGHT_DECAY)
+    optimizer = FusedAdam(
+        (weight_decay_params, no_weight_decay_params), lr=gpc.config.LR, weight_decay=gpc.config.WEIGHT_DECAY
+    )
     logger.info("Optimizer is built", ranks=[0])
 
     # lr scheduler
     # follow Megatron-LM setting
     warmup_steps = int(gpc.config.DECAY_ITERS * gpc.config.WARMUP_FRACTION)
-    lr_scheduler = AnnealingLR(optimizer=optimizer,
-                               max_lr=gpc.config.LR,
-                               min_lr=gpc.config.MIN_LR,
-                               warmup_steps=warmup_steps,
-                               decay_steps=gpc.config.DECAY_ITERS,
-                               decay_style='linear')
+    lr_scheduler = AnnealingLR(
+        optimizer=optimizer,
+        max_lr=gpc.config.LR,
+        min_lr=gpc.config.MIN_LR,
+        warmup_steps=warmup_steps,
+        decay_steps=gpc.config.DECAY_ITERS,
+        decay_style="linear",
+    )
     logger.info(f"LR Scheduler is built with {warmup_steps} warmup steps and {gpc.config.DECAY_ITERS} decay steps")
 
     # # init
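The grouping above implements the usual rule that LayerNorm parameters and all bias vectors are exempt from weight decay, with the two dicts passed to FusedAdam as parameter groups. A self-contained sketch of the same technique on a plain PyTorch model (torch.optim.AdamW stands in for FusedAdam here; the model is a toy):

import torch
from torch import nn

model = nn.Sequential(nn.Linear(16, 16), nn.LayerNorm(16), nn.Linear(16, 4))

decay, no_decay = [], []
for module in model.modules():
    for name, param in module.named_parameters(recurse=False):
        # LayerNorm weights and every bias are exempt from decay.
        if isinstance(module, nn.LayerNorm) or name == "bias":
            no_decay.append(param)
        else:
            decay.append(param)

optimizer = torch.optim.AdamW(
    [{"params": decay}, {"params": no_decay, "weight_decay": 0.0}],
    lr=1e-4,
    weight_decay=0.01,
)
print(len(decay), len(no_decay))  # 2 weight matrices, 4 decay-free tensors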
@@ -135,7 +140,6 @@ def main():
 
     # build timer
     timer = MultiTimer()
-    skip_iters = 0
 
     # build loss tracker
     accumulated_train_loss = torch.zeros(1, dtype=torch.float32).cuda()
@@ -150,7 +154,7 @@ def main():
     logger.info("start training")
 
     for step in range(1, gpc.config.TRAIN_ITERS + 1):
-        timer.start('train-iterations')
+        timer.start("train-iterations")
         engine.train()
         if use_pipeline:
             engine.zero_grad()
@@ -158,13 +162,14 @@ def main():
             engine.step()
         else:
             tokens, types, sentence_order, loss_mask, lm_labels, padding_mask = get_batch_for_sequence_parallel(
-                trainloader)
+                trainloader
+            )
             engine.zero_grad()
             lm_loss, sop_output = engine(tokens, padding_mask, types, lm_labels)
             train_loss = engine.criterion(lm_loss, sop_output, loss_mask, sentence_order)
             engine.backward(train_loss)
             engine.step()
-        timer.stop('train-iterations', keep_in_history=True)
+        timer.stop("train-iterations", keep_in_history=True)
 
         if not gpc.is_initialized(ParallelMode.PIPELINE) or gpc.is_last_rank(ParallelMode.PIPELINE):
             accumulated_train_loss += train_loss
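The non-pipeline branch above is the standard PyTorch training step expressed through ColossalAI's engine wrapper: zero the gradients, forward, compute the loss, backward, step. The same pattern in plain PyTorch, as a runnable toy (model and data are stand-ins):

import torch
from torch import nn

model = nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
criterion = nn.MSELoss()

x, y = torch.randn(4, 8), torch.randn(4, 1)

optimizer.zero_grad()          # engine.zero_grad()
loss = criterion(model(x), y)  # engine(...) then engine.criterion(...)
loss.backward()                # engine.backward(train_loss)
optimizer.step()               # engine.step()
print(loss.item())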
@@ -177,12 +182,18 @@ def main():
             for j in range(gpc.config.EVAL_ITERS):
                 with torch.no_grad():
                     if use_pipeline:
-                        _, _, eval_loss = engine.execute_schedule(valid_data_iter,
-                                                                  forward_only=True,
-                                                                  return_output_label=False)
+                        _, _, eval_loss = engine.execute_schedule(
+                            valid_data_iter, forward_only=True, return_output_label=False
+                        )
                     else:
-                        tokens, types, sentence_order, loss_mask, lm_labels, padding_mask = get_batch_for_sequence_parallel(
-                            validloader)
+                        (
+                            tokens,
+                            types,
+                            sentence_order,
+                            loss_mask,
+                            lm_labels,
+                            padding_mask,
+                        ) = get_batch_for_sequence_parallel(validloader)
                         lm_loss, sop_output = engine(tokens, padding_mask, types, lm_labels)
                         eval_loss = engine.criterion(lm_loss, sop_output, loss_mask, sentence_order)
 
@@ -196,18 +207,22 @@ def main():
             timer_string = []
             for n, t in timer:
                 timer_string.append(f"{n}: {t.get_history_mean()*1000:.5f}")
-            timer_string = ' | '.join(timer_string)
-            lr = list(engine.optimizer.param_groups)[0]['lr']
+            timer_string = " | ".join(timer_string)
+            lr = list(engine.optimizer.param_groups)[0]["lr"]
             loss_scale = engine.optimizer.optim.loss_scale.item()
 
             if gpc.is_initialized(ParallelMode.PIPELINE):
                 ranks = [gpc.get_ranks_in_group(ParallelMode.PIPELINE)[-1]]
             else:
                 ranks = [0]
-            logger.info(f'Step {step} / {gpc.config.TRAIN_ITERS} | Train Loss: {accumulated_train_loss.item():.5g} ' +
-                        f'| Eval Loss: {accumulated_eval_loss.item():.5g} ' + f'| Loss Scale: {loss_scale}' +
-                        f"| Learning rate: {lr} | " + timer_string,
-                        ranks=ranks)
+            logger.info(
+                f"Step {step} / {gpc.config.TRAIN_ITERS} | Train Loss: {accumulated_train_loss.item():.5g} "
+                + f"| Eval Loss: {accumulated_eval_loss.item():.5g} "
+                + f"| Loss Scale: {loss_scale}"
+                + f"| Learning rate: {lr} | "
+                + timer_string,
+                ranks=ranks,
+            )
 
             for n, t in timer:
                 t.reset()
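The report above multiplies t.get_history_mean() by 1000, which suggests the history is kept in seconds and logged in milliseconds. A tiny sketch of that bookkeeping (a hypothetical timer, not ColossalAI's MultiTimer implementation):

import time


class HistoryTimer:
    # Hypothetical analogue of one named timer inside MultiTimer.
    def __init__(self):
        self.history = []
        self._start = None

    def start(self):
        self._start = time.perf_counter()

    def stop(self, keep_in_history=True):
        elapsed = time.perf_counter() - self._start
        if keep_in_history:
            self.history.append(elapsed)
        return elapsed

    def get_history_mean(self):
        return sum(self.history) / len(self.history)


t = HistoryTimer()
for _ in range(3):
    t.start()
    time.sleep(0.01)
    t.stop(keep_in_history=True)
print(f"train-iterations: {t.get_history_mean() * 1000:.5f}")  # mean in ms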
@@ -215,5 +230,5 @@ def main():
     accumulated_train_loss.zero_()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()