Support TP-compatible Torch AMP and Update trainer API (#27)

* Add gradient accumulation, fix lr scheduler

* fix FP16 optimizer and adapt Torch AMP to work with tensor parallelism (#18)

* fixed compatibility bugs between Torch AMP and tensor parallelism, plus minor fixes

* fixed trainer

* Revert "fixed trainer"

This reverts commit 2e0b0b7699.

* improved consistency between trainer, engine and schedule (#23)

Co-authored-by: 1SAA <c2h214748@gmail.com>

Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: ver217 <lhx0217@gmail.com>
Author: Frank Lee
Date: 2021-11-18 19:45:06 +08:00
Committed by: GitHub
Parent: 2b05de4c64
Commit: 3defa32aee
80 changed files with 2194 additions and 1584 deletions
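Taken together, the hunks below migrate every config and test from the old seven-value return of colossalai.initialize to an engine-centric API. A minimal sketch of the new usage, assembled from the changed call sites in this diff (CONFIG_PATH and the loop bound are placeholders, not values from this commit):

import colossalai

# initialize() now builds the engine internally and returns it with the dataloaders
engine, train_dataloader, test_dataloader = colossalai.initialize(CONFIG_PATH)

engine.train()
data_iter = iter(train_dataloader)
for _ in range(len(train_dataloader)):
    # engine.step() now consumes an explicit data iterator instead of an internal one
    output, label, loss = engine.step(data_iter)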

View File

@@ -27,8 +27,6 @@ train_data = dict(
dataloader=dict(
batch_size=BATCH_SIZE,
pin_memory=True,
-# num_workers=1,
-# shuffle=True,
)
)
@@ -63,14 +61,6 @@ loss = dict(
type='CrossEntropyLoss2D',
)
-# model = dict(
-# type='VanillaResNet',
-# block_type='ResNetBasicBlock',
-# layers=[2, 2, 2, 2],
-# num_cls=10
-# )
model = dict(
type='VisionTransformerFromConfig',
tensor_splitting_cfg=dict(
@@ -135,25 +125,26 @@ parallel = dict(
fp16 = dict(
mode=AMP_TYPE.PARALLEL,
initial_scale=2 ** 8
)
-# fp16 = dict(
-# mode=None,
-# )
-schedule = dict(
-num_microbatches=2
-)
-lr_scheduler = dict(
-type='LinearWarmupLR',
-warmup_epochs=5
-)
+engine = dict(
+schedule=dict(
+num_microbatches=2
+)
+)
+hooks = [
+dict(
+type='LRSchedulerHook',
+by_epoch=True,
+lr_scheduler_cfg=dict(
+type='LinearWarmupLR',
+warmup_steps=5
+)
+),
+]
num_epochs = 60
logging = dict(
root_path='test_vit_2d_log'
)
seed = 100
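The same config migration repeats throughout this commit: top-level schedule and lr_scheduler keys are folded into an engine block and an LRSchedulerHook entry under hooks. Collected into a single new-style fragment for reference (values copied from the + lines above):

engine = dict(
    schedule=dict(
        num_microbatches=2
    )
)
hooks = [
    dict(
        type='LRSchedulerHook',
        by_epoch=True,
        lr_scheduler_cfg=dict(
            type='LinearWarmupLR',
            warmup_steps=5
        )
    ),
]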

View File

@@ -124,14 +124,21 @@ parallel = dict(
tensor=dict(size=4, depth=1, mode='2.5d'),
)
-lr_scheduler = dict(
-type='LinearWarmupLR',
-warmup_epochs=5
-)
+hooks = [
+dict(
+type='LRSchedulerHook',
+by_epoch=True,
+lr_scheduler_cfg=dict(
+type='LinearWarmupLR',
+warmup_steps=5
+)
+),
+]
+engine = dict(
+schedule=dict(
+num_microbatches=2
+)
+)
num_epochs = 60
-num_microbatches = 1

View File

@@ -9,21 +9,22 @@ import torch.autograd
import colossalai
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
-from colossalai.engine import Engine
from colossalai.logging import get_global_dist_logger
from colossalai.nn.layer._parallel_utilities import _gather
CONFIG_PATH = Path(__file__).parent.parent.joinpath('configs/vit_2d.py')
-def eval(engine):
+def eval(engine, test_dataloader):
engine.eval()
accumulated_loss = 0
correct_sum = 0
total_sum = 0
+num_steps = len(test_dataloader)
+data_iter = iter(test_dataloader)
-for i in range(engine.schedule.num_steps):
-output, label, loss = engine.step()
+for i in range(num_steps):
+output, label, loss = engine.step(data_iter)
if gpc.is_last_rank(ParallelMode.PIPELINE):
# loss = sum(loss)
@@ -43,20 +44,22 @@ def eval(engine):
correct = torch.sum(label == output)
correct_sum += correct
total_sum += label.size(0)
-avg_loss = accumulated_loss / engine.schedule.num_steps
+avg_loss = accumulated_loss / num_steps
return correct_sum, total_sum, avg_loss
-def train(engine):
+def train(engine, train_dataloader):
engine.train()
accumulated_loss = 0
+num_steps = len(train_dataloader)
+data_iter = iter(train_dataloader)
-for i in range(engine.schedule.num_steps):
-output, label, loss = engine.step()
+for i in range(num_steps):
+output, label, loss = engine.step(data_iter)
if gpc.is_last_rank(ParallelMode.PIPELINE):
accumulated_loss += loss.detach().cpu().numpy()
-avg_loss = accumulated_loss / engine.schedule.num_steps
+avg_loss = accumulated_loss / num_steps
return avg_loss
@@ -64,25 +67,16 @@ def train(engine):
@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus")
def test_2d_parallel_vision_transformer():
# init dist
-model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize(
-CONFIG_PATH)
+engine, train_dataloader, test_dataloader = colossalai.initialize(CONFIG_PATH)
logger = get_global_dist_logger()
-engine = Engine(model=model,
-train_dataloader=train_dataloader,
-test_dataloader=test_dataloader,
-criterion=criterion,
-optimizer=optimizer,
-lr_scheduler=lr_scheduler,
-schedule=schedule)
for epoch in range(gpc.config.num_epochs):
-train_loss = train(engine)
+train_loss = train(engine, train_dataloader)
if gpc.is_last_rank(ParallelMode.PIPELINE):
logger.info(f'epoch {epoch} - train loss: {train_loss}')
if epoch % 2 == 0:
-correct_sum, total_sum, eval_loss = eval(engine)
+correct_sum, total_sum, eval_loss = eval(engine, test_dataloader)
if gpc.is_last_rank(ParallelMode.PIPELINE):
logger.info(
f'epoch {epoch} - eval loss: {eval_loss}, total: {total_sum}, '

View File

@@ -6,20 +6,22 @@ import torch.autograd
import colossalai
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
-from colossalai.engine import Engine
from colossalai.logging import get_global_dist_logger
from colossalai.nn.layer._parallel_utilities import _gather
CONFIG_PATH = Path(__file__).parent.parent.joinpath('configs/vit_2p5d.py')
-def eval(engine):
+def eval(engine, test_dataloader):
engine.eval()
accumulated_loss = 0
correct_sum = 0
total_sum = 0
+num_steps = len(test_dataloader)
+data_iter = iter(test_dataloader)
-for i in range(engine.schedule.num_steps):
-output, label, loss = engine.step()
+for i in range(num_steps):
+output, label, loss = engine.step(data_iter)
if gpc.is_last_rank(ParallelMode.PIPELINE):
accumulated_loss += loss.detach().cpu().numpy()
@@ -43,21 +45,23 @@ def eval(engine):
correct = torch.sum(label == output)
correct_sum += correct
total_sum += label.size(0)
-avg_loss = accumulated_loss / engine.schedule.num_steps
+avg_loss = accumulated_loss / num_steps
return correct_sum, total_sum, avg_loss
-def train(engine):
+def train(engine, train_dataloader):
engine.train()
accumulated_loss = 0
+num_steps = len(train_dataloader)
+data_iter = iter(train_dataloader)
+for i in range(num_steps):
+output, label, loss = engine.step(data_iter)
-for i in range(engine.schedule.num_steps):
-output, label, loss = engine.step()
if gpc.is_last_rank(ParallelMode.PIPELINE):
accumulated_loss += loss.detach().cpu().numpy()
-avg_loss = accumulated_loss / engine.schedule.num_steps
+avg_loss = accumulated_loss / num_steps
return avg_loss
@@ -65,25 +69,16 @@ def train(engine):
@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus")
def test_2p5d_parallel_vision_transformer():
# init dist
-model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize(
-CONFIG_PATH)
+engine, train_dataloader, test_dataloader = colossalai.initialize(CONFIG_PATH)
logger = get_global_dist_logger()
-engine = Engine(model=model,
-train_dataloader=train_dataloader,
-test_dataloader=test_dataloader,
-criterion=criterion,
-optimizer=optimizer,
-lr_scheduler=lr_scheduler,
-schedule=schedule)
for epoch in range(gpc.config.num_epochs):
-train_loss = train(engine)
+train_loss = train(engine, train_dataloader)
if gpc.is_last_rank(ParallelMode.PIPELINE):
logger.info(f'epoch {epoch} - train loss: {train_loss}')
if epoch % 2 == 0:
-correct_sum, total_sum, eval_loss = eval(engine)
+correct_sum, total_sum, eval_loss = eval(engine, test_dataloader)
if gpc.is_last_rank(ParallelMode.PIPELINE):
logger.info(
f'epoch {epoch} - eval loss: {eval_loss}, total: {total_sum}, '
@@ -91,4 +86,4 @@ def test_2p5d_parallel_vision_transformer():
if __name__ == '__main__':
-test_2p5d_parallel_vision_transformer()
\ No newline at end of file
+test_2p5d_parallel_vision_transformer()

View File

@@ -38,5 +38,3 @@ optimizer = dict(type='Adam', lr=0.001)
loss = dict(type='CrossEntropyLoss')
-# set_device_func = lambda global_rank, world_size: global_rank % 4
seed = 1024

View File

@@ -40,6 +40,3 @@ optimizer = dict(type='Adam', lr=0.001)
loss = dict(type='CrossEntropyLoss')
fp16 = dict(mode=AMP_TYPE.APEX)
-# set_device_func = lambda global_rank, world_size: global_rank % 4
seed = 1024

View File

@@ -40,6 +40,3 @@ optimizer = dict(type='Adam', lr=0.001)
loss = dict(type='CrossEntropyLoss')
fp16 = dict(mode=AMP_TYPE.TORCH)
-# set_device_func = lambda global_rank, world_size: global_rank % 4
seed = 1024
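AMP_TYPE.TORCH selects PyTorch's native automatic mixed precision. For background, the standard torch.cuda.amp pattern it builds on looks like the sketch below (generic PyTorch, not code from this repository; model, criterion, optimizer and loader are assumed to be defined):

import torch

scaler = torch.cuda.amp.GradScaler()
for data, target in loader:
    optimizer.zero_grad()
    # run the forward pass in mixed precision
    with torch.cuda.amp.autocast():
        loss = criterion(model(data), target)
    # scale the loss to avoid FP16 gradient underflow, then unscale before stepping
    scaler.scale(loss).backward()
    scaler.step(optimizer)
    scaler.update()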

View File

@@ -38,11 +38,9 @@ parallel = dict(
tensor=dict(size=1, mode=None)
)
-schedule = dict(
-num_microbatches=4
-)
+engine = dict(
+schedule=dict(
+num_microbatches=4
+)
+)
-num_pipeling_batches = 2
seed = 1024
-lr_scheduler = dict(type='LinearWarmupLR', warmup_steps=5)
num_epochs = 10

View File

@@ -8,7 +8,6 @@ import torch
from colossalai import initialize
from colossalai.core import global_context as gpc
-from colossalai.engine import Engine
from colossalai.logging import get_global_dist_logger
from colossalai.utils import report_memory_usage
@@ -24,20 +23,13 @@ NO_PIPE_CONFIG_PATH = osp.join(DIR_PATH, '../configs/non_pipeline_resnet_apex_am
def run_no_pipeline(config):
-model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = initialize(config)
+engine, train_dataloader, test_dataloader = initialize(config)
logger = get_global_dist_logger()
rank = torch.distributed.get_rank()
-engine = Engine(model=model,
-train_dataloader=train_dataloader,
-criterion=criterion,
-optimizer=optimizer,
-schedule=schedule)
engine.train()
logger.info('lr = %g' % engine.get_lr())
-output, label, loss = engine.step()
+output, label, loss = engine.step(iter(train_dataloader))
logger.info('Rank {} returns: {}'.format(rank, loss.item()))
logger.info('lr = %g' % engine.get_lr())
gpc.destroy()
logger.info('Test engine finished')

View File

@@ -8,7 +8,6 @@ import torch
from colossalai import initialize
from colossalai.core import global_context as gpc
-from colossalai.engine import Engine
from colossalai.logging import get_global_dist_logger
from colossalai.utils import report_memory_usage
@@ -26,21 +25,14 @@ NO_PIPE_CONFIG_PATH = osp.join(DIR_PATH, '../configs/non_pipeline_resnet.py')
def test_no_pipeline(config):
print('Test no pipeline engine start')
-model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = initialize(config)
+engine, train_dataloader, test_dataloader = initialize(config)
logger = get_global_dist_logger()
rank = torch.distributed.get_rank()
-engine = Engine(model=model,
-train_dataloader=train_dataloader,
-criterion=criterion,
-optimizer=optimizer,
-schedule=schedule)
engine.train()
logger.info('lr = %g' % engine.get_lr())
-output, label, loss = engine.step()
+output, label, loss = engine.step(iter(train_dataloader))
logger.info('Rank {} returns: {}'.format(rank, loss.item()))
logger.info('lr = %g' % engine.get_lr())
gpc.destroy()
logger.info('Test engine finished')

View File

@@ -8,7 +8,6 @@ import torch
from colossalai import initialize
from colossalai.core import global_context as gpc
-from colossalai.engine import Engine
from colossalai.logging import get_global_dist_logger
from colossalai.utils import report_memory_usage
@@ -26,21 +25,13 @@ NO_PIPE_CONFIG_PATH = osp.join(DIR_PATH, '../configs/non_pipeline_resnet_torch_a
def test_no_pipeline(config):
print('Test no pipeline engine start')
-model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = initialize(config)
+engine, train_dataloader, test_dataloader = initialize(config)
logger = get_global_dist_logger()
rank = torch.distributed.get_rank()
-engine = Engine(model=model,
-train_dataloader=train_dataloader,
-criterion=criterion,
-optimizer=optimizer,
-schedule=schedule)
engine.train()
logger.info('lr = %g' % engine.get_lr())
-output, label, loss = engine.step()
+output, label, loss = engine.step(iter(train_dataloader))
logger.info('Rank {} returns: {}'.format(rank, loss.item()))
logger.info('lr = %g' % engine.get_lr())
gpc.destroy()
logger.info('Test engine finished')

View File

@@ -5,6 +5,7 @@ import os.path as osp
import pytest
+from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.initialize import initialize
from colossalai.logging import get_global_dist_logger
@@ -22,13 +23,25 @@ CONFIG_PATH = osp.join(DIR_PATH, '../configs/pipeline_vanilla_resnet.py')
@pytest.mark.skip("This test should be invoked using the test.sh provided")
@pytest.mark.dist
def test_schedule():
-model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = initialize(CONFIG_PATH)
+engine, train_dataloader, test_dataloader = initialize(CONFIG_PATH)
logger = get_global_dist_logger()
-schedule.zero_grad()
-output, label, losses = schedule.forward_backward_step(forward_only=False)
-schedule.step()
-logger.info('losses: {}'.format([loss.item() for loss in losses]))
+model = engine.model
+optimizer = engine.optimizer
+criterion = engine.criterion
+schedule = engine._schedule
+output, label, loss = schedule.forward_backward_step(
+data_iter=iter(train_dataloader),
+model=model,
+optimizer=optimizer,
+criterion=criterion,
+forward_only=False
+)
+schedule.optimizer_step(model, optimizer)
+if gpc.is_last_rank(ParallelMode.PIPELINE):
+logger.info('losses: {}'.format(loss))
gpc.destroy()
logger.info('training finished')

View File

@@ -9,7 +9,6 @@ import torch
from colossalai import initialize
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
-from colossalai.engine import Engine
from colossalai.logging import get_global_dist_logger
NUM_BATCH = 128
@@ -23,22 +22,14 @@ PIPE_CONFIG_PATH = osp.join(DIR_PATH, '../configs/pipeline_vanilla_resnet.py')
def run_pipeline(config):
-model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = initialize(config)
+engine, train_dataloader, test_dataloader = initialize(config)
logger = get_global_dist_logger()
rank = torch.distributed.get_rank()
-engine = Engine(model=model,
-train_dataloader=train_dataloader,
-criterion=criterion,
-optimizer=optimizer,
-lr_scheduler=lr_scheduler,
-schedule=schedule)
engine.train()
logger.info('lr = %g' % engine.get_lr())
-outputs, labels, loss = engine.step()
+outputs, labels, loss = engine.step(iter(train_dataloader))
if gpc.is_last_rank(ParallelMode.PIPELINE):
logger.info('losses: {}'.format(rank, loss.item()))
logger.info('lr = %g' % engine.get_lr())
gpc.destroy()
logger.info('Test engine pipeline finished')

View File

@@ -132,9 +132,12 @@ fp16 = dict(
initial_scale=2 ** 4
)
+num_epochs = 60
lr_scheduler = dict(
type='LinearWarmupLR',
-warmup_epochs=5
+warmup_steps=5,
+total_steps=num_epochs
)
-num_epochs = 60

View File

@@ -7,23 +7,25 @@ import pytest
import torch.autograd
import colossalai
+from colossalai.builder import build_lr_scheduler
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
-from colossalai.engine import Engine
from colossalai.logging import get_global_dist_logger
from colossalai.nn.layer._parallel_utilities import _gather
CONFIG_PATH = Path(__file__).parent.parent.joinpath('configs/vit_2d.py')
-def eval(engine):
+def eval(engine, test_dataloader):
engine.eval()
accumulated_loss = 0
correct_sum = 0
total_sum = 0
+num_steps = len(test_dataloader)
+data_iter = iter(test_dataloader)
-for i in range(engine.schedule.num_steps):
-output, label, loss = engine.step()
+for i in range(num_steps):
+output, label, loss = engine.step(data_iter)
accumulated_loss += loss.detach().cpu().numpy()
output = _gather(
@@ -40,18 +42,21 @@ def eval(engine):
correct = torch.sum(label[0] == output)
correct_sum += correct
total_sum += label[0].size(0)
-avg_loss = accumulated_loss / engine.schedule.num_steps
+avg_loss = accumulated_loss / num_steps
return correct_sum, total_sum, avg_loss
-def train(engine):
+def train(engine, train_dataloader, lr_scheduler):
engine.train()
accumulated_loss = 0
+num_steps = len(train_dataloader)
+data_iter = iter(train_dataloader)
-for i in range(engine.schedule.num_steps):
-output, label, loss = engine.step()
+for i in range(num_steps):
+output, label, loss = engine.step(data_iter)
accumulated_loss += loss.squeeze(0).detach().cpu().numpy()
-avg_loss = accumulated_loss / engine.schedule.num_steps
+avg_loss = accumulated_loss / num_steps
+lr_scheduler.step()
return avg_loss
@@ -59,26 +64,18 @@ def train(engine):
@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus")
def test_2d_parallel_vision_transformer():
# init dist
-model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize(
-CONFIG_PATH)
+engine, train_dataloader, test_dataloader = colossalai.initialize(CONFIG_PATH)
+lr_scheduler = build_lr_scheduler(gpc.config.lr_scheduler, engine.optimizer)
logger = get_global_dist_logger()
-engine = Engine(model=model,
-train_dataloader=train_dataloader,
-test_dataloader=test_dataloader,
-criterion=criterion,
-optimizer=optimizer,
-lr_scheduler=lr_scheduler,
-schedule=schedule)
logger.info('start training')
for epoch in range(gpc.config.num_epochs):
-train_loss = train(engine)
+train_loss = train(engine, train_dataloader, lr_scheduler)
logger.info(f'epoch {epoch} - train loss: {train_loss}')
if epoch % 2 == 0:
-correct_sum, total_sum, eval_loss = eval(engine)
+correct_sum, total_sum, eval_loss = eval(engine, test_dataloader)
logger.info(
f'epoch {epoch} - eval loss: {eval_loss}, total: {total_sum}, '
f'correct: {correct_sum}, acc: {correct_sum / total_sum}')

View File

@@ -102,6 +102,6 @@ parallel = dict(
tensor=dict(size=4, mode='2d'),
)
-lr_scheduler = dict(type='LinearWarmupLR', warmup_epochs=5)
num_epochs = 60
+lr_scheduler = dict(type='LinearWarmupLR', warmup_steps=5, total_steps=num_epochs)

View File

@@ -125,13 +125,6 @@ parallel = dict(
tensor=dict(size=4, depth=1, mode='2.5d'),
)
-lr_scheduler = dict(
-type='LinearWarmupLR',
-warmup_epochs=5
-)
-schedule = dict(
-num_microbatches=8
-)
num_epochs = 60
+lr_scheduler = dict(type='LinearWarmupLR', warmup_steps=5, total_steps=num_epochs)

View File

@@ -116,9 +116,14 @@ hooks = [
weight_parallel_mode=ParallelMode.PARALLEL_3D_WEIGHT,
),
dict(type='LossHook'),
-# dict(type='TensorboardHook', log_dir='./tfb_logs'),
-# dict(type='SaveCheckpointHook', interval=5, checkpoint_dir='./ckpt'),
-# dict(type='LoadCheckpointHook', epoch=20, checkpoint_dir='./ckpt')
+dict(
+type='LRSchedulerHook',
+by_epoch=True,
+lr_scheduler_cfg=dict(
+type='LinearWarmupLR',
+warmup_steps=5
+)
+),
]
parallel = dict(
@@ -127,12 +132,4 @@ parallel = dict(
tensor=dict(mode='3d', size=8),
)
-# fp16 = dict(mode=AMP_TYPE.PARALLEL, initial_scale=2 ** 6)
-lr_scheduler = dict(type='LinearWarmupLR', warmup_epochs=5)
-# schedule = dict(num_microbatches=4)
num_epochs = 60
seed = 42

View File

@@ -7,23 +7,25 @@ import pytest
import torch.autograd
import colossalai
+from colossalai.builder import build_lr_scheduler
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
-from colossalai.engine import Engine
from colossalai.logging import get_global_dist_logger
from colossalai.nn.layer._parallel_utilities import _gather
CONFIG_PATH = Path(__file__).parent.parent.joinpath('configs/vit_2d.py')
-def eval(engine):
+def eval(engine, test_dataloader):
engine.eval()
accumulated_loss = 0
correct_sum = 0
total_sum = 0
+num_steps = len(test_dataloader)
+data_iter = iter(test_dataloader)
-for i in range(engine.schedule.num_steps):
-output, label, loss = engine.step()
+for i in range(num_steps):
+output, label, loss = engine.step(data_iter)
accumulated_loss += loss.detach().cpu().numpy()
output = _gather(
@@ -40,18 +42,21 @@ def eval(engine):
correct = torch.sum(label[0] == output)
correct_sum += correct
total_sum += label[0].size(0)
-avg_loss = accumulated_loss / engine.schedule.num_steps
+avg_loss = accumulated_loss / num_steps
return correct_sum, total_sum, avg_loss
-def train(engine):
+def train(engine, train_dataloader, lr_scheduler):
engine.train()
accumulated_loss = 0
+num_steps = len(train_dataloader)
+data_iter = iter(train_dataloader)
-for i in range(engine.schedule.num_steps):
-output, label, loss = engine.step()
+for i in range(num_steps):
+output, label, loss = engine.step(data_iter)
accumulated_loss += loss.detach().cpu().numpy()
-avg_loss = accumulated_loss / engine.schedule.num_steps
+avg_loss = accumulated_loss / num_steps
+lr_scheduler.step()
return avg_loss
@@ -59,25 +64,17 @@ def train(engine):
@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus")
def test_2d_parallel_vision_transformer():
# init dist
-model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize(
-CONFIG_PATH)
+engine, train_dataloader, test_dataloader = colossalai.initialize(CONFIG_PATH)
+lr_scheduler = build_lr_scheduler(gpc.config.lr_scheduler, engine.optimizer)
logger = get_global_dist_logger()
-engine = Engine(model=model,
-train_dataloader=train_dataloader,
-test_dataloader=test_dataloader,
-criterion=criterion,
-optimizer=optimizer,
-lr_scheduler=lr_scheduler,
-schedule=schedule)
logger.info('start training')
for epoch in range(gpc.config.num_epochs):
-train_loss = train(engine)
+train_loss = train(engine, train_dataloader, lr_scheduler)
logger.info(f'epoch {epoch} - train loss: {train_loss}')
if epoch % 2 == 0:
-correct_sum, total_sum, eval_loss = eval(engine)
+correct_sum, total_sum, eval_loss = eval(engine, test_dataloader)
logger.info(
f'epoch {epoch} - eval loss: {eval_loss}, total: {total_sum}, '
f'correct: {correct_sum}, acc: {correct_sum / total_sum}')

View File

@@ -4,22 +4,25 @@ import pytest
import torch.autograd
import colossalai
+from colossalai.builder import build_lr_scheduler
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
-from colossalai.engine import Engine
from colossalai.logging import get_global_dist_logger
from colossalai.nn.layer._parallel_utilities import _gather
CONFIG_PATH = Path(__file__).parent.parent.joinpath('configs/vit_2p5d.py')
-def eval(engine):
+def eval(engine, test_dataloader):
engine.eval()
accumulated_loss = 0
correct_sum = 0
total_sum = 0
+num_steps = len(test_dataloader)
+data_iter = iter(test_dataloader)
-for i in range(engine.schedule.num_steps):
-output, label, loss = engine.step()
+for i in range(num_steps):
+output, label, loss = engine.step(data_iter)
accumulated_loss += loss.detach().cpu().numpy()
output = _gather(
@@ -41,18 +44,21 @@ def eval(engine):
correct = torch.sum(label[0] == output)
correct_sum += correct
total_sum += label[0].size(0)
-avg_loss = accumulated_loss / engine.schedule.num_steps
+avg_loss = accumulated_loss / num_steps
return correct_sum, total_sum, avg_loss
-def train(engine):
+def train(engine, train_dataloader, lr_scheduler):
engine.train()
accumulated_loss = 0
+num_steps = len(train_dataloader)
+data_iter = iter(train_dataloader)
-for i in range(engine.schedule.num_steps):
-output, label, loss = engine.step()
+for i in range(num_steps):
+output, label, loss = engine.step(data_iter)
accumulated_loss += loss.detach().cpu().numpy()
-avg_loss = accumulated_loss / engine.schedule.num_steps
+avg_loss = accumulated_loss / num_steps
+lr_scheduler.step()
return avg_loss
@@ -60,29 +66,21 @@ def train(engine):
@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus")
def test_2p5d_parallel_vision_transformer():
# init dist
-model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize(
-CONFIG_PATH)
+engine, train_dataloader, test_dataloader = colossalai.initialize(CONFIG_PATH)
+lr_scheduler = build_lr_scheduler(gpc.config.lr_scheduler, engine.optimizer)
logger = get_global_dist_logger()
-engine = Engine(model=model,
-train_dataloader=train_dataloader,
-test_dataloader=test_dataloader,
-criterion=criterion,
-optimizer=optimizer,
-lr_scheduler=lr_scheduler,
-schedule=schedule)
logger.info('start training')
for epoch in range(gpc.config.num_epochs):
-train_loss = train(engine)
+train_loss = train(engine, train_dataloader, lr_scheduler)
logger.info(f'epoch {epoch} - train loss: {train_loss}')
if epoch % 2 == 0:
-correct_sum, total_sum, eval_loss = eval(engine)
+correct_sum, total_sum, eval_loss = eval(engine, test_dataloader)
logger.info(
f'epoch {epoch} - eval loss: {eval_loss}, total: {total_sum}, '
f'correct: {correct_sum}, acc: {correct_sum / total_sum}')
if __name__ == '__main__':
-test_2p5d_parallel_vision_transformer()
\ No newline at end of file
+test_2p5d_parallel_vision_transformer()

View File

@@ -1,16 +1,14 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import time
from pathlib import Path
import torch
from tqdm import tqdm
-from colossalai import initialize
+import colossalai
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
-from colossalai.engine import Engine
from colossalai.logging import get_global_dist_logger
from colossalai.trainer import Trainer
from colossalai.trainer.metric import Accuracy3D
@@ -29,7 +27,7 @@ def _train_epoch(epoch, engine):
num_samples = 0
now = time.time()
epoch_start = now
-progress = range(engine.schedule.num_steps)
+progress = range(engine._schedule.num_steps)
if gpc.get_global_rank() == 0:
progress = tqdm(progress, desc='[Epoch %d]' % epoch, miniters=1)
for step in progress:
@@ -68,7 +66,7 @@ def _eval(epoch, engine):
ParallelMode.PARALLEL_3D_WEIGHT)
total = 0
with torch.no_grad():
-for _ in range(engine.schedule.num_steps):
+for _ in range(engine._schedule.num_steps):
outputs, targets, loss = engine.step()
if isinstance(outputs, (list, tuple)):
outputs = outputs[0]
@@ -80,32 +78,25 @@ def _eval(epoch, engine):
print_rank_0(
'[Epoch %d] Evaluation loss: %.3f | Acc: %.3f%%' %
-(epoch, eval_loss / engine.schedule.num_steps,
+(epoch, eval_loss / engine._schedule.num_steps,
acc.get_accumulated_value() * 100), logger)
def train():
-model, train_dataloader, test_dataloader, criterion, \
-optimizer, schedule, lr_scheduler = initialize(CONFIG_PATH)
+# init dist
+engine, train_dataloader, test_dataloader = colossalai.initialize(CONFIG_PATH)
logger = get_global_dist_logger()
-engine = Engine(model=model,
-train_dataloader=train_dataloader,
-test_dataloader=test_dataloader,
-criterion=criterion,
-optimizer=optimizer,
-lr_scheduler=lr_scheduler,
-schedule=schedule)
-logger.info("Engine is built", ranks=[0])
-trainer = Trainer(engine=engine, hooks_cfg=gpc.config.hooks, verbose=True)
+trainer = Trainer(engine=engine, verbose=True)
logger.info("Trainer is built", ranks=[0])
logger.info("Train start", ranks=[0])
trainer.fit(train_dataloader=train_dataloader,
test_dataloader=test_dataloader,
-max_epochs=gpc.config.num_epochs,
+epochs=gpc.config.num_epochs,
+hooks_cfg=gpc.config.hooks,
display_progress=True,
test_interval=1)
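This file summarizes the trainer-side migration: hook configs move from the Trainer constructor to fit(), and max_epochs becomes epochs. A minimal sketch assembled from the + lines of this commit:

engine, train_dataloader, test_dataloader = colossalai.initialize(CONFIG_PATH)
trainer = Trainer(engine=engine, verbose=True)
trainer.fit(
    train_dataloader=train_dataloader,
    test_dataloader=test_dataloader,
    epochs=gpc.config.num_epochs,
    hooks_cfg=gpc.config.hooks,  # hooks are now passed to fit(), not to Trainer()
    display_progress=True,
    test_interval=1
)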

View File

@@ -3,6 +3,7 @@ from pathlib import Path
BATCH_SIZE = 128
IMG_SIZE = 32
+num_epochs = 200
# resnet 50
model = dict(
@@ -77,18 +78,14 @@ hooks = [
dict(type='AccuracyHook'),
dict(type='LossHook'),
dict(type='TensorboardHook', log_dir='./tfb_logs'),
+dict(
+type='LRSchedulerHook',
+by_epoch=True,
+lr_scheduler_cfg=dict(
+type='CosineAnnealingLR',
+warmup_steps=5
+)
+),
dict(type='SaveCheckpointHook', interval=5, checkpoint_dir='./ckpt'),
# dict(type='LoadCheckpointHook', epoch=20, checkpoint_dir='./ckpt')
]
-# fp16 = dict(
-# mode=AMP_TYPE.PARALLEL,
-# initial_scale=1
-# )
-lr_scheduler = dict(
-type='CosineAnnealingLR',
-T_max=200
-)
-num_epochs = 200

View File

@@ -11,6 +11,7 @@ NUM_ATTENTION_HEADS = 8
SUMMA_DIM = 2
NUM_CLASSES = 10
DEPTH = 6
+num_epochs = 60
train_data = dict(
dataset=dict(type='CIFAR10Dataset',
@@ -52,13 +53,6 @@ optimizer = dict(type='Adam', lr=0.001, weight_decay=0)
loss = dict(type='CrossEntropyLoss2D', )
-# model = dict(
-# type='VanillaResNet',
-# block_type='ResNetBasicBlock',
-# layers=[2, 2, 2, 2],
-# num_cls=10
-# )
model = dict(
type='VisionTransformerFromConfig',
tensor_splitting_cfg=dict(type='ViTInputSplitter2D', ),
@@ -114,8 +108,15 @@ hooks = [
dict(type='Accuracy2DHook'),
dict(type='LossHook'),
dict(type='TensorboardHook', log_dir='./tfb_logs'),
+dict(
+type='LRSchedulerHook',
+by_epoch=True,
+lr_scheduler_cfg=dict(
+type='LinearWarmupLR',
+warmup_steps=5
+)
+),
dict(type='SaveCheckpointHook', interval=5, checkpoint_dir='./ckpt'),
# dict(type='LoadCheckpointHook', epoch=20, checkpoint_dir='./ckpt')
]
parallel = dict(
@@ -125,11 +126,8 @@ parallel = dict(
fp16 = dict(mode=AMP_TYPE.PARALLEL, initial_scale=2 ** 8)
-lr_scheduler = dict(type='LinearWarmupLR', warmup_epochs=5)
-schedule = dict(num_microbatches=1)
-num_epochs = 60
-num_microbatches = 1
+engine = dict(
+schedule=dict(num_microbatches=1)
+)
logging = dict(root_path='./logs')

View File

@@ -1,25 +1,16 @@
import colossalai
from colossalai.core import global_context as gpc
-from colossalai.engine import Engine
from colossalai.logging import get_global_dist_logger
from colossalai.trainer import Trainer
def test_trainer():
-model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize()
+engine, train_dataloader, test_dataloader = colossalai.initialize()
logger = get_global_dist_logger()
-engine = Engine(
-model=model,
-criterion=criterion,
-optimizer=optimizer,
-lr_scheduler=lr_scheduler,
-schedule=schedule
-)
-logger.info("engine is built", ranks=[0])
trainer = Trainer(engine=engine,
-hooks_cfg=gpc.config.hooks,
verbose=True)
logger.info("trainer is built", ranks=[0])
@@ -27,7 +18,8 @@ def test_trainer():
trainer.fit(
train_dataloader=train_dataloader,
test_dataloader=test_dataloader,
-max_epochs=gpc.config.num_epochs,
+hooks_cfg=gpc.config.hooks,
+epochs=gpc.config.num_epochs,
display_progress=False,
test_interval=5
)

View File

@@ -18,14 +18,16 @@ level = os.environ['LEVEL']
CONFIG_PATH = Path(__file__).parent.parent.joinpath(f'configs/vit_2d_zero{level}.py')
-def eval(engine):
+def eval_epoch(engine: Engine, test_dataloader):
engine.eval()
accumulated_loss = 0
correct_sum = 0
total_sum = 0
+num_steps = len(test_dataloader)
+data_iter = iter(test_dataloader)
-for i in range(engine.schedule.num_steps):
-output, label, loss = engine.step()
+for i in range(num_steps):
+output, label, loss = engine.step(data_iter)
accumulated_loss += loss.detach().cpu().numpy()
output = _gather(
@@ -42,18 +44,19 @@ def eval(engine):
correct = torch.sum(label[0] == output)
correct_sum += correct
total_sum += label[0].size(0)
-avg_loss = accumulated_loss / engine.schedule.num_steps
+avg_loss = accumulated_loss / num_steps
return correct_sum, total_sum, avg_loss
-def train(engine):
+def train_epoch(engine, train_dataloader):
engine.train()
accumulated_loss = 0
-for i in range(engine.schedule.num_steps):
-output, label, loss = engine.step()
+num_steps = len(train_dataloader)
+data_iter = iter(train_dataloader)
+for i in range(num_steps):
+output, label, loss = engine.step(data_iter)
accumulated_loss += loss.detach().cpu().numpy()
-avg_loss = accumulated_loss / engine.schedule.num_steps
+avg_loss = accumulated_loss / num_steps
return avg_loss
@@ -61,30 +64,17 @@ def train(engine):
@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus")
def test_2d_parallel_vision_transformer():
# init dist
-model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize(
-CONFIG_PATH)
+engine, train_dataloader, test_dataloader = colossalai.initialize(CONFIG_PATH)
logger = get_global_dist_logger()
-engine = Engine(model=model,
-train_dataloader=train_dataloader,
-test_dataloader=test_dataloader,
-criterion=criterion,
-optimizer=optimizer,
-lr_scheduler=lr_scheduler,
-schedule=schedule)
-# for param in model.parameters():
-#     if isinstance(param, torch.HalfTensor):
-#         print(param.shape)
logger.info('start training')
for epoch in range(gpc.config.num_epochs):
-train_loss = train(engine)
+train_loss = train_epoch(engine, train_dataloader)
logger.info(f'epoch {epoch} - train loss: {train_loss}')
if epoch % 2 == 0:
-correct_sum, total_sum, eval_loss = eval(engine)
+correct_sum, total_sum, eval_loss = eval_epoch(engine, test_dataloader)
logger.info(
f'epoch {epoch} - eval loss: {eval_loss}, total: {total_sum}, '
f'correct: {correct_sum}, acc: {correct_sum / total_sum}')