[misc] update pre-commit and run all files (#4752)

* [misc] update pre-commit

* [misc] run pre-commit

* [misc] remove useless configuration files

* [misc] ignore cuda for clang-format
Author: Hongxin Liu
Date: 2023-09-19 14:20:26 +08:00
Committed by: GitHub
Parent: 3c6b831c26
Commit: 079bf3cb26
1268 changed files with 50037 additions and 38444 deletions
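The updated .pre-commit-config.yaml itself is not shown in this excerpt, so here is a rough sketch of what "ignore cuda for clang-format" amounts to: a pre-commit hook entry can exclude CUDA sources via a regex. The repo URLs are real pre-commit mirrors, but the revisions and the exclude pattern below are illustrative assumptions, not the commit's actual configuration:

repos:
  - repo: https://github.com/psf/black
    rev: 23.9.1  # illustrative revision, not taken from this commit
    hooks:
      - id: black
  - repo: https://github.com/pre-commit/mirrors-clang-format
    rev: v16.0.6  # illustrative revision
    hooks:
      - id: clang-format
        # "ignore cuda": skip .cu/.cuh files so clang-format leaves CUDA kernels alone
        exclude: \.(cu|cuh)$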

View File

@@ -8,12 +8,10 @@ import torch.distributed as dist
 from colossalai.legacy.communication import (
     recv_backward,
     recv_forward,
-    recv_obj_meta,
     send_backward,
     send_backward_recv_forward,
     send_forward,
     send_forward_recv_backward,
-    send_obj_meta,
 )
 from colossalai.legacy.context.parallel_mode import ParallelMode
 from colossalai.legacy.core import global_context as gpc
@@ -39,10 +37,10 @@ def check_forward(output_tensor, rank, logger):
         tensor = output_tensor.clone()
     else:
         tensor = recv_forward(output_tensor.shape)
-        logger.info('Rank {} received forward. Correct tensor: {}'.format(rank, check_equal(tensor, output_tensor)))
+        logger.info("Rank {} received forward. Correct tensor: {}".format(rank, check_equal(tensor, output_tensor)))
     if not gpc.is_last_rank(ParallelMode.PIPELINE):
         send_forward(tensor)
-        logger.info('Rank {} sent forward.'.format(rank))
+        logger.info("Rank {} sent forward.".format(rank))


 def check_backward(output_grad, rank, logger):
@@ -51,22 +49,26 @@ def check_backward(output_grad, rank, logger):
         grad = output_grad.clone()
     else:
         grad = recv_backward(output_grad.shape)
-        logger.info('Rank {} received backward. Correct grad: {}'.format(rank, check_equal(grad, output_grad)))
+        logger.info("Rank {} received backward. Correct grad: {}".format(rank, check_equal(grad, output_grad)))
     if not gpc.is_first_rank(ParallelMode.PIPELINE):
         send_backward(grad)
-        logger.info('Rank {} sent backward.'.format(rank))
+        logger.info("Rank {} sent backward.".format(rank))


 def check_forward_backward(output_tensor, output_grad, rank, logger):
     dist.barrier()
     if not gpc.is_first_rank(ParallelMode.PIPELINE):
         tensor = send_backward_recv_forward(output_grad, output_tensor.shape)
-        logger.info('Rank {} sent backward received forward. Correct tensor: {}'.format(
-            rank, check_equal(tensor, output_tensor)))
+        logger.info(
+            "Rank {} sent backward received forward. Correct tensor: {}".format(
+                rank, check_equal(tensor, output_tensor)
+            )
+        )
     if not gpc.is_last_rank(ParallelMode.PIPELINE):
         grad = send_forward_recv_backward(output_tensor, output_grad.shape)
-        logger.info('Rank {} sent forward received backward. Correct grad: {}'.format(
-            rank, check_equal(grad, output_grad)))
+        logger.info(
+            "Rank {} sent forward received backward. Correct grad: {}".format(rank, check_equal(grad, output_grad))
+        )


 def check_comm(size, rank, prev_rank, next_rank, logger):
@@ -84,13 +86,13 @@ def check_comm(size, rank, prev_rank, next_rank, logger):

 def run_check(rank, world_size, port):
-    launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
+    launch(config=CONFIG, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
     logger = get_dist_logger()
     rank = gpc.get_global_rank()
     prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE)
     next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE)
-    logger.info('Rank {0}: prev rank {1}, next rank {2}'.format(rank, prev_rank, next_rank))
-    logger.info('Distributed environment is initialized.')
+    logger.info("Rank {0}: prev rank {1}, next rank {2}".format(rank, prev_rank, next_rank))
+    logger.info("Distributed environment is initialized.")
     check_comm(world_size, rank, prev_rank, next_rank, logger)
     gpc.destroy()
@@ -104,5 +106,5 @@ def test_p2p():
     spawn(run_check, world_size)


-if __name__ == '__main__':
+if __name__ == "__main__":
     test_p2p()
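The hunks above call a check_equal helper that this diff does not touch, so its definition is not shown. A minimal sketch of what such a helper could look like (an assumption for readability, not the file's actual code):

import torch


def check_equal(a, b):
    # The tensor travels through the p2p ops unmodified, so an exact
    # element-wise comparison is the natural check.
    return torch.equal(a, b)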

View File

@@ -23,7 +23,7 @@ CONFIG = dict(NUM_MICRO_BATCHES=2, parallel=dict(pipeline=dict(size=2), tensor=d


 def run_schedule(rank, world_size, port):
-    launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
+    launch(config=CONFIG, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")

     # build model
     model = resnet18(num_classes=10)
@@ -33,20 +33,23 @@ def run_schedule(rank, world_size, port):
     elif gpc.get_local_rank(ParallelMode.PIPELINE) == 1:

         class Flatten(nn.Module):
-
             def forward(self, x):
                 return torch.flatten(x, 1)

         model = nn.Sequential(model.layer3, model.layer4, model.avgpool, Flatten(), model.fc)

-    print_rank_0('model is created')
+    print_rank_0("model is created")

-    train_dataset = CIFAR10(root=Path(os.environ['DATA']),
-                            download=True,
-                            transform=transforms.Compose([
-                                transforms.ToTensor(),
-                                transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]),
-                            ]))
+    train_dataset = CIFAR10(
+        root=Path(os.environ["DATA"]),
+        download=True,
+        transform=transforms.Compose(
+            [
+                transforms.ToTensor(),
+                transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]),
+            ]
+        ),
+    )

     train_dataloader = get_dataloader(
         dataset=train_dataset,
@@ -83,5 +86,5 @@ def test_pipeline_schedule():
     spawn(run_schedule, world_size)


-if __name__ == '__main__':
+if __name__ == "__main__":
     test_pipeline_schedule()
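A note on the Flatten wrapper above: torchvision's ResNet.forward calls torch.flatten(x, 1) between avgpool and fc, and that call is lost when the layers are re-assembled with nn.Sequential, so the test restores it as a module. A self-contained sketch of the second pipeline stage (the input shape is what layer2 of resnet18 produces for 32x32 CIFAR images):

import torch
from torch import nn
from torchvision.models import resnet18


class Flatten(nn.Module):
    def forward(self, x):
        return torch.flatten(x, 1)


model = resnet18(num_classes=10)
# Second pipeline stage, assembled exactly as in the test above.
stage2 = nn.Sequential(model.layer3, model.layer4, model.avgpool, Flatten(), model.fc)
x = torch.randn(2, 128, 4, 4)  # (batch, channels, h, w) leaving layer2 on CIFAR10
print(stage2(x).shape)  # torch.Size([2, 10])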

View File

@@ -16,16 +16,15 @@ NUM_EPOCHS = 200
 CONFIG = dict(fp16=dict(mode=AMP_TYPE.TORCH))


-@parameterize('model_name', ['repeated_computed_layers', 'resnet18', 'nested_model'])
+@parameterize("model_name", ["repeated_computed_layers", "resnet18", "nested_model"])
 def run_trainer(model_name):
     get_components_func = non_distributed_component_funcs.get_callable(model_name)
     model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
     model = model_builder()
     optimizer = optimizer_class(model.parameters(), lr=1e-3)
-    engine, train_dataloader, *_ = colossalai.legacy.initialize(model=model,
-                                                                optimizer=optimizer,
-                                                                criterion=criterion,
-                                                                train_dataloader=train_dataloader)
+    engine, train_dataloader, *_ = colossalai.legacy.initialize(
+        model=model, optimizer=optimizer, criterion=criterion, train_dataloader=train_dataloader
+    )

     logger = get_dist_logger()
     logger.info("engine is built", ranks=[0])
@@ -35,22 +34,21 @@ def run_trainer(model_name):
     logger.info("trainer is built", ranks=[0])

     logger.info("start training", ranks=[0])
-    trainer.fit(train_dataloader=train_dataloader,
-                test_dataloader=test_dataloader,
-                epochs=NUM_EPOCHS,
-                max_steps=3,
-                display_progress=True,
-                test_interval=5)
+    trainer.fit(
+        train_dataloader=train_dataloader,
+        test_dataloader=test_dataloader,
+        epochs=NUM_EPOCHS,
+        max_steps=3,
+        display_progress=True,
+        test_interval=5,
+    )

     torch.cuda.empty_cache()


 def run_dist(rank, world_size, port):
-    colossalai.legacy.launch(config=CONFIG,
-                             rank=rank,
-                             world_size=world_size,
-                             host='localhost',
-                             port=port,
-                             backend='nccl')
+    colossalai.legacy.launch(
+        config=CONFIG, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl"
+    )


 @pytest.mark.dist
@@ -60,5 +58,5 @@ def test_trainer_no_pipeline():
    spawn(run_dist, world_size)


-if __name__ == '__main__':
+if __name__ == "__main__":
     test_trainer_no_pipeline()
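spawn here comes from colossalai.testing and hands each worker a (rank, world_size, port) triple matching the run_dist signature above. A rough sketch of the behaviour it provides, assuming free-port selection (a simplification, not Colossal-AI's actual implementation):

import socket

import torch.multiprocessing as mp


def free_port():
    # Ask the OS for an unused port to rendezvous on.
    with socket.socket() as s:
        s.bind(("localhost", 0))
        return s.getsockname()[1]


def spawn(func, world_size):
    # mp.spawn prepends the rank, so each worker runs func(rank, world_size, port).
    mp.spawn(func, args=(world_size, free_port()), nprocs=world_size)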

View File

@@ -29,12 +29,9 @@ CONFIG = dict(


 def run_trainer_with_pipeline(rank, world_size, port):
-    colossalai.legacy.launch(config=CONFIG,
-                             rank=rank,
-                             world_size=world_size,
-                             host='localhost',
-                             port=port,
-                             backend='nccl')
+    colossalai.legacy.launch(
+        config=CONFIG, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl"
+    )

     # build model
     model = resnet18(num_classes=10)
@@ -44,35 +41,35 @@ def run_trainer_with_pipeline(rank, world_size, port):
     elif gpc.get_local_rank(ParallelMode.PIPELINE) == 1:

         class Flatten(nn.Module):
-
             def forward(self, x):
                 return torch.flatten(x, 1)

         model = nn.Sequential(model.layer3, model.layer4, model.avgpool, Flatten(), model.fc)

     # build dataloaders
-    train_dataset = CIFAR10(root=Path(os.environ['DATA']),
-                            download=True,
-                            transform=transforms.Compose([
-                                transforms.Resize(size=(IMG_SIZE, IMG_SIZE)),
-                                transforms.ToTensor(),
-                                transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
-                            ]))
+    train_dataset = CIFAR10(
+        root=Path(os.environ["DATA"]),
+        download=True,
+        transform=transforms.Compose(
+            [
+                transforms.Resize(size=(IMG_SIZE, IMG_SIZE)),
+                transforms.ToTensor(),
+                transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
+            ]
+        ),
+    )

-    train_dataloader = get_dataloader(dataset=train_dataset,
-                                      shuffle=True,
-                                      batch_size=BATCH_SIZE,
-                                      pin_memory=True,
-                                      drop_last=True)
+    train_dataloader = get_dataloader(
+        dataset=train_dataset, shuffle=True, batch_size=BATCH_SIZE, pin_memory=True, drop_last=True
+    )

     # build optimizer
     optimizer = Adam(model.parameters(), lr=0.001)
     criterion = nn.CrossEntropyLoss()

-    engine, train_dataloader, *args = colossalai.legacy.initialize(model=model,
-                                                                   optimizer=optimizer,
-                                                                   criterion=criterion,
-                                                                   train_dataloader=train_dataloader)
+    engine, train_dataloader, *args = colossalai.legacy.initialize(
+        model=model, optimizer=optimizer, criterion=criterion, train_dataloader=train_dataloader
+    )

     logger = get_dist_logger()
     logger.info("engine is built", ranks=[0])
@@ -82,11 +79,9 @@ def run_trainer_with_pipeline(rank, world_size, port):
     logger.info("start training", ranks=[0])
-    trainer.fit(train_dataloader=train_dataloader,
-                epochs=NUM_EPOCHS,
-                max_steps=3,
-                display_progress=True,
-                test_interval=5)
+    trainer.fit(
+        train_dataloader=train_dataloader, epochs=NUM_EPOCHS, max_steps=3, display_progress=True, test_interval=5
+    )

     gpc.destroy()
     torch.cuda.empty_cache()
@@ -98,5 +93,5 @@ def test_trainer_with_pipeline():
     spawn(run_trainer_with_pipeline, world_size)


-if __name__ == '__main__':
+if __name__ == "__main__":
     test_trainer_with_pipeline()
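A closing note on why black collapsed some calls above and exploded others: a call stays on one line (or on a single line inside the parentheses) whenever it fits the configured line length, and is otherwise split one argument per line with a trailing comma. Assuming a 120-character limit (the project's actual setting is not visible in this excerpt), a sketch:

def fit(**kwargs):  # stand-in for trainer.fit, for illustration only
    return kwargs


# Fits on one line inside the parentheses, so black keeps it compact:
fit(train_dataloader=None, epochs=200, max_steps=3, display_progress=True, test_interval=5)

# Too wide even for a single inner line, so black splits it and adds a
# trailing comma, which also pins the one-argument-per-line layout in place:
fit(
    train_dataloader=None,
    test_dataloader=None,
    epochs=200,
    max_steps=3,
    display_progress=True,
    test_interval=5,
)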