[misc] update pre-commit and run all files (#4752)

* [misc] update pre-commit

* [misc] run pre-commit

* [misc] remove useless configuration files

* [misc] ignore cuda for clang-format
Author: Hongxin Liu
Date: 2023-09-19 14:20:26 +08:00
Committed by: GitHub
Parent: 3c6b831c26
Commit: 079bf3cb26
1268 changed files with 50037 additions and 38444 deletions
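Almost all of those 1268 changed files are mechanical rewrites by the updated hooks rather than functional changes, and the excerpt below is representative: string quotes are normalized and long call sites are reflowed. This is consistent with a Black-style Python formatter among the hooks (an assumption; the actual hook list lives in .pre-commit-config.yaml, which is not part of this excerpt). A before/after illustration built from the hunks below:

```python
# Before the pre-commit run: single quotes, one keyword argument per continuation line.
zero_optimizer = LowLevelZeroOptimizer(zero_optimizer,
                                       overlap_communication=True,
                                       initial_scale=1,
                                       reduce_bucket_size=262144)
host = 'localhost'

# After: double quotes, arguments packed onto one line within the length limit.
zero_optimizer = LowLevelZeroOptimizer(
    zero_optimizer, overlap_communication=True, initial_scale=1, reduce_bucket_size=262144
)
host = "localhost"
```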


@@ -2,19 +2,17 @@ import copy
 import pytest
 import torch
 import torch.distributed as dist
 import torch.nn as nn
 from torch.nn.parallel import DistributedDataParallel as DDP
 from torch.testing import assert_close

 import colossalai
-from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
+from colossalai.testing import rerun_if_address_is_in_use, spawn
 from colossalai.testing.random import seed_all
 from colossalai.zero import LowLevelZeroOptimizer


 class MlpModel(nn.Module):
     def __init__(self):
         super(MlpModel, self).__init__()
         self.linear1 = nn.Linear(12, 24)
@@ -61,10 +59,9 @@ def exam_zero_1_torch_ddp_ckpt():
     # we only test stage 1 here
     # the state dicts of stage 1 and stage 2 are the same
-    zero_optimizer = LowLevelZeroOptimizer(zero_optimizer,
-                                           overlap_communication=True,
-                                           initial_scale=1,
-                                           reduce_bucket_size=262144)
+    zero_optimizer = LowLevelZeroOptimizer(
+        zero_optimizer, overlap_communication=True, initial_scale=1, reduce_bucket_size=262144
+    )

     torch_optimizer = torch.optim.Adam(torch_model.parameters(), lr=1)
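For context on the call being reflowed in this hunk: LowLevelZeroOptimizer wraps a plain torch optimizer so its states are partitioned across ranks (ZeRO stage 1, per the test comments). A minimal usage sketch under the same arguments; it assumes a distributed environment already initialized via colossalai.launch, and the parameter meanings in the comments are informed readings of the names rather than documented guarantees:

```python
import torch
import torch.nn as nn

from colossalai.zero import LowLevelZeroOptimizer

# Assumes colossalai.launch(...) has already set up the process group, as in run_dist below.
model = nn.Linear(12, 24).cuda()
base_optimizer = torch.optim.Adam(model.parameters(), lr=1)

zero_optimizer = LowLevelZeroOptimizer(
    base_optimizer,
    overlap_communication=True,  # overlap gradient reduction with the backward pass
    initial_scale=1,             # starting value for the dynamic loss scaler
    reduce_bucket_size=262144,   # elements per reduction bucket (256 * 1024)
)

loss = model(torch.randn(4, 12, device="cuda")).sum()
zero_optimizer.backward(loss)  # the wrapper owns backward so it can hook gradient reduction
zero_optimizer.step()
```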
@@ -88,7 +85,7 @@ def exam_zero_1_torch_ddp_ckpt():
     zero_state_dict = zero_optimizer.state_dict()

     # examine the original state dict
-    for torch_state, zero_state in zip(torch_state_dict['state'].values(), zero_state_dict['state'].values()):
+    for torch_state, zero_state in zip(torch_state_dict["state"].values(), zero_state_dict["state"].values()):
         for t_v, z_v in zip(torch_state.values(), zero_state.values()):
             loose_close(t_v, z_v)
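loose_close is a helper defined earlier in this test file; the diff shows only its call sites. Given the assert_close import at the top of the file, a plausible reading is a tolerance-relaxed wrapper like the sketch below; the tolerance values and dtype handling here are assumptions, not the file's actual code:

```python
import torch
from torch.testing import assert_close


def loose_close(a, b) -> None:
    # Hypothetical stand-in for the helper defined outside this diff: compare
    # values with tolerances loose enough to absorb mixed-precision noise.
    a, b = torch.as_tensor(a).float(), torch.as_tensor(b).float()
    assert_close(a, b, rtol=2e-3, atol=2e-3)
```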
@@ -100,13 +97,13 @@ def exam_zero_1_torch_ddp_ckpt():
     zero_state_dict = zero_optimizer.state_dict()

     # examine the loaded state dict
-    for torch_state, zero_state in zip(torch_state_dict['state'].values(), zero_state_dict['state'].values()):
+    for torch_state, zero_state in zip(torch_state_dict["state"].values(), zero_state_dict["state"].values()):
         for t_v, z_v in zip(torch_state.values(), zero_state.values()):
             loose_close(t_v, z_v)


 def run_dist(rank, world_size, port):
-    colossalai.launch(config=dict(), rank=rank, world_size=world_size, port=port, host='localhost')
+    colossalai.launch(config=dict(), rank=rank, world_size=world_size, port=port, host="localhost")
     exam_zero_1_torch_ddp_ckpt()
@@ -117,5 +114,5 @@ def test_zero_ckpt():
     spawn(run_dist, 2)


-if __name__ == '__main__':
+if __name__ == "__main__":
     test_zero_ckpt()
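Putting the launcher pieces from the last two hunks together, the test's skeleton looks like the sketch below: spawn starts two ranks, each of which initializes distributed state through colossalai.launch and then runs the checkpoint round trip. The pytest.mark.dist marker is an assumption based on how similar ColossalAI distributed tests are decorated; it is not visible in this diff:

```python
import pytest

import colossalai
from colossalai.testing import rerun_if_address_is_in_use, spawn


def exam_zero_1_torch_ddp_ckpt():
    ...  # the save/load/compare logic shown in the hunks above


def run_dist(rank, world_size, port):
    # Each spawned worker sets up its own process-group state, then runs the check.
    colossalai.launch(config=dict(), rank=rank, world_size=world_size, port=port, host="localhost")
    exam_zero_1_torch_ddp_ckpt()


@pytest.mark.dist  # assumed marker, not shown in the diff
@rerun_if_address_is_in_use()  # retry if the rendezvous port is already taken
def test_zero_ckpt():
    spawn(run_dist, 2)  # two ranks, matching the diff
```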