Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-02 01:28:31 +00:00
[zero] new interface for ShardedOptimv2 (#406)
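Taken together, the hunks below change how ShardedOptimizerV2 is constructed: instead of receiving an already-built optimizer followed by the sharded model, it now receives the sharded model, the optimizer class, and the shard strategy, with optimizer hyperparameters such as lr forwarded as keyword arguments. A minimal before/after sketch of that call pattern, using the names that appear in the tests (zero_model, shard_strategy); the colossalai import path is an assumption, it is not shown in this diff.

# Sketch only: the colossalai import path below is assumed, not shown in this diff.
from torch.optim import Adam
from colossalai.zero.sharded_optim import ShardedOptimizerV2  # assumed path


def build_sharded_optim(zero_model, shard_strategy, cpu_offload=False):
    # Old interface (removed by this commit):
    #   ShardedOptimizerV2(Adam(zero_model.parameters(), lr=1e-3),
    #                      zero_model,
    #                      shard_strategy,
    #                      cpu_offload=cpu_offload,
    #                      initial_scale=2**5)
    # New interface: pass the model, the optimizer *class* and the strategy;
    # optimizer kwargs such as lr are forwarded to the inner optimizer.
    return ShardedOptimizerV2(zero_model,
                              Adam,
                              shard_strategy,
                              cpu_offload=cpu_offload,
                              initial_scale=2**5,
                              lr=1e-3)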
@@ -74,8 +74,5 @@ def get_training_components():
                                        sequence_length=sequence_length,
                                        is_distrbuted=True)

-    def get_optim(model):
-        return torch.optim.Adam(model.parameters(), lr=0.001)
-
     criterion = None
-    return bert_model_builder, trainloader, testloader, get_optim, criterion
+    return bert_model_builder, trainloader, testloader, torch.optim.Adam, criterion
@@ -49,8 +49,5 @@ def get_training_components():
     trainloader = DummyDataLoader()
     testloader = DummyDataLoader()

-    def optim_builder(model):
-        return torch.optim.Adam(model.parameters(), lr=0.001)
-
     criterion = torch.nn.CrossEntropyLoss()
-    return model_builder, trainloader, testloader, optim_builder, criterion
+    return model_builder, trainloader, testloader, torch.optim.Adam, criterion
@@ -43,8 +43,5 @@ def get_training_components():
     trainloader = DummyDataLoader()
     testloader = DummyDataLoader()

-    def optim_builder(model):
-        return torch.optim.Adam(model.parameters(), lr=0.001)
-
     criterion = torch.nn.CrossEntropyLoss()
-    return model_builder, trainloader, testloader, optim_builder, criterion
+    return model_builder, trainloader, testloader, torch.optim.Adam, criterion
@@ -29,8 +29,5 @@ def get_resnet_training_components():
     trainloader = get_cifar10_dataloader(train=True)
     testloader = get_cifar10_dataloader(train=False)

-    def optim_builder(model):
-        return torch.optim.Adam(model.parameters(), lr=0.001)
-
     criterion = torch.nn.CrossEntropyLoss()
-    return model_builder, trainloader, testloader, optim_builder, criterion
+    return model_builder, trainloader, testloader, torch.optim.Adam, criterion
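The four hunks above apply the same change to each test component module: the helper no longer defines an optim_builder closure and instead returns the optimizer class (torch.optim.Adam) directly, leaving parameter groups and learning rate to the caller. A minimal sketch of the resulting component tuple, with hypothetical placeholder builders and loaders standing in for the real ones:

import torch
import torch.nn as nn


def get_training_components():
    # Hypothetical placeholders for the model builder and data loaders that the
    # real component modules define; only the return tuple shape matters here.
    def model_builder(checkpoint=False):
        return nn.Linear(8, 2)

    trainloader, testloader = [], []
    criterion = nn.CrossEntropyLoss()
    # Old: returned an optim_builder closure wrapping Adam(lr=0.001).
    # New: return the optimizer class itself; the caller instantiates it.
    return model_builder, trainloader, testloader, torch.optim.Adam, criterion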
@@ -19,11 +19,11 @@ def run_train():
     # FIXME: test bert
     for model_name in test_models:
         get_components_func = non_distributed_component_funcs.get_callable(model_name)
-        model_builder, train_dataloader, _, optimizer_builder, criterion = get_components_func()
+        model_builder, train_dataloader, _, optimizer_class, criterion = get_components_func()

         model = model_builder(checkpoint=False)
         engine, train_dataloader, *args = colossalai.initialize(model=model,
-                                                                 optimizer=optimizer_builder(model),
+                                                                 optimizer=optimizer_class(model.parameters(), lr=1e-3),
                                                                  criterion=criterion,
                                                                  train_dataloader=train_dataloader)

@@ -84,7 +84,7 @@ def run_engine(rank, world_size, port):

 @pytest.mark.dist
 def test_engine():
-    world_size = 4
+    world_size = 2
     run_func = partial(run_engine, world_size=world_size, port=free_port())
     mp.spawn(run_func, nprocs=world_size)

@@ -25,9 +25,9 @@ def run_trainer_no_pipeline(rank, world_size, port):
     test_models = ['repeated_computed_layers', 'resnet18', 'nested_model']
     for name in test_models:
         get_components_func = non_distributed_component_funcs.get_callable(name)
-        model_builder, train_dataloader, test_dataloader, optimizer_builder, criterion = get_components_func()
+        model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
         model = model_builder()
-        optimizer = optimizer_builder(model)
+        optimizer = optimizer_class(model.parameters(), lr=1e-3)
         engine, train_dataloader, *_ = colossalai.initialize(model=model,
                                                              optimizer=optimizer,
                                                              criterion=criterion,
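On the caller side (the engine and trainer tests above), the fifth element of the component tuple is now a class rather than a builder, so the tests construct the optimizer themselves and choose the learning rate at the call site. A small self-contained illustration of the difference:

import torch
import torch.nn as nn

model = nn.Linear(4, 2)


# Old style: a builder closure hides the hyperparameters.
def optimizer_builder(m):
    return torch.optim.Adam(m.parameters(), lr=0.001)


old_optim = optimizer_builder(model)

# New style: the component tuple carries torch.optim.Adam itself, and the test
# passes the parameters and learning rate explicitly.
optimizer_class = torch.optim.Adam
new_optim = optimizer_class(model.parameters(), lr=1e-3)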
@@ -44,19 +44,21 @@ def run_dist(rank, world_size, port, cpu_offload, shard_strategy):
     shard_strategy = shard_strategy()
     for model_name in test_models:
         get_components_func = non_distributed_component_funcs.get_callable(model_name)
-        model, train_dataloader, test_dataloader, optimizer, criterion = get_components_func()
+        model, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
         model = model(checkpoint=True).cuda()
         zero_model = ShardedModelV2(copy.deepcopy(model),
                                     shard_strategy,
                                     offload_config=dict(device='cpu') if cpu_offload else None)
         if dist.get_world_size() > 1:
             model = DDP(model)
-        optim = Adam(model.parameters(), lr=1e-3)
-        sharded_optim = ShardedOptimizerV2(Adam(zero_model.parameters(), lr=1e-3),
-                                           zero_model,
+        lr = 1e-3
+        optim = optimizer_class(model.parameters(), lr=lr)
+        sharded_optim = ShardedOptimizerV2(zero_model,
+                                           optimizer_class,
                                            shard_strategy,
                                            cpu_offload=cpu_offload,
-                                           initial_scale=2**5)
+                                           initial_scale=2**5,
+                                           lr=lr)
         for i, (data, label) in enumerate(train_dataloader):
             if i > 2:
                 break
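The hunk above is the core of the change: the test keeps a DDP baseline optimizer and a ShardedOptimizerV2 on the same hyperparameters by sharing a single lr, which the new interface accepts as a plain keyword argument. A consolidated sketch of that construction; the import path is assumed, and the surrounding setup (model, zero_model, shard_strategy) is passed in as parameters:

from colossalai.zero.sharded_optim import ShardedOptimizerV2  # assumed import path


def build_optimizers(model, zero_model, optimizer_class, shard_strategy, cpu_offload):
    # Keep the baseline and the sharded optimizer on identical hyperparameters
    # so their results can be compared; lr is forwarded by ShardedOptimizerV2
    # to the inner optimizer it builds from optimizer_class.
    lr = 1e-3
    optim = optimizer_class(model.parameters(), lr=lr)
    sharded_optim = ShardedOptimizerV2(zero_model,
                                       optimizer_class,
                                       shard_strategy,
                                       cpu_offload=cpu_offload,
                                       initial_scale=2**5,
                                       lr=lr)
    return optim, sharded_optim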
@@ -59,11 +59,12 @@ def run_dist(rank, world_size, port, shard_strategy):
         if dist.get_world_size() > 1:
             model = DDP(model)
         optim = Adam(model.parameters(), lr=1e-3)
-        sharded_optim = ShardedOptimizerV2(CPUAdam(zero_model.parameters(), lr=1e-3),
-                                           zero_model,
+        sharded_optim = ShardedOptimizerV2(zero_model,
+                                           CPUAdam,
                                            shard_strategy,
                                            initial_scale=2**5,
-                                           cpu_offload=True)
+                                           cpu_offload=True,
+                                           lr=1e-3)
         for i, (data, label) in enumerate(train_dataloader):
             if i > 2:
                 break
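The CPU-offload test follows the same shape but hands ShardedOptimizerV2 the CPUAdam class and forces cpu_offload=True, so optimizer states stay on the host. A minimal sketch, assuming both import paths (neither is shown in this diff):

from colossalai.nn.optimizer import CPUAdam  # assumed import path
from colossalai.zero.sharded_optim import ShardedOptimizerV2  # assumed import path


def build_cpu_offload_optim(zero_model, shard_strategy):
    # ShardedOptimizerV2 constructs the CPUAdam instance itself; lr is passed
    # through as a keyword argument, matching the hunk above.
    return ShardedOptimizerV2(zero_model,
                              CPUAdam,
                              shard_strategy,
                              initial_scale=2**5,
                              cpu_offload=True,
                              lr=1e-3)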