[zero] fix init bugs in zero context (#686)

* adapt model weight initialization for methods in PyTorch nn.init
HELSON
2022-04-07 17:38:45 +08:00
committed by GitHub
parent 0ed7042f42
commit d7ecaf362b
8 changed files with 117 additions and 86 deletions
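For orientation, the "nn.init" part of the message refers to models that set their weights with torch.nn.init routines while they are being constructed; in the tests below that construction happens inside ZeroInitContext. A minimal PyTorch-only sketch of such a module follows (class and layer names are made up, and ColossalAI itself is not used here):

import torch
import torch.nn as nn


class TinyExpert(nn.Module):
    """A toy module whose weights are set explicitly with torch.nn.init."""

    def __init__(self, hidden: int = 8):
        super().__init__()
        self.proj = nn.Linear(hidden, hidden)
        # Explicit initialization through torch.nn.init; this is the kind of
        # call the zero init context has to handle during model construction.
        nn.init.kaiming_uniform_(self.proj.weight, a=5 ** 0.5)
        nn.init.zeros_(self.proj.bias)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)


model = TinyExpert()
print(model.proj.weight.shape, model.proj.weight.dtype)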

View File

@@ -51,36 +51,36 @@ def run_moe_zero_init(init_device_type, shard_strategy_class):
     with ZeroInitContext(target_device=init_device,
                          shard_strategy=shard_strategy_class(),
                          shard_param=True,
-                         model_numel_tensor=model_numel_tensor,
-                         rm_torch_payload_on_the_fly=False):
+                         model_numel_tensor=model_numel_tensor):
         model = MoeModel()

     for name, param in model.named_parameters():
         assert hasattr(param, 'colo_attr')

         # the weights in the gate should be fp32
         if 'gate' in name:
             assert param.colo_attr.sharded_data_tensor.dtype == torch.float32
         else:
             assert param.colo_attr.sharded_data_tensor.dtype == torch.half

         # the parameters in moe experts and its gate should not be sharded
         if ('experts' in name) or ('gate' in name) or ('residual_combine' in name):
             assert not param.colo_attr.sharded_data_tensor.is_sharded
+            assert param.colo_attr.sharded_data_tensor.data_ptr() == param.data.data_ptr()
         else:
             assert param.colo_attr.sharded_data_tensor.is_sharded

         # the parameters in moe experts is not replicated
         if 'experts' in name:
             assert not param.is_replicated
         else:
             assert param.is_replicated

         if param.colo_attr.param_is_sharded:
             assert param.colo_attr.sharded_data_tensor.payload.device.type == init_device.type, \
                 f'{param.colo_attr.sharded_data_tensor.payload.device.type} vs. {init_device.type}'
         else:
             assert param.colo_attr.sharded_data_tensor.payload.device.type == 'cuda'
@@ -91,7 +91,6 @@ def _run_dist(rank, world_size, port):
 @pytest.mark.dist
 @pytest.mark.parametrize("world_size", [2, 4])
 @pytest.mark.skip("Under development")
 @rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
 def test_moe_zero_init(world_size):
     run_func = partial(_run_dist, world_size=world_size, port=free_port())
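The assertion added in the first hunk compares data_ptr() values: for the unsharded MoE parameters, the payload tracked by colo_attr should share its storage with param.data. A plain-PyTorch illustration of what data_ptr() equality means (tensor names are made up):

import torch

base = torch.zeros(4)
alias = base.view(2, 2)   # a view shares storage with the original tensor
copy = base.clone()       # a clone allocates a fresh buffer

print(base.data_ptr() == alias.data_ptr())  # True: same underlying storage
print(base.data_ptr() == copy.data_ptr())   # False: independent storage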

View File

@@ -28,12 +28,9 @@ def run_model_test(enable_autocast, shard_strategy_class):
     get_components_func = non_distributed_component_funcs.get_callable('no_leaf_module')
     _, train_dataloader, _, _, criterion = get_components_func()
-    rm_torch_payload_on_the_fly = False
-    with ZeroInitContext(target_device=torch.cuda.current_device(),
+    with ZeroInitContext(target_device=torch.device('cuda', torch.cuda.current_device()),
                          shard_strategy=shard_strategy,
-                         shard_param=True,
-                         rm_torch_payload_on_the_fly=rm_torch_payload_on_the_fly):
+                         shard_param=True):
         zero_model = MoeModel()
     zero_model = ShardedModelV2(zero_model, shard_strategy, use_memory_tracer=True)
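The target_device change in the hunk above, repeated in several hunks below, swaps an integer for a real device object: torch.cuda.current_device() returns a plain int index, whereas torch.device('cuda', index) is an explicit torch.device. A small sketch of the difference (runs only on a CUDA build):

import torch

if torch.cuda.is_available():
    idx = torch.cuda.current_device()    # plain int, e.g. 0
    target = torch.device('cuda', idx)   # explicit device object
    print(type(idx).__name__, target, target.type, target.index)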

View File

@@ -60,8 +60,7 @@ def _run_test_sharded_optim_v2(cpu_offload, shard_strategy_class, use_cpuadam, g
     with ZeroInitContext(
             target_device=torch.device('cpu') if cpu_offload else torch.device(f'cuda:{get_current_device()}'),
             shard_strategy=shard_strategy,
-            shard_param=True,
-            rm_torch_payload_on_the_fly=False):
+            shard_param=True):
         zero_model = MoeModel()
     zero_model = ShardedModelV2(

View File

@@ -28,7 +28,6 @@ def run_model_test(init_device_type, shard_strategy_class):
     for get_components_func in non_distributed_component_funcs:
         model_builder, _, _, _, _ = get_components_func()
         model_numel_tensor = torch.zeros(1, dtype=torch.int)
         if init_device_type == 'cuda':
             init_device = torch.device(f"cuda:{get_current_device()}")
         elif init_device_type == 'cpu':
@@ -40,8 +39,7 @@ def run_model_test(init_device_type, shard_strategy_class):
         with ZeroInitContext(target_device=init_device,
                              shard_strategy=shard_strategy_class(),
                              shard_param=True,
-                             model_numel_tensor=model_numel_tensor,
-                             rm_torch_payload_on_the_fly=False):
+                             model_numel_tensor=model_numel_tensor):
             model = model_builder(checkpoint=True)
         for param in model.parameters():

View File

@@ -29,12 +29,9 @@ def run_model_test(enable_autocast, shard_strategy_class):
     get_components_func = non_distributed_component_funcs.get_callable(model_name)
     model_builder, train_dataloader, _, _, criterion = get_components_func()
-    rm_torch_payload_on_the_fly = False
-    with ZeroInitContext(target_device=torch.cuda.current_device(),
+    with ZeroInitContext(target_device=torch.device('cuda', torch.cuda.current_device()),
                          shard_strategy=shard_strategy,
-                         shard_param=True,
-                         rm_torch_payload_on_the_fly=rm_torch_payload_on_the_fly):
+                         shard_param=True):
         zero_model = model_builder(checkpoint=True)
     zero_model = ShardedModelV2(zero_model, shard_strategy, use_memory_tracer=True)

View File

@@ -60,8 +60,7 @@ def _run_test_sharded_optim_v2(cpu_offload, shard_strategy_class, use_cpuadam, g
     with ZeroInitContext(
             target_device=torch.device(f'cpu:0') if cpu_offload else torch.device(f'cuda:{get_current_device()}'),
             shard_strategy=shard_strategy,
-            shard_param=True,
-            rm_torch_payload_on_the_fly=False):
+            shard_param=True):
         zero_model = model_builder(checkpoint=True)
     zero_model = ShardedModelV2(
         zero_model,

View File

@@ -27,10 +27,9 @@ def run_zero_state_dict(shard_strategy_class):
     get_components_func = non_distributed_component_funcs.get_callable(model_name)
     model_builder, train_dataloader, test_dataloader, optimizer, criterion = get_components_func()
-    with ZeroInitContext(target_device=torch.cuda.current_device(),
+    with ZeroInitContext(target_device=torch.device('cuda', torch.cuda.current_device()),
                          shard_strategy=shard_strategy,
-                         shard_param=True,
-                         rm_torch_payload_on_the_fly=False):
+                         shard_param=True):
         zero_model = model_builder(checkpoint=True)
     zero_model = ShardedModelV2(zero_model, shard_strategy)