[zero] add tensor placement policies (#743)

* add tensor placement policies

* polish comments

* update moe unit tests
ver217, 2022-04-13 15:00:48 +08:00, committed by GitHub
parent 22c4b88d56, commit e396bb71f2
11 changed files with 139 additions and 83 deletions

@@ -32,7 +32,7 @@ def run_model_test(enable_autocast, shard_strategy_class):
                           shard_strategy=shard_strategy,
                           shard_param=True):
         zero_model = MoeModel(checkpoint=True)
-        zero_model = ShardedModelV2(zero_model, shard_strategy, use_memory_tracer=True)
+        zero_model = ShardedModelV2(zero_model, shard_strategy)
     # check whether parameters are identical in ddp
     for name, p in zero_model.named_parameters():

@@ -69,8 +69,7 @@ def _run_test_sharded_optim_v2(cpu_offload,
     zero_model = ShardedModelV2(zero_model,
                                 shard_strategy,
-                                offload_config=dict(device='cpu') if cpu_offload else None,
-                                use_memory_tracer=gpu_margin_mem_ratio > 0.0,
+                                tensor_placement_policy='cpu' if cpu_offload else 'cuda',
                                 reuse_fp16_shard=reuse_fp16_shard)
     # check whether parameters are identical in ddp
@@ -88,7 +87,6 @@ def _run_test_sharded_optim_v2(cpu_offload,
     sharded_optim = optimizer_class(zero_model.parameters(), lr=1e-3)
     sharded_optim = ShardedOptimizerV2(zero_model,
                                        sharded_optim,
-                                       cpu_offload=cpu_offload,
                                        initial_scale=2**5,
                                        gpu_margin_mem_ratio=gpu_margin_mem_ratio)
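
Taken together, the hunks show the API change behind this commit: the separate `use_memory_tracer`, `offload_config`, and `cpu_offload` knobs on `ShardedModelV2` / `ShardedOptimizerV2` are folded into a single `tensor_placement_policy` string on the model wrapper. Below is a minimal sketch of the resulting call pattern; only the arguments visible in these hunks are confirmed, while the import paths, the `ZeroInitContext` keyword names, and the toy `nn.Linear` (standing in for the tests' `MoeModel`) are assumptions based on the ColossalAI code base of that period, and a distributed context is assumed to have been set up via `colossalai.launch` beforehand.

    import torch
    from colossalai.zero.init_ctx import ZeroInitContext          # assumed path
    from colossalai.zero.shard_utils import TensorShardStrategy   # assumed path
    from colossalai.zero.sharded_model import ShardedModelV2
    from colossalai.zero.sharded_optim import ShardedOptimizerV2

    shard_strategy = TensorShardStrategy()

    # Parameters must be created under ZeroInitContext so they carry shard
    # metadata; the first hunk's context lines (shard_param=True) show the
    # tests doing the same.
    with ZeroInitContext(target_device=torch.device('cuda'),
                         shard_strategy=shard_strategy,
                         shard_param=True):
        model = torch.nn.Linear(8, 8)  # stand-in for MoeModel(checkpoint=True)

    # 'cpu' keeps shards in host memory (the old cpu_offload=True path);
    # 'cuda' keeps them on the GPU, as the second hunk selects between.
    zero_model = ShardedModelV2(model, shard_strategy,
                                tensor_placement_policy='cuda')

    optim = torch.optim.Adam(zero_model.parameters(), lr=1e-3)
    # cpu_offload is gone here; placement now follows the model's policy.
    sharded_optim = ShardedOptimizerV2(zero_model, optim,
                                       initial_scale=2**5,
                                       gpu_margin_mem_ratio=0.0)

The plural in the commit title suggests further policies beyond the `'cpu'` and `'cuda'` values visible in these hunks (e.g. an adaptive mode), but only those two appear in the diff above.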