[zero] find miss code (#378)

Jiarui Fang
2022-03-10 17:51:50 +08:00
committed by Frank Lee
parent 6b6002962a
commit b5f43acee3
6 changed files with 114 additions and 8 deletions


@@ -88,14 +88,19 @@ class ShardedOptimizerV2(ColossalaiOptimizer):
             self.zero_grad()
             return
-        # Write master param to p.data
+        # Assign master param pointers to p.data.
+        # We will not trigger a data copy here.
         for group in self.optim.param_groups:
             for p in group['params']:
                 p.data = self.master_params[p]
                 # Now p.data is sharded
                 # So optimizer states are sharded naturally
         ret = self.optim.step(*args, **kwargs)
-        # Write master param to payload
+        # Copy master param data (fp32) to the payload of col_attr (fp16)
+        # TODO() improve efficiency by gathering tensors into a chunk and transferring
+        # a chunk.
         for group in self.optim.param_groups:
             for p in group['params']:
                 is_param_sharded = p.col_attr.data.is_sharded
@@ -108,7 +113,10 @@ class ShardedOptimizerV2(ColossalaiOptimizer):
                     self.shard_strategy.shard([p.col_attr.data])
                 # We have to use `copy_payload` instead of `reset_payload`
                 # Since p.data is fp32 and p.col_attr.data is fp16
                 # TODO() optimize this line
                 p.col_attr.data.copy_payload(p.data)
+                if not is_param_sharded:
+                    # We gather full fp16 param here
+                    self.shard_strategy.gather([p.col_attr.data])
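
To make the flow of step() above easier to follow: the optimizer first points each p.data at its fp32 master copy (a pointer swap, not a data copy), lets the wrapped optimizer update the fp32 values, and then writes the results back into the fp16 payload. Below is a minimal sketch of that pattern in plain PyTorch, assuming a simple non-sharded model; the names model, optim, master_params, fp16_payloads and mixed_precision_step are illustrative, and this is not the ColossalAI implementation.

import torch

# Hypothetical sketch of the fp32-master / fp16-payload flow used by
# ShardedOptimizerV2.step() above, written with plain PyTorch objects only.

model = torch.nn.Linear(4, 2)
model.half()                                    # parameters are stored in fp16
optim = torch.optim.SGD(model.parameters(), lr=0.1)

# One fp32 master copy per fp16 parameter (the role of self.master_params above).
master_params = {p: p.detach().clone().float() for p in model.parameters()}

def mixed_precision_step():
    fp16_payloads = {}
    for group in optim.param_groups:
        for p in group['params']:
            # Pointer swap only, no data copy: the optimizer and its states see fp32.
            fp16_payloads[p] = p.data
            p.data = master_params[p]
            # Dummy fp32 gradient so the step below has something to consume.
            p.grad = torch.zeros_like(p.data)

    optim.step()                                # updates the fp32 master copies

    for group in optim.param_groups:
        for p in group['params']:
            # Copy the updated fp32 values back into the fp16 storage; copy_() casts
            # the dtype, which is what copy_payload does for the ShardedTensor payload.
            fp16_payloads[p].copy_(p.data)
            p.data = fp16_payloads[p]

mixed_precision_step()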


@@ -14,7 +14,6 @@ class ShardedTensor(object):
         self.world_size = dist.get_world_size(self.process_group)
         self.local_rank = dist.get_rank(self.process_group)
         self._is_sharded = False
         self._payload = tensor
         self._origin_shape = tensor.shape
         self._origin_numel = tensor.numel()
@@ -41,7 +40,7 @@ class ShardedTensor(object):
         return self._payload
     def copy_payload(self, tensor):
-        self._payload.copy_(tensor)
+        self._payload.view(-1).copy_(tensor.view(-1))
     def reset_payload(self, tensor):
         del self._payload
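
The one-line change above routes the copy through flat views. A small, hypothetical example of why: the destination payload and the incoming tensor can hold the same number of elements in different shapes (and dtypes), and Tensor.copy_ only succeeds when the source broadcasts to the destination, so flattening both sides keeps the copy (and the fp32 to fp16 cast) legal regardless of shape. The snippet below only mimics that behavior and is not the ShardedTensor class.

import torch

# Hypothetical illustration of the flat-view copy in copy_payload above.
payload = torch.zeros(8, dtype=torch.float16)                  # flat fp16 shard storage
master = torch.arange(8, dtype=torch.float32).reshape(2, 4)    # fp32 data, another shape

# payload.copy_(master) would raise a shape-mismatch error, because (2, 4) does not
# broadcast to (8,). Flattening both sides makes the element-wise copy legal and
# still performs the fp32 -> fp16 cast:
payload.view(-1).copy_(master.view(-1))
print(payload)   # tensor([0., 1., 2., 3., 4., 5., 6., 7.], dtype=torch.float16)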