[zero] hijack p.grad in sharded model (#554)

* hijack p.grad in sharded model

* polish comments

* polish comments
ver217 committed 2022-03-30 18:14:50 +08:00 (committed by GitHub)
parent f552b11294
commit 014bac0c49
6 changed files with 45 additions and 55 deletions

@@ -92,7 +92,8 @@ def check_params(model, zero_model, loose=False):
 def check_grads_padding(model, zero_model, loose=False):
     rank = dist.get_rank()
     for p, zero_p in zip(model.parameters(), zero_model.parameters()):
-        zero_grad = zero_p.grad.clone().to(p.device)
+        # zero_grad = zero_p.grad.clone().to(p.device)
+        zero_grad = zero_p.col_attr.saved_grad.payload.clone().to(p.device)
         chunks = torch.flatten(p.grad).chunk(dist.get_world_size())
         if rank >= len(chunks):
             continue
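
The test no longer reads `zero_p.grad`: once the sharded model hijacks `p.grad`, the gradient lives in the sharded parameter's `col_attr.saved_grad` buffer, and the test compares its `.payload` against the matching chunk of the unsharded gradient. Below is a minimal sketch of the hijacking pattern this implies; `ShardedTensor`, `ColAttr`, and `hijack_grads` are hypothetical stand-ins, and only the `col_attr.saved_grad.payload` access comes from the diff above.

import torch

class ShardedTensor:
    # Hypothetical stand-in for the object behind `col_attr.saved_grad`:
    # it simply exposes the underlying tensor via a `.payload` attribute.
    def __init__(self, payload: torch.Tensor):
        self.payload = payload

class ColAttr:
    # Hypothetical container mirroring the `p.col_attr` used in the test.
    def __init__(self):
        self.saved_grad = None

def hijack_grads(model: torch.nn.Module):
    # After backward, move each parameter's `.grad` into
    # `col_attr.saved_grad` and clear `.grad`, so the ZeRO runtime
    # rather than autograd owns the gradient memory (assumed behavior,
    # inferred from the test change above).
    for p in model.parameters():
        if not hasattr(p, 'col_attr'):
            p.col_attr = ColAttr()
        if p.grad is not None:
            p.col_attr.saved_grad = ShardedTensor(p.grad.detach().clone())
            p.grad = None  # `p.grad` no longer holds the gradient

# usage sketch
model = torch.nn.Linear(4, 2)
model(torch.randn(3, 4)).sum().backward()
hijack_grads(model)
for p in model.parameters():
    assert p.grad is None
    assert p.col_attr.saved_grad.payload is not None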