[zero] refactor low level zero to shard evenly (#4030)

* refactor low level zero

* fix zero2 and support cpu offload

* avg gradient and modify unit test

* refactor grad store, support layer drop

* refactor bucket store, support grad accumulation

* fix and update unit test of zero and ddp

* compatible with tp, ga and unit test

* fix memory leak and polish

* add zero layer drop unittest

* polish code

* fix import err in unit test

* support different comm dtype, modify docstring style

* polish code

* test padding and fix

* fix unit test of low level zero

* fix pad recording in bucket store

* support some models

* polish
LuGY authored on 2023-06-30 15:30:50 +08:00; committed by Hongxin Liu
parent 5187c96b7c
commit c6ab96983a
8 changed files with 424 additions and 470 deletions
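The refactor surfaces two interface changes that recur throughout the test diffs below: gradient accumulation is now configured once at construction time via grad_accumulate_interval, and backward() no longer takes a sync_grad flag (the explicit _sync_grad() calls disappear as well). A minimal usage sketch, assuming the constructor arguments shown in these tests and hypothetical MlpModel/loader objects (the import path is also an assumption):

    import torch
    from colossalai.zero import LowLevelZeroOptimizer   # import path assumed

    model = MlpModel().cuda()                           # hypothetical model
    optimizer = LowLevelZeroOptimizer(torch.optim.Adam(model.parameters(), lr=1e-3),
                                      overlap_communication=True,
                                      clip_grad_norm=1.0,
                                      grad_accumulate_interval=2)

    for cur_data in loader:                             # hypothetical dataloader
        loss = model(cur_data).sum()
        optimizer.backward(loss)    # old API: backward(loss, sync_grad=False)
        optimizer.step()            # no manual optimizer._sync_grad() beforehand
        optimizer.zero_grad()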


@@ -39,37 +39,37 @@ def exam_zero_1_2_grad_acc():
                                              overlap_communication=True,
                                              initial_scale=32,
                                              clip_grad_norm=1.0,
+                                             grad_accumulate_interval=2,
                                              verbose=True)
     zero2_optimizer = LowLevelZeroOptimizer(zero2_optimizer,
                                             overlap_communication=True,
                                             partition_grad=True,
                                             initial_scale=32,
-                                            clip_grad_norm=1.0)
+                                            clip_grad_norm=1.0,
+                                            grad_accumulate_interval=2)

     # create data
     seed_all(2021 + local_rank)
     input_data1 = torch.randn(32, 128).cuda()
     input_data2 = torch.randn(32, 128).cuda()

-    def fwd_bwd_func(number, cur_data):
+    def fwd_bwd_func(number, cur_data, check_flag):
         # zero-dp forward
         zero1_output = zero1_model(cur_data)
         zero2_output = zero2_model(cur_data)
         assert torch.equal(zero1_output, zero2_output)

         # zero-dp backward
-        zero1_optimizer.backward(zero1_output.sum().float(), sync_grad=False)
-        zero2_optimizer.backward(zero2_output.sum().float(), sync_grad=False)
+        zero1_optimizer.backward(zero1_output.sum().float())
+        zero2_optimizer.backward(zero2_output.sum().float())

-        for (n, z1p), z2p in zip(zero1_model.named_parameters(), zero2_model.parameters()):
-            if z2p.grad is not None:
-                # print(local_rank, n, z1p.shape, torch.max(z2p.grad), torch.max(torch.abs(z1p.grad - z2p.grad)))
-                assert torch.equal(z1p.grad, z2p.grad)
-
-        zero1_optimizer._sync_grad()
-        zero2_optimizer._sync_grad()
+        if check_flag:
+            for (n, z1p), z2p in zip(zero1_model.named_parameters(), zero2_model.parameters()):
+                if z2p.grad is not None:
+                    # print(local_rank, n, z1p.shape, torch.max(z2p.grad), torch.max(torch.abs(z1p.grad - z2p.grad)))
+                    assert torch.equal(z1p.grad, z2p.grad)

-    fwd_bwd_func(0, input_data1)
-    fwd_bwd_func(1, input_data2)
+    fwd_bwd_func(0, input_data1, True)
+    fwd_bwd_func(1, input_data2, False)

     # step
     zero1_optimizer.step()
@@ -101,7 +101,8 @@ def exam_zero_1_grad_acc():
     zero_optimizer = LowLevelZeroOptimizer(zero_optimizer,
                                            overlap_communication=False,
                                            reduce_bucket_size=262144,
-                                           clip_grad_norm=1.0)
+                                           clip_grad_norm=1.0,
+                                           grad_accumulate_interval=2)

     torch_optimizer = torch.optim.Adam(torch_model.parameters(), lr=1)
@@ -115,13 +116,19 @@ def exam_zero_1_grad_acc():
         zero_output = zero_model(cur_data)

-        # torch-ddp forward
-        torch_output = torch_model(cur_data)
-        assert torch.equal(zero_output, torch_output)
-
         # zero-dp backward
-        zero_optimizer.backward(zero_output.sum().float(), sync_grad=False)
+        zero_optimizer.backward(zero_output.sum().float())

         # torch-ddp backward
-        torch_output.sum().backward()
+        if number < 1:
+            with torch_model.no_sync():
+                torch_output = torch_model(cur_data)
+                assert torch.equal(zero_output, torch_output)
+                torch_output.sum().backward()
+        else:
+            torch_output = torch_model(cur_data)
+            assert torch.equal(zero_output, torch_output)
+            torch_output.sum().backward()

         if check_flag:
             # check grad
@@ -129,8 +136,6 @@ def exam_zero_1_grad_acc():
                 # print(n, p.shape, torch.max(torch.abs(p.grad - unscale_grad)))
                 assert torch.equal(p.grad, z1p.grad)

-        zero_optimizer._sync_grad()
-
     fwd_bwd_func(0, input_data1, True)
     fwd_bwd_func(1, input_data2, False)
@@ -148,7 +153,8 @@ def run_dist(rank, world_size, port):
     colossalai.launch(config=dict(), rank=rank, world_size=world_size, port=port, host='localhost')

     exam_zero_1_grad_acc()
-    exam_zero_1_2_grad_acc()
+    # gradient accumulation is not compatible with ZeRO-2
+    # exam_zero_1_2_grad_acc()


 @pytest.mark.dist
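The reworked exam_zero_1_grad_acc above follows the standard DDP gradient-accumulation recipe: every micro-step except the last runs under no_sync() so DDP skips its gradient all-reduce, and the final backward() triggers the reduction. A self-contained, single-process sketch of that reference pattern (backend, sizes, and the two micro-batches are illustrative choices):

    import os
    import torch
    import torch.distributed as dist
    from torch.nn.parallel import DistributedDataParallel as DDP

    os.environ.setdefault('MASTER_ADDR', 'localhost')
    os.environ.setdefault('MASTER_PORT', '29505')
    dist.init_process_group('gloo', rank=0, world_size=1)    # demo-only setup

    model = DDP(torch.nn.Linear(8, 8))
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    micro_batches = [torch.randn(4, 8) for _ in range(2)]

    for i, data in enumerate(micro_batches):
        if i < len(micro_batches) - 1:
            with model.no_sync():            # accumulate locally, skip all-reduce
                model(data).sum().backward()
        else:
            model(data).sum().backward()     # gradient all-reduce fires here
    opt.step()
    dist.destroy_process_group()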


@@ -2,6 +2,7 @@ import copy

 import pytest
 import torch
+import torch.distributed as dist
 import torch.nn as nn
 from torch.nn.parallel import DistributedDataParallel as DDP
 from torch.testing import assert_close
@@ -16,8 +17,9 @@ class MlpModel(nn.Module):

     def __init__(self):
         super(MlpModel, self).__init__()
-        self.linear1 = nn.Linear(128, 256)
-        self.linear2 = nn.Linear(256, 512)
+        self.linear1 = nn.Linear(123, 253)
+        self.linear_drop = nn.Linear(253, 253)
+        self.linear2 = nn.Linear(253, 512)

     def forward(self, x):
         x = self.linear1(x)
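The new layer sizes are deliberately awkward: 123 and 253 do not divide evenly by typical world sizes, so the even-sharding code must exercise its padding path, and linear_drop is apparently never called in forward() (matching the "layer drop" items in the commit message), yielding parameters that never receive gradients. A self-contained illustration of that layer-drop case (the Tiny module is hypothetical):

    import torch
    import torch.nn as nn

    class Tiny(nn.Module):

        def __init__(self):
            super().__init__()
            self.used = nn.Linear(4, 4)
            self.dropped = nn.Linear(4, 4)    # never used in forward

        def forward(self, x):
            return self.used(x)

    m = Tiny()
    m(torch.randn(2, 4)).sum().backward()
    assert m.used.weight.grad is not None
    assert m.dropped.weight.grad is None      # the "layer drop" case the tests cover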
@@ -41,6 +43,16 @@ def loose_close(a, b, dtype: torch.dtype = torch.float32):
     assert_close(a, b, rtol=rtol, atol=atol)


+def split_ddp_grad(grad, world_size):
+    with torch.no_grad():
+        grad = grad.clone().detach().flatten()
+        padding_size = (world_size - grad.numel() % world_size) % world_size
+        if padding_size > 0:
+            grad = torch.nn.functional.pad(grad, [0, padding_size])
+        splited_grad = grad.split(grad.numel() // world_size)
+    return splited_grad
+
+
 def exam_zero_1_2():
     """
     In this test, we want to test whether zero stage 1 and 2
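split_ddp_grad reproduces, on the DDP reference side, the layout the refactored ZeRO optimizer now uses: each flattened gradient is right-padded until its element count divides the world size, then split into equal per-rank shards. A small worked example of that layout (sizes chosen for illustration only):

    import torch

    world_size = 4
    grad = torch.arange(10, dtype=torch.float32)    # 10 elements, 10 % 4 != 0
    padding_size = (world_size - grad.numel() % world_size) % world_size    # 2
    padded = torch.nn.functional.pad(grad, [0, padding_size])    # 12 elements
    shards = padded.split(padded.numel() // world_size)    # 4 shards of 3 elements
    assert len(shards) == world_size and all(s.numel() == 3 for s in shards)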
@@ -72,23 +84,21 @@ def exam_zero_1_2():
                                              initial_scale=128)

     # create data
     seed_all(2001 + local_rank)
-    input_data = torch.randn(32, 128).cuda()
+    input_data = torch.randn(32, 123).cuda()

     zero1_output = zero1_model(input_data)
     zero2_output = zero2_model(input_data)
     assert torch.equal(zero1_output, zero2_output)

     # zero-dp backward
-    zero1_optimizer.backward(zero1_output.mean().float(), sync_grad=False)
-    zero2_optimizer.backward(zero2_output.mean().float(), sync_grad=False)
+    zero1_optimizer.backward(zero1_output.mean().float())
+    zero2_optimizer.backward(zero2_output.mean().float())

-    for (n, z1p), z2p in zip(zero1_model.named_parameters(), zero2_model.parameters()):
-        if z2p.grad is not None:
-            # print(local_rank, n, z1p.shape, torch.max(z2p.grad), torch.max(torch.abs(z1p.grad - z2p.grad)))
-            assert torch.equal(z1p.grad, z2p.grad)
-    zero1_optimizer._sync_grad()
-    zero2_optimizer._sync_grad()
+    # check grad
+    z1g_list = zero1_optimizer._grad_store.get_working_grads_by_group_id(0)
+    z2g_list = zero2_optimizer._grad_store.get_working_grads_by_group_id(0)
+    for z1g, z2g in zip(z1g_list, z2g_list):
+        assert torch.equal(z1g, z2g)

     # step
     zero1_optimizer.step()
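With sync_grad and _sync_grad() removed, the tests read gradients from the optimizer's grad store rather than from param.grad. A fragment showing the two accessors used above, exactly as they appear in this diff (a private API, so signatures may change; zero_optimizer and zero_model refer to the objects set up in these tests):

    # all working gradients of parameter group 0 held on this rank
    working_grads = zero_optimizer._grad_store.get_working_grads_by_group_id(0)

    # the per-rank shards of a single parameter's gradient, keyed by id(param)
    for param in zero_model.parameters():
        shards = zero_optimizer._grad_store.get_partitioned_gradients_by_param_id(0, id(param))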
@@ -100,7 +110,7 @@ def exam_zero_1_2():

 @parameterize('dtype', [torch.float16, torch.bfloat16])
-def exam_zero_1_torch_ddp(dtype: torch.dtype):
+def exam_zero_1_torch_ddp(world_size, dtype: torch.dtype):
     """
     In this test, two pairs of model and optimizers are created.
     1. zero: use sharded optimizer and fp16 parameters
@@ -116,7 +126,7 @@ def exam_zero_1_torch_ddp(dtype: torch.dtype):
     torch_model = MlpModel().cuda()
     zero_model = copy.deepcopy(torch_model).to(dtype)

-    torch_model = DDP(torch_model.cuda(), bucket_cap_mb=0).cuda()
+    torch_model = DDP(torch_model.cuda(), static_graph=True).cuda()

     # create optimizer
     zero_optimizer = torch.optim.SGD(zero_model.parameters(), lr=1)
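The DDP reference model switches from bucket_cap_mb=0 to static_graph=True. static_graph (available in PyTorch 1.11 and later) declares that the set of used and unused parameters is fixed across iterations, which lets DDP tolerate linear_drop never producing gradients without resorting to find_unused_parameters=True:

    from torch.nn.parallel import DistributedDataParallel as DDP

    # assumes a process group is already initialized, e.g. via torchrun
    model = DDP(MlpModel().cuda(), static_graph=True)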
@@ -133,7 +143,7 @@ def exam_zero_1_torch_ddp(dtype: torch.dtype):
     seed_all(1453 + local_rank)
     # create
-    input_data = torch.rand(32, 128).cuda()
+    input_data = torch.rand(32, 123).cuda()

     # zero-dp forward
     zero_output = zero_model(input_data.to(dtype))
@@ -143,17 +153,20 @@ def exam_zero_1_torch_ddp(dtype: torch.dtype):
     loose_close(zero_output, torch_output, dtype=dtype)

     # zero-dp backward
-    zero_optimizer.backward(zero_output.mean().float(), sync_grad=False)
+    zero_optimizer.backward(zero_output.mean().float())

     # torch-ddp backward
     torch_output.mean().backward()

     # check grad
     for (n, p), z1p in zip(torch_model.named_parameters(), zero_model.parameters()):
-        loose_close(p.grad, z1p.grad, dtype=dtype)
+        if p.grad is not None:
+            zero_grad_list = zero_optimizer._grad_store.get_partitioned_gradients_by_param_id(0, id(z1p))
+            torch_grad_list = split_ddp_grad(p.grad, world_size)
+            for zero_grad, torch_grad in zip(zero_grad_list, torch_grad_list):
+                loose_close(zero_grad, torch_grad, dtype=dtype)

     # zero-dp step
-    zero_optimizer._sync_grad()
     zero_optimizer.step()

     # torch ddp step
@@ -161,14 +174,13 @@ def exam_zero_1_torch_ddp(dtype: torch.dtype):

     # check updated param
     for (n, p), z1p in zip(torch_model.named_parameters(), zero_model.parameters()):
         # print(n, torch.max(torch.abs(p.data - z1p.data)))
         loose_close(p.data, z1p.data, dtype=dtype)


 def run_dist(rank, world_size, port):
     colossalai.launch(config=dict(), rank=rank, world_size=world_size, port=port, host='localhost')

-    exam_zero_1_torch_ddp()
+    exam_zero_1_torch_ddp(world_size=world_size)
     exam_zero_1_2()