Mirror of https://github.com/hpcaitech/ColossalAI.git
[bf16] add bf16 support (#3882)
* [bf16] add bf16 support for fused adam (#3844)
  * [bf16] fused adam kernel support bf16
  * [test] update fused adam kernel test
  * [test] update fused adam test
* [bf16] cpu adam and hybrid adam optimizers support bf16 (#3860)
* [bf16] implement mixed precision mixin and add bf16 support for low level zero (#3869)
  * [bf16] add mixed precision mixin
  * [bf16] low level zero optim support bf16
  * [test] update low level zero test
  * [test] fix low level zero grad acc test
* [bf16] add bf16 support for gemini (#3872)
  * [bf16] gemini support bf16
  * [test] update gemini bf16 test
  * [doc] update gemini docstring
* [bf16] add bf16 support for plugins (#3877)
* [bf16] add bf16 support for legacy zero (#3879)
  * [zero] init context support bf16
  * [zero] legacy zero support bf16
  * [test] add zero bf16 test
  * [doc] add bf16 related docstring for legacy zero
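For orientation, the Gemini path these changes exercise looks roughly like the sketch below. It is pieced together from the calls that appear in the diff; the import paths and the get_model() helper are assumptions for illustration, not part of this commit.

    import torch

    # NOTE: import paths are assumptions and may differ across ColossalAI versions
    from colossalai.nn.optimizer import HybridAdam
    from colossalai.utils import get_current_device
    from colossalai.zero import ZeroDDP, ZeroOptimizer
    from colossalai.zero.gemini import GeminiManager
    from colossalai.zero.gemini.chunk import init_chunk_manager

    model = get_model()    # hypothetical helper returning a torch.nn.Module

    # search a chunk configuration for the model and build the Gemini manager
    chunk_manager = init_chunk_manager(model=model, init_device=get_current_device(), search_range_mb=1)
    gemini_manager = GeminiManager('cuda', chunk_manager)

    # the new `mixed_precision` argument selects fp16 or bf16 for the wrapped model
    model = ZeroDDP(model, gemini_manager, pin_memory=True, mixed_precision=torch.bfloat16)

    # optimizer wrapping is unchanged; HybridAdam and ZeroOptimizer now also accept bf16 grads
    optimizer = HybridAdam(model.parameters(), lr=1e-3)
    zero_optim = ZeroOptimizer(optimizer, model, initial_scale=128)

The tests below run the same flow parameterized over torch.half and torch.bfloat16, relaxing the comparison tolerances for bf16.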
@@ -21,23 +21,40 @@ TEST_MODELS = ['gpt2']
 # these models are too small, all parameters in these models are compacted into one chunk
 EXAMPLE_MODELS = ['albert', 'beit', 'bert', 'hanging_param_model', 'nested_model', 'repeated_computed_layers']
 
+# bfloat16 cannot represent them exactly
+BF16_IGNORED_KEYS = [
+    'albert.embeddings.word_embeddings.weight',
+    'albert.embeddings.position_embeddings.weight',
+    'masked_bias',
+]
 
-def check_param(model: ZeroDDP, torch_model: torch.nn.Module):
-    zero_dict = model.state_dict(only_rank_0=False)
+
+def check_param(model: ZeroDDP, torch_model: torch.nn.Module, dtype: torch.dtype):
+    zero_dict = model.state_dict(only_rank_0=False, dtype=dtype)
     torch_dict = torch_model.state_dict()
 
     for key, value in torch_dict.items():
         # key is 'module.model.PARAMETER', so we truncate it
         key = key[7:]
         assert key in zero_dict, "{} not in ZeRO dictionary.".format(key)
-        temp_zero_value = zero_dict[key].to(device=value.device, dtype=value.dtype)
+        temp_zero_value = zero_dict[key].to(device=value.device)
+        if dtype is torch.bfloat16 and any(k in key for k in BF16_IGNORED_KEYS):
+            continue
+        rtol, atol = 1e-3, 4e-3
+        if dtype is torch.bfloat16:
+            rtol, atol = 4e-3, 8e-3
         # debug_print([0], "max range: ", key, torch.max(torch.abs(value - temp_zero_value)))
-        assert_close(value, temp_zero_value, rtol=1e-3, atol=4e-3)
+        assert_close(value.float(),
+                     temp_zero_value.float(),
+                     rtol=rtol,
+                     atol=atol,
+                     msg=lambda s: s + f'\n{key}\n{temp_zero_value.dtype}')
 
 
 @parameterize('placement_policy', ['cuda', 'cpu', 'auto', 'const'])
 @parameterize('model_name', TEST_MODELS)
-def exam_model_step(placement_policy, model_name: str):
+@parameterize('mixed_precision', [torch.half, torch.bfloat16])
+def exam_model_step(placement_policy, model_name: str, mixed_precision: torch.dtype):
     set_seed(42)
     get_components_func = non_distributed_component_funcs.get_callable(model_name)
     model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
@@ -65,7 +82,7 @@ def exam_model_step(placement_policy, model_name: str):
         init_device = None
     chunk_manager = ChunkManager(config_dict, init_device=init_device)
     gemini_manager = GeminiManager(placement_policy, chunk_manager)
-    model = ZeroDDP(model, gemini_manager, pin_memory=True)
+    model = ZeroDDP(model, gemini_manager, pin_memory=True, mixed_precision=mixed_precision)
 
     optimizer = HybridAdam(model.parameters(), lr=1e-3)
     zero_optim = ZeroOptimizer(optimizer, model, initial_scale=128)
@@ -74,6 +91,7 @@ def exam_model_step(placement_policy, model_name: str):
     torch_model.eval()
 
     set_seed(dist.get_rank() * 3 + 128)
+    rtol, atol = 1e-4, 1e-5
     for i, (input_ids, label) in enumerate(train_dataloader):
         if i > 2:
             break
@@ -83,17 +101,18 @@ def exam_model_step(placement_policy, model_name: str):
 
         torch_loss = run_fwd_bwd(torch_model, input_ids, label, criterion, torch_optim)
         loss = run_fwd_bwd(model, input_ids, label, criterion, zero_optim)
-        assert_close(torch_loss, loss)
+        assert_close(torch_loss, loss, rtol=rtol, atol=atol)
 
         zero_optim.step()
         torch_optim.step()
 
-        check_param(model, torch_model)
+        check_param(model, torch_model, mixed_precision)
 
 
 @parameterize('placement_policy', ['cuda', 'cpu', 'auto', 'const'])
 @parameterize('model_name', EXAMPLE_MODELS)
-def exam_tiny_example(placement_policy, model_name: str):
+@parameterize('mixed_precision', [torch.half, torch.bfloat16])
+def exam_tiny_example(placement_policy, model_name: str, mixed_precision: torch.dtype):
     set_seed(2008)
     get_components_func = non_distributed_component_funcs.get_callable(model_name)
     model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
@@ -113,7 +132,7 @@ def exam_tiny_example(placement_policy, model_name: str):
 
     chunk_manager = init_chunk_manager(model=model, init_device=get_current_device(), search_range_mb=1)
     gemini_manager = GeminiManager(placement_policy, chunk_manager)
-    model = ZeroDDP(model, gemini_manager, pin_memory=True)
+    model = ZeroDDP(model, gemini_manager, pin_memory=True, mixed_precision=mixed_precision)
     optimizer = HybridAdam(model.parameters(), lr=1e-3)
     zero_optim = ZeroOptimizer(optimizer, model, initial_scale=2)
 
@@ -121,6 +140,9 @@ def exam_tiny_example(placement_policy, model_name: str):
     torch_model.eval()
 
     set_seed(dist.get_rank() * 3 + 128)
+    rtol, atol = 1.5e-6, 2e-5
+    if mixed_precision is torch.bfloat16:
+        rtol, atol = 2e-3, 2e-3
     for i, (input_ids, label) in enumerate(train_dataloader):
         if i > 2:
             break
@@ -133,12 +155,12 @@ def exam_tiny_example(placement_policy, model_name: str):
 
         torch_loss = run_fwd_bwd(torch_model, input_ids, label, criterion, torch_optim)
         loss = run_fwd_bwd(model, input_ids, label, criterion, zero_optim)
-        assert_close(torch_loss, loss, rtol=1.5e-6, atol=2e-5)    # atol should be 2e-5 for torch lower than 1.12
+        assert_close(torch_loss, loss, rtol=rtol, atol=atol)    # atol should be 2e-5 for torch lower than 1.12
 
         zero_optim.step()
         torch_optim.step()
 
-        check_param(model, torch_model)
+        check_param(model, torch_model, mixed_precision)
 
 
 def run_dist(rank, world_size, port):
@@ -16,7 +16,11 @@ from colossalai.zero.low_level._utils import has_inf_or_nan
 from tests.components_to_test.registry import non_distributed_component_funcs
 
 
-def run_dist(rank, world_size, port, parallel_config):
+def run_dist(rank, world_size, port, parallel_config, bf16):
+    is_mp_config = parallel_config == MP_PARALLEL_CONFIG
+    is_zero_config = parallel_config == ZERO_PARALLEL_CONFIG
+    if bf16:
+        parallel_config['zero']['model_config']['bf16'] = True
     colossalai.launch(config=parallel_config,
                       rank=rank,
                       world_size=world_size,
@@ -30,7 +34,8 @@ def run_dist(rank, world_size, port, parallel_config):
     model_builder, train_dataloader, _, optimizer_class, criterion = get_components_func()
     with ZeroInitContext(target_device=torch.cuda.current_device(),
                          shard_strategy=gpc.config.zero.model_config.shard_strategy,
-                         shard_param=True):
+                         shard_param=True,
+                         bf16=bf16):
         colo_model = model_builder(checkpoint=True)
 
     colo_optimizer = optimizer_class(colo_model.parameters(), lr=1e-3)
@@ -38,7 +43,8 @@ def run_dist(rank, world_size, port, parallel_config):
                                                        optimizer=colo_optimizer,
                                                        criterion=criterion,
                                                        train_dataloader=train_dataloader)
-    torch_model = model_builder(checkpoint=True).half()
+    dtype = torch.bfloat16 if bf16 else torch.float16
+    torch_model = model_builder(checkpoint=True).to(dtype)
     col_model_deepcopy(engine.model, torch_model)
     torch_model = torch_model.cuda().float()
 
@@ -80,9 +86,9 @@ def run_dist(rank, world_size, port, parallel_config):
         torch_optimizer.step()
         i += 1
 
-    if parallel_config == MP_PARALLEL_CONFIG:
+    if is_mp_config:
         check_params(torch_model, colo_model, loose=True)
-    elif parallel_config == ZERO_PARALLEL_CONFIG:
+    elif is_zero_config:
         check_sharded_model_params(torch_model, colo_model, loose=True)
 
 
@@ -97,9 +103,10 @@ def test_mp_engine(world_size):
 
 @pytest.mark.dist
 @pytest.mark.parametrize("world_size", [1, 2])
+@pytest.mark.parametrize("bf16", [True, False])
 @rerun_if_address_is_in_use()
-def test_zero_engine(world_size):
-    spawn(run_dist, world_size, parallel_config=ZERO_PARALLEL_CONFIG)
+def test_zero_engine(world_size, bf16):
+    spawn(run_dist, world_size, parallel_config=ZERO_PARALLEL_CONFIG, bf16=bf16)
 
 
 if __name__ == '__main__':
@@ -82,7 +82,6 @@ def exam_zero_1_2_grad_acc():
 
 def exam_zero_1_grad_acc():
     local_rank = torch.distributed.get_rank()
-    grad_scale = 32
     seed_all(2008)
 
     # create models
@@ -101,7 +100,6 @@ def exam_zero_1_grad_acc():
     # level 1 and 2 will produce exactly the same results
     zero_optimizer = LowLevelZeroOptimizer(zero_optimizer,
                                            overlap_communication=False,
-                                           initial_scale=grad_scale,
                                            reduce_bucket_size=262144,
                                            clip_grad_norm=1.0)
 
@@ -128,9 +126,8 @@ def exam_zero_1_grad_acc():
        if check_flag:
            # check grad
            for (n, p), z1p in zip(torch_model.named_parameters(), zero_model.parameters()):
-               unscale_grad = z1p.grad / grad_scale
                # print(n, p.shape, torch.max(torch.abs(p.grad - unscale_grad)))
-               assert torch.equal(p.grad, unscale_grad)
+               assert torch.equal(p.grad, z1p.grad)
 
        zero_optimizer._sync_grad()
 
@@ -7,7 +7,7 @@ from torch.nn.parallel import DistributedDataParallel as DDP
 from torch.testing import assert_close
 
 import colossalai
-from colossalai.testing import rerun_if_address_is_in_use, spawn
+from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
 from colossalai.testing.random import seed_all
 from colossalai.zero import LowLevelZeroOptimizer
 
@@ -25,15 +25,18 @@ class MlpModel(nn.Module):
         return x
 
 
-def half_close(a, b, loose=False):
+def loose_close(a, b, dtype: torch.dtype = torch.float32):
     rtol = None
     atol = None
-    if loose:
+    if dtype is torch.float16:
         rtol = 5e-2
         atol = 5e-4
+    elif dtype is torch.bfloat16:
+        rtol = 4e-3
+        atol = 4e-3
 
-    a = a.detach().half()
-    b = b.detach().half()
+    a = a.detach().to(dtype)
+    b = b.detach().to(dtype)
 
     assert_close(a, b, rtol=rtol, atol=atol)
 
@@ -96,7 +99,8 @@ def exam_zero_1_2():
         assert torch.equal(z1p.data, z2p.data)
 
 
-def exam_zero_1_torch_ddp():
+@parameterize('dtype', [torch.float16, torch.bfloat16])
+def exam_zero_1_torch_ddp(dtype: torch.dtype):
     """
     In this test, two pairs of model and optimizers are created.
     1. zero: use sharded optimizer and fp16 parameters
@@ -109,15 +113,10 @@ def exam_zero_1_torch_ddp():
     seed_all(1453)
 
     # create models
-    zero_model = MlpModel()
-    torch_model = copy.deepcopy(zero_model)
+    torch_model = MlpModel().cuda()
+    zero_model = copy.deepcopy(torch_model).to(dtype)
 
-    zero_model = zero_model.cuda().half()
-    torch_model = DDP(torch_model.cuda(), bucket_cap_mb=0)
-    torch_model = torch_model.cuda()
-
-    # for (n, p), z1p in zip(torch_model.named_parameters(), zero_model.parameters()):
-    #     half_close(p.data, z1p.data)
+    torch_model = DDP(torch_model.cuda(), bucket_cap_mb=0).cuda()
 
     # create optimizer
     zero_optimizer = torch.optim.SGD(zero_model.parameters(), lr=1)
@@ -137,11 +136,11 @@ def exam_zero_1_torch_ddp():
         input_data = torch.rand(32, 128).cuda()
 
         # zero-dp forward
-        zero_output = zero_model(input_data.half())
+        zero_output = zero_model(input_data.to(dtype))
 
         # torch-ddp forward
         torch_output = torch_model(input_data)
-        half_close(zero_output, torch_output, loose=True)
+        loose_close(zero_output, torch_output, dtype=dtype)
 
         # zero-dp backward
         zero_optimizer.backward(zero_output.mean().float(), sync_grad=False)
@@ -151,7 +150,7 @@ def exam_zero_1_torch_ddp():
 
         # check grad
         for (n, p), z1p in zip(torch_model.named_parameters(), zero_model.parameters()):
-            half_close(p.grad, z1p.grad, loose=True)
+            loose_close(p.grad, z1p.grad, dtype=dtype)
 
         # zero-dp step
         zero_optimizer._sync_grad()
@@ -163,7 +162,7 @@ def exam_zero_1_torch_ddp():
         # check updated param
         for (n, p), z1p in zip(torch_model.named_parameters(), zero_model.parameters()):
             # print(n, torch.max(torch.abs(p.data - z1p.data)))
-            half_close(p.data, z1p.data, loose=True)
+            loose_close(p.data, z1p.data, dtype=dtype)
 
 
 def run_dist(rank, world_size, port):
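As a closing note, the low-level ZeRO flow touched by the last file reduces to roughly the loop below. This is a sketch based on the calls in the test above; MlpModel is the toy model defined in that file, the hyperparameters are illustrative rather than prescriptive, and a distributed context (colossalai.launch, as in run_dist) is assumed to be initialized already.

    import copy

    import torch

    from colossalai.zero import LowLevelZeroOptimizer    # same import as in the test above

    dtype = torch.bfloat16    # or torch.float16

    torch_model = MlpModel().cuda()                       # MlpModel: toy MLP defined in the test file
    zero_model = copy.deepcopy(torch_model).to(dtype)

    zero_optimizer = torch.optim.SGD(zero_model.parameters(), lr=1)
    zero_optimizer = LowLevelZeroOptimizer(zero_optimizer,
                                           overlap_communication=False,
                                           initial_scale=128,
                                           reduce_bucket_size=262144)

    for _ in range(3):
        input_data = torch.rand(32, 128).cuda()
        zero_output = zero_model(input_data.to(dtype))
        # backward through the wrapper; any gradient (un)scaling is handled internally
        zero_optimizer.backward(zero_output.mean().float(), sync_grad=False)
        zero_optimizer._sync_grad()
        zero_optimizer.step()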