Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-09 13:00:52 +00:00)
[booster] add low level zero plugin (#3594)
* [booster] add low level zero plugin
* [booster] fix gemini plugin test
* [booster] fix precision
* [booster] add low level zero plugin test
* [test] fix booster plugin test oom
* [test] fix googlenet and inception output trans
* [test] fix diffuser clip vision model
* [test] fix torchaudio_wav2vec2_base
* [test] fix low level zero plugin test
@@ -18,6 +18,7 @@ data_vae_fn = lambda: dict(sample=torch.randn(2, 3, 32, 32))
 data_unet_fn = lambda: dict(sample=torch.randn(2, 3, 32, 32), timestep=3)
 
 identity_output = lambda x: x
+clip_vision_model_output = lambda x: dict(pooler_output=x[1])
 
 
 def data_clip_model():
@@ -65,7 +66,7 @@ model_zoo.register(name='diffusers_clip_text_model',
 model_zoo.register(name='diffusers_clip_vision_model',
                    model_fn=partial(transformers.CLIPVisionModel, config=transformers.CLIPVisionConfig()),
                    data_gen_fn=data_clip_vision,
-                   output_transform_fn=identity_output)
+                   output_transform_fn=clip_vision_model_output)
 
 model_zoo.register(name='diffusers_unet2d_model',
                    model_fn=diffusers.UNet2DModel,
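The new `clip_vision_model_output` transform assumes the CLIP vision model's raw output is indexable, with the pooled output at index 1. Below is a minimal, framework-free sketch of the model-zoo transform contract this hunk relies on; the stand-in tensor shapes are illustrative, not taken from the diff.

# Sketch: each registered model pairs with an output-transform function that
# maps the raw model output to a dict of tensors, so the test harness can
# compute a loss uniformly from the first dict value.
import torch

identity_output = lambda x: x
clip_vision_model_output = lambda x: dict(pooler_output=x[1])

# Stand-in for a model returning (last_hidden_state, pooled_output):
fake_output = (torch.randn(2, 50, 768), torch.randn(2, 768))
transformed = clip_vision_model_output(fake_output)
loss = transformed['pooler_output'].mean()    # what the harness' criterion does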
@@ -1,3 +1,5 @@
+from functools import partial
+
 import torch
 import torchaudio.models as tm
 
@@ -101,13 +103,11 @@ def tacotron_data_gen_fn():
                 mel_specgram_lengths=mel_specgram_lengths)
 
 
-model_zoo.register(
-    name='torchaudio_tacotron',
-    model_fn=lambda: tm.Tacotron2(n_mels=N_MELS),
-    data_gen_fn=tacotron_data_gen_fn,
-    output_transform_fn=lambda outputs: dict(
-        spectrogram_before=outputs[0], spectrogram_after=outputs[1], stop_tokens=outputs[2], attn_weights=outputs[3]),
-    model_attribute=ModelAttribute(has_control_flow=True))
+model_zoo.register(name='torchaudio_tacotron',
+                   model_fn=lambda: tm.Tacotron2(n_mels=N_MELS),
+                   data_gen_fn=tacotron_data_gen_fn,
+                   output_transform_fn=lambda outputs: dict(summed_output=sum(x.sum() for x in outputs)),
+                   model_attribute=ModelAttribute(has_control_flow=True))
 
 
 def wav2vec_data_gen_fn():
@@ -118,7 +118,7 @@ def wav2vec_data_gen_fn():
 
 
 model_zoo.register(name='torchaudio_wav2vec2_base',
-                   model_fn=tm.wav2vec2_base,
+                   model_fn=partial(tm.wav2vec2_base, encoder_layer_drop=0.0),
                    data_gen_fn=wav2vec_data_gen_fn,
                    output_transform_fn=transformer_output_transform_fn,
                    model_attribute=ModelAttribute(has_control_flow=True))
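The wav2vec2 fix swaps the bare constructor for a `functools.partial` that pins `encoder_layer_drop=0.0`. LayerDrop randomly skips encoder layers during training, so zeroing it is presumably what makes the test reproducible. A short sketch of the pattern; `encoder_layer_drop` is a real parameter of `tm.wav2vec2_base`, the fake waveform is an assumption for illustration.

# Sketch: partial() turns a constructor plus pinned kwargs into the
# zero-argument factory the model zoo expects. encoder_layer_drop=0.0
# disables stochastic LayerDrop, so training-mode forward passes over the
# same input no longer vary from run to run.
from functools import partial

import torch
import torchaudio.models as tm

factory = partial(tm.wav2vec2_base, encoder_layer_drop=0.0)
model = factory().eval()
waveform = torch.randn(1, 16000)    # one second of fake 16 kHz audio
with torch.no_grad():
    features, _ = model(waveform)   # Wav2Vec2Model returns (features, lengths)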
@@ -36,12 +36,12 @@ def swin_s():
 
 
 # special output transform fn
-google_net_output_transform_fn = lambda x: dict(output=x.logits) if isinstance(x, torchvision.models.GoogLeNetOutputs
-                                                                              ) else dict(output=x)
+google_net_output_transform_fn = lambda x: dict(output=sum(x)) if isinstance(x, torchvision.models.GoogLeNetOutputs
+                                                                             ) else dict(output=x)
 swin_s_output_output_transform_fn = lambda x: {f'output{idx}': val
                                                for idx, val in enumerate(x)} if isinstance(x, tuple) else dict(output=x)
-inception_v3_output_transform_fn = lambda x: dict(output=x.logits) if isinstance(x, torchvision.models.InceptionOutputs
-                                                                                ) else dict(output=x)
+inception_v3_output_transform_fn = lambda x: dict(output=sum(x)) if isinstance(x, torchvision.models.InceptionOutputs
+                                                                               ) else dict(output=x)
 
 model_zoo.register(name='torchvision_alexnet',
                    model_fn=tm.alexnet,
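The googlenet and inception fix replaces `x.logits` with `sum(x)`. Both `GoogLeNetOutputs` and `InceptionOutputs` are NamedTuples whose fields are the main logits plus auxiliary-classifier logits; summing the fields makes the test loss touch every head, so no parameter is left without a gradient, which is the usual failure mode for distributed wrappers. A hedged sketch using torchvision directly (model hyperparameters here are arbitrary example values):

# Sketch: iterating a NamedTuple yields its fields, so sum(x) adds the main
# and auxiliary logits into one tensor. A loss on that sum back-propagates
# through the aux classifiers too, whereas x.logits alone would not.
import torch
import torchvision

model = torchvision.models.googlenet(num_classes=10, aux_logits=True, init_weights=True)
model.train()    # aux heads are only active in train mode
out = model(torch.randn(2, 3, 224, 224))
assert isinstance(out, torchvision.models.GoogLeNetOutputs)
loss = sum(out).mean()    # mirrors google_net_output_transform_fn
loss.backward()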
@@ -1,4 +1,5 @@
 from contextlib import nullcontext
+from typing import Optional
 
 import torch
 import torch.distributed as dist
@@ -10,11 +11,53 @@ from colossalai.fx import is_compatible_with_meta
 from colossalai.nn.optimizer import HybridAdam
 from colossalai.tensor.colo_parameter import ColoParameter
 from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
+from colossalai.utils.model.experimental import LazyInitContext
 from colossalai.zero import ColoInitContext
 from tests.kit.model_zoo import model_zoo
 
 
-@parameterize('init_method', ['lazy', 'none', 'colo'])
+def run_fn(init_method, model_fn, data_gen_fn, output_transform_fn) -> Optional[str]:
+    try:
+        if init_method == 'colo':
+            ctx = ColoInitContext()
+        elif init_method == 'lazy':
+            ctx = LazyInitContext()
+        else:
+            ctx = nullcontext()
+        plugin = GeminiPlugin(placement_policy='cuda', strict_ddp_mode=True, max_norm=1.0, initial_scale=2**5)
+        booster = Booster(plugin=plugin)
+        with ctx:
+            model = model_fn()
+        optimizer = HybridAdam(model.parameters(), lr=1e-3)
+        criterion = lambda x: x.mean()
+        data = data_gen_fn()
+
+        data = {
+            k: v.to('cuda') if torch.is_tensor(v) or 'Tensor' in v.__class__.__name__ else v for k, v in data.items()
+        }
+
+        model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)
+
+        for n, p in model.named_parameters():
+            assert isinstance(p, ColoParameter), f'{n} is not a ColoParameter'
+
+        output = model(**data)
+        output = output_transform_fn(output)
+        output_key = list(output.keys())[0]
+        loss = criterion(output[output_key])
+
+        booster.backward(loss, optimizer)
+        optimizer.step()
+
+    except Exception as e:
+        return repr(e)
+
+
+# TODO(ver217): CI does not support lazy now
+# @parameterize('init_method', ['lazy', 'none', 'colo'])
+@parameterize('init_method', ['none'])
 def check_gemini_plugin(init_method: str = 'none', early_stop: bool = True):
     """check gemini plugin over model zoo
@@ -25,7 +68,6 @@ def check_gemini_plugin(init_method: str = 'none', early_stop: bool = True):
     if not is_support_meta and init_method == 'lazy':
         return
 
-    from colossalai.utils.model.experimental import LazyInitContext
     passed_models = []
    failed_info = {}    # (model_name, error) pair
@@ -58,48 +100,16 @@ def check_gemini_plugin(init_method: str = 'none', early_stop: bool = True):
         ]:
             continue
 
-        try:
-            if init_method == 'colo':
-                ctx = ColoInitContext()
-            elif init_method == 'lazy':
-                ctx = LazyInitContext()
-            else:
-                ctx = nullcontext()
-            plugin = GeminiPlugin(placement_policy='cuda', strict_ddp_mode=True, max_norm=1.0, initial_scale=2**5)
-            booster = Booster(plugin=plugin)
-            with ctx:
-                model = model_fn()
-            optimizer = HybridAdam(model.parameters(), lr=1e-3)
-            criterion = lambda x: x.mean()
-            data = data_gen_fn()
-
-            data = {
-                k: v.to('cuda') if torch.is_tensor(v) or 'Tensor' in v.__class__.__name__ else v
-                for k, v in data.items()
-            }
-
-            model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)
-
-            for n, p in model.named_parameters():
-                assert isinstance(p, ColoParameter), f'{n} is not a ColoParameter'
-
-            output = model(**data)
-            output = output_transform_fn(output)
-            output_key = list(output.keys())[0]
-            loss = criterion(output[output_key])
-
-            booster.backward(loss, optimizer)
-            optimizer.step()
-            passed_models.append(name)
-
-            del booster, plugin, model, optimizer, criterion, data, output, loss
-        except Exception as e:
-            failed_info[name] = e
-            if early_stop:
-                raise e
+        err = run_fn(init_method, model_fn, data_gen_fn, output_transform_fn)
+        torch.cuda.empty_cache()
+
+        if err is None:
+            passed_models.append(name)
+        else:
+            failed_info[name] = err
+            if early_stop:
+                break
 
     if dist.get_rank() == 0:
         print(f'Init method: {init_method}')
         print(f'Passed models({len(passed_models)}): {passed_models}\n\n')
@@ -140,7 +150,7 @@ def run_dist(rank, world_size, port, early_stop: bool = True):
 
 @rerun_if_address_is_in_use()
 def test_gemini_plugin(early_stop: bool = True):
-    spawn(run_dist, 2, early_stop=early_stop)
+    spawn(run_dist, 4, early_stop=early_stop)
 
 
 if __name__ == '__main__':
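The restructuring above isolates each model trial in `run_fn`, which returns the error as `repr(e)` instead of raising, so the sweep loop can call `torch.cuda.empty_cache()` after every trial (the OOM fix named in the commit message) and decide locally whether to stop. A framework-free sketch of that pattern; the names below are illustrative, not ColossalAI APIs.

# Sketch: capture the failure instead of raising, so the caller stays in
# control of cleanup and early stopping.
from typing import Callable, Optional


def run_trial(fn: Callable[[], None]) -> Optional[str]:
    try:
        fn()
        return None
    except Exception as e:
        return repr(e)


def sweep(trials, early_stop: bool = True):
    passed, failed = [], {}
    for name, fn in trials:
        err = run_trial(fn)
        # In the real test, torch.cuda.empty_cache() runs here so a failed
        # trial's allocations are released before the next model loads.
        if err is None:
            passed.append(name)
        else:
            failed[name] = err
            if early_stop:
                break
    return passed, failed


print(sweep([('ok', lambda: None), ('boom', lambda: 1 / 0)], early_stop=False))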
122  tests/test_booster/test_plugin/test_low_level_zero_plugin.py  Normal file
@@ -0,0 +1,122 @@
+from typing import Optional
+
+import torch
+import torch.distributed as dist
+
+import colossalai
+from colossalai.booster import Booster
+from colossalai.booster.plugin import LowLevelZeroPlugin
+from colossalai.nn.optimizer import HybridAdam
+from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
+from tests.kit.model_zoo import model_zoo
+
+# These models are not compatible with AMP
+_AMP_ERR_MODELS = ['timm_convit', 'dlrm', 'deepfm_interactionarch', 'deepfm_simpledeepfmnn']
+# These models have no parameters
+_LOW_LEVEL_ZERO_ERR_MODELS = ['dlrm_interactionarch']
+# These models will get stuck
+_STUCK_MODELS = [
+    'diffusers_vq_model', 'transformers_albert', 'transformers_albert_for_pretraining', 'transformers_bert',
+    'transformers_bert_for_pretraining', 'transformers_gpt_double_heads'
+]
+
+
+def run_fn(stage, model_fn, data_gen_fn, output_transform_fn) -> Optional[str]:
+    try:
+        plugin = LowLevelZeroPlugin(stage=stage, max_norm=1.0, initial_scale=2**5)
+        booster = Booster(plugin=plugin)
+        model = model_fn()
+        optimizer = HybridAdam(model.parameters(), lr=1e-3)
+        criterion = lambda x: x.mean()
+        data = data_gen_fn()
+
+        data = {
+            k: v.to('cuda') if torch.is_tensor(v) or 'Tensor' in v.__class__.__name__ else v for k, v in data.items()
+        }
+
+        model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)
+
+        output = model(**data)
+        output = output_transform_fn(output)
+        output_key = list(output.keys())[0]
+        loss = criterion(output[output_key])
+
+        booster.backward(loss, optimizer)
+        optimizer.step()
+
+    except Exception as e:
+        return repr(e)
+
+
+@parameterize('stage', [2])
+def check_low_level_zero_plugin(stage: int, early_stop: bool = True):
+    """check low level zero plugin over model zoo
+
+    Args:
+        stage (int): stage of low level zero plugin
+        early_stop (bool, optional): Whether to stop when getting the first error. Defaults to True.
+    """
+    passed_models = []
+    failed_info = {}    # (model_name, error) pair
+    ignore_models = _AMP_ERR_MODELS + _LOW_LEVEL_ZERO_ERR_MODELS + _STUCK_MODELS
+    skipped_models = []
+
+    for name, (model_fn, data_gen_fn, output_transform_fn, _) in model_zoo.items():
+        # FIXME(ver217): fix these models
+        if name in ignore_models:
+            skipped_models.append(name)
+            continue
+        err = run_fn(stage, model_fn, data_gen_fn, output_transform_fn)
+        torch.cuda.empty_cache()
+
+        if err is None:
+            passed_models.append(name)
+        else:
+            failed_info[name] = err
+            if early_stop:
+                break
+
+    if dist.get_rank() == 0:
+        print(f'Passed models({len(passed_models)}): {passed_models}\n\n')
+        print(f'Failed models({len(failed_info)}): {list(failed_info.keys())}\n\n')
+        print(f'Skipped models({len(skipped_models)}): {skipped_models}\n\n')
+    assert len(failed_info) == 0, '\n'.join([f'{k}: {v}' for k, v in failed_info.items()])
+
+
+def check_dataloader_sharding():
+    plugin = LowLevelZeroPlugin()
+
+    # create a custom dataset with 0 to 10
+    dataset = torch.utils.data.TensorDataset(torch.arange(0, 10))
+    train_dataloader = plugin.prepare_train_dataloader(dataset, batch_size=2)
+
+    # get the first batch of data
+    batch = next(iter(train_dataloader))[0].cuda()
+    is_rank_0 = dist.get_rank() == 0
+
+    if is_rank_0:
+        batch_to_compare = batch.clone()
+    else:
+        batch_to_compare = batch
+    # pass the rank 1 value to rank 0
+    dist.broadcast(batch_to_compare, src=1)
+
+    # compare on rank 0
+    if is_rank_0:
+        assert not torch.equal(batch,
+                               batch_to_compare), 'Same number was found across ranks but expected it to be different'
+
+
+def run_dist(rank, world_size, port, early_stop: bool = True):
+    # init dist env
+    colossalai.launch(config=dict(), rank=rank, world_size=world_size, port=port, host='localhost')
+    check_low_level_zero_plugin(early_stop=early_stop)
+
+
+@rerun_if_address_is_in_use()
+def test_low_level_zero_plugin(early_stop: bool = True):
+    spawn(run_dist, 2, early_stop=early_stop)
+
+
+if __name__ == '__main__':
+    test_low_level_zero_plugin(early_stop=False)
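`check_dataloader_sharding` verifies that ranks see different batches: rank 0 keeps a clone of its own batch, every rank then receives rank 1's batch in place via `dist.broadcast`, and rank 0 asserts the two differ. A standalone sketch of the same broadcast trick, runnable on CPU with the gloo backend; the port and world size are arbitrary example choices.

# Sketch: broadcast overwrites the tensor on every rank with the src rank's
# values, so comparing against a pre-broadcast clone on rank 0 reveals
# whether the two ranks held different data.
import os

import torch
import torch.distributed as dist
import torch.multiprocessing as mp


def worker(rank: int, world_size: int):
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29501'
    dist.init_process_group('gloo', rank=rank, world_size=world_size)

    batch = torch.arange(rank * 2, rank * 2 + 2)    # rank 0: [0, 1]; rank 1: [2, 3]
    mine = batch.clone()
    dist.broadcast(batch, src=1)    # batch now holds rank 1's values everywhere

    if rank == 0:
        assert not torch.equal(mine, batch), 'expected different shards across ranks'
    dist.destroy_process_group()


if __name__ == '__main__':
    mp.spawn(worker, args=(2,), nprocs=2)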
@@ -11,36 +11,37 @@ from colossalai.testing import rerun_if_address_is_in_use, spawn
 from tests.kit.model_zoo import model_zoo
 
 
-def check_torch_ddp_plugin():
+def run_fn(model_fn, data_gen_fn, output_transform_fn):
     plugin = TorchDDPPlugin()
     booster = Booster(plugin=plugin)
+    model = model_fn()
+    optimizer = SGD(model.parameters(), lr=1e-3)
+    criterion = lambda x: x.mean()
+    data = data_gen_fn()
+
+    data = {k: v.to('cuda') if torch.is_tensor(v) or 'Tensor' in v.__class__.__name__ else v for k, v in data.items()}
+
+    model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)
+
+    assert isinstance(model.module, DDP)
+    assert isinstance(optimizer, OptimizerWrapper)
+
+    output = model(**data)
+    output = output_transform_fn(output)
+    output_key = list(output.keys())[0]
+    loss = criterion(output[output_key])
+
+    booster.backward(loss, optimizer)
+    optimizer.clip_grad_by_norm(1.0)
+    optimizer.step()
+
+
+def check_torch_ddp_plugin():
     for name, (model_fn, data_gen_fn, output_transform_fn, _) in model_zoo.items():
         if name == 'dlrm_interactionarch':
             continue
 
-        model = model_fn()
-        optimizer = SGD(model.parameters(), lr=1e-3)
-        criterion = lambda x: x.mean()
-        data = data_gen_fn()
-
-        data = {
-            k: v.to('cuda') if torch.is_tensor(v) or 'Tensor' in v.__class__.__name__ else v for k, v in data.items()
-        }
-
-        model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)
-
-        assert isinstance(model.module, DDP)
-        assert isinstance(optimizer, OptimizerWrapper)
-
-        output = model(**data)
-        output = output_transform_fn(output)
-        output_key = list(output.keys())[0]
-        loss = criterion(output[output_key])
-
-        booster.backward(loss, optimizer)
-        optimizer.clip_grad_by_norm(1.0)
-        optimizer.step()
+        run_fn(model_fn, data_gen_fn, output_transform_fn)
+        torch.cuda.empty_cache()
 
 
 def check_dataloader_sharding():
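The two assertions in this `run_fn` pin down what boosting with `TorchDDPPlugin` produces: a `DistributedDataParallel` wrapper whose `.module` is the original model, and an `OptimizerWrapper` exposing `clip_grad_by_norm`. A torch-only sketch of those invariants (single process, gloo backend, CPU); `clip_grad_norm_` stands in here for ColossalAI's `clip_grad_by_norm`, which is not a torch API.

# Sketch: DDP keeps the unwrapped model reachable as .module, and gradient
# clipping runs between backward and step, which is the order the test uses.
import os

import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP


def main():
    os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
    os.environ.setdefault('MASTER_PORT', '29502')
    dist.init_process_group('gloo', rank=0, world_size=1)

    inner = torch.nn.Linear(4, 4)
    model = DDP(inner)    # roughly what TorchDDPPlugin does under the hood
    assert isinstance(model, DDP)
    assert model.module is inner    # .module recovers the unwrapped model

    loss = model(torch.randn(2, 4)).mean()
    loss.backward()
    # stand-in for optimizer.clip_grad_by_norm(1.0) on the OptimizerWrapper:
    torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
    dist.destroy_process_group()


main()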