Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-01 09:07:51 +00:00)
[refactor] remove old zero code (#517)
@@ -7,9 +7,7 @@ import colossalai
import torch
from functools import partial
import torch.multiprocessing as mp
import pytest


def run_tensor_move(rank):
@@ -2,11 +2,9 @@
# -*- encoding: utf-8 -*-

import copy
import operator as op
from functools import partial, reduce
from typing import List

import colossalai
from colossalai.zero.sharded_model.sharded_model_v2 import ShardedModelV2
import pytest
import torch
import torch.distributed as dist
@@ -14,10 +12,11 @@ import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.logging import disable_existing_loggers
from colossalai.utils import checkpoint, clip_grad_norm_fp32, free_port
from colossalai.zero.sharded_model import ShardedModel
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.utils import clip_grad_norm_
from colossalai.testing import parameterize
from colossalai.zero.shard_utils.tensor_shard_strategy import TensorShardStrategy
from functools import partial


def checkpoint_wrapper(module, enable=True):
@@ -97,41 +96,9 @@ def check_params(model, zero_model, loose=False):
        assert allclose(p, zero_p, loose=loose)


@parameterize('checkpoint', [False, True])
@parameterize('fp16', [False, True])
@parameterize('offload', [False, True])
@parameterize('norm_type', [1.0, 2.0, float('inf')])
def check_config(checkpoint=False, fp16=False, offload=False, norm_type=2.0):
    model = Net(checkpoint=checkpoint).cuda()
    zero_model = copy.deepcopy(model)
    ddp_model = DDP(model)

    offload_config = {}
    if offload:
        offload_config['device'] = 'cpu'
        zero_model = zero_model.cpu()
    zero_model = ShardedModel(zero_model, mixed_precision=fp16, offload_config=offload_config)

    optimizer = torch.optim.Adam(ddp_model.parameters(), lr=1e-3)
    zero_optimizer = torch.optim.Adam(zero_model.parameters(), lr=1e-3)
    for _ in range(5):
        x = torch.rand(2, 5).cuda()
        run_step(ddp_model, optimizer, x, enable_autocast=fp16, norm_type=norm_type)
        run_step(zero_model, zero_optimizer, x, enable_autocast=fp16, norm_type=norm_type)
        check_grads(ddp_model, zero_model)
        check_params(ddp_model, zero_model)
    for _ in range(5):
        x = torch.rand(2, 5).cuda()
        run_step(ddp_model, optimizer, x, enable_autocast=False, norm_type=norm_type)
        run_step(zero_model, zero_optimizer, x, enable_autocast=False, norm_type=norm_type)
        check_grads(ddp_model, zero_model, loose=True)
        check_params(ddp_model, zero_model, loose=True)


def run_dist(rank, world_size, port):
    disable_existing_loggers()
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    check_config()


@pytest.mark.dist
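The `run_step` helper called by `check_config` is defined elsewhere in this test file and does not appear in the hunk; the following is a minimal sketch inferred only from the call sites (the loss function and clipping threshold are assumptions, and the real helper may use `clip_grad_norm_fp32` for the sharded model):

# Hypothetical reconstruction of run_step, inferred from its call sites above; illustrative only.
import torch
from torch.nn.utils import clip_grad_norm_

def run_step(model, optimizer, x, enable_autocast=False, norm_type=2.0):
    model.train()
    optimizer.zero_grad()
    # mixed-precision forward pass when autocast is enabled
    with torch.cuda.amp.autocast(enabled=enable_autocast):
        y = model(x)
        loss = y.sum()
    loss.backward()
    # clip with the norm type under test, mirroring the DDP/ShardedModel comparison above
    clip_grad_norm_(model.parameters(), max_norm=1.0, norm_type=norm_type)
    optimizer.step()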
@@ -12,7 +12,7 @@ from colossalai.utils import free_port
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.zero.sharded_model import ShardedModelV2
from colossalai.zero.sharded_model._zero3_utils import cast_tensor_to_fp16
from colossalai.zero.sharded_model._utils import cast_tensor_to_fp16
from colossalai.zero.sharded_model.utils import col_model_deepcopy
from tests.components_to_test.registry import non_distributed_component_funcs
from torch.nn.parallel import DistributedDataParallel as DDP
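The only change in this hunk is the import path: `cast_tensor_to_fp16` now lives in `_utils` instead of `_zero3_utils`. A minimal sketch of what such a cast helper typically does follows; the recursive handling of containers is an assumption for illustration, not the library's exact implementation:

# Illustrative sketch only; the real cast_tensor_to_fp16 is in
# colossalai.zero.sharded_model._utils and may differ in detail.
from typing import Any
import torch

def cast_tensor_to_fp16_sketch(value: Any) -> Any:
    # cast fp32 tensors down to half precision, leave every other object untouched
    if isinstance(value, torch.Tensor) and value.dtype is torch.float32:
        return value.half()
    if isinstance(value, (list, tuple)):
        return type(value)(cast_tensor_to_fp16_sketch(v) for v in value)
    if isinstance(value, dict):
        return {k: cast_tensor_to_fp16_sketch(v) for k, v in value.items()}
    return value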
@@ -1,168 +0,0 @@
import torch
import colossalai
import copy
import pytest
import torch.multiprocessing as mp
from colossalai.zero import ShardedOptimizer
from torch.nn.parallel import DistributedDataParallel as DDP

from colossalai.utils import free_port
from functools import partial
from common import allclose
from tests.components_to_test.registry import non_distributed_component_funcs


def check_completely_equal(a, b):
    """
    This function checks if two tensors are completely equal
    """
    assert torch.all(a == b), f'a = {a}, b = {b}'

def check_sharded_param_consistency():
    """
    In this test, we check whether zero stage 1 and stage 2
    deliver the same numerical results despite their different
    communication patterns.

    We use these prefixes to differentiate the zero stages:
    oss: partition optimizer states
    pg: partition gradients and optimizer states
    """
    test_models = ['repeated_computed_layers', 'resnet18', 'nested_model']

    for name in test_models:
        get_components_func = non_distributed_component_funcs.get_callable(name)
        model_builder, train_dataloader, *_ = get_components_func()

        # create model
        oss_model = model_builder(checkpoint=True).cuda().half()
        pg_model = copy.deepcopy(oss_model)

        # create optimizer
        oss_optimizer = torch.optim.Adam(oss_model.parameters(), lr=0.001)
        pg_optimizer = torch.optim.Adam(pg_model.parameters(), lr=0.001)
        oss_optimizer = ShardedOptimizer(oss_optimizer, overlap_communication=True, initial_scale=1, clip_grad_norm=0.0)
        pg_optimizer = ShardedOptimizer(pg_optimizer,
                                        overlap_communication=True,
                                        partition_grad=True,
                                        initial_scale=1,
                                        clip_grad_norm=0.0)

        # create data
        data, label = next(iter(train_dataloader))
        input_data = data.cuda().half()

        # forward
        oss_output = oss_model(input_data)
        pg_output = pg_model(input_data)
        check_completely_equal(oss_output, pg_output)

        # backward
        oss_optimizer.backward(oss_output.mean().float())
        pg_optimizer.backward(pg_output.mean().float())

        # check grad
        # as this param is small, the backward reduction
        # will not be fired
        for oss_param, pg_param in zip(oss_model.parameters(), pg_model.parameters()):
            check_completely_equal(oss_param.grad, pg_param.grad)

        # sync grad
        oss_optimizer.sync_grad()
        pg_optimizer.sync_grad()

        # step
        oss_optimizer.step()
        pg_optimizer.step()

        # check updated param
        for oss_param, pg_param in zip(oss_model.parameters(), pg_model.parameters()):
            check_completely_equal(oss_param, pg_param)

def check_sharded_optim_against_torch_ddp():
    """
    In this test, two pairs of models and optimizers are created.
    1. zero: use sharded optimizer and fp16 parameters
    2. torch: use torch DDP and fp32 parameters

    We feed these two sets of models with the same input and check if the
    differences in model output and updated parameters are within tolerance.
    """
    test_models = ['repeated_computed_layers', 'resnet18', 'nested_model']

    for name in test_models:
        get_components_func = non_distributed_component_funcs.get_callable(name)
        model_builder, train_dataloader, *_ = get_components_func()

        # create model
        zero_model = model_builder(checkpoint=True).cuda()
        torch_model = copy.deepcopy(zero_model)

        zero_model = zero_model.half()
        torch_model = DDP(torch_model.cuda())

        # create optimizer
        zero_optimizer = torch.optim.Adam(zero_model.parameters(), lr=0.001)

        # we only test stage 1 here
        # in `check_sharded_param_consistency.py`, we will test whether
        # level 1 and 2 will produce exactly the same results
        zero_optimizer = ShardedOptimizer(zero_optimizer,
                                          overlap_communication=True,
                                          initial_scale=1,
                                          clip_grad_norm=0.0)
        torch_optimizer = torch.optim.Adam(torch_model.parameters(), lr=0.001)

        # create data
        input_data, _ = next(iter(train_dataloader))
        input_data = input_data.cuda()

        # zero-dp forward
        zero_output = zero_model(input_data.half())

        # torch-ddp forward
        torch_output = torch_model(input_data)
        allclose(zero_output, torch_output.half())

        # zero-dp backward
        zero_optimizer.backward(zero_output.mean().float())

        # torch-ddp backward
        torch_output.mean().backward()

        # check grad
        for oss_param, torch_param in zip(zero_model.parameters(), torch_model.parameters()):
            allclose(oss_param.grad, torch_param.grad.half())

        # zero-dp step
        zero_optimizer.sync_grad()
        zero_optimizer.step()

        # torch ddp step
        torch_optimizer.step()

        # check updated param
        for oss_param, torch_param in zip(zero_model.parameters(), torch_model.parameters()):
            allclose(oss_param, torch_param.half())

def run_dist(rank, world_size, port):
    colossalai.launch(config=dict(), rank=rank, world_size=world_size, port=port, host='localhost')

    check_sharded_optim_against_torch_ddp()
    check_sharded_param_consistency()


@pytest.mark.dist
def test_sharded_optim():
    world_size = 2
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_sharded_optim()
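Both checks above rely on the `allclose` helper imported from `common`, which is not shown in this diff. A minimal sketch consistent with how it is called here and in the ShardedModel test above follows; the tolerance values are assumptions, not the helper's actual settings:

# Hypothetical version of the `allclose` helper from `common`; illustrative only.
import torch

def allclose(tensor_a: torch.Tensor, tensor_b: torch.Tensor, loose: bool = False) -> None:
    # compare on CPU in fp32 so fp16 and CUDA tensors can be checked uniformly
    a = tensor_a.detach().float().cpu()
    b = tensor_b.detach().float().cpu()
    rtol, atol = (1e-3, 1e-3) if loose else (1e-5, 1e-8)
    assert torch.allclose(a, b, rtol=rtol, atol=atol), \
        f'max abs diff: {(a - b).abs().max().item()}'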
@@ -1,39 +0,0 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from functools import partial
import pytest

import torch
import torch.multiprocessing as mp

import colossalai
from colossalai.zero.sharded_model.param_manager import Zero3ParameterManager
from colossalai.core import global_context as gpc
from colossalai.context.parallel_mode import ParallelMode
from colossalai.utils import free_port
from common import CONFIG


def run_shard_shape_check(rank, world_size, port):
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

    model = torch.nn.Linear(2, 4 * world_size)
    gpc.init_parallel_groups()
    Zero3ParameterManager(module=model,
                          process_group=gpc.get_group(ParallelMode.DATA),
                          offload_config=CONFIG.get('offload_param_config'))

    assert (model.weight.numel() == 4 * 2)
    assert (model.bias.numel() == 4)


@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2, 4])
def test_run_shard_shape(world_size):
    run_func = partial(run_shard_shape_check, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_run_shard_shape(2)
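The two assertions encode the per-rank shard sizes: `torch.nn.Linear(2, 4 * world_size)` has `2 * 4 * world_size` weight elements and `4 * world_size` bias elements, so after even partitioning across `world_size` data-parallel ranks each rank keeps 8 weight elements and 4 bias elements. A self-contained check of that arithmetic (illustrative only, independent of `Zero3ParameterManager`):

# Stand-alone sketch of the shard-size arithmetic behind the assertions above.
def expected_shard_numel(total_numel: int, world_size: int) -> int:
    # each rank keeps an equal slice of the flattened parameter
    assert total_numel % world_size == 0
    return total_numel // world_size

for world_size in (1, 2, 4):
    weight_numel = 2 * (4 * world_size)    # in_features * out_features
    bias_numel = 4 * world_size
    assert expected_shard_numel(weight_numel, world_size) == 4 * 2
    assert expected_shard_numel(bias_numel, world_size) == 4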
@@ -1,19 +0,0 @@

import sys
from pathlib import Path

repo_path = Path(__file__).absolute().parents[2]
sys.path.append(str(repo_path))

try:
    import model_zoo.vit.vision_transformer_from_config
except ImportError:
    raise ImportError("model_zoo is not found, please check your path")

BATCH_SIZE = 8
IMG_SIZE = 32
PATCH_SIZE = 4
DIM = 512
NUM_ATTENTION_HEADS = 8
SUMMA_DIM = 2
NUM_CLASSES = 10
DEPTH = 6
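For orientation, these constants imply a few derived quantities that the ViT tests below rely on; the quick check is illustrative and was not part of the removed file:

# Illustrative arithmetic only (not part of the removed components file).
IMG_SIZE, PATCH_SIZE, DIM, NUM_ATTENTION_HEADS, SUMMA_DIM = 32, 4, 512, 8, 2
assert (IMG_SIZE // PATCH_SIZE) ** 2 == 64    # patch tokens per image
assert DIM // NUM_ATTENTION_HEADS == 64       # hidden size per attention head
assert SUMMA_DIM * SUMMA_DIM == 4             # 2D (SUMMA) device grid, matching tensor=dict(size=4, mode='2d')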
@@ -1,99 +0,0 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import os
from functools import partial
from pathlib import Path

import colossalai
import pytest
import torch
import torch.autograd
import torch.multiprocessing as mp
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.nn import CrossEntropyLoss
from colossalai.utils import free_port, get_dataloader
from model_zoo.vit import vit_lite_depth7_patch4_32
from torchvision import transforms
from torchvision.datasets import CIFAR10

from components import *

CONFIG = dict(parallel=dict(
    pipeline=dict(size=1),
    tensor=dict(size=4, mode='2d'),
),
              fp16=dict(mode=None, ),
              zero=dict(level=2))


def train_epoch(engine, train_dataloader):
    engine.train()
    accumulated_loss = 0
    num_steps = len(train_dataloader)
    data_iter = iter(train_dataloader)
    for i in range(num_steps):
        output, label, loss = engine.step(data_iter)
        accumulated_loss += loss.detach().cpu().numpy()
    avg_loss = accumulated_loss / num_steps
    return avg_loss

def run_2d_parallel_vision_transformer_level_2(rank, world_size, port):
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

    # build model
    model = vit_lite_depth7_patch4_32()

    # build dataloader
    train_dataset = CIFAR10(root=Path(os.environ['DATA']),
                            download=True,
                            transform=transforms.Compose([
                                transforms.Resize(size=(IMG_SIZE, IMG_SIZE)),
                                transforms.ToTensor(),
                                transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
                            ]))
    train_dataloader = get_dataloader(dataset=train_dataset,
                                      shuffle=True,
                                      batch_size=BATCH_SIZE,
                                      pin_memory=True,
                                      drop_last=True)

    # build optimizer and loss
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    criterion = CrossEntropyLoss()

    engine, train_dataloader, *args = colossalai.initialize(model=model,
                                                            optimizer=optimizer,
                                                            criterion=criterion,
                                                            train_dataloader=train_dataloader)
    logger = get_dist_logger()

    logger.info('start training')
    engine.train()

    for img, label in train_dataloader:
        engine.zero_grad()
        img = img.cuda()
        label = label.cuda()
        out = engine(img)
        loss = engine.criterion(out, label)
        engine.backward(loss)
        engine.step()
        break

    gpc.destroy()
    torch.cuda.empty_cache()


@pytest.mark.dist
@pytest.mark.skip(reason="This test should be refactored for the reconstructed zero")
def test_2d_vit_zero_level_2():
    world_size = 8
    run_func = partial(run_2d_parallel_vision_transformer_level_2, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_2d_vit_zero_level_2()
@@ -1,99 +0,0 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import os
from functools import partial
from pathlib import Path

import colossalai
import pytest
import torch
import torch.autograd
import torch.multiprocessing as mp
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.nn import CrossEntropyLoss
from colossalai.utils import free_port, get_dataloader
from model_zoo.vit import vit_lite_depth7_patch4_32
from torchvision import transforms
from torchvision.datasets import CIFAR10

from components import *

CONFIG = dict(parallel=dict(
    pipeline=dict(size=1),
    tensor=dict(size=4, mode='2d'),
),
              fp16=dict(mode=None, ),
              zero=dict(level=3))


def train_epoch(engine, train_dataloader):
    engine.train()
    accumulated_loss = 0
    num_steps = len(train_dataloader)
    data_iter = iter(train_dataloader)
    for i in range(num_steps):
        output, label, loss = engine.step(data_iter)
        accumulated_loss += loss.detach().cpu().numpy()
    avg_loss = accumulated_loss / num_steps
    return avg_loss

def run_2d_parallel_vision_transformer_level_3(rank, world_size, port):
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

    # build model
    model = vit_lite_depth7_patch4_32()

    # build dataloader
    train_dataset = CIFAR10(root=Path(os.environ['DATA']),
                            download=True,
                            transform=transforms.Compose([
                                transforms.Resize(size=(IMG_SIZE, IMG_SIZE)),
                                transforms.ToTensor(),
                                transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
                            ]))
    train_dataloader = get_dataloader(dataset=train_dataset,
                                      shuffle=True,
                                      batch_size=BATCH_SIZE,
                                      pin_memory=True,
                                      drop_last=True)

    # build optimizer and loss
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    criterion = CrossEntropyLoss()

    engine, train_dataloader, *args = colossalai.initialize(model=model,
                                                            optimizer=optimizer,
                                                            criterion=criterion,
                                                            train_dataloader=train_dataloader)
    logger = get_dist_logger()

    logger.info('start training')
    engine.train()

    for img, label in train_dataloader:
        engine.zero_grad()
        img = img.cuda()
        label = label.cuda()
        out = engine(img)
        loss = engine.criterion(out, label)
        engine.backward(loss)
        engine.step()
        break

    gpc.destroy()
    torch.cuda.empty_cache()


@pytest.mark.dist
@pytest.mark.skip(reason="This test should be refactored for the reconstructed zero")
def test_3d_vit_zero_level_3():
    world_size = 8
    run_func = partial(run_2d_parallel_vision_transformer_level_3, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_3d_vit_zero_level_3()