#!/usr/bin/env python
# -*- encoding: utf-8 -*-

# Checks that, when no data parallel sampler is attached to the DataLoader,
# every rank in the data parallel group receives the same batch.
# Requires the DATA environment variable to point at the dataset root.

import os
from functools import partial
from pathlib import Path

import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torchvision import transforms
from torch.utils.data import DataLoader

import colossalai
from colossalai.builder import build_dataset, build_transform
from colossalai.context import ParallelMode, Config
from colossalai.core import global_context as gpc

CONFIG = Config(
    dict(
        train_data=dict(
            dataset=dict(
                type='CIFAR10',
                root=Path(os.environ['DATA']),
                train=True,
                download=True,
            ),
            dataloader=dict(
                num_workers=2,
                batch_size=2,
                shuffle=True
            ),
            transform_pipeline=[
                dict(type='ToTensor'),
                dict(type='RandomCrop', size=32),
                dict(type='Normalize', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
            ]
        ),
        parallel=dict(
            pipeline=dict(size=1),
            tensor=dict(size=1, mode=None),
        ),
        seed=1024,
    )
)


def run_data_sampler(rank, world_size):
    dist_args = dict(
        config=CONFIG,
        rank=rank,
        world_size=world_size,
        backend='gloo',
        port='29499',
        host='localhost'
    )
    colossalai.launch(**dist_args)
    print('finished initialization')

    dataset_cfg = gpc.config.train_data.dataset
    dataloader_cfg = gpc.config.train_data.dataloader
    transform_cfg = gpc.config.train_data.transform_pipeline

    # build transform
    transform_pipeline = [build_transform(cfg) for cfg in transform_cfg]
    transform_pipeline = transforms.Compose(transform_pipeline)
    dataset_cfg['transform'] = transform_pipeline

    # build dataset
    dataset = build_dataset(dataset_cfg)

    # build dataloader without a data parallel sampler
    dataloader = DataLoader(dataset=dataset, **dataloader_cfg)

    # fetch the first batch and keep a single image for comparison
    data_iter = iter(dataloader)
    img, label = next(data_iter)
    img = img[0]

    if gpc.get_local_rank(ParallelMode.DATA) != 0:
        img_to_compare = img.clone()
    else:
        img_to_compare = img
    # rank 0 broadcasts its image to every other rank in the data parallel group
    dist.broadcast(img_to_compare, src=0, group=gpc.get_group(ParallelMode.DATA))

    if gpc.get_local_rank(ParallelMode.DATA) != 0:
        # no data parallel sampler is attached, so every rank should load the same data;
        # this check would fail if a data parallel sampler were given to the dataloader
        # (see the sketch following this function)
        assert torch.equal(
            img, img_to_compare), 'expected every rank to receive the same image when no sampler is used'


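# For contrast, a minimal illustrative sketch (not part of the original test; the helper
# name is hypothetical) of building the same dataloader *with* a data parallel sampler
# using torch's DistributedSampler. With the sampler attached, each data parallel rank
# draws a different shard of the dataset, so the torch.equal check above would be
# expected to fail.
def _build_dataloader_with_sampler(dataset):
    from torch.utils.data.distributed import DistributedSampler

    sampler = DistributedSampler(
        dataset,
        num_replicas=gpc.get_world_size(ParallelMode.DATA),  # one shard per data parallel rank
        rank=gpc.get_local_rank(ParallelMode.DATA),
        shuffle=True,
    )
    # shuffle is not set on the DataLoader itself; shuffling is delegated to the sampler
    return DataLoader(dataset=dataset, batch_size=2, num_workers=2, sampler=sampler)

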
@pytest.mark.cpu
def test_data_sampler():
    world_size = 4
    test_func = partial(run_data_sampler, world_size=world_size)
    mp.spawn(test_func, nprocs=world_size)


if __name__ == '__main__':
    test_data_sampler()