Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2026-05-08 23:05:41 +00:00
Develop/experiments (#59)
* Add gradient accumulation; fix lr scheduler
* Fix FP16 optimizer and adapt torch amp to work with tensor parallelism (#18)
  * Fixed bugs in compatibility between torch amp and tensor parallelism and performed some minor fixes
  * Fixed trainer
  * Revert "fixed trainer" (reverts commit 2e0b0b7699)
* Improved consistency between trainer, engine and schedule (#23)
* Split conv2d, class token and positional embedding in 2D; fix random number in DDP; fix convergence on CIFAR-10 and ImageNet-1000
* Integrate 1D tensor parallelism into Colossal-AI (#39)
* Fixed 1D and 2D convergence (#38)
  * Optimized 2D operations
  * Fixed 1D ViT convergence problem
* Feature/ddp (#49)
  * Remove redundant function in setup (#19) (#20)
  * Use env to control the language of the docs (#24) (#25)
  * Support TP-compatible Torch AMP and update trainer API (#27)
  * Add an example of ViT-B/16 and remove w_norm clipping in LAMB (#29)
  * Add explanation for the ViT example (#35) (#36)
  * Support torch DDP; fix loss accumulation; add log for DDP; change seed; modify timing hook
* Feature/pipeline (#40)
  * Optimize communication of pipeline parallelism
  * Fix grad clip for pipeline
* Optimized 3D layers to fix slow computation; tested ImageNet performance with 3D; reworked lr_scheduler config definition; fixed launch args; fixed some printing issues; simplified APIs of 3D layers (#51)
* Update 2.5D layer code to get a similar accuracy on the ImageNet-1k dataset
* Update API for better usability (#58)

Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: ver217 <lhx0217@gmail.com>
Co-authored-by: Frank Lee <somerlee.9@gmail.com>
Co-authored-by: binmakeswell <binmakeswell@gmail.com>
Co-authored-by: puck_WCR <46049915+WANG-CR@users.noreply.github.com>
Co-authored-by: アマデウス <kurisusnowdeng@users.noreply.github.com>
Co-authored-by: BoxiangW <45734921+BoxiangW@users.noreply.github.com>
@@ -4,7 +4,6 @@
import torch.distributed as dist

from colossalai.context import Config
from colossalai.core import global_context as gpc
from colossalai.registry import DIST_GROUP_INITIALIZER
from .process_group_initializer import ProcessGroupInitializer
from ..parallel_mode import ParallelMode
@@ -8,7 +8,6 @@ import torch.distributed as dist

from colossalai.constants import TESSERACT_DIM, TESSERACT_DEP
from colossalai.context import Config
from colossalai.core import global_context as gpc
from colossalai.registry import DIST_GROUP_INITIALIZER
from .process_group_initializer import ProcessGroupInitializer
from ..parallel_mode import ParallelMode
@@ -42,8 +41,6 @@ class Initializer_2p5D_ROW(ProcessGroupInitializer):
                 tesseract_dep: int,
                 *args):
        super(Initializer_2p5D_ROW, self).__init__(*args)

        self.tensor_parallel_size = gpc.tensor_parallel_size
        self.num_group = self.world_size // self.tensor_parallel_size
        self.tesseract_dep = tesseract_dep
        self.tesseract_dim = tesseract_dim
@@ -66,7 +63,7 @@ class Initializer_2p5D_ROW(ProcessGroupInitializer):
        for j in range(self.tesseract_dim):
            for k in range(self.tesseract_dep):
                ranks = [h * self.tensor_parallel_size + i + self.tesseract_dim * (
                    j + self.tesseract_dim * k) for i in range(self.tesseract_dim)]
                group = dist.new_group(ranks)

                if self.rank in ranks:
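To make the rank formula concrete: rank = h * tensor_parallel_size + i + tesseract_dim * (j + tesseract_dim * k) flattens the coordinates (i, j, k) of a tesseract_dim x tesseract_dim x tesseract_dep block into a flat rank, and a ROW group collects all i for a fixed (h, j, k). A minimal standalone sketch with toy sizes (the outer h loop and num_group = 1 are assumptions for illustration, mirroring the XZ hunk further below):

tesseract_dim, tesseract_dep = 2, 2
tensor_parallel_size = tesseract_dim ** 2 * tesseract_dep  # 8 ranks per block
num_group = 1  # assumed: world_size == tensor_parallel_size

for h in range(num_group):
    for j in range(tesseract_dim):
        for k in range(tesseract_dep):
            # same comprehension as in the hunk above
            ranks = [h * tensor_parallel_size + i + tesseract_dim * (
                j + tesseract_dim * k) for i in range(tesseract_dim)]
            print(f'ROW group (h={h}, j={j}, k={k}): {ranks}')

# prints:
# ROW group (h=0, j=0, k=0): [0, 1]
# ROW group (h=0, j=0, k=1): [4, 5]
# ROW group (h=0, j=1, k=0): [2, 3]
# ROW group (h=0, j=1, k=1): [6, 7]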
@@ -81,13 +78,12 @@ class Initializer_2p5D_ROW(ProcessGroupInitializer):
class Initializer_2p5D_Col(ProcessGroupInitializer):
    '''2p5d tensor parallel initialization among cols.
    '''

    def __init__(self,
                 tesseract_dim: int,
                 tesseract_dep: int,
                 *args):
        super(Initializer_2p5D_Col, self).__init__(*args)

        self.tensor_parallel_size = gpc.tensor_parallel_size
        self.num_group = self.world_size // self.tensor_parallel_size
        self.tesseract_dep = tesseract_dep
        self.tesseract_dim = tesseract_dim
@@ -110,7 +106,7 @@ class Initializer_2p5D_Col(ProcessGroupInitializer):
        for i in range(self.tesseract_dim):
            for k in range(self.tesseract_dep):
                ranks = [h * self.tensor_parallel_size + i + self.tesseract_dim * (
                    j + self.tesseract_dim * k) for j in range(self.tesseract_dim)]
                group = dist.new_group(ranks)

                if self.rank in ranks:
@@ -125,13 +121,12 @@ class Initializer_2p5D_Col(ProcessGroupInitializer):
class Initializer_2p5D_Dep(ProcessGroupInitializer):
    '''2p5D tensor parallel initialization among depths.
    '''

    def __init__(self,
                 tesseract_dim: int,
                 tesseract_dep: int,
                 *args):
        super(Initializer_2p5D_Dep, self).__init__(*args)

        self.tensor_parallel_size = gpc.tensor_parallel_size
        self.num_group = self.world_size // self.tensor_parallel_size
        self.tesseract_dep = tesseract_dep
        self.tesseract_dim = tesseract_dim
@@ -154,7 +149,7 @@ class Initializer_2p5D_Dep(ProcessGroupInitializer):
        for i in range(self.tesseract_dim):
            for j in range(self.tesseract_dim):
                ranks = [h * self.tensor_parallel_size + i + self.tesseract_dim * (
                    j + self.tesseract_dim * k) for k in range(self.tesseract_dep)]
                group = dist.new_group(ranks)

                if self.rank in ranks:
@@ -170,13 +165,12 @@ class Initializer_2p5D_Dep(ProcessGroupInitializer):
class Initializer_2p5D_XZ(ProcessGroupInitializer):
    '''2p5d tensor parallel initialization among cols times dep.
    '''

    def __init__(self,
                 tesseract_dim: int,
                 tesseract_dep: int,
                 *args):
        super(Initializer_2p5D_XZ, self).__init__(*args)

        self.tensor_parallel_size = gpc.tensor_parallel_size
        self.num_group = self.world_size // self.tensor_parallel_size
        self.tesseract_dep = tesseract_dep
        self.tesseract_dim = tesseract_dim
@@ -198,8 +192,8 @@ class Initializer_2p5D_XZ(ProcessGroupInitializer):
        for h in range(self.num_group):
            for i in range(self.tesseract_dim):
                ranks = [h * self.tensor_parallel_size + i + self.tesseract_dim * (
                    j + self.tesseract_dim * k) for k in range(self.tesseract_dep) for j in
                    range(self.tesseract_dim)]
                group = dist.new_group(ranks)

                if self.rank in ranks:
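Taken together, the four modes slice the same tesseract along different axes: ROW and Col groups have size tesseract_dim, Dep groups have size tesseract_dep, and XZ groups have size tesseract_dim * tesseract_dep. A standalone sketch (toy sizes, h fixed to 0, which is an assumption for illustration) checking that each mode covers every rank of the block exactly once:

dim, dep = 2, 2
tp_size = dim * dim * dep

def flat_rank(i, j, k):
    # flattening used throughout the hunks above (h = 0)
    return i + dim * (j + dim * k)

modes = {
    'ROW': [[flat_rank(i, j, k) for i in range(dim)]
            for j in range(dim) for k in range(dep)],
    'COL': [[flat_rank(i, j, k) for j in range(dim)]
            for i in range(dim) for k in range(dep)],
    'DEP': [[flat_rank(i, j, k) for k in range(dep)]
            for i in range(dim) for j in range(dim)],
    'XZ':  [[flat_rank(i, j, k) for k in range(dep) for j in range(dim)]
            for i in range(dim)],
}

for name, groups in modes.items():
    members = sorted(r for g in groups for r in g)
    assert members == list(range(tp_size)), name  # each mode is a partition
    print(name, groups)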
@@ -5,7 +5,7 @@ import math
import os

import torch.distributed as dist
-from colossalai.constants import DEPTH_3D
+from colossalai.constants import DEPTH_3D, INPUT_GROUP_3D, WEIGHT_GROUP_3D, OUTPUT_GROUP_3D
from colossalai.registry import DIST_GROUP_INITIALIZER

from ..parallel_mode import ParallelMode
@@ -18,7 +18,7 @@ def _check_depth_env_var(depth):

    if env_depth:
        assert int(env_depth) == depth, \
-            'SUMMA_DIM has been set in the current environment and ' \
+            'DEPTH_3D has been set in the current environment and ' \
            'does not match with the value passed to this initialized'
    else:
        os.environ[DEPTH_3D] = str(depth)
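The behavior of the corrected guard, as a minimal standalone sketch (the os.environ.get lookup and the constant's string value are assumptions; the hunk only shows the branch bodies):

import os

DEPTH_3D = 'DEPTH_3D'  # assumed string value of the constant

def _check_depth_env_var(depth):
    env_depth = os.environ.get(DEPTH_3D)  # assumed lookup
    if env_depth:
        assert int(env_depth) == depth, \
            'DEPTH_3D has been set in the current environment and ' \
            'does not match with the value passed to this initialized'
    else:
        os.environ[DEPTH_3D] = str(depth)

_check_depth_env_var(2)    # first call records the depth in the environment
_check_depth_env_var(2)    # a consistent value passes the assert
# _check_depth_env_var(4)  # a mismatch would raise AssertionError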
@@ -43,6 +43,7 @@ class Initializer_3D_Input(ProcessGroupInitializer):
        process_group = None
        group_world_size = None
        mode = ParallelMode.PARALLEL_3D_INPUT
+        os.environ[INPUT_GROUP_3D] = INPUT_GROUP_3D

        for h in range(self.num_group):
            for i in range(self.depth):
@@ -82,6 +83,7 @@ class Initializer_3D_Weight(ProcessGroupInitializer):
        process_group = None
        group_world_size = None
        mode = ParallelMode.PARALLEL_3D_WEIGHT
+        os.environ[WEIGHT_GROUP_3D] = WEIGHT_GROUP_3D

        for h in range(self.num_group):
            for k in range(self.depth):
@@ -121,6 +123,7 @@ class Initializer_3D_Output(ProcessGroupInitializer):
        process_group = None
        group_world_size = None
        mode = ParallelMode.PARALLEL_3D_OUTPUT
+        os.environ[OUTPUT_GROUP_3D] = OUTPUT_GROUP_3D

        for h in range(self.num_group):
            for i in range(self.depth):
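For scale, the three 3D initializers partition the same block one level up: with cube side depth, each of the input/weight/output modes splits the depth**3 tensor-parallel ranks into depth**2 groups of size depth. The per-group rank formulas are not visible in this diff, so the sketch below only checks the counts:

depth = 2
tensor_parallel_size = depth ** 3                # ranks in one 3D block
groups_per_mode = tensor_parallel_size // depth  # groups of size `depth`

assert groups_per_mode == depth ** 2
print(f'{tensor_parallel_size} ranks -> {groups_per_mode} groups '
      f'of size {depth} for each of input/weight/output')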