From c8bf2681e333c14f860b4f4cd94f15205e1b5319 Mon Sep 17 00:00:00 2001 From: hxwang Date: Tue, 16 Jul 2024 09:08:31 +0000 Subject: [PATCH] [moe] clean legacy code --- .../moe => legacy/moe/layer}/__init__.py | 0 .../layer/moe => legacy/moe/layer}/experts.py | 6 +- .../layer/moe => legacy/moe/layer}/layers.py | 6 +- .../layer/moe => legacy/moe/layer}/routers.py | 6 +- colossalai/{ => legacy}/moe/load_balance.py | 2 +- colossalai/{ => legacy}/moe/manager.py | 0 .../legacy/moe}/openmoe/README.md | 0 .../moe}/openmoe/benchmark/benchmark_cai.py | 4 +- .../moe}/openmoe/benchmark/benchmark_cai.sh | 0 .../openmoe/benchmark/benchmark_cai_dist.sh | 0 .../moe}/openmoe/benchmark/benchmark_fsdp.py | 2 +- .../moe}/openmoe/benchmark/benchmark_fsdp.sh | 0 .../moe}/openmoe/benchmark/hostfile.txt | 0 .../legacy/moe}/openmoe/benchmark/utils.py | 0 .../legacy/moe}/openmoe/infer.py | 0 .../legacy/moe}/openmoe/infer.sh | 0 .../legacy/moe}/openmoe/model/__init__.py | 0 .../openmoe/model/convert_openmoe_ckpt.py | 0 .../openmoe/model/convert_openmoe_ckpt.sh | 0 .../moe}/openmoe/model/modeling_openmoe.py | 4 +- .../moe}/openmoe/model/openmoe_8b_config.json | 0 .../openmoe/model/openmoe_base_config.json | 0 .../moe}/openmoe/model/openmoe_policy.py | 2 +- .../legacy/moe}/openmoe/requirements.txt | 0 .../legacy/moe}/openmoe/test_ci.sh | 0 .../legacy/moe}/openmoe/train.py | 2 +- .../legacy/moe}/openmoe/train.sh | 0 colossalai/{ => legacy}/moe/utils.py | 2 +- colossalai/moe/__init__.py | 5 - .../moe/{_operation.py => operators.py} | 2 + colossalai/shardformer/modeling/mixtral.py | 8 +- tests/test_legacy/test_moe/moe_utils.py | 136 ++++++++++++++++++ .../test_moe/test_grad_handler.py | 2 +- .../test_moe/test_mixtral_layer.py | 0 .../test_moe/test_moe_group.py | 4 +- .../test_moe/test_moe_hybrid_zero.py | 2 +- .../test_moe/test_moe_load_balance.py | 2 +- tests/test_moe/moe_utils.py | 135 ----------------- tests/test_moe/test_kernel.py | 4 +- 39 files changed, 163 insertions(+), 173 deletions(-) rename colossalai/{shardformer/layer/moe => legacy/moe/layer}/__init__.py (100%) rename colossalai/{shardformer/layer/moe => legacy/moe/layer}/experts.py (97%) rename colossalai/{shardformer/layer/moe => legacy/moe/layer}/layers.py (98%) rename colossalai/{shardformer/layer/moe => legacy/moe/layer}/routers.py (97%) rename colossalai/{ => legacy}/moe/load_balance.py (99%) rename colossalai/{ => legacy}/moe/manager.py (100%) rename {examples/language => colossalai/legacy/moe}/openmoe/README.md (100%) rename {examples/language => colossalai/legacy/moe}/openmoe/benchmark/benchmark_cai.py (99%) rename {examples/language => colossalai/legacy/moe}/openmoe/benchmark/benchmark_cai.sh (100%) rename {examples/language => colossalai/legacy/moe}/openmoe/benchmark/benchmark_cai_dist.sh (100%) rename {examples/language => colossalai/legacy/moe}/openmoe/benchmark/benchmark_fsdp.py (98%) rename {examples/language => colossalai/legacy/moe}/openmoe/benchmark/benchmark_fsdp.sh (100%) rename {examples/language => colossalai/legacy/moe}/openmoe/benchmark/hostfile.txt (100%) rename {examples/language => colossalai/legacy/moe}/openmoe/benchmark/utils.py (100%) rename {examples/language => colossalai/legacy/moe}/openmoe/infer.py (100%) rename {examples/language => colossalai/legacy/moe}/openmoe/infer.sh (100%) rename {examples/language => colossalai/legacy/moe}/openmoe/model/__init__.py (100%) rename {examples/language => colossalai/legacy/moe}/openmoe/model/convert_openmoe_ckpt.py (100%) rename {examples/language => 
colossalai/legacy/moe}/openmoe/model/convert_openmoe_ckpt.sh (100%) rename {examples/language => colossalai/legacy/moe}/openmoe/model/modeling_openmoe.py (99%) rename {examples/language => colossalai/legacy/moe}/openmoe/model/openmoe_8b_config.json (100%) rename {examples/language => colossalai/legacy/moe}/openmoe/model/openmoe_base_config.json (100%) rename {examples/language => colossalai/legacy/moe}/openmoe/model/openmoe_policy.py (99%) rename {examples/language => colossalai/legacy/moe}/openmoe/requirements.txt (100%) rename {examples/language => colossalai/legacy/moe}/openmoe/test_ci.sh (100%) rename {examples/language => colossalai/legacy/moe}/openmoe/train.py (99%) rename {examples/language => colossalai/legacy/moe}/openmoe/train.sh (100%) rename colossalai/{ => legacy}/moe/utils.py (99%) rename colossalai/moe/{_operation.py => operators.py} (99%) create mode 100644 tests/test_legacy/test_moe/moe_utils.py rename tests/{ => test_legacy}/test_moe/test_grad_handler.py (98%) rename tests/{ => test_legacy}/test_moe/test_mixtral_layer.py (100%) rename tests/{ => test_legacy}/test_moe/test_moe_group.py (95%) rename tests/{ => test_legacy}/test_moe/test_moe_hybrid_zero.py (98%) rename tests/{ => test_legacy}/test_moe/test_moe_load_balance.py (99%) diff --git a/colossalai/shardformer/layer/moe/__init__.py b/colossalai/legacy/moe/layer/__init__.py similarity index 100% rename from colossalai/shardformer/layer/moe/__init__.py rename to colossalai/legacy/moe/layer/__init__.py diff --git a/colossalai/shardformer/layer/moe/experts.py b/colossalai/legacy/moe/layer/experts.py similarity index 97% rename from colossalai/shardformer/layer/moe/experts.py rename to colossalai/legacy/moe/layer/experts.py index 109740dbb..c16fc77bb 100644 --- a/colossalai/shardformer/layer/moe/experts.py +++ b/colossalai/legacy/moe/layer/experts.py @@ -5,9 +5,9 @@ import torch import torch.nn as nn from colossalai.kernel.triton.llama_act_combine_kernel import HAS_TRITON -from colossalai.moe._operation import EPGradScalerIn, EPGradScalerOut -from colossalai.moe.manager import MOE_MANAGER -from colossalai.moe.utils import get_activation +from colossalai.legacy.moe.manager import MOE_MANAGER +from colossalai.legacy.moe.utils import get_activation +from colossalai.moe.operators import EPGradScalerIn, EPGradScalerOut from colossalai.shardformer.layer.utils import Randomizer from colossalai.tensor.moe_tensor.api import get_ep_rank, get_ep_size diff --git a/colossalai/shardformer/layer/moe/layers.py b/colossalai/legacy/moe/layer/layers.py similarity index 98% rename from colossalai/shardformer/layer/moe/layers.py rename to colossalai/legacy/moe/layer/layers.py index e5b0ef97f..8681b5972 100644 --- a/colossalai/shardformer/layer/moe/layers.py +++ b/colossalai/legacy/moe/layer/layers.py @@ -7,9 +7,9 @@ import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F -from colossalai.moe._operation import AllGather, AllToAll, HierarchicalAllToAll, MoeCombine, MoeDispatch, ReduceScatter -from colossalai.moe.load_balance import LoadBalancer -from colossalai.moe.utils import create_ep_hierarchical_group, get_noise_generator +from colossalai.legacy.moe.load_balance import LoadBalancer +from colossalai.legacy.moe.utils import create_ep_hierarchical_group, get_noise_generator +from colossalai.moe.operators import AllGather, AllToAll, HierarchicalAllToAll, MoeCombine, MoeDispatch, ReduceScatter from colossalai.shardformer.layer.moe import MLPExperts from colossalai.tensor.moe_tensor.api import get_dp_group, 
get_ep_group, get_ep_group_ranks, get_ep_size diff --git a/colossalai/shardformer/layer/moe/routers.py b/colossalai/legacy/moe/layer/routers.py similarity index 97% rename from colossalai/shardformer/layer/moe/routers.py rename to colossalai/legacy/moe/layer/routers.py index 109740dbb..c16fc77bb 100644 --- a/colossalai/shardformer/layer/moe/routers.py +++ b/colossalai/legacy/moe/layer/routers.py @@ -5,9 +5,9 @@ import torch import torch.nn as nn from colossalai.kernel.triton.llama_act_combine_kernel import HAS_TRITON -from colossalai.moe._operation import EPGradScalerIn, EPGradScalerOut -from colossalai.moe.manager import MOE_MANAGER -from colossalai.moe.utils import get_activation +from colossalai.legacy.moe.manager import MOE_MANAGER +from colossalai.legacy.moe.utils import get_activation +from colossalai.moe.operators import EPGradScalerIn, EPGradScalerOut from colossalai.shardformer.layer.utils import Randomizer from colossalai.tensor.moe_tensor.api import get_ep_rank, get_ep_size diff --git a/colossalai/moe/load_balance.py b/colossalai/legacy/moe/load_balance.py similarity index 99% rename from colossalai/moe/load_balance.py rename to colossalai/legacy/moe/load_balance.py index 3dc6c02c7..7339b1a7b 100644 --- a/colossalai/moe/load_balance.py +++ b/colossalai/legacy/moe/load_balance.py @@ -7,7 +7,7 @@ from torch import Tensor, nn from torch.distributed import ProcessGroup from colossalai.cluster import ProcessGroupMesh -from colossalai.moe.manager import MOE_MANAGER +from colossalai.legacy.moe.manager import MOE_MANAGER from colossalai.shardformer.layer.moe import MLPExperts from colossalai.zero.low_level import LowLevelZeroOptimizer diff --git a/colossalai/moe/manager.py b/colossalai/legacy/moe/manager.py similarity index 100% rename from colossalai/moe/manager.py rename to colossalai/legacy/moe/manager.py diff --git a/examples/language/openmoe/README.md b/colossalai/legacy/moe/openmoe/README.md similarity index 100% rename from examples/language/openmoe/README.md rename to colossalai/legacy/moe/openmoe/README.md diff --git a/examples/language/openmoe/benchmark/benchmark_cai.py b/colossalai/legacy/moe/openmoe/benchmark/benchmark_cai.py similarity index 99% rename from examples/language/openmoe/benchmark/benchmark_cai.py rename to colossalai/legacy/moe/openmoe/benchmark/benchmark_cai.py index b9ef915c3..5f9447246 100644 --- a/examples/language/openmoe/benchmark/benchmark_cai.py +++ b/colossalai/legacy/moe/openmoe/benchmark/benchmark_cai.py @@ -18,9 +18,9 @@ from colossalai.accelerator import get_accelerator from colossalai.booster import Booster from colossalai.booster.plugin.moe_hybrid_parallel_plugin import MoeHybridParallelPlugin from colossalai.cluster import DistCoordinator +from colossalai.legacy.moe.manager import MOE_MANAGER +from colossalai.legacy.moe.utils import skip_init from colossalai.moe.layers import apply_load_balance -from colossalai.moe.manager import MOE_MANAGER -from colossalai.moe.utils import skip_init from colossalai.nn.optimizer import HybridAdam diff --git a/examples/language/openmoe/benchmark/benchmark_cai.sh b/colossalai/legacy/moe/openmoe/benchmark/benchmark_cai.sh similarity index 100% rename from examples/language/openmoe/benchmark/benchmark_cai.sh rename to colossalai/legacy/moe/openmoe/benchmark/benchmark_cai.sh diff --git a/examples/language/openmoe/benchmark/benchmark_cai_dist.sh b/colossalai/legacy/moe/openmoe/benchmark/benchmark_cai_dist.sh similarity index 100% rename from examples/language/openmoe/benchmark/benchmark_cai_dist.sh rename to 
colossalai/legacy/moe/openmoe/benchmark/benchmark_cai_dist.sh diff --git a/examples/language/openmoe/benchmark/benchmark_fsdp.py b/colossalai/legacy/moe/openmoe/benchmark/benchmark_fsdp.py similarity index 98% rename from examples/language/openmoe/benchmark/benchmark_fsdp.py rename to colossalai/legacy/moe/openmoe/benchmark/benchmark_fsdp.py index b00fbd001..1ae94dd90 100644 --- a/examples/language/openmoe/benchmark/benchmark_fsdp.py +++ b/colossalai/legacy/moe/openmoe/benchmark/benchmark_fsdp.py @@ -14,7 +14,7 @@ from torch.utils.data.distributed import DistributedSampler from transformers.models.llama import LlamaConfig from utils import PerformanceEvaluator, get_model_numel -from colossalai.moe.manager import MOE_MANAGER +from colossalai.legacy.moe.manager import MOE_MANAGER class RandomDataset(Dataset): diff --git a/examples/language/openmoe/benchmark/benchmark_fsdp.sh b/colossalai/legacy/moe/openmoe/benchmark/benchmark_fsdp.sh similarity index 100% rename from examples/language/openmoe/benchmark/benchmark_fsdp.sh rename to colossalai/legacy/moe/openmoe/benchmark/benchmark_fsdp.sh diff --git a/examples/language/openmoe/benchmark/hostfile.txt b/colossalai/legacy/moe/openmoe/benchmark/hostfile.txt similarity index 100% rename from examples/language/openmoe/benchmark/hostfile.txt rename to colossalai/legacy/moe/openmoe/benchmark/hostfile.txt diff --git a/examples/language/openmoe/benchmark/utils.py b/colossalai/legacy/moe/openmoe/benchmark/utils.py similarity index 100% rename from examples/language/openmoe/benchmark/utils.py rename to colossalai/legacy/moe/openmoe/benchmark/utils.py diff --git a/examples/language/openmoe/infer.py b/colossalai/legacy/moe/openmoe/infer.py similarity index 100% rename from examples/language/openmoe/infer.py rename to colossalai/legacy/moe/openmoe/infer.py diff --git a/examples/language/openmoe/infer.sh b/colossalai/legacy/moe/openmoe/infer.sh similarity index 100% rename from examples/language/openmoe/infer.sh rename to colossalai/legacy/moe/openmoe/infer.sh diff --git a/examples/language/openmoe/model/__init__.py b/colossalai/legacy/moe/openmoe/model/__init__.py similarity index 100% rename from examples/language/openmoe/model/__init__.py rename to colossalai/legacy/moe/openmoe/model/__init__.py diff --git a/examples/language/openmoe/model/convert_openmoe_ckpt.py b/colossalai/legacy/moe/openmoe/model/convert_openmoe_ckpt.py similarity index 100% rename from examples/language/openmoe/model/convert_openmoe_ckpt.py rename to colossalai/legacy/moe/openmoe/model/convert_openmoe_ckpt.py diff --git a/examples/language/openmoe/model/convert_openmoe_ckpt.sh b/colossalai/legacy/moe/openmoe/model/convert_openmoe_ckpt.sh similarity index 100% rename from examples/language/openmoe/model/convert_openmoe_ckpt.sh rename to colossalai/legacy/moe/openmoe/model/convert_openmoe_ckpt.sh diff --git a/examples/language/openmoe/model/modeling_openmoe.py b/colossalai/legacy/moe/openmoe/model/modeling_openmoe.py similarity index 99% rename from examples/language/openmoe/model/modeling_openmoe.py rename to colossalai/legacy/moe/openmoe/model/modeling_openmoe.py index 1febacd7d..5d6e91765 100644 --- a/examples/language/openmoe/model/modeling_openmoe.py +++ b/colossalai/legacy/moe/openmoe/model/modeling_openmoe.py @@ -50,8 +50,8 @@ try: except: HAS_FLASH_ATTN = False from colossalai.kernel.triton.llama_act_combine_kernel import HAS_TRITON -from colossalai.moe.manager import MOE_MANAGER -from colossalai.moe.utils import get_activation, set_moe_args +from colossalai.legacy.moe.manager 
import MOE_MANAGER +from colossalai.legacy.moe.utils import get_activation, set_moe_args from colossalai.shardformer.layer.moe import SparseMLP if HAS_TRITON: diff --git a/examples/language/openmoe/model/openmoe_8b_config.json b/colossalai/legacy/moe/openmoe/model/openmoe_8b_config.json similarity index 100% rename from examples/language/openmoe/model/openmoe_8b_config.json rename to colossalai/legacy/moe/openmoe/model/openmoe_8b_config.json diff --git a/examples/language/openmoe/model/openmoe_base_config.json b/colossalai/legacy/moe/openmoe/model/openmoe_base_config.json similarity index 100% rename from examples/language/openmoe/model/openmoe_base_config.json rename to colossalai/legacy/moe/openmoe/model/openmoe_base_config.json diff --git a/examples/language/openmoe/model/openmoe_policy.py b/colossalai/legacy/moe/openmoe/model/openmoe_policy.py similarity index 99% rename from examples/language/openmoe/model/openmoe_policy.py rename to colossalai/legacy/moe/openmoe/model/openmoe_policy.py index f46062128..ccd566b08 100644 --- a/examples/language/openmoe/model/openmoe_policy.py +++ b/colossalai/legacy/moe/openmoe/model/openmoe_policy.py @@ -9,7 +9,7 @@ from torch.nn import Module from transformers.modeling_outputs import CausalLMOutputWithPast from transformers.utils import logging -from colossalai.moe.manager import MOE_MANAGER +from colossalai.legacy.moe.manager import MOE_MANAGER from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer.layer import FusedRMSNorm, Linear1D_Col from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription diff --git a/examples/language/openmoe/requirements.txt b/colossalai/legacy/moe/openmoe/requirements.txt similarity index 100% rename from examples/language/openmoe/requirements.txt rename to colossalai/legacy/moe/openmoe/requirements.txt diff --git a/examples/language/openmoe/test_ci.sh b/colossalai/legacy/moe/openmoe/test_ci.sh similarity index 100% rename from examples/language/openmoe/test_ci.sh rename to colossalai/legacy/moe/openmoe/test_ci.sh diff --git a/examples/language/openmoe/train.py b/colossalai/legacy/moe/openmoe/train.py similarity index 99% rename from examples/language/openmoe/train.py rename to colossalai/legacy/moe/openmoe/train.py index ff0e4bad6..0173f0964 100644 --- a/examples/language/openmoe/train.py +++ b/colossalai/legacy/moe/openmoe/train.py @@ -19,7 +19,7 @@ from colossalai.accelerator import get_accelerator from colossalai.booster import Booster from colossalai.booster.plugin.moe_hybrid_parallel_plugin import MoeHybridParallelPlugin from colossalai.cluster import DistCoordinator -from colossalai.moe.utils import skip_init +from colossalai.legacy.moe.utils import skip_init from colossalai.nn.optimizer import HybridAdam from colossalai.shardformer.layer.moe import apply_load_balance diff --git a/examples/language/openmoe/train.sh b/colossalai/legacy/moe/openmoe/train.sh similarity index 100% rename from examples/language/openmoe/train.sh rename to colossalai/legacy/moe/openmoe/train.sh diff --git a/colossalai/moe/utils.py b/colossalai/legacy/moe/utils.py similarity index 99% rename from colossalai/moe/utils.py rename to colossalai/legacy/moe/utils.py index 3d08ab7dd..d91c41363 100644 --- a/colossalai/moe/utils.py +++ b/colossalai/legacy/moe/utils.py @@ -9,7 +9,7 @@ import torch.nn.functional as F from torch.distributed.distributed_c10d import get_process_group_ranks from colossalai.accelerator import get_accelerator -from 
colossalai.moe.manager import MOE_MANAGER +from colossalai.legacy.moe.manager import MOE_MANAGER from colossalai.tensor.moe_tensor.api import is_moe_tensor diff --git a/colossalai/moe/__init__.py b/colossalai/moe/__init__.py index 0623d19ef..e69de29bb 100644 --- a/colossalai/moe/__init__.py +++ b/colossalai/moe/__init__.py @@ -1,5 +0,0 @@ -from .manager import MOE_MANAGER - -__all__ = [ - "MOE_MANAGER", -] diff --git a/colossalai/moe/_operation.py b/colossalai/moe/operators.py similarity index 99% rename from colossalai/moe/_operation.py rename to colossalai/moe/operators.py index 230b40530..f24191c16 100644 --- a/colossalai/moe/_operation.py +++ b/colossalai/moe/operators.py @@ -469,6 +469,8 @@ def all_to_all_uneven( # See the License for the specific language governing permissions and # limitations under the License. +# TODO: used when non-moe are tp but moe are not + def _gather_tokens(input_, dim: int, tp_group: ProcessGroup): """Gather tensors and concatenate them along a dimension""" diff --git a/colossalai/shardformer/modeling/mixtral.py b/colossalai/shardformer/modeling/mixtral.py index cfa7da6c0..f8745c1d0 100644 --- a/colossalai/shardformer/modeling/mixtral.py +++ b/colossalai/shardformer/modeling/mixtral.py @@ -14,13 +14,7 @@ from transformers.models.mixtral.modeling_mixtral import ( from transformers.utils import is_flash_attn_2_available, logging from colossalai.lazy import LazyInitContext -from colossalai.moe._operation import ( - DPGradScalerIn, - DPGradScalerOut, - EPGradScalerIn, - EPGradScalerOut, - all_to_all_uneven, -) +from colossalai.moe.operators import DPGradScalerIn, DPGradScalerOut, EPGradScalerIn, EPGradScalerOut, all_to_all_uneven from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer.layer.linear import Linear1D_Col, Linear1D_Row from colossalai.shardformer.shard import ShardConfig diff --git a/tests/test_legacy/test_moe/moe_utils.py b/tests/test_legacy/test_moe/moe_utils.py new file mode 100644 index 000000000..8c133849b --- /dev/null +++ b/tests/test_legacy/test_moe/moe_utils.py @@ -0,0 +1,136 @@ +import torch +import torch.distributed as dist +import torch.nn as nn +from torch.distributed import ProcessGroup + +from colossalai.booster.plugin.low_level_zero_plugin import LowLevelZeroModel +from colossalai.legacy.engine.gradient_handler._base_gradient_handler import BaseGradientHandler +from colossalai.legacy.engine.gradient_handler.utils import bucket_allreduce +from colossalai.legacy.moe.manager import MOE_MANAGER +from colossalai.legacy.moe.utils import get_moe_epsize_param_dict +from colossalai.legacy.registry import GRADIENT_HANDLER +from colossalai.tensor.moe_tensor.api import get_ep_group, get_ep_size, set_moe_tensor_ep_group + + +def delete_moe_info(model): + for _, param in model.named_parameters(): + if hasattr(param, "ep_group"): + delattr(param, "ep_group") + + +class MoeModel(nn.Module): + def __init__(self, ep_group: ProcessGroup = None): + super().__init__() + self.test_embed = nn.Linear(4, 16, bias=False) + self.w1 = torch.nn.Parameter(torch.randn(16, 8)) + if ep_group: + set_moe_tensor_ep_group(self.w1, ep_group) + + def forward(self, x): + x = self.test_embed(x) + x = torch.matmul(x, self.w1) + + return x + + +@GRADIENT_HANDLER.register_module +class MoeGradientHandler(BaseGradientHandler): + """A helper class to handle all-reduce operations in a data parallel group and + moe model parallel. A all-reduce collective communication will be operated in + :func:`handle_gradient` among a data parallel group. 
+ For better performance, it bucketizes the gradients of all parameters that are + the same type to improve the efficiency of communication. + + Args: + model (Module): Model where the gradients accumulate. + optimizer (Optimizer): Optimizer for updating the parameters. + """ + + def __init__(self, model, optimizer=None): + super().__init__(model, optimizer) + + def handle_gradient(self): + """A method running an all-reduce operation in a data parallel group. + Then running an all-reduce operation for all parameters in experts + across moe model parallel group + """ + if dist.get_world_size() > 1: + epsize_param_dict = get_moe_epsize_param_dict(self._model) + + # epsize is 1, indicating the params are replicated among processes in data parallelism + # use the ParallelMode.DATA to get data parallel group + # reduce gradients for all parameters in data parallelism + if 1 in epsize_param_dict: + bucket_allreduce(param_list=epsize_param_dict[1]) + + for ep_size in epsize_param_dict: + if ep_size != 1 and ep_size != MOE_MANAGER.world_size: + bucket_allreduce( + param_list=epsize_param_dict[ep_size], group=MOE_MANAGER.parallel_info_dict[ep_size].dp_group + ) + + +def assert_not_equal_in_group(tensor, process_group=None): + # all gather tensors from different ranks + world_size = dist.get_world_size(process_group) + tensor_list = [torch.empty_like(tensor) for _ in range(world_size)] + dist.all_gather(tensor_list, tensor, group=process_group) + + # check if they are equal one by one + for i in range(world_size - 1): + a = tensor_list[i] + b = tensor_list[i + 1] + assert not torch.allclose(a, b), ( + f"expected tensors on rank {i} and {i + 1} not to be equal " f"but they are, {a} vs {b}" + ) + + +def run_fwd_bwd(model, data, label, criterion, optimizer, enable_autocast=False): + model.train() + with torch.cuda.amp.autocast(enabled=enable_autocast): + if criterion: + y = model(data) + loss = criterion(y, label) + else: + loss = model(data, label) + loss = loss.float() + + if isinstance(model, LowLevelZeroModel): + optimizer.backward(loss) + else: + loss.backward() + return y + + +def sync_local_from_ep(local_model, ep_model, assert_grad_flag: bool = False) -> None: + """Sync the parameters of tp model from ep model + + Args: + local_model (MoeModule) + ep_model (MoeModule) + """ + for (local_name, local_param), (ep_name, ep_param) in zip( + local_model.named_parameters(), ep_model.named_parameters() + ): + if "experts" not in local_name: + if assert_grad_flag: + assert torch.allclose(local_param, ep_param), f"local_param: {local_param}, ep_param: {ep_param}" + assert torch.allclose(local_param.grad, ep_param.grad) + else: + local_param.data.copy_(ep_param.data) + continue + + # gather param from ep model + param_list = [torch.zeros_like(ep_param) for _ in range(get_ep_size(ep_param))] + dist.all_gather(param_list, ep_param, group=get_ep_group(ep_param)) + all_param = torch.cat(param_list, dim=0) + if assert_grad_flag: + grad_list = [torch.zeros_like(ep_param) for _ in range(get_ep_size(ep_param))] + dist.all_gather(grad_list, ep_param.grad, group=get_ep_group(ep_param)) + all_grad = torch.cat(grad_list, dim=0) + + if assert_grad_flag: + assert torch.allclose(local_param, all_param) + assert torch.allclose(local_param.grad, all_grad) + else: + local_param.data.copy_(all_param.data) diff --git a/tests/test_moe/test_grad_handler.py b/tests/test_legacy/test_moe/test_grad_handler.py similarity index 98% rename from tests/test_moe/test_grad_handler.py rename to 
tests/test_legacy/test_moe/test_grad_handler.py index 25e61b091..3a782a6dd 100644 --- a/tests/test_moe/test_grad_handler.py +++ b/tests/test_legacy/test_moe/test_grad_handler.py @@ -5,7 +5,7 @@ import torch.nn as nn import colossalai from colossalai.accelerator import get_accelerator -from colossalai.moe.manager import MOE_MANAGER +from colossalai.legacy.moe.manager import MOE_MANAGER # from colossalai.shardformer.layer.moe.layers import SparseMLP from colossalai.testing import assert_equal_in_group, rerun_if_address_is_in_use, spawn diff --git a/tests/test_moe/test_mixtral_layer.py b/tests/test_legacy/test_moe/test_mixtral_layer.py similarity index 100% rename from tests/test_moe/test_mixtral_layer.py rename to tests/test_legacy/test_moe/test_mixtral_layer.py diff --git a/tests/test_moe/test_moe_group.py b/tests/test_legacy/test_moe/test_moe_group.py similarity index 95% rename from tests/test_moe/test_moe_group.py rename to tests/test_legacy/test_moe/test_moe_group.py index 89baf1d37..68dac4828 100644 --- a/tests/test_moe/test_moe_group.py +++ b/tests/test_legacy/test_moe/test_moe_group.py @@ -4,8 +4,8 @@ import torch.nn as nn import colossalai from colossalai.accelerator import get_accelerator -from colossalai.moe.manager import MOE_MANAGER -from colossalai.moe.utils import sync_moe_model_param +from colossalai.legacy.moe.manager import MOE_MANAGER +from colossalai.legacy.moe.utils import sync_moe_model_param # from colossalai.shardformer.layer.moe import MLPExperts from colossalai.testing import assert_equal_in_group, rerun_if_address_is_in_use, spawn diff --git a/tests/test_moe/test_moe_hybrid_zero.py b/tests/test_legacy/test_moe/test_moe_hybrid_zero.py similarity index 98% rename from tests/test_moe/test_moe_hybrid_zero.py rename to tests/test_legacy/test_moe/test_moe_hybrid_zero.py index 513c4ebda..fdd6d956e 100644 --- a/tests/test_moe/test_moe_hybrid_zero.py +++ b/tests/test_legacy/test_moe/test_moe_hybrid_zero.py @@ -6,7 +6,7 @@ import colossalai from colossalai.booster import Booster from colossalai.booster.plugin import LowLevelZeroPlugin from colossalai.booster.plugin.low_level_zero_plugin import LowLevelZeroModel -from colossalai.moe.manager import MOE_MANAGER +from colossalai.legacy.moe.manager import MOE_MANAGER from colossalai.tensor.moe_tensor.api import is_moe_tensor from colossalai.testing import rerun_if_address_is_in_use, spawn from tests.test_moe.moe_utils import MoeModel diff --git a/tests/test_moe/test_moe_load_balance.py b/tests/test_legacy/test_moe/test_moe_load_balance.py similarity index 99% rename from tests/test_moe/test_moe_load_balance.py rename to tests/test_legacy/test_moe/test_moe_load_balance.py index ddd3ea368..adf2dbc1c 100644 --- a/tests/test_moe/test_moe_load_balance.py +++ b/tests/test_legacy/test_moe/test_moe_load_balance.py @@ -6,7 +6,7 @@ import colossalai from colossalai.booster import Booster from colossalai.booster.plugin import LowLevelZeroPlugin from colossalai.booster.plugin.low_level_zero_plugin import LowLevelZeroModel -from colossalai.moe.manager import MOE_MANAGER +from colossalai.legacy.moe.manager import MOE_MANAGER # from colossalai.shardformer.layer.moe import apply_load_balance from colossalai.tensor.moe_tensor.api import is_moe_tensor diff --git a/tests/test_moe/moe_utils.py b/tests/test_moe/moe_utils.py index e49edb6f4..55aee6fb0 100644 --- a/tests/test_moe/moe_utils.py +++ b/tests/test_moe/moe_utils.py @@ -1,139 +1,4 @@ import torch -import torch.distributed as dist -import torch.nn as nn -from torch.distributed import 
ProcessGroup - -from colossalai.booster.plugin.low_level_zero_plugin import LowLevelZeroModel -from colossalai.legacy.engine.gradient_handler._base_gradient_handler import BaseGradientHandler -from colossalai.legacy.engine.gradient_handler.utils import bucket_allreduce -from colossalai.legacy.registry import GRADIENT_HANDLER -from colossalai.moe.manager import MOE_MANAGER -from colossalai.moe.utils import get_moe_epsize_param_dict -from colossalai.tensor.moe_tensor.api import get_ep_group, get_ep_size, set_moe_tensor_ep_group - - -def delete_moe_info(model): - for _, param in model.named_parameters(): - if hasattr(param, "ep_group"): - delattr(param, "ep_group") - - -class MoeModel(nn.Module): - def __init__(self, ep_group: ProcessGroup = None): - super().__init__() - self.test_embed = nn.Linear(4, 16, bias=False) - self.w1 = torch.nn.Parameter(torch.randn(16, 8)) - if ep_group: - set_moe_tensor_ep_group(self.w1, ep_group) - - def forward(self, x): - x = self.test_embed(x) - x = torch.matmul(x, self.w1) - - return x - - -@GRADIENT_HANDLER.register_module -class MoeGradientHandler(BaseGradientHandler): - """A helper class to handle all-reduce operations in a data parallel group and - moe model parallel. A all-reduce collective communication will be operated in - :func:`handle_gradient` among a data parallel group. - For better performance, it bucketizes the gradients of all parameters that are - the same type to improve the efficiency of communication. - - Args: - model (Module): Model where the gradients accumulate. - optimizer (Optimizer): Optimizer for updating the parameters. - """ - - def __init__(self, model, optimizer=None): - super().__init__(model, optimizer) - - def handle_gradient(self): - """A method running an all-reduce operation in a data parallel group. 
-        Then running an all-reduce operation for all parameters in experts
-        across moe model parallel group
-        """
-        if dist.get_world_size() > 1:
-            epsize_param_dict = get_moe_epsize_param_dict(self._model)
-
-            # epsize is 1, indicating the params are replicated among processes in data parallelism
-            # use the ParallelMode.DATA to get data parallel group
-            # reduce gradients for all parameters in data parallelism
-            if 1 in epsize_param_dict:
-                bucket_allreduce(param_list=epsize_param_dict[1])
-
-            for ep_size in epsize_param_dict:
-                if ep_size != 1 and ep_size != MOE_MANAGER.world_size:
-                    bucket_allreduce(
-                        param_list=epsize_param_dict[ep_size], group=MOE_MANAGER.parallel_info_dict[ep_size].dp_group
-                    )
-
-
-def assert_not_equal_in_group(tensor, process_group=None):
-    # all gather tensors from different ranks
-    world_size = dist.get_world_size(process_group)
-    tensor_list = [torch.empty_like(tensor) for _ in range(world_size)]
-    dist.all_gather(tensor_list, tensor, group=process_group)
-
-    # check if they are equal one by one
-    for i in range(world_size - 1):
-        a = tensor_list[i]
-        b = tensor_list[i + 1]
-        assert not torch.allclose(a, b), (
-            f"expected tensors on rank {i} and {i + 1} not to be equal " f"but they are, {a} vs {b}"
-        )
-
-
-def run_fwd_bwd(model, data, label, criterion, optimizer, enable_autocast=False):
-    model.train()
-    with torch.cuda.amp.autocast(enabled=enable_autocast):
-        if criterion:
-            y = model(data)
-            loss = criterion(y, label)
-        else:
-            loss = model(data, label)
-        loss = loss.float()
-
-    if isinstance(model, LowLevelZeroModel):
-        optimizer.backward(loss)
-    else:
-        loss.backward()
-    return y
-
-
-def sync_local_from_ep(local_model, ep_model, assert_grad_flag: bool = False) -> None:
-    """Sync the parameters of tp model from ep model
-
-    Args:
-        local_model (MoeModule)
-        ep_model (MoeModule)
-    """
-    for (local_name, local_param), (ep_name, ep_param) in zip(
-        local_model.named_parameters(), ep_model.named_parameters()
-    ):
-        if "experts" not in local_name:
-            if assert_grad_flag:
-                assert torch.allclose(local_param, ep_param), f"local_param: {local_param}, ep_param: {ep_param}"
-                assert torch.allclose(local_param.grad, ep_param.grad)
-            else:
-                local_param.data.copy_(ep_param.data)
-            continue
-
-        # gather param from ep model
-        param_list = [torch.zeros_like(ep_param) for _ in range(get_ep_size(ep_param))]
-        dist.all_gather(param_list, ep_param, group=get_ep_group(ep_param))
-        all_param = torch.cat(param_list, dim=0)
-        if assert_grad_flag:
-            grad_list = [torch.zeros_like(ep_param) for _ in range(get_ep_size(ep_param))]
-            dist.all_gather(grad_list, ep_param.grad, group=get_ep_group(ep_param))
-            all_grad = torch.cat(grad_list, dim=0)
-
-        if assert_grad_flag:
-            assert torch.allclose(local_param, all_param)
-            assert torch.allclose(local_param.grad, all_grad)
-        else:
-            local_param.data.copy_(all_param.data)
 
 
 def loose_close(a, b, dtype: torch.dtype = torch.float32, name=""):
diff --git a/tests/test_moe/test_kernel.py b/tests/test_moe/test_kernel.py
index 28e6db441..f2c6d206f 100644
--- a/tests/test_moe/test_kernel.py
+++ b/tests/test_moe/test_kernel.py
@@ -4,9 +4,7 @@ import pytest
 import torch
 
 from colossalai.accelerator import get_accelerator
-
-# from colossalai.moe import SparseMLP
-from colossalai.moe._operation import MoeCombine, MoeDispatch, moe_cumsum
+from colossalai.moe.operators import MoeCombine, MoeDispatch, moe_cumsum
 
 NUM_EXPERTS = 4
 BATCH_SIZE = 4
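
Migration sketch (illustrative, not part of the recorded diff): the renames above keep the operator module in the active package under a new name (colossalai.moe._operation -> colossalai.moe.operators), move the MoE manager, utils, load balancing, and layer code under colossalai.legacy.moe, and empty colossalai/moe/__init__.py so MOE_MANAGER is no longer re-exported from colossalai.moe. The symbols below are examples taken from the hunks; any other code importing these modules would change the same way.

    # old paths, before this patch:
    # from colossalai.moe._operation import MoeCombine, MoeDispatch, moe_cumsum
    # from colossalai.moe.manager import MOE_MANAGER
    # from colossalai.moe.utils import skip_init

    # new paths, after this patch:
    from colossalai.moe.operators import MoeCombine, MoeDispatch, moe_cumsum  # module renamed
    from colossalai.legacy.moe.manager import MOE_MANAGER                     # moved to legacy
    from colossalai.legacy.moe.utils import skip_init                         # moved to legacy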