mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-08 12:30:42 +00:00)
[moe] merge moe into main (#4978)
* update moe module
* support openmoe
@@ -3,66 +3,80 @@ import torch.distributed as dist
 import torch.nn as nn
 
 import colossalai
-from colossalai.context.moe_context import MOE_CONTEXT
-from colossalai.nn.layer.moe import Experts
+from colossalai.moe.experts import MLPExperts
+from colossalai.moe.manager import MOE_MANAGER
+from colossalai.moe.utils import sync_moe_model_param
 from colossalai.testing import assert_equal_in_group, rerun_if_address_is_in_use, spawn
 from colossalai.utils import get_current_device
-from colossalai.utils.moe import sync_moe_model_param
 
-D_MODEL = 4
-D_FF = 8
-CONFIG = dict()
+HIDDEN_SIZE = 4
+INTERMEDIATE_SIZE = 8
 
 
-def run_test(rank, world_size, port):
-    world_size = 4
-    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
-    expert_module = nn.Linear
-    expert_factor = dict(in_features=D_MODEL, out_features=D_FF, device=get_current_device())
-
-    MOE_CONTEXT.setup(42)  # MOE environment initialization
-    exp0 = Experts(expert_module, 1, **expert_factor)
-    exp1 = Experts(expert_module, 2, **expert_factor)
-    exp2 = Experts(expert_module, 4, **expert_factor)
-    exp3 = Experts(expert_module, 8, **expert_factor)
-
-    assert exp0.num_local_experts == 1
-    assert exp1.num_local_experts == 1
-    assert exp2.num_local_experts == 1
-    assert exp3.num_local_experts == 2
-    # experts deployment passed
+def run_moe_init(expert_parallel):
+    MOE_MANAGER.__init__()
+    MOE_MANAGER.setup(seed=42, parallel=expert_parallel)
+    expert_args = dict(
+        hidden_size=HIDDEN_SIZE,
+        intermediate_size=INTERMEDIATE_SIZE,
+        expert_parallel=expert_parallel,
+    )
+    exp0 = MLPExperts(1, **expert_args)
+    exp1 = MLPExperts(2, **expert_args)
+    exp2 = MLPExperts(4, **expert_args)
+
+    if expert_parallel == "EP":
+        assert exp0.num_local_experts == 1
+        assert exp1.num_local_experts == 1
+        assert exp2.num_local_experts == 2
+    else:
+        assert exp0.num_local_experts == 1
+        assert exp1.num_local_experts == 2
+        assert exp2.num_local_experts == 4
 
-    parallel_info_dict = MOE_CONTEXT.parallel_info_dict
+    parallel_info_dict = MOE_MANAGER.parallel_info_dict
     rank = dist.get_rank()
 
-    assert len(parallel_info_dict) == 3
-    assert dist.get_rank(parallel_info_dict[4].ep_group) == rank
+    # group creation assert
+    assert len(parallel_info_dict) == 2
     assert dist.get_rank(parallel_info_dict[2].ep_group) == rank % 2
     assert dist.get_rank(parallel_info_dict[1].ep_group) == 0
 
-    assert dist.get_rank(parallel_info_dict[4].dp_group) == 0
     assert dist.get_rank(parallel_info_dict[2].dp_group) == rank // 2
     assert dist.get_rank(parallel_info_dict[1].dp_group) == rank
     # group creation passed
 
-    model = nn.ModuleList([exp0, exp1, exp2, exp3])
+    model = nn.ModuleList([exp0, exp1, exp2])
     model = model.to(get_current_device())
     sync_moe_model_param(model)
 
-    assert_equal_in_group(exp0.experts[0].weight.data, parallel_info_dict[1].dp_group)
-    assert_equal_in_group(exp0.experts[0].bias.data, parallel_info_dict[1].dp_group)
-    # MOE experts layout success when ep_size = 1
+    assert_equal_in_group(exp0.wi.data, parallel_info_dict[1].dp_group)
+    assert_equal_in_group(exp0.wo.data, parallel_info_dict[1].dp_group)
 
-    assert_equal_in_group(exp1.experts[0].weight.data, parallel_info_dict[2].dp_group)
-    assert_equal_in_group(exp1.experts[0].bias.data, parallel_info_dict[2].dp_group)
-    # MOE experts layout success when ep_size = 2
+    assert_equal_in_group(exp1.wi.data, parallel_info_dict[2].dp_group)
+    assert_equal_in_group(exp1.wo.data, parallel_info_dict[2].dp_group)
+
+
+def _run_test(rank, world_size, port, expert_parallel):
+    colossalai.launch(
+        config=dict(),
+        rank=rank,
+        world_size=world_size,
+        host="localhost",
+        port=port,
+        backend="nccl",
+    )
+    run_moe_init(expert_parallel)
 
 
 @pytest.mark.dist
+@pytest.mark.parametrize("expert_parallel", ["EP", "TP"])
 @rerun_if_address_is_in_use()
-def test_moe_initialization():
-    spawn(run_test, 4)
+def test_moe_initialization(expert_parallel):
+    spawn(_run_test, 2, expert_parallel=expert_parallel)
 
 
 if __name__ == "__main__":
-    test_moe_initialization()
+    test_moe_initialization("EP")
+    test_moe_initialization("TP")
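
The rewritten test pins down the expert-placement rule for both parallel modes: under "EP" whole experts are sharded across ranks (with replication when there are fewer experts than ranks), while under "TP" every rank keeps all experts and shards within their weights. A minimal sketch of that arithmetic, matching the assertions above for the test's world size of 2 (plain Python, not the MLPExperts implementation; the helper name is ours):

def num_local_experts(num_experts: int, parallel: str, world_size: int = 2) -> int:
    # Experts kept on each rank for a given parallel mode.
    if parallel == "EP":
        # EP shards whole experts across ranks; when there are fewer
        # experts than ranks, experts are replicated so every rank
        # still holds at least one.
        return max(1, num_experts // world_size)
    # TP keeps every expert on every rank and splits each expert's
    # weight matrices instead.
    return num_experts

for n in (1, 2, 4):
    print(f"{n} experts -> EP: {num_local_experts(n, 'EP')}, TP: {num_local_experts(n, 'TP')}")
# 1 experts -> EP: 1, TP: 1
# 2 experts -> EP: 1, TP: 2
# 4 experts -> EP: 2, TP: 4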
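
The group assertions likewise fix where each rank sits inside the per-ep_size process groups recorded in parallel_info_dict: within an expert-parallel group a rank lands at rank % ep_size, and within the matching data-parallel group at rank // ep_size. A small sketch of that index arithmetic, again illustrative rather than the MOE_MANAGER implementation (group_coords is a hypothetical helper):

def group_coords(rank: int, ep_size: int):
    # (position inside ep_group, position inside dp_group) for a global
    # rank, assuming consecutive ranks fill an expert-parallel group first.
    return rank % ep_size, rank // ep_size

world_size = 2
for ep_size in (1, 2):
    for rank in range(world_size):
        ep_rank, dp_rank = group_coords(rank, ep_size)
        print(f"ep_size={ep_size} rank={rank}: ep_rank={ep_rank}, dp_rank={dp_rank}")
# With ep_size=1 every rank is alone in its ep_group (ep_rank 0) and the
# dp_group spans all ranks; with ep_size=2 the roles are reversed, which
# is exactly what the dist.get_rank(...) assertions check.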