mirror of https://github.com/hpcaitech/ColossalAI.git
[moe] init mixtral impl
@@ -1,13 +1,22 @@
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.testing import assert_close

from colossalai.booster.plugin.low_level_zero_plugin import LowLevelZeroModel
from colossalai.legacy.engine.gradient_handler._base_gradient_handler import BaseGradientHandler
from colossalai.legacy.engine.gradient_handler.utils import bucket_allreduce
from colossalai.legacy.registry import GRADIENT_HANDLER
from colossalai.moe import SparseMLP
from colossalai.moe.manager import MOE_MANAGER
from colossalai.moe.utils import get_moe_epsize_param_dict
from colossalai.tensor.moe_tensor.api import get_ep_group, get_ep_size


def delete_moe_info(model):
    for _, param in model.named_parameters():
        if hasattr(param, "moe_info"):
            delattr(param, "moe_info")


class MoeModel(nn.Module):

@@ -85,6 +94,74 @@ def assert_not_equal_in_group(tensor, process_group=None):
    for i in range(world_size - 1):
        a = tensor_list[i]
        b = tensor_list[i + 1]
        assert not torch.allclose(a, b), (
            f"expected tensors on rank {i} and {i + 1} not to be equal " f"but they are, {a} vs {b}"
        )


def run_fwd_bwd(model, data, label, criterion, optimizer, enable_autocast=False):
    model.train()
    with torch.cuda.amp.autocast(enabled=enable_autocast):
        if criterion:
            y = model(data)
            loss = criterion(y, label)
        else:
            # the model computes its own loss; keep a handle on the output so it can be returned
            y = model(data, label)
            loss = y
        loss = loss.float()

    if isinstance(model, LowLevelZeroModel):
        # ZeRO-wrapped models must run backward through their optimizer
        optimizer.backward(loss)
    else:
        loss.backward()
    return y


def sync_local_from_ep(local_model: SparseMLP, ep_model: SparseMLP, assert_grad_flag: bool = False) -> None:
    """Sync the parameters of the local (non-parallel) model from the expert-parallel (EP) model.

    Args:
        local_model (SparseMLP): model replicated on every rank
        ep_model (SparseMLP): model whose experts are sharded across the EP group
        assert_grad_flag (bool): if True, check that parameters and gradients already match
            instead of copying
    """
    for (local_name, local_param), (ep_name, ep_param) in zip(
        local_model.named_parameters(), ep_model.named_parameters()
    ):
        assert local_name in ep_name, f"{local_name} != {ep_name}"
        if "experts" not in local_name:
            # non-expert parameters are replicated, so they can be compared or copied directly
            if assert_grad_flag:
                assert torch.allclose(local_param, ep_param), f"local_param: {local_param}, ep_param: {ep_param}"
                assert torch.allclose(local_param.grad, ep_param.grad)
            else:
                local_param.data.copy_(ep_param.data)
            continue

        # expert parameters are sharded: gather the full tensor from the EP group
        param_list = [torch.zeros_like(ep_param) for _ in range(get_ep_size(ep_param))]
        dist.all_gather(param_list, ep_param, group=get_ep_group(ep_param))
        all_param = torch.cat(param_list, dim=0)
        if assert_grad_flag:
            grad_list = [torch.zeros_like(ep_param) for _ in range(get_ep_size(ep_param))]
            dist.all_gather(grad_list, ep_param.grad, group=get_ep_group(ep_param))
            all_grad = torch.cat(grad_list, dim=0)

        if assert_grad_flag:
            assert torch.allclose(local_param, all_param)
            assert torch.allclose(local_param.grad, all_grad)
        else:
            local_param.data.copy_(all_param.data)


def loose_close(a, b, dtype: torch.dtype = torch.float32):
    rtol = None
    atol = None
    if dtype is torch.float16:
        rtol = 5e-2
        atol = 5e-4
    elif dtype is torch.bfloat16:
        rtol = 4e-3
        atol = 4e-3

    a = a.detach().to(dtype)
    b = b.detach().to(dtype).to(a.device)

    assert_close(a, b, rtol=rtol, atol=atol)
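
A minimal usage sketch of the helpers above (not part of the committed file), assuming a single process and plain nn.Linear stand-ins instead of a real local/EP SparseMLP pair, which would require an initialized distributed environment; the model shapes and names here are illustrative only.

if __name__ == "__main__":
    torch.manual_seed(0)
    model_a = nn.Linear(8, 4)
    model_b = nn.Linear(8, 4)
    model_b.load_state_dict(model_a.state_dict())  # start from identical weights

    data = torch.randn(2, 8)
    label = torch.randint(0, 4, (2,))
    criterion = nn.CrossEntropyLoss()

    # one forward/backward pass per copy; the optimizer argument is only used for ZeRO models
    out_a = run_fwd_bwd(model_a, data, label, criterion, optimizer=None)
    out_b = run_fwd_bwd(model_b, data, label, criterion, optimizer=None)

    # identical models on identical data should agree within the loose fp16 tolerances
    loose_close(out_a, out_b, dtype=torch.float16)
    loose_close(model_a.weight.grad, model_b.weight.grad, dtype=torch.float16)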