Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-05-01 21:26:42 +00:00
[moe] removed openmoe-coupled code and rectify mixstral code (#5471)
156 lines
5.7 KiB
Python
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed import ProcessGroup
from torch.testing import assert_close

from colossalai.booster.plugin.low_level_zero_plugin import LowLevelZeroModel
from colossalai.legacy.engine.gradient_handler._base_gradient_handler import BaseGradientHandler
from colossalai.legacy.engine.gradient_handler.utils import bucket_allreduce
from colossalai.legacy.registry import GRADIENT_HANDLER
from colossalai.moe.manager import MOE_MANAGER
from colossalai.moe.utils import get_moe_epsize_param_dict

# from colossalai.shardformer.layer.moe import SparseMLP
from colossalai.tensor.moe_tensor.api import get_ep_group, get_ep_size, set_moe_tensor_ep_group

def delete_moe_info(model):
    """Remove the expert-parallel process-group attribute from every parameter of ``model``."""
    for _, param in model.named_parameters():
        if hasattr(param, "ep_group"):
            delattr(param, "ep_group")

class MoeModel(nn.Module):
    """A tiny two-layer test model whose second weight can be tagged with an expert-parallel group."""

    def __init__(self, ep_group: ProcessGroup = None):
        super().__init__()
        self.test_embed = nn.Linear(4, 16, bias=False)
        self.w1 = torch.nn.Parameter(torch.randn(16, 8))
        if ep_group:
            set_moe_tensor_ep_group(self.w1, ep_group)

    def forward(self, x):
        x = self.test_embed(x)
        x = torch.matmul(x, self.w1)
        return x

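# Hedged usage sketch (not part of the original utilities): a single-process smoke
# test of the toy MoeModel above. Input features are 4, `test_embed` maps 4 -> 16,
# and the matmul with `w1` (16 x 8) yields 8 output features.
def _example_moe_model_forward():
    model = MoeModel(ep_group=None)
    x = torch.randn(2, 4)
    y = model(x)
    assert y.shape == (2, 8)
    return y
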
@GRADIENT_HANDLER.register_module
class MoeGradientHandler(BaseGradientHandler):
    """A helper class to handle all-reduce operations in the data parallel group and
    the MoE model parallel group. An all-reduce collective communication is performed in
    :func:`handle_gradient` among a data parallel group.
    For better performance, it bucketizes the gradients of all parameters of the
    same type to improve the efficiency of communication.

    Args:
        model (Module): Model where the gradients accumulate.
        optimizer (Optimizer): Optimizer for updating the parameters.
    """

    def __init__(self, model, optimizer=None):
        super().__init__(model, optimizer)

    def handle_gradient(self):
        """Run an all-reduce operation in the data parallel group, then run an
        all-reduce operation for all expert parameters across the MoE model
        parallel group.
        """
        if dist.get_world_size() > 1:
            epsize_param_dict = get_moe_epsize_param_dict(self._model)

            # an ep_size of 1 indicates the params are replicated among processes in data parallelism;
            # reduce their gradients over the data parallel group
            if 1 in epsize_param_dict:
                bucket_allreduce(param_list=epsize_param_dict[1])

            for ep_size in epsize_param_dict:
                if ep_size != 1 and ep_size != MOE_MANAGER.world_size:
                    bucket_allreduce(
                        param_list=epsize_param_dict[ep_size], group=MOE_MANAGER.parallel_info_dict[ep_size].dp_group
                    )

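# Hedged usage sketch (assumptions: torch.distributed and MOE_MANAGER are already
# initialized by the test harness; model, data, label and criterion are placeholders).
# A gradient handler like the one above is typically invoked right after backward.
def _example_gradient_handler_step(model, data, label, criterion):
    handler = MoeGradientHandler(model)
    loss = criterion(model(data), label)
    loss.backward()
    # Bucketized all-reduce of replicated-parameter gradients across the data parallel
    # group, plus expert gradients across their MoE-aware data parallel sub-groups.
    handler.handle_gradient()
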
def assert_not_equal_in_group(tensor, process_group=None):
    # all-gather tensors from different ranks
    world_size = dist.get_world_size(process_group)
    tensor_list = [torch.empty_like(tensor) for _ in range(world_size)]
    dist.all_gather(tensor_list, tensor, group=process_group)

    # check that no two adjacent ranks hold identical tensors
    for i in range(world_size - 1):
        a = tensor_list[i]
        b = tensor_list[i + 1]
        assert not torch.allclose(a, b), (
            f"expected tensors on rank {i} and {i + 1} not to be equal but they are, {a} vs {b}"
        )

def run_fwd_bwd(model, data, label, criterion, optimizer, enable_autocast=False):
    """Run a single forward/backward pass, using the ZeRO optimizer's backward for LowLevelZeroModel."""
    model.train()
    with torch.cuda.amp.autocast(enabled=enable_autocast):
        if criterion:
            y = model(data)
            loss = criterion(y, label)
        else:
            # the model computes the loss directly when no criterion is given
            y = loss = model(data, label)
        loss = loss.float()

    if isinstance(model, LowLevelZeroModel):
        optimizer.backward(loss)
    else:
        loss.backward()
    return y

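# Hedged usage sketch (assumes a CUDA device; `torch.nn.functional.cross_entropy`
# stands in for whatever criterion a real test would pass). For a LowLevelZeroModel
# the wrapped ZeRO optimizer's `backward` is used instead of `loss.backward()`.
def _example_run_fwd_bwd():
    model = MoeModel().cuda()
    data = torch.randn(2, 4, device="cuda")
    label = torch.randint(0, 8, (2,), device="cuda")
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
    out = run_fwd_bwd(model, data, label, torch.nn.functional.cross_entropy, optimizer)
    optimizer.step()
    return out
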
def sync_local_from_ep(local_model, ep_model, assert_grad_flag: bool = False) -> None:
    """Sync the parameters of the local model from the ep model.

    Args:
        local_model (MoeModule): the unsharded replica to copy into (or check against).
        ep_model (MoeModule): the expert-parallel model whose parameters are gathered.
        assert_grad_flag (bool): if True, assert that params and grads match instead of copying.
    """
    for (local_name, local_param), (ep_name, ep_param) in zip(
        local_model.named_parameters(), ep_model.named_parameters()
    ):
        if "experts" not in local_name:
            if assert_grad_flag:
                assert torch.allclose(local_param, ep_param), f"local_param: {local_param}, ep_param: {ep_param}"
                assert torch.allclose(local_param.grad, ep_param.grad)
            else:
                local_param.data.copy_(ep_param.data)
            continue

        # gather the full expert parameter (and optionally its gradient) from the ep model
        param_list = [torch.zeros_like(ep_param) for _ in range(get_ep_size(ep_param))]
        dist.all_gather(param_list, ep_param, group=get_ep_group(ep_param))
        all_param = torch.cat(param_list, dim=0)
        if assert_grad_flag:
            grad_list = [torch.zeros_like(ep_param) for _ in range(get_ep_size(ep_param))]
            dist.all_gather(grad_list, ep_param.grad, group=get_ep_group(ep_param))
            all_grad = torch.cat(grad_list, dim=0)

        if assert_grad_flag:
            assert torch.allclose(local_param, all_param)
            assert torch.allclose(local_param.grad, all_grad)
        else:
            local_param.data.copy_(all_param.data)

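# Hedged sketch of the comparison pattern `sync_local_from_ep` supports: copy the
# expert-parallel model's weights into an unsharded local replica, run the same batch
# through both, then call it again with assert_grad_flag=True so the gathered expert
# gradients are checked against the local ones. All arguments are placeholders that a
# real test would construct (models, optimizers, data, criterion).
def _example_compare_local_and_ep(local_model, ep_model, data, label, criterion, local_optim, ep_optim):
    sync_local_from_ep(local_model, ep_model)  # copy params: ep -> local
    run_fwd_bwd(local_model, data, label, criterion, local_optim)
    run_fwd_bwd(ep_model, data, label, criterion, ep_optim)
    sync_local_from_ep(local_model, ep_model, assert_grad_flag=True)  # compare params and grads
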
def loose_close(a, b, dtype: torch.dtype = torch.float32):
    """Compare two tensors with tolerances loosened according to ``dtype``."""
    rtol = None
    atol = None
    if dtype is torch.float16:
        rtol = 5e-2
        atol = 5e-4
    elif dtype is torch.bfloat16:
        rtol = 4e-3
        atol = 4e-3

    a = a.detach().to(dtype)
    b = b.detach().to(dtype).to(a.device)

    assert_close(a, b, rtol=rtol, atol=atol)

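# Hedged usage sketch: `loose_close` casts both tensors to the given dtype and applies
# dtype-appropriate tolerances, e.g. when checking a half-precision result against an
# fp32 baseline. The fp16 round trip below is an illustrative stand-in for a real comparison.
def _example_loose_close():
    ref = torch.randn(4, 8)
    approx = ref.half().float()  # simulate an fp16 round trip
    loose_close(approx, ref, dtype=torch.float16)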