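"""Finetune OpenMoE with ColossalAI's MoeHybridParallelPlugin.

The script supports three parallel setups selected via --plugin: `ep` (pure expert parallelism),
`ep_zero` (expert parallelism plus an extra MoE data-parallel group; recommended for general use)
and `hybrid` (expert parallelism combined with pipeline parallelism). All plugins share the ZeRO
stage chosen with --zero_stage.

Illustrative launch command (an assumption, not taken from the original docs): any torchrun-style
launcher works because colossalai.launch_from_torch reads the environment variables that torchrun
sets. Replace `this_script.py` with the actual file name and adjust sizes to your cluster:

    torchrun --standalone --nproc_per_node 4 this_script.py \
        --model_name test --plugin ep_zero --extra_dp_size 2 --batch_size 1 --precision bf16
"""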
import argparse
import os
from functools import partial
from typing import Dict

import torch
import torch.distributed as dist
from datasets import load_dataset
from huggingface_hub import snapshot_download
from model.modeling_openmoe import OpenMoeForCausalLM, set_openmoe_args
from model.openmoe_policy import OpenMoeForCausalLMPolicy
from torch.utils.data import Dataset
from tqdm import tqdm
from transformers import T5Tokenizer
from transformers.models.llama import LlamaConfig

import colossalai
from colossalai.accelerator import get_accelerator
from colossalai.booster import Booster
from colossalai.booster.plugin.moe_hybrid_parallel_plugin import MoeHybridParallelPlugin
from colossalai.cluster import DistCoordinator
from colossalai.moe.utils import skip_init
from colossalai.nn.optimizer import HybridAdam
from colossalai.shardformer.layer.moe import apply_load_balance


def move_to_cuda(batch, device):
    return {k: v.to(device) for k, v in batch.items()}


def load_ckpt(repo_name: str, model: OpenMoeForCausalLM, booster: Booster):
    ckpt_path = snapshot_download(repo_name)
    # single ckpt
    if os.path.exists(os.path.join(ckpt_path, "pytorch_model.bin")):
        ckpt_path = os.path.join(ckpt_path, "pytorch_model.bin")
    # shard ckpt
    elif os.path.exists(os.path.join(ckpt_path, "pytorch_model.bin.index.json")):
        ckpt_path = os.path.join(ckpt_path, "pytorch_model.bin.index.json")
    else:
        raise ValueError(f"Invalid checkpoint path: {ckpt_path}")
    booster.load_model(model, ckpt_path)


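# Note on the expected data format (an assumption based on the default --dataset/--task_name): each raw
# sample should provide "prompt" and "completion" text fields. tokenize_data below concatenates them
# behind a leading "<pad>" token, tokenizes to a fixed max_length, and reuses a copy of input_ids as the
# causal-LM labels.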
def tokenize_data(batch, tokenizer: T5Tokenizer, max_length: int) -> Dict:
    texts = ["<pad>" + sample["prompt"] + sample["completion"] for sample in batch]
    data = tokenizer(
        texts,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=max_length,
        add_special_tokens=False,
    )
    data = {k: v.cuda() for k, v in data.items()}
    data["labels"] = data["input_ids"].clone()
    return data


class RandomDataset(Dataset):
    def __init__(self, num_samples: int = 1000, max_length: int = 2048, vocab_size: int = 32000, tokenizer=None):
        self.num_samples = num_samples
        self.max_length = max_length
        self.input_ids = torch.randint(
            0, vocab_size, (num_samples, max_length), device=get_accelerator().get_current_device()
        )
        self.attention_mask = torch.ones_like(self.input_ids)

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        return {
            "input_ids": self.input_ids[idx],
            "attention_mask": self.attention_mask[idx],
            "labels": self.input_ids[idx],
        }


def parse_args():
    # basic settings
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        type=str,
        default="base",
        choices=["base", "8b", "test"],
        help="OpenMoe model size, mapped to the hpcai-tech/openmoe-<name> checkpoint on huggingface.co/models; `test` builds a tiny randomly initialized config.",
    )
    parser.add_argument(
        "--plugin",
        type=str,
        default="hybrid",
        choices=["ep", "ep_zero", "hybrid"],
        help="Parallel method. ep_zero is recommended for general cases; ep provides the least memory consumption and hybrid suits large-scale training.",
    )
    parser.add_argument(
        "--output_path",
        type=str,
        default="./outputs",
        help="Path to save the model after finetuning.",
    )
parser.add_argument("--num_epoch", type=int, default=1, help="Number of epochs.")
|
|
parser.add_argument(
|
|
"--batch_size",
|
|
type=int,
|
|
default=1,
|
|
help="Batch size (per dp group) for the training dataloader.",
|
|
)
|
|
parser.add_argument(
|
|
"--save_interval",
|
|
type=int,
|
|
default=1000,
|
|
help=" The interval (steps) of saving checkpoints.",
|
|
)
|
|
parser.add_argument(
|
|
"--precision",
|
|
type=str,
|
|
default="bf16",
|
|
choices=["fp32", "bf16", "fp16"],
|
|
help="The mixed precision training.",
|
|
)
|
|
parser.add_argument("--max_length", type=int, default=2048, help="Max sequence length.")
|
|
parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
|
|
parser.add_argument(
|
|
"--dataset",
|
|
type=str,
|
|
default="yizhongw/self_instruct",
|
|
help="dataset name from `datasets` repo.",
|
|
)
|
|
parser.add_argument(
|
|
"--task_name",
|
|
type=str,
|
|
default="super_natural_instructions",
|
|
help="task of corresponding dataset.",
|
|
)
|
|
|
|
    # optim
    parser.add_argument("--lr", type=float, default=1e-5, help="Learning rate.")
    parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")

    # zero stage for all plugins
    parser.add_argument("--zero_stage", type=int, default=2, help="ZeRO stage.")
    # ep_zero plugin
    parser.add_argument(
        "--extra_dp_size", type=int, default=1, help="MoE DP size for the ep_zero plugin. Recommended to be 2 or 4."
    )
    # hybrid plugin
    parser.add_argument("--pp_size", type=int, default=2, help="Pipeline parallel size for the hybrid plugin.")
    parser.add_argument("--dp_size", type=int, default=1, help="Data parallel size for the hybrid plugin.")
    parser.add_argument("--ep_size", type=int, default=2, help="Expert parallel size, used by the ep and hybrid plugins.")
    parser.add_argument("--microbatch_size", type=int, default=1, help="Microbatch size for pipeline parallelism in the hybrid plugin.")

    # kernel
    parser.add_argument(
        "--use_kernel",
        action="store_true",
        help="Use kernel optimizations. Requires flash attention and triton to enable all kernel optimizations; skipped if they are not installed.",
    )
    parser.add_argument(
        "--use_layernorm_kernel",
        action="store_true",
        help="Use the layernorm kernel. Requires apex; raises an error if it is not installed.",
    )

    # loss
    parser.add_argument(
        "--router_aux_loss_factor",
        type=float,
        default=0.01,
        help="MoE router auxiliary (load-balancing) loss factor. See the ST-MoE paper for details.",
    )
    parser.add_argument(
        "--router_z_loss_factor",
        type=float,
        default=0.0001,
        help="MoE router z-loss factor. See the ST-MoE paper for details.",
    )
    parser.add_argument("--label_smoothing", type=float, default=0.0, help="Label smoothing.")
    parser.add_argument(
        "--z_loss_factor", type=float, default=0.0001, help="Z-loss factor applied to the final output logits."
    )

    # load balance
    parser.add_argument(
        "--load_balance", action="store_true", help="Enable expert load balancing. Defaults to False; recommended to enable."
    )
    parser.add_argument("--load_balance_interval", type=int, default=1000, help="Expert load balance interval (steps).")
    # communication overlap
    parser.add_argument(
        "--comm_overlap",
        action="store_true",
        help="Use communication overlap for MoE. Recommended to enable for multi-node training.",
    )
    # hierarchical all-to-all
    parser.add_argument(
        "--hierarchical_alltoall",
        action="store_true",
        help="Use hierarchical all-to-all for MoE. Recommended to enable for multi-node training.",
    )

    args = parser.parse_args()
    return args


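# Illustrative flag combinations for the three plugins (assumptions, not prescriptions; the chosen sizes
# must divide the launched world size):
#   ep:      --plugin ep --ep_size <num_gpus>            # pure expert parallelism, pp_size is fixed to 1
#   ep_zero: --plugin ep_zero --extra_dp_size 2          # ep_size becomes world_size // extra_dp_size
#   hybrid:  --plugin hybrid --pp_size 2 --ep_size 2 --microbatch_size 1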
def main():
    args = parse_args()

    # Launch ColossalAI
    colossalai.launch_from_torch(seed=args.seed)
    coordinator = DistCoordinator()
    test_mode = args.model_name == "test"

    # Set plugin
    booster_kwargs = {}
    hybrid_dict = {
        "tp_size": 1,
        "custom_policy": OpenMoeForCausalLMPolicy(),
        "enable_fused_normalization": args.use_layernorm_kernel,
        "enable_jit_fused": args.use_kernel,
        "precision": args.precision,
        "zero_stage": args.zero_stage,
    }
    if args.plugin == "ep":
        dp_size = dist.get_world_size()
        plugin = MoeHybridParallelPlugin(
            pp_size=1,
            ep_size=args.ep_size,
            **hybrid_dict,
        )
        # MOE_MANAGER.setup(
        #     parallel="EP",
        #     max_ep_size=dp_size,
        #     **mgr_dict,
        # )
    elif args.plugin == "ep_zero":
        dp_size = dist.get_world_size()
        use_ep_inside = False
        # keep a MoE data-parallel group of size extra_dp_size; the expert-parallel group spans the remaining ranks
        plugin = MoeHybridParallelPlugin(
            pp_size=1,
            ep_size=dp_size // args.extra_dp_size,
            use_ep_inside=use_ep_inside,
            **hybrid_dict,
        )
        # MOE_MANAGER.setup(
        #     parallel="EP",
        #     max_ep_size=dp_size // args.extra_dp_size,
        #     use_ep_inside=use_ep_inside,
        #     **mgr_dict,
        # )
    elif args.plugin == "hybrid":
        dp_size = dist.get_world_size() // args.pp_size
        plugin = MoeHybridParallelPlugin(
            pp_size=args.pp_size,
            ep_size=args.ep_size,
            microbatch_size=args.microbatch_size,
            **hybrid_dict,
        )
        # MOE_MANAGER.setup(
        #     parallel="EP",
        #     mode="fixed",
        #     fixed_dp_size=args.dp_size,
        #     fixed_ep_size=args.ep_size,
        #     fixed_pp_size=args.pp_size,
        #     **mgr_dict,
        # )
    else:
        raise ValueError(f"Invalid plugin {args.plugin}")
    coordinator.print_on_master(f"Set plugin as {plugin.__class__.__name__}")

    # Build OpenMoe model
    if test_mode:
        config = LlamaConfig.from_pretrained("hpcai-tech/openmoe-base")
        config.hidden_size = 128
        config.intermediate_size = 256
        config.vocab_size = 32000
    else:
        repo_name = "hpcai-tech/openmoe-" + args.model_name
        config = LlamaConfig.from_pretrained(repo_name)
    set_openmoe_args(
        config,
        num_experts=config.num_experts,
        moe_layer_interval=config.moe_layer_interval,
        router_aux_loss_factor=args.router_aux_loss_factor,
        router_z_loss_factor=args.router_z_loss_factor,
        z_loss_factor=args.z_loss_factor,
        enable_load_balance=args.load_balance,
        enable_comm_overlap=args.comm_overlap,
        enable_hierarchical_alltoall=args.hierarchical_alltoall,
        enable_kernel=args.use_kernel,
    )
    with skip_init():
        model = OpenMoeForCausalLM(config)
    coordinator.print_on_master(f"Finish init model with config:\n{config}")

    # Enable gradient checkpointing
    model.gradient_checkpointing_enable()

    # Prepare tokenizer and dataloader
    tokenizer = T5Tokenizer.from_pretrained("google/umt5-small")
    if test_mode:
        dataset = RandomDataset(num_samples=20, tokenizer=tokenizer)
        collate_fn = None
    else:
        dataset = load_dataset(args.dataset, args.task_name)
        dataset = dataset["train"]
        collate_fn = partial(tokenize_data, tokenizer=tokenizer, max_length=args.max_length)
    dataloader = plugin.prepare_dataloader(
        dataset, batch_size=args.batch_size, shuffle=True, drop_last=True, collate_fn=collate_fn
    )

    # Set optimizer
    optimizer = HybridAdam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    # Set booster
    booster = Booster(plugin=plugin, **booster_kwargs)
    if not test_mode:
        load_ckpt(repo_name, model, booster)
    model, optimizer, _, dataloader, _ = booster.boost(model=model, optimizer=optimizer, dataloader=dataloader)
    use_pipeline = isinstance(booster.plugin, MoeHybridParallelPlugin) and booster.plugin.pp_size > 1
    is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage()
    coordinator.print_on_master("Finish init booster")

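    # Two execution paths follow: when pp_size > 1, booster.execute_pipeline() drives forward and backward
    # across the pipeline stages and only the last stage can read the loss; otherwise the model is called
    # directly and booster.backward() performs the plugin-aware backward pass (e.g. ZeRO gradient handling
    # and mixed-precision scaling).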
    # Start finetuning
    coordinator.print_on_master("Start finetuning")
    for epoch in range(args.num_epoch):
        model.train()
        train_dataloader_iter = iter(dataloader)
        total_len = len(train_dataloader_iter)
        with tqdm(
            range(total_len),
            desc=f"Epoch [{epoch + 1}/{args.num_epoch}]",
            disable=not coordinator.is_master(),
        ) as pbar:
            for step in pbar:
                if use_pipeline:
                    # Forward pass
                    outputs = booster.execute_pipeline(
                        train_dataloader_iter,
                        model,
                        lambda x, y: x.loss,
                        optimizer,
                        return_loss=True,
                    )
                    # Backward and optimize
                    if is_pp_last_stage:
                        loss = outputs["loss"]
                        pbar.set_postfix({"loss": loss.item()})
                else:
                    # Forward pass
                    data = next(train_dataloader_iter)
                    data = move_to_cuda(data, torch.cuda.current_device())
                    outputs = model(**data)
                    loss = outputs["loss"]
                    # Backward
                    booster.backward(loss, optimizer)
                    pbar.set_postfix({"loss": loss.item()})

                optimizer.step()
                optimizer.zero_grad()

                # Apply load balance
                if (
                    args.load_balance
                    and args.load_balance_interval > 0
                    and (step + 1) % args.load_balance_interval == 0
                ):
                    coordinator.print_on_master("Apply load balance")
                    apply_load_balance(model, optimizer)
                # save checkpoint
                if (step + 1) % args.save_interval == 0:
                    coordinator.print_on_master(f"Saving model checkpoint to {args.output_path}")
                    booster.save_model(model, args.output_path, shard=True)

        # save checkpoint at the end of each epoch
        booster.save_model(model, args.output_path, shard=True)
        coordinator.print_on_master(f"Saving model checkpoint to {args.output_path}")

    # Finish training
    coordinator.print_on_master("Finish training")


if __name__ == "__main__":
    main()