diff --git a/colossalai/booster/plugin/hybrid_parallel_plugin.py b/colossalai/booster/plugin/hybrid_parallel_plugin.py index 14d9935f3..f4f9f7a50 100644 --- a/colossalai/booster/plugin/hybrid_parallel_plugin.py +++ b/colossalai/booster/plugin/hybrid_parallel_plugin.py @@ -1,7 +1,9 @@ import ctypes import random import warnings +from collections import defaultdict from contextlib import contextmanager +from copy import deepcopy from functools import partial from types import MethodType from typing import Any, Callable, Dict, Iterator, List, Optional, OrderedDict, Tuple, Union @@ -24,6 +26,8 @@ from colossalai.amp.naive_amp.mixed_precision_optimizer import MixedPrecisionOpt from colossalai.checkpoint_io import CheckpointIO, HybridParallelCheckpointIO from colossalai.cluster import ProcessGroupMesh from colossalai.interface import AMPModelMixin, ModelWrapper, OptimizerWrapper +from colossalai.interface.optimizer import DistributedOptim +from colossalai.nn.optimizer import DistGaloreAwamW from colossalai.pipeline.schedule import InterleavedSchedule, OneForwardOneBackwardSchedule from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer import GradientCheckpointConfig, ShardConfig, ShardFormer @@ -1171,6 +1175,15 @@ class HybridParallelPlugin(PipelinePluginBase): lr_scheduler: Optional[LRScheduler] = None, ) -> Tuple[Module, OptimizerWrapper, Callable, DataLoader, LRScheduler]: param_info = get_param_info(optimizer) + + # TODO: Support Galore + ZeRO + zero_stage = self.zero_stage + zero_config = deepcopy(self.zero_config) + if isinstance(optimizer, DistGaloreAwamW) and zero_stage > 0 and self.dp_size > 0: + warnings.warn("Galore is currently only supported with Tensor Parallel and vanilla Data Parallel. Disabling ZeRO.") + zero_config["partition_grad"] = False + zero_stage = 0 + if not isinstance(model, ModelWrapper): use_ddp = (self.dp_size > 1 and self.pp_size == 1 and self.zero_stage == 0) or ( self.dp_size == 1 @@ -1194,7 +1207,8 @@ class HybridParallelPlugin(PipelinePluginBase): custom_policy=self.custom_policy, ) if optimizer is not None and not isinstance(optimizer, OptimizerWrapper): - if self.zero_stage == 0: + if zero_stage == 0: + is_zero = False if self.precision in ["fp16", "bf16"]: optimizer = HybridParallelAMPOptimizer( optimizer, @@ -1218,11 +1232,11 @@ class HybridParallelPlugin(PipelinePluginBase): tp_process_group=self.tp_group, ) else: - zero_dp_size = dist.get_world_size(dp_group) - if zero_dp_size == 1: + is_zero = self.dp_size > 1 + if self.dp_size == 1: warnings.warn( "Using the ZeRO optimizer when data parallel size is 1 may introduce unnecessary overhead. " - "If you are not intended to use cpu_offload, please consider set zero_stage=0." + "If you do not intend to use cpu_offload, please consider setting zero_stage=0." ) assert self.precision != "fp32", "Please set precision to 'fp16' or 'bf16' when using ZeRO."
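A hypothetical usage sketch of the path this hunk introduces may help: a `DistGaloreAwamW` optimizer is passed through the booster, and the plugin downgrades ZeRO as warned above. The model and all plugin arguments are illustrative, not prescriptive; `get_galore_param_groups` is the helper the warning later in this diff points to.

```python
# Hypothetical usage sketch (plugin arguments illustrative); assumes
# colossalai.launch_from_torch has already initialized the distributed backend.
import torch.nn as nn
from colossalai.booster import Booster
from colossalai.booster.plugin import HybridParallelPlugin
from colossalai.nn.optimizer import DistGaloreAwamW

model = nn.Sequential(nn.Linear(1024, 1024), nn.GELU(), nn.Linear(1024, 1024))
# Pass get_galore_param_groups(...) instead of raw parameters to enable projection
optimizer = DistGaloreAwamW(model.parameters(), lr=1e-3)
# Any zero_stage > 0 would be downgraded to 0 for Galore by the check above
plugin = HybridParallelPlugin(tp_size=2, pp_size=1, zero_stage=0, precision="bf16")
booster = Booster(plugin=plugin)
# boost() -> configure() wraps the optimizer and, for DistributedOptim subclasses,
# ends with the setup_distributed() call shown in the next hunk
model, optimizer, *_ = booster.boost(model, optimizer)
```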
@@ -1236,11 +1250,19 @@ class HybridParallelPlugin(PipelinePluginBase): pp_process_group=self.pp_group, verbose=True, clip_grad_norm=self.max_norm, - **self.zero_config, + **zero_config, **self.amp_config, ) # inject update_master_params model.update_master_params = MethodType(optimizer.update_master_params, model) + + # Setup optimizers that require global states + optim = optimizer.optim + if isinstance(optim, DistributedOptim): + shard_to_param = optimizer.get_master_to_working_map() if is_zero else {} + padding_map = optimizer.get_param_padding_map() if is_zero else defaultdict(int) + optim.setup_distributed(self.tp_group, self.dp_group, shard_to_param, padding_map, is_zero) + return model, optimizer, criterion, dataloader, lr_scheduler def execute_pipeline( diff --git a/colossalai/booster/plugin/low_level_zero_plugin.py b/colossalai/booster/plugin/low_level_zero_plugin.py index be75bebac..dfc743fe5 100644 --- a/colossalai/booster/plugin/low_level_zero_plugin.py +++ b/colossalai/booster/plugin/low_level_zero_plugin.py @@ -8,7 +8,10 @@ from types import MethodType from typing import Callable, Dict, Iterator, List, Optional, Tuple import torch +import torch.distributed +import torch.distributed as dist import torch.nn as nn +from torch.distributed.distributed_c10d import _get_default_group from torch.nn import Parameter from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler as LRScheduler @@ -28,6 +31,8 @@ from colossalai.checkpoint_io.utils import ( sharded_optimizer_loading_epilogue, ) from colossalai.interface import AMPModelMixin, ModelWrapper, OptimizerWrapper +from colossalai.interface.optimizer import DistributedOptim +from colossalai.nn.optimizer import DistGaloreAwamW from colossalai.quantization import BnbQuantizationConfig, quantize_model from colossalai.zero import LowLevelZeroOptimizer @@ -428,13 +433,31 @@ class LowLevelZeroPlugin(DPPluginBase): if not isinstance(model, ModelWrapper): model = LowLevelZeroModel(model, self.precision) + # TODO: Support Galore + ZeRO + zero_stage = self.stage + zero_optim_kwargs = {**self.zero_optim_kwargs} + dp_size = dist.get_world_size() + if isinstance(optimizer, DistGaloreAwamW) and zero_stage > 0 and dp_size > 0: + warnings.warn("Galore is currently only supported with Tensor Parallel and vanilla Data Parallel. Disabling ZeRO.") + zero_optim_kwargs["partition_grad"] = False + zero_stage = 0 + if optimizer is not None and not isinstance(optimizer, OptimizerWrapper): optimizer: LowLevelZeroOptimizer = LowLevelZeroOptimizer( - optimizer, **self.zero_optim_kwargs, verbose=self.verbose + optimizer, **zero_optim_kwargs, verbose=self.verbose ) # inject update_master_params model.update_master_params = MethodType(optimizer.update_master_params, model) + # Setup optimizers that require global states + optim = optimizer.optim + is_zero = dp_size > 1 and zero_stage > 0 + dp_group = _get_default_group() # Use the whole world + if isinstance(optim, DistributedOptim): + shard_to_param = optimizer.get_master_to_working_map() + padding_map = optimizer.get_param_padding_map() + optim.setup_distributed(None, dp_group, shard_to_param, padding_map, is_zero) + return model, optimizer, criterion, dataloader, lr_scheduler def control_checkpoint_io(self) -> bool:
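Both plugins now finish `configure()` with the same handshake; here is a condensed, illustrative restatement (the helper function is hypothetical, but the method names are the ones used in the hunks above):

```python
# Condensed view of the DistributedOptim handshake (illustrative helper).
from collections import defaultdict

def hand_off_to_optim(wrapped_optimizer, tp_group, dp_group, is_zero: bool) -> None:
    optim = wrapped_optimizer.optim  # unwrap the ZeRO/AMP optimizer wrapper
    # ZeRO's master->working map and per-param padding let the optimizer recover
    # the original 2D layout behind the flattened shards it receives as grads.
    shard_to_param = wrapped_optimizer.get_master_to_working_map() if is_zero else {}
    padding_map = wrapped_optimizer.get_param_padding_map() if is_zero else defaultdict(int)
    optim.setup_distributed(tp_group, dp_group, shard_to_param, padding_map, is_zero)
```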
Disabling ZeRO.") + zero_optim_kwargs["partition_grad"] = False + zero_stage = 0 + if optimizer is not None and not isinstance(optimizer, OptimizerWrapper): optimizer: LowLevelZeroOptimizer = LowLevelZeroOptimizer( - optimizer, **self.zero_optim_kwargs, verbose=self.verbose + optimizer, **zero_optim_kwargs, verbose=self.verbose ) # inject update_master_params model.update_master_params = MethodType(optimizer.update_master_params, model) + # Setup optimizers that require global states + optim = optimizer.optim + is_zero = dp_size > 1 and zero_stage > 0 + dp_group = _get_default_group() # Use the whole world + if isinstance(optim, DistributedOptim): + shard_to_param = optimizer.get_master_to_working_map() + padding_map = optimizer.get_param_padding_map() + optim.setup_distributed(None, dp_group, shard_to_param, padding_map, is_zero) + return model, optimizer, criterion, dataloader, lr_scheduler def control_checkpoint_io(self) -> bool: diff --git a/colossalai/cluster/process_group_mesh.py b/colossalai/cluster/process_group_mesh.py index 43095af50..fea4a23ba 100644 --- a/colossalai/cluster/process_group_mesh.py +++ b/colossalai/cluster/process_group_mesh.py @@ -38,7 +38,12 @@ class ProcessGroupMesh: def __init__(self, *size: int) -> None: assert dist.is_initialized(), "Please initialize torch.distributed first." - assert prod(size) == dist.get_world_size(), "The product of the size must be equal to the world size." + world_size = dist.get_world_size() + prod_size = prod(size) + assert ( + prod_size == world_size + ), f"The product of the size({prod_size}) must be equal to the world size({world_size})." + self._shape = size self._rank = dist.get_rank() self._coord = ProcessGroupMesh.unravel(self._rank, self._shape) diff --git a/colossalai/device/device_mesh.py b/colossalai/device/device_mesh.py index 3949590e8..171d88762 100644 --- a/colossalai/device/device_mesh.py +++ b/colossalai/device/device_mesh.py @@ -306,9 +306,8 @@ class DeviceMesh: # index means the local rank in the current axis # inner_tensor refers to the processes with the same local rank - if inner_tensor.numel() == 1: - # if the inner_tensor only has one element, it means that - # it already reaches the last axis + if inner_tensor.dim() == 0: + # if the inner_tensor already reaches the last axis, # we append its local_rank in the last axis to the index_list # and assign to the mapping # the value of the mapping is the the local rank at the indexed axis of the device mesh @@ -459,6 +458,7 @@ class DeviceMesh: # replace the local rank in the given dimension with the # local rank of the current process iterated + process_coordinates[dim] = _local_rank processes_in_the_same_process_group[dim].append(process_coordinates) diff --git a/colossalai/interface/optimizer.py b/colossalai/interface/optimizer.py index 95d11087b..6cd74b3b4 100644 --- a/colossalai/interface/optimizer.py +++ b/colossalai/interface/optimizer.py @@ -1,6 +1,7 @@ -from typing import Union +from typing import Dict, Optional, Union import torch +import torch.distributed as dist import torch.nn as nn from torch import Tensor from torch.optim import Optimizer @@ -133,3 +134,25 @@ class OptimizerWrapper: Unwrap the optimizer for checkpoint saving/loading. 
""" return self.optim + + +class DistributedOptim(Optimizer): + def setup_distributed( + self, + tp_group: Optional[dist.ProcessGroup] = None, + dp_group: Optional[dist.ProcessGroup] = None, + shard_to_working_param: Optional[Dict] = {}, + padding_map: Optional[Dict] = None, + is_zero: Optional[bool] = False, + ): + """Assign process groups for TP and ZeRO 2. + Arguments: + tp_group (dist.ProcessGroup): Tensor Parallel process group + dp_group (dist.ProcessGroup): ZeRO stage 2 process group + shard_to_working_param (Dict): ZeRO stage 2 feeds the optimizer a sharded param view to match grad shape. + This maps from id(view) to model params used in forward & backward. + padding_map (Dict): Per-param padding from ZeRO stage 2 + is_zero (bool): Whether to use ZeRO stage 2. + """ + + raise NotImplementedError("setup_distributed for TP/DP isn't supported by this optimizer yet!") diff --git a/colossalai/nn/optimizer/__init__.py b/colossalai/nn/optimizer/__init__.py index 26f152da2..c7261b1bc 100644 --- a/colossalai/nn/optimizer/__init__.py +++ b/colossalai/nn/optimizer/__init__.py @@ -1,9 +1,36 @@ +from galore_torch import GaLoreAdafactor, GaLoreAdamW + +from .came import CAME from .cpu_adam import CPUAdam +from .distributed_adafactor import DistributedAdaFactor +from .distributed_came import DistributedCAME +from .distributed_galore import DistGaloreAwamW +from .distributed_lamb import DistributedLamb from .fused_adam import FusedAdam from .fused_lamb import FusedLAMB from .fused_sgd import FusedSGD +from .galore import GaLoreAdamW8bit from .hybrid_adam import HybridAdam from .lamb import Lamb from .lars import Lars -__all__ = ["FusedLAMB", "FusedAdam", "FusedSGD", "Lamb", "Lars", "CPUAdam", "HybridAdam"] +from .adafactor import Adafactor # noqa + +__all__ = [ + "FusedLAMB", + "FusedAdam", + "FusedSGD", + "Lamb", + "Lars", + "CPUAdam", + "HybridAdam", + "DistributedLamb", + "DistGaloreAwamW", + "GaLoreAdamW", + "GaLoreAdafactor", + "GaLoreAdamW8bit", + "CAME", + "DistributedCAME", + "Adafactor", + "DistributedAdaFactor", +] diff --git a/colossalai/nn/optimizer/adafactor.py b/colossalai/nn/optimizer/adafactor.py new file mode 100644 index 000000000..22a6c8f4d --- /dev/null +++ b/colossalai/nn/optimizer/adafactor.py @@ -0,0 +1,201 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math + +import torch +from torch.optim import Optimizer + +__all__ = ["Adafactor"] + + +# Adafactor +class Adafactor(Optimizer): + def __init__( + self, + params, + lr=None, + eps=(1e-30, 1e-3), + clip_threshold=1.0, + decay_rate=-0.8, + beta1=None, + weight_decay=0.0, + scale_parameter=True, + relative_step=True, + warmup_init=False, + ): + lr = None + if lr is not None and relative_step: + raise ValueError("Cannot combine manual `lr` and `relative_step=True` options") + if warmup_init and not relative_step: + raise ValueError("`warmup_init=True` requires `relative_step=True`") + + defaults = { + "lr": lr, + "eps": eps, + "clip_threshold": clip_threshold, + "decay_rate": decay_rate, + "beta1": beta1, + "weight_decay": weight_decay, + "scale_parameter": scale_parameter, + "relative_step": relative_step, + "warmup_init": warmup_init, + } + super().__init__(params, defaults) + + @staticmethod + def _get_lr(param_group, param_state): + rel_step_sz = param_group["lr"] + if param_group["relative_step"]: + min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2 + rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"])) + param_scale = 1.0 + if param_group["scale_parameter"]: + param_scale = max(param_group["eps"][1], param_state["RMS"]) + return param_scale * rel_step_sz + + @staticmethod + def _get_options(param_group, param_shape): + factored = len(param_shape) >= 2 + use_first_moment = param_group["beta1"] is not None + return factored, use_first_moment + + @staticmethod + def _rms(tensor): + return tensor.norm(2) / (tensor.numel() ** 0.5) + + @staticmethod + def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col): + r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1) + c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() + return torch.mul(r_factor, c_factor) + + @torch.no_grad() + def step(self, closure=None): + """ + Performs a single optimization step + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + loss = closure() + + """ + param_groups: Dict + { + "params":[weight, bias] + "lr" + "eps" + "clip_threshold" + "decay_rate" + "beta1" + "weight_decay" + "scale_parameter" + "relative_step" + "warmup_init" + } + """ + + for group in self.param_groups: + # update weight & bias + for p in group["params"]: + if p.grad is None: + continue + """ + # grad shape is same as weigh / bias + """ + grad = p.grad + if grad.is_sparse: + raise RuntimeError("Adafactor does not support sparse gradients.") + + """ + p is weight + state + {'step', + 'exp_avg_sq_row', + 'exp_avg_sq_col', + 'RMS' + } + + p is bias + state + {'step', + 'exp_avg_sq', + 'RMS' + } + """ + + state = self.state[p] + grad_shape = grad.shape + + factored, use_first_moment = self._get_options(group, grad_shape) + # State Initialization + if len(state) == 0: + state["step"] = 0 + if use_first_moment: + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like(grad) + if factored: + state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1], device=grad.device) + state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:], device=grad.device) + else: + state["exp_avg_sq"] = torch.zeros_like(grad) + + state["RMS"] = 0 + else: + if use_first_moment: + state["exp_avg"] = state["exp_avg"] + if factored: + state["exp_avg_sq_row"] = state["exp_avg_sq_row"] + state["exp_avg_sq_col"] = state["exp_avg_sq_col"] + else: + state["exp_avg_sq"] = state["exp_avg_sq"] + + state["step"] += 1 + # state["RMS"] = self._rms(p_data_fp32) + lr = self._get_lr(group, state) + beta2t = 1.0 - math.pow(state["step"], group["decay_rate"]) + update = (grad**2) + group["eps"][0] + if factored: + exp_avg_sq_row = state["exp_avg_sq_row"] + exp_avg_sq_col = state["exp_avg_sq_col"] + # Exponential average of row indexes + exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=(1.0 - beta2t)) + # Exponential average of columns indexes + exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=(1.0 - beta2t)) + # Approximation of exponential moving average of square of gradient + update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update.mul_(grad) + else: + exp_avg_sq = state["exp_avg_sq"] + exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t)) + update = exp_avg_sq.rsqrt().mul_(grad) + # RMS + update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0)) + update.mul_(lr) + + if use_first_moment: + exp_avg = state["exp_avg"] + exp_avg.mul_(group["beta1"]).add_(update, alpha=(1 - group["beta1"])) + update = exp_avg + + if group["weight_decay"] != 0: + p.add_(p, alpha=(-group["weight_decay"] * lr)) + p.add_(-update) + + return loss diff --git a/colossalai/nn/optimizer/came.py b/colossalai/nn/optimizer/came.py new file mode 100644 index 000000000..3a1a79dff --- /dev/null +++ b/colossalai/nn/optimizer/came.py @@ -0,0 +1,150 @@ +# Copied from https://github.com/yangluo7/CAME/blob/master/came_pytorch/CAME.py +import torch +import torch.optim + + +class CAME(torch.optim.Optimizer): + """Implements CAME algorithm. 
diff --git a/colossalai/nn/optimizer/came.py b/colossalai/nn/optimizer/came.py new file mode 100644 index 000000000..3a1a79dff --- /dev/null +++ b/colossalai/nn/optimizer/came.py @@ -0,0 +1,150 @@ +# Copied from https://github.com/yangluo7/CAME/blob/master/came_pytorch/CAME.py +import torch +import torch.optim + + +class CAME(torch.optim.Optimizer): + """Implements CAME algorithm. + This implementation is based on: + `CAME: Confidence-guided Adaptive Memory Efficient Optimization` + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): external learning rate (default: None) + eps (tuple[float, float]): regularization constants for square gradient + and instability respectively (default: (1e-30, 1e-16)) + clip_threshold (float): threshold of root-mean-square of + final gradient update (default: 1.0) + betas (tuple[float, float, float]): coefficients used for computing running averages of + update, square gradient and instability (default: (0.9, 0.999, 0.9999)) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + """ + + def __init__( + self, + params, + lr=None, + eps=(1e-30, 1e-16), + clip_threshold=1.0, + betas=(0.9, 0.999, 0.9999), + weight_decay=0.0, + ): + assert lr > 0.0 + assert all([0.0 <= beta <= 1.0 for beta in betas]) + + defaults = dict( + lr=lr, + eps=eps, + clip_threshold=clip_threshold, + betas=betas, + weight_decay=weight_decay, + ) + super(CAME, self).__init__(params, defaults) + + @property + def supports_memory_efficient_fp16(self): + return True + + @property + def supports_flat_params(self): + return False + + def _get_options(self, param_shape): + factored = len(param_shape) >= 2 + return factored + + def _rms(self, tensor): + return tensor.norm(2) / (tensor.numel() ** 0.5) + + def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col): + r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1) + c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() + return torch.mul(r_factor, c_factor) + + def step(self, closure=None): + """Performs a single optimization step. + Args: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError("CAME does not support sparse gradients.") + + state = self.state[p] + grad_shape = grad.shape + + factored = self._get_options(grad_shape) + # State Initialization + if len(state) == 0: + state["step"] = 0 + + state["exp_avg"] = torch.zeros_like(grad) + if factored: + state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1], dtype=p.dtype, device=p.device) + state["exp_avg_sq_col"] = torch.zeros( + grad_shape[:-2] + grad_shape[-1:], dtype=p.dtype, device=p.device + ) + + state["exp_avg_res_row"] = torch.zeros(grad_shape[:-1], dtype=p.dtype, device=p.device) + state["exp_avg_res_col"] = torch.zeros( + grad_shape[:-2] + grad_shape[-1:], dtype=p.dtype, device=p.device + ) + else: + state["exp_avg_sq"] = torch.zeros_like(p) + + state["step"] += 1 + + update = (grad**2) + group["eps"][0] + + if factored: + exp_avg_sq_row = state["exp_avg_sq_row"] + exp_avg_sq_col = state["exp_avg_sq_col"] + + exp_avg_sq_row.mul_(group["betas"][1]).add_(update.mean(dim=-1), alpha=1.0 - group["betas"][1]) + exp_avg_sq_col.mul_(group["betas"][1]).add_(update.mean(dim=-2), alpha=1.0 - group["betas"][1]) + + # Approximation of exponential moving average of square of gradient + update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update.mul_(grad) + else: + exp_avg_sq = state["exp_avg_sq"] + + exp_avg_sq.mul_(group["betas"][1]).add_(update, alpha=1.0 - group["betas"][1]) + update = exp_avg_sq.rsqrt().mul_(grad) + + update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0)) + + exp_avg = state["exp_avg"] + exp_avg.mul_(group["betas"][0]).add_(update, alpha=1 - group["betas"][0]) + + # Confidence-guided strategy + # Calculation of instability + res = (update - exp_avg) ** 2 + group["eps"][1] + + if factored: + exp_avg_res_row = state["exp_avg_res_row"] + exp_avg_res_col = state["exp_avg_res_col"] + exp_avg_res_row.mul_(group["betas"][2]).add_(res.mean(dim=-1), alpha=1.0 - group["betas"][2]) + exp_avg_res_col.mul_(group["betas"][2]).add_(res.mean(dim=-2), alpha=1.0 - group["betas"][2]) + + # Approximation of exponential moving average of instability + res_approx = self._approx_sq_grad(exp_avg_res_row, exp_avg_res_col) + update = res_approx.mul_(exp_avg) + else: + update = exp_avg.clone() + + if group["weight_decay"] != 0: + p.data.add_(p.data, alpha=-group["weight_decay"] * group["lr"]) + update.mul_(group["lr"]) + p.data.add_(-update) + + return loss diff --git a/colossalai/nn/optimizer/distributed_adafactor.py b/colossalai/nn/optimizer/distributed_adafactor.py new file mode 100644 index 000000000..1e5b7cb93 --- /dev/null +++ b/colossalai/nn/optimizer/distributed_adafactor.py @@ -0,0 +1,440 @@ +import math +from typing import Dict + +import torch +import torch.distributed as dist + +from colossalai.interface.optimizer import DistributedOptim +from colossalai.shardformer.layer._operation import _gather, _split +from colossalai.tensor.d_tensor import get_sharding_spec, is_distributed_tensor + +# DistributedAdaFactor (with Tensor parallel and Zero stage 2) +__all__ = ["DistributedAdaFactor"] + + +class DistributedAdaFactor(DistributedOptim): + def __init__( + self, + params, + lr=None, + eps=(1e-30, 1e-3), + clip_threshold=1.0, + decay_rate=-0.8, + beta1=None, + weight_decay=0.0, + scale_parameter=True, + relative_step=True, + warmup_init=False, + ): + lr = None + if lr is not None 
and relative_step: + raise ValueError("Cannot combine manual `lr` and `relative_step=True` options") + if warmup_init and not relative_step: + raise ValueError("`warmup_init=True` requires `relative_step=True`") + + defaults = { + "lr": lr, + "eps": eps, + "clip_threshold": clip_threshold, + "decay_rate": decay_rate, + "beta1": beta1, + "weight_decay": weight_decay, + "scale_parameter": scale_parameter, + "relative_step": relative_step, + "warmup_init": warmup_init, + } + self.tp_size = 1 + self.tp_group = None + self.dp_size = 1 + self.dp_group = None + self.shard_to_working_param = None # {id(master param): working param tensor} + self.use_zero = True + + self.param_is_dtensor_dict = {} # {id(p): True/False} + self.grad_shape_dict = {} # {id(p): master param shape} + self.factored_dict = {} # {id(p): True/False} + self.use_first_moment_dict = {} # {id(p): True/False} + self.shard_spec_dict = {} # {id(p): ShardSpec} + super().__init__(params, defaults) + + def setup_distributed( + self, + tp_group: dist.ProcessGroup = None, + dp_group: dist.ProcessGroup = None, + shard_to_working_param: Dict = {}, + padding_map=None, + use_zero: bool = True, + ) -> None: + """Setup process groups for TP and ZeRO 2. + Inject the distributed context into the optimizer + + Args: + tp_group: The devices group for tensor parallel; + dp_group: The devices group for data parallel; + shard_to_working_param (Dict): ZeRO 2 feeds the optimizer a sharded param view to match grad shape. + This maps from id(view) to working params used in forward & backward. + padding_map: An empty interface placeholder; + use_zero: Whether or not to use zero; + + """ + self.tp_group = tp_group # Expected to be the row-parallel process group + self.dp_group = dp_group + if self.tp_group is not None: + self.tp_size = dist.get_world_size(self.tp_group) + if self.dp_group is not None: + self.dp_size = dist.get_world_size(self.dp_group) + self.use_zero = use_zero + + self.shard_to_working_param = shard_to_working_param if shard_to_working_param is not None else {} + # Grads are still None at this point; setup_distributed() runs before any backward pass + for group in self.param_groups: + for p in group["params"]: + self.shard_to_working_param[id(p)] = self.shard_to_working_param.get( + id(p), p + ) # If not ZeRO, working param is master param + self.param_is_dtensor_dict[id(p)] = is_distributed_tensor(self.shard_to_working_param[id(p)]) + self.grad_shape_dict[id(p)] = self.shard_to_working_param.get(id(p)).shape + self.factored_dict[id(p)], self.use_first_moment_dict[id(p)] = self._get_options( + group, self.grad_shape_dict[id(p)] + ) + if self.param_is_dtensor_dict[id(p)]: + self.shard_spec_dict[id(p)] = get_sharding_spec(self.shard_to_working_param[id(p)]) + else: + self.shard_spec_dict[id(p)] = None + + @staticmethod + def _get_lr(param_group, param_state): + rel_step_sz = param_group["lr"] + if param_group["relative_step"]: + min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2 + rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"])) + param_scale = 1.0 + if param_group["scale_parameter"]: + param_scale = max(param_group["eps"][1], param_state["RMS"]) + return param_scale * rel_step_sz + + @staticmethod + def _get_options(param_group, param_shape): + """ + Determines whether the current param is factored + Args: + param_group : param group + param_shape : Original Shape of param + + """ + factored = len(param_shape) >= 2 + use_first_moment = param_group["beta1"] is not None + return factored, use_first_moment + + @staticmethod + def _rms(tensor, param_is_dtensor, 
use_zero, tp_size, dp_size, tp_group, dp_group): + tensor_sum = tensor.pow(2).sum() + num_of_element = tensor.numel() + + if param_is_dtensor: + # reduce tensor_sum from tp_group + dist.all_reduce(tensor_sum, group=tp_group) + num_of_element = num_of_element * tp_size + if use_zero: + dist.all_reduce(tensor_sum, group=dp_group) + num_of_element = num_of_element * dp_size + rms = (tensor_sum / num_of_element).sqrt() + return rms + + @staticmethod + def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col): + r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1) + c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() + return torch.mul(r_factor, c_factor) + + # approx_sq_grad for row parallel weight + @staticmethod + def _approx_sq_grad_row_parallel(exp_avg_sq_row, exp_avg_sq_col, sq_row_meam): + # row_meam = sq_row_meam + r_factor = (exp_avg_sq_row / sq_row_meam).rsqrt_().unsqueeze(-1) + c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() + return torch.mul(r_factor, c_factor) + + def _col_parallel_factor(self, update, grad, state, grad_shape, beta2t): + if grad_shape[0] % self.dp_size != 0: + # gather update[flatten] along dp group then reshape to [H, W/tp] + update = _gather(input_=update, dim=-1, process_group=self.dp_group) + update_reshape = update.view(-1, grad_shape[1]) + # gather grad[flatten] along dp group then reshape to [H, W/tp] + grad = _gather(input_=grad, dim=-1, process_group=self.dp_group) + grad_reshape = grad.view(-1, grad_shape[1]) + exp_avg_sq_row = state["exp_avg_sq_row"] # [H] + exp_avg_sq_col = state["exp_avg_sq_col"] # [W/tp] + exp_avg_sq_row.mul_(beta2t).add_(update_reshape.mean(dim=-1), alpha=(1.0 - beta2t)) + exp_avg_sq_col.mul_(beta2t).add_(update_reshape.mean(dim=-2), alpha=(1.0 - beta2t)) + update_reshape = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update_reshape.mul_(grad_reshape) + else: + update_reshape = update.view(-1, grad_shape[1]) + grad_reshape = grad.view(-1, grad_shape[1]) + exp_avg_sq_row = state["exp_avg_sq_row"] # [H/dp] + exp_avg_sq_col = state["exp_avg_sq_col"] # [W/tp] + exp_avg_sq_row.mul_(beta2t).add_(update_reshape.mean(dim=-1), alpha=(1.0 - beta2t)) + exp_avg_sq_col.mul_(beta2t).add_(update_reshape.mean(dim=-2), alpha=(1.0 - beta2t)) + dist.all_reduce(exp_avg_sq_row, group=self.tp_group) + exp_avg_sq_row.div_(self.tp_size) + update_reshape = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update_reshape.mul_(grad_reshape) + + if self.use_zero: + update = update_reshape.view(-1) + else: + update = update_reshape + return update + + def _row_parallel_factor(self, update, grad, state, grad_shape, beta2t): + if grad_shape[0] % self.dp_size != 0: + # gather update[flatten] along dp group then reshape to [H/tp, W] + update = _gather(input_=update, dim=-1, process_group=self.dp_group) + # view update to origin[tp] shape + update_reshape = update.view(-1, grad_shape[1]) + # gather grad[flatten] along dp group then reshape to [H/tp, W] + grad = _gather(input_=grad, dim=-1, process_group=self.dp_group) + grad_reshape = grad.view(-1, grad_shape[1]) + exp_avg_sq_row = state["exp_avg_sq_row"] # [H/tp] + exp_avg_sq_col = state["exp_avg_sq_col"] # [W] + exp_avg_sq_row.mul_(beta2t).add_(update_reshape.mean(dim=-1), alpha=(1.0 - beta2t)) + exp_avg_sq_col.mul_(beta2t).add_(update_reshape.mean(dim=-2), alpha=(1.0 - beta2t)) + # reduce col + dist.all_reduce(exp_avg_sq_col, group=self.tp_group) + exp_avg_sq_col.div_(self.tp_size) + update_reshape = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + 
update_reshape.mul_(grad_reshape) + if self.use_zero: + update = _split(input_=update_reshape.view(-1), dim=-1, process_group=self.dp_group) + else: + update = update_reshape + else: + update_reshape = update.view(-1, grad_shape[1]) + grad_reshape = grad.view(-1, grad_shape[1]) + exp_avg_sq_row = state["exp_avg_sq_row"] # [H/dp/tp] + exp_avg_sq_col = state["exp_avg_sq_col"] # [W] + exp_avg_sq_row.mul_(beta2t).add_(update_reshape.mean(dim=-1), alpha=(1.0 - beta2t)) + exp_avg_sq_col.mul_(beta2t).add_(update_reshape.mean(dim=-2), alpha=(1.0 - beta2t)) + # reduce col + dist.all_reduce(exp_avg_sq_col, group=self.tp_group) + exp_avg_sq_col.div_(self.tp_size) + # gather row + exp_avg_sq_row_gather = _gather(input_=exp_avg_sq_row, dim=-1, process_group=self.tp_group) + sq_row_meam = exp_avg_sq_row_gather.mean(dim=-1, keepdim=True) + update_reshape = self._approx_sq_grad_row_parallel(exp_avg_sq_row, exp_avg_sq_col, sq_row_meam) + update_reshape.mul_(grad_reshape) + if self.use_zero: + update = update_reshape.view(-1) + else: + update = update_reshape + return update + + def _base_factor(self, update, grad, state, grad_shape, beta2t): + if self.use_zero: + # only zero + if grad_shape[0] % self.dp_size != 0: + # view update to origin shape update.view(grad_shape[0]//self.data_parallel_size , grad_shape[1]) + # row mean no change + # col mean need reduce and div + # gather update[flatten] along dp group then reshape to [H, W] + update = _gather(input_=update, dim=-1, process_group=self.dp_group) + # view update to origin[tp] shape + update_reshape = update.view(-1, grad_shape[1]) + # gather grad[flatten] along dp group then reshape to [H, W] + grad = _gather(input_=grad, dim=-1, process_group=self.dp_group) + grad_reshape = grad.view(-1, grad_shape[1]) + exp_avg_sq_row = state["exp_avg_sq_row"] # [H/dp] + exp_avg_sq_col = state["exp_avg_sq_col"] # [W] + exp_avg_sq_row.mul_(beta2t).add_(update_reshape.mean(dim=-1), alpha=(1.0 - beta2t)) + exp_avg_sq_col.mul_(beta2t).add_(update_reshape.mean(dim=-2), alpha=(1.0 - beta2t)) + # reduce col + dist.all_reduce(exp_avg_sq_col, group=self.tp_group) + exp_avg_sq_col.div_(self.tp_size) + update_reshape = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update_reshape.mul_(grad_reshape) + update = _split(input_=update_reshape.view(-1), dim=-1, process_group=self.dp_group) + else: + # no residual row + # view update to origin[tp] shape + update_reshape = update.view(-1, grad_shape[1]) # [H/dp, W] + grad_reshape = grad.view(-1, grad_shape[1]) # [H/dp, W] + exp_avg_sq_row = state["exp_avg_sq_row"] # [H/tp] + exp_avg_sq_col = state["exp_avg_sq_col"] # [W] + exp_avg_sq_row.mul_(beta2t).add_(update_reshape.mean(dim=-1), alpha=(1.0 - beta2t)) + exp_avg_sq_col.mul_(beta2t).add_(update_reshape.mean(dim=-2), alpha=(1.0 - beta2t)) + # reduce col + dist.all_reduce(exp_avg_sq_col, group=self.tp_group) + exp_avg_sq_col.div_(self.tp_size) + update_reshape = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update_reshape.mul_(grad_reshape) + update = update_reshape.view(-1) + else: + # base factor; no tp, no dp + exp_avg_sq_row = state["exp_avg_sq_row"] + exp_avg_sq_col = state["exp_avg_sq_col"] + # Exponential average of row indexes + exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=(1.0 - beta2t)) + # Exponential average of columns indexes + exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=(1.0 - beta2t)) + # Approximation of exponential moving average of square of gradient + update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + 
update.mul_(grad) + return update + + @torch.no_grad() + def step(self, closure=None): + """ + Performs a single optimization step + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + """ + param_groups: Dict + { + "params":[weight, bias] + "lr" + "eps" + "clip_threshold" + "decay_rate" + "beta1" + "weight_decay" + "scale_parameter" + "relative_step" + "warmup_init" + } + """ + for group in self.param_groups: + # update weight & bias + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError("Adafactor does not support sparse gradients.") + + state = self.state[p] + grad_shape = self.grad_shape_dict[id(p)] + param_is_dtensor = self.param_is_dtensor_dict[id(p)] + if param_is_dtensor: + grad_shape = self.shard_to_working_param.get(id(p)).shape # tp shape (2 dim) + factored, use_first_moment = self.factored_dict[id(p)], self.use_first_moment_dict[id(p)] + + shard_spec = self.shard_spec_dict[id(p)] + if len(state) == 0: + state["step"] = 0 + if use_first_moment: + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like(p) + if factored: + if param_is_dtensor: + if shard_spec.sharding_sequence[0] == "R": # Col Parallel + if grad_shape[0] % self.dp_size != 0: + state["exp_avg_sq_row"] = torch.zeros( + grad_shape[0], device=p.device, dtype=p.dtype + ) # [H] + else: + state["exp_avg_sq_row"] = torch.zeros( + grad_shape[0] // self.dp_size, device=p.device, dtype=p.dtype + ) # [H/dp] + state["exp_avg_sq_col"] = torch.zeros( + grad_shape[1], device=p.device, dtype=p.dtype + ) # [W/TP] + + if shard_spec.sharding_sequence[-1] == "R": # Row Parallel + # Row indivisible shape situation + if grad_shape[0] % self.dp_size != 0: + state["exp_avg_sq_row"] = torch.zeros( + grad_shape[0], device=p.device, dtype=p.dtype + ) # [H/tp] + else: + state["exp_avg_sq_row"] = torch.zeros( + grad_shape[0] // self.dp_size, device=p.device, dtype=p.dtype + ) # [H/dp/tp] + + state["exp_avg_sq_col"] = torch.zeros( + grad_shape[1], device=p.device, dtype=p.dtype + ) # [W] + else: + if self.use_zero: + if grad_shape[0] % self.dp_size != 0: + # save all exp_avg_sq_row [H] + state["exp_avg_sq_row"] = torch.zeros( + grad_shape[0], device=grad.device, dtype=p.dtype + ) + else: + # exp_avg_sq_row [H // dp] + state["exp_avg_sq_row"] = torch.zeros( + grad_shape[0] // self.dp_size, device=grad.device, dtype=p.dtype + ) + else: + # exp_avg_sq_row [H] + state["exp_avg_sq_row"] = torch.zeros(grad_shape[0], device=grad.device, dtype=p.dtype) + # exp_avg_sq_col always [W] + state["exp_avg_sq_col"] = torch.zeros(grad_shape[1], device=grad.device, dtype=p.dtype) + else: + state["exp_avg_sq"] = torch.zeros_like(p) + state["RMS"] = 0 + else: + if use_first_moment: + state["exp_avg"] = state["exp_avg"] + if factored: + state["exp_avg_sq_row"] = state["exp_avg_sq_row"] + state["exp_avg_sq_col"] = state["exp_avg_sq_col"] + else: + state["exp_avg_sq"] = state["exp_avg_sq"] + + state["step"] += 1 + lr = self._get_lr(group, state) + beta2t = 1.0 - math.pow(state["step"], group["decay_rate"]) + update = (grad**2) + group["eps"][0] + + if factored: + if param_is_dtensor: + # ============================== + # First Dim is R, Last Dim is S{} means split dim -1 ---> + # Column Parallel ---> sq_row needs a (col) reduce + # ============================== + if shard_spec.sharding_sequence[0] == "R": + update = self._col_parallel_factor(update, grad, state, grad_shape, beta2t) + # ============================== + # Last Dim is R, First Dim is S{} means split dim 0 ---> + # Row Parallel ---> sq_col needs a (row) reduce + # ============================== + elif shard_spec.sharding_sequence[-1] == "R": + update = self._row_parallel_factor(update, grad, state, grad_shape, beta2t) + else: + update = self._base_factor(update, grad, state, grad_shape, beta2t) + else: + exp_avg_sq = state["exp_avg_sq"] + exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t)) + update = exp_avg_sq.rsqrt().mul_(grad) + + # RMS + rms = self._rms( + update, + param_is_dtensor, + self.use_zero, + self.tp_size, + self.dp_size, + self.tp_group, + self.dp_group, + ) + update.div_((rms / group["clip_threshold"]).clamp_(min=1.0)) + + update.mul_(lr) + if use_first_moment: + exp_avg = state["exp_avg"] + exp_avg.mul_(group["beta1"]).add_(update, alpha=(1 - group["beta1"])) + update = exp_avg + + if group["weight_decay"] != 0: + p.add_(p, alpha=(-group["weight_decay"] * lr)) + + p.add_(-update) + + return loss
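`_rms` above turns shard-local statistics into a global RMS by all-reducing the squared sum and scaling the element count by the TP/DP world sizes. A single-process sanity check of that identity, with plain chunks standing in for shards:

```python
# Single-process sanity check of the sharded RMS computed by _rms above.
import torch

full = torch.randn(6, 4)
shards = full.chunk(2, dim=0)                   # stand-in for TP/ZeRO shards
sq_sum = sum(s.pow(2).sum() for s in shards)    # what dist.all_reduce would accumulate
numel = sum(s.numel() for s in shards)          # the tp_size/dp_size scaling in _rms
rms = (sq_sum / numel).sqrt()
assert torch.allclose(rms, full.pow(2).mean().sqrt())
```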
diff --git a/colossalai/nn/optimizer/distributed_came.py b/colossalai/nn/optimizer/distributed_came.py new file mode 100644 index 000000000..d93ec4982 --- /dev/null +++ b/colossalai/nn/optimizer/distributed_came.py @@ -0,0 +1,557 @@ +from typing import Dict + +import torch +import torch.distributed as dist + +from colossalai.interface.optimizer import DistributedOptim +from colossalai.shardformer.layer._operation import _gather, _split +from colossalai.tensor.d_tensor import get_sharding_spec, is_distributed_tensor + + +class DistributedCAME(DistributedOptim): + """Implements CAME algorithm. + This implementation is based on: + `CAME: Confidence-guided Adaptive Memory Efficient Optimization` + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): external learning rate (default: None) + eps (tuple[float, float]): regularization constants for square gradient + and instability respectively (default: (1e-30, 1e-16)) + clip_threshold (float): threshold of root-mean-square of + final gradient update (default: 1.0) + betas (tuple[float, float, float]): coefficients used for computing running averages of + update, square gradient and instability (default: (0.9, 0.999, 0.9999)) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + """ + + def __init__( + self, + params, + lr=None, + eps=(1e-30, 1e-16), + clip_threshold=1.0, + betas=(0.9, 0.999, 0.9999), + weight_decay=0.0, + ): + assert lr > 0.0 + assert all([0.0 <= beta <= 1.0 for beta in betas]) + + defaults = dict( + lr=lr, + eps=eps, + clip_threshold=clip_threshold, + betas=betas, + weight_decay=weight_decay, + ) + + self.tp_size = 1 + self.tp_group = None + self.dp_size = 1 + self.dp_group = None + self.shard_to_working_param = None # {id(master param): working param tensor} + self.use_zero = True + + self.param_is_dtensor_dict = {} # {id(p): True/False} + self.grad_shape_dict = {} # {id(p): master param shape} + self.factored_dict = {} # {id(p): True/False} + self.use_first_moment_dict = {} # {id(p): True/False} + self.shard_spec_dict = {} # {id(p): ShardSpec} + + super(DistributedCAME, self).__init__(params, defaults) + + @property + def supports_memory_efficient_fp16(self): + return True + + @property + def supports_flat_params(self): + return False + + def setup_distributed( + self, + tp_group: dist.ProcessGroup = None, + dp_group: dist.ProcessGroup = None, + shard_to_working_param: Dict = {}, + padding_map=None, + use_zero: bool = True, + ) -> None: + """ + Inject the distributed context into the optimizer + + Args: + tp_group: The devices group for tensor parallel; + dp_group: The devices group for data parallel; + shard_to_working_param (Dict): ZeRO 2 feeds the optimizer a sharded param view as grads are sharded. + This maps from id(view) to working params used in forward & backward. + padding_map: Interface placeholder + use_zero: Whether or not to use zero; + + """ + self.tp_group = tp_group # Expected to be the row-parallel process group + self.dp_group = dp_group + if self.tp_group is not None: + self.tp_size = dist.get_world_size(self.tp_group) + if self.dp_group is not None: + self.dp_size = dist.get_world_size(self.dp_group) + self.use_zero = use_zero + + self.shard_to_working_param = shard_to_working_param if shard_to_working_param is not None else {} + # Grads are still None at this point; setup_distributed() runs before any backward pass + for group in self.param_groups: + for p in group["params"]: + # w/o ZeRO: master param = working param + self.shard_to_working_param[id(p)] = self.shard_to_working_param.get(id(p), p) + self.param_is_dtensor_dict[id(p)] = is_distributed_tensor(self.shard_to_working_param[id(p)]) + self.grad_shape_dict[id(p)] = self.shard_to_working_param[id(p)].shape + # Row parallelism can leave H=1, which would make the param look non-factored, so force factoring for DTensors + if self.param_is_dtensor_dict[id(p)]: + self.shard_spec_dict[id(p)] = get_sharding_spec(self.shard_to_working_param[id(p)]) + if self.shard_spec_dict[id(p)].sharding_sequence[0] == "R": + self.factored_dict[id(p)] = True + elif self.shard_spec_dict[id(p)].sharding_sequence[-1] == "R": + self.factored_dict[id(p)] = True + else: + self.factored_dict[id(p)] = self._get_options(self.grad_shape_dict[id(p)]) + + else: + self.shard_spec_dict[id(p)] = None + self.factored_dict[id(p)] = self._get_options(self.grad_shape_dict[id(p)])
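The branch selection used throughout `step()` hinges on the sharding spec cached here. A small runnable restatement of it (the `"R"`/`"S0"` notation follows the `sharding_sequence` checks above, where `"R"` marks a replicated axis):

```python
# Illustrative restatement of the factor-branch selection above.
def pick_factor_branch(sharding_sequence):
    if sharding_sequence is None:
        return "base"                  # plain tensor: base (optionally ZeRO) path
    if sharding_sequence[0] == "R":    # first dim replicated -> split along columns
        return "col_parallel"          # sq_row needs a reduce over the TP group
    if sharding_sequence[-1] == "R":   # last dim replicated -> split along rows
        return "row_parallel"          # sq_col needs a reduce over the TP group
    return "base"

assert pick_factor_branch(["R", "S0"]) == "col_parallel"
assert pick_factor_branch(["S0", "R"]) == "row_parallel"
```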
@staticmethod + def _get_options(param_shape): + factored = len(param_shape) >= 2 + return factored + + @staticmethod + def _rms(tensor, param_is_dtensor, use_zero, tp_size, dp_size, tp_group, dp_group): + tensor_sum = tensor.pow(2).sum() + num_of_element = tensor.numel() + + if param_is_dtensor: + # reduce tensor_sum from tp_group + dist.all_reduce(tensor_sum, group=tp_group) + num_of_element = num_of_element * tp_size + if use_zero: + dist.all_reduce(tensor_sum, group=dp_group) + num_of_element = num_of_element * dp_size + rms = (tensor_sum / num_of_element).sqrt() + return rms + + @staticmethod + def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col): + r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1) + c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() + return torch.mul(r_factor, c_factor) + + # approx_sq_grad for row parallel weight + @staticmethod + def _approx_sq_grad_row_parallel(exp_avg_sq_row, exp_avg_sq_col, sq_row_meam): + r_factor = (exp_avg_sq_row / sq_row_meam).rsqrt_().unsqueeze(-1) + c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() + return torch.mul(r_factor, c_factor) + + def _col_parallel_factor(self, update, grad, state_row, state_col, grad_shape, beta2t): + if grad_shape[0] % self.dp_size != 0: + # gather update[flatten] along dp group then reshape to [H, W/tp] + update = _gather(input_=update, dim=-1, process_group=self.dp_group) + update_reshape = update.view(-1, grad_shape[1]) + # gather grad[flatten] along dp group then reshape to [H, W/tp] + grad = _gather(input_=grad, dim=-1, process_group=self.dp_group) + grad_reshape = grad.view(-1, grad_shape[1]) + exp_avg_sq_row = state_row # [H] +
exp_avg_sq_col = state_col # [W/tp] + exp_avg_sq_row.mul_(beta2t).add_(update_reshape.mean(dim=-1), alpha=(1.0 - beta2t)) + exp_avg_sq_col.mul_(beta2t).add_(update_reshape.mean(dim=-2), alpha=(1.0 - beta2t)) + update_reshape = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update_reshape.mul_(grad_reshape) + else: + update_reshape = update.view(-1, grad_shape[1]) + grad_reshape = grad.view(-1, grad_shape[1]) + exp_avg_sq_row = state_row # [H] + exp_avg_sq_col = state_col # [W/tp] + exp_avg_sq_row.mul_(beta2t).add_(update_reshape.mean(dim=-1), alpha=(1.0 - beta2t)) + exp_avg_sq_col.mul_(beta2t).add_(update_reshape.mean(dim=-2), alpha=(1.0 - beta2t)) + dist.all_reduce(exp_avg_sq_row, group=self.tp_group) + exp_avg_sq_row.div_(self.tp_size) + update_reshape = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update_reshape.mul_(grad_reshape) + + if self.use_zero: + update = update_reshape.view(-1) + else: + update = update_reshape + return update + + def _row_parallel_factor(self, update, grad, state_row, state_col, grad_shape, beta2t): + if grad_shape[0] % self.dp_size != 0: + # gather update[flatten] along dp group then reshape to [H/tp, W] + update = _gather(input_=update, dim=-1, process_group=self.dp_group) + # view update to origin[tp] shape + update_reshape = update.view(-1, grad_shape[1]) + # gather grad[flatten] along dp group then reshape to [H/tp, W] + grad = _gather(input_=grad, dim=-1, process_group=self.dp_group) + grad_reshape = grad.view(-1, grad_shape[1]) + exp_avg_sq_row = state_row # [H] + exp_avg_sq_col = state_col # [W/tp] + exp_avg_sq_row.mul_(beta2t).add_(update_reshape.mean(dim=-1), alpha=(1.0 - beta2t)) + exp_avg_sq_col.mul_(beta2t).add_(update_reshape.mean(dim=-2), alpha=(1.0 - beta2t)) + # reduce col + dist.all_reduce(exp_avg_sq_col, group=self.tp_group) + exp_avg_sq_col.div_(self.tp_size) + update_reshape = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update_reshape.mul_(grad_reshape) + if self.use_zero: + update = _split(input_=update_reshape.view(-1), dim=-1, process_group=self.dp_group) + else: + update = update_reshape + else: + update_reshape = update.view(-1, grad_shape[1]) + grad_reshape = grad.view(-1, grad_shape[1]) + exp_avg_sq_row = state_row # [H] + exp_avg_sq_col = state_col # [W/tp] + exp_avg_sq_row.mul_(beta2t).add_(update_reshape.mean(dim=-1), alpha=(1.0 - beta2t)) + exp_avg_sq_col.mul_(beta2t).add_(update_reshape.mean(dim=-2), alpha=(1.0 - beta2t)) + # reduce col + dist.all_reduce(exp_avg_sq_col, group=self.tp_group) + exp_avg_sq_col.div_(self.tp_size) + # gather row + exp_avg_sq_row_gather = _gather(input_=exp_avg_sq_row, dim=-1, process_group=self.tp_group) + sq_row_meam = exp_avg_sq_row_gather.mean(dim=-1, keepdim=True) + update_reshape = self._approx_sq_grad_row_parallel(exp_avg_sq_row, exp_avg_sq_col, sq_row_meam) + update_reshape.mul_(grad_reshape) + if self.use_zero: + update = update_reshape.view(-1) + else: + update = update_reshape + return update + + def _base_factor(self, update, grad, state_row, state_col, grad_shape, beta2t): + if self.use_zero: + # only zero + # [30522, 128], [2, 128] + if grad_shape[0] % self.dp_size != 0: + # view update to origin shape update.view(grad_shape[0]//self.data_parallel_size , grad_shape[1]) + # row mean no change + # col mean need reduce and div + # gather update[flatten] along dp group then reshape to [H, W] + update = _gather(input_=update, dim=-1, process_group=self.dp_group) + # view update to origin[tp] shape + update_reshape = update.view(-1, grad_shape[1]) + # gather 
grad[flatten] along dp group then reshape to [H, W] + grad = _gather(input_=grad, dim=-1, process_group=self.dp_group) + grad_reshape = grad.view(-1, grad_shape[1]) + exp_avg_sq_row = state_row # [H/dp] + exp_avg_sq_col = state_col # [W] + exp_avg_sq_row.mul_(beta2t).add_(update_reshape.mean(dim=-1), alpha=(1.0 - beta2t)) + exp_avg_sq_col.mul_(beta2t).add_(update_reshape.mean(dim=-2), alpha=(1.0 - beta2t)) + # reduce col + dist.all_reduce(exp_avg_sq_col, group=self.tp_group) + exp_avg_sq_col.div_(self.tp_size) + update_reshape = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update_reshape.mul_(grad_reshape) + update = _split(input_=update_reshape.view(-1), dim=-1, process_group=self.dp_group) + else: + # no residual row + # view update to origin[tp] shape + update_reshape = update.view(-1, grad_shape[1]) # [H/dp, W] + grad_reshape = grad.view(-1, grad_shape[1]) # [H/dp, W] + exp_avg_sq_row = state_row # [H/dp] + exp_avg_sq_col = state_col # [W] + exp_avg_sq_row.mul_(beta2t).add_(update_reshape.mean(dim=-1), alpha=(1.0 - beta2t)) + exp_avg_sq_col.mul_(beta2t).add_(update_reshape.mean(dim=-2), alpha=(1.0 - beta2t)) + # reduce col + dist.all_reduce(exp_avg_sq_col, group=self.tp_group) + exp_avg_sq_col.div_(self.tp_size) + update_reshape = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update_reshape.mul_(grad_reshape) + update = update_reshape.view(-1) + else: + # # base factor; no tp, no dp + exp_avg_sq_row = state_row # [H/dp] + exp_avg_sq_col = state_col # [W] + # Exponential average of row indexes + exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=(1.0 - beta2t)) + # Exponential average of columns indexes + exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=(1.0 - beta2t)) + # Approximation of exponential moving average of square of gradient + update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update.mul_(grad) + return update + + # factor + def _base_res_factor(self, res, exp_avg, state_row, state_col, grad_shape, beta2t): + if self.use_zero: + # only zero + if grad_shape[0] % self.dp_size != 0: + # view res to origin shape res.view(grad_shape[0]//self.data_parallel_size , grad_shape[1]) + # row mean no change + # col mean need reduce and div + # gather res[flatten] along dp group then reshape to [H, W] + res = _gather(input_=res, dim=-1, process_group=self.dp_group) + # view res to origin[tp] shape + res_reshape = res.view(-1, grad_shape[1]) + # gather exp_avg[flatten] along dp group then reshape to [H, W] + exp_avg = _gather(input_=exp_avg, dim=-1, process_group=self.dp_group) + exp_avg_reshape = exp_avg.view(-1, grad_shape[1]) + exp_avg_sq_row = state_row # [H/dp] + exp_avg_sq_col = state_col # [W] + exp_avg_sq_row.mul_(beta2t).add_(res_reshape.mean(dim=-1), alpha=(1.0 - beta2t)) + exp_avg_sq_col.mul_(beta2t).add_(res_reshape.mean(dim=-2), alpha=(1.0 - beta2t)) + # reduce col + dist.all_reduce(exp_avg_sq_col, group=self.tp_group) + exp_avg_sq_col.div_(self.tp_size) + res_reshape = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + res_reshape.mul_(exp_avg_reshape) + res = _split(input_=res_reshape.view(-1), dim=-1, process_group=self.dp_group) + else: + # no residual row + # view res to origin[tp] shape + res_reshape = res.view(-1, grad_shape[1]) # [H/dp, W] + exp_avg_reshape = exp_avg.view(-1, grad_shape[1]) # [H/dp, W] + exp_avg_sq_row = state_row # [H/dp] + exp_avg_sq_col = state_col # [W] + exp_avg_sq_row.mul_(beta2t).add_(res_reshape.mean(dim=-1), alpha=(1.0 - beta2t)) + 
exp_avg_sq_col.mul_(beta2t).add_(res_reshape.mean(dim=-2), alpha=(1.0 - beta2t)) + # reduce col + dist.all_reduce(exp_avg_sq_col, group=self.tp_group) + exp_avg_sq_col.div_(self.tp_size) + res_reshape = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + res_reshape.mul_(exp_avg_reshape) + res = res_reshape.view(-1) + else: + # # base factor; no tp, no dp + exp_avg_sq_row = state_row # [H/dp] + exp_avg_sq_col = state_col # [W] + # Exponential average of row indexes + exp_avg_sq_row.mul_(beta2t).add_(res.mean(dim=-1), alpha=(1.0 - beta2t)) + # Exponential average of columns indexes + exp_avg_sq_col.mul_(beta2t).add_(res.mean(dim=-2), alpha=(1.0 - beta2t)) + # Approximation of exponential moving average of square of gradient + res = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + res.mul_(exp_avg) + return res + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Args: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError("CAME does not support sparse gradients.") + + state = self.state[p] + # Under zero the grad_shape is the original grad that is flattened and then cut (only one dimension) + grad_shape = grad.shape + grad_shape = self.grad_shape_dict[id(p)] + param_is_dtensor = self.param_is_dtensor_dict[id(p)] + if param_is_dtensor: + grad_shape = self.shard_to_working_param.get(id(p)).shape # tp shape (2 dim) + factored = self.factored_dict[id(p)] + shard_spec = self.shard_spec_dict[id(p)] + + # State Initialization + if len(state) == 0: + state["step"] = 0 + state["exp_avg"] = torch.zeros_like(p) + if factored: + if param_is_dtensor: + if shard_spec.sharding_sequence[0] == "R": # Col Parallel + if grad_shape[0] % self.dp_size != 0: + state["exp_avg_sq_row"] = torch.zeros( + grad_shape[0], device=p.device, dtype=p.dtype + ) # [H] + state["exp_avg_res_row"] = torch.zeros( + grad_shape[0], device=p.device, dtype=p.dtype + ) # [H] + else: + state["exp_avg_sq_row"] = torch.zeros( + grad_shape[0] // self.dp_size, device=p.device, dtype=p.dtype + ) # [H/dp] + state["exp_avg_res_row"] = torch.zeros( + grad_shape[0] // self.dp_size, device=p.device, dtype=p.dtype + ) # [H/dp] + state["exp_avg_sq_col"] = torch.zeros( + grad_shape[1], device=p.device, dtype=p.dtype + ) # [W/TP] + state["exp_avg_res_col"] = torch.zeros( + grad_shape[1], device=p.device, dtype=p.dtype + ) # [W/TP] + + if shard_spec.sharding_sequence[-1] == "R": # Row Parallel + # Row indivisible shape situation + if grad_shape[0] % self.dp_size != 0: + state["exp_avg_sq_row"] = torch.zeros( + grad_shape[0], device=p.device, dtype=p.dtype + ) # [H/tp] + state["exp_avg_res_row"] = torch.zeros( + grad_shape[0], device=p.device, dtype=p.dtype + ) # [H/tp] + else: + state["exp_avg_sq_row"] = torch.zeros( + grad_shape[0] // self.dp_size, device=p.device, dtype=p.dtype + ) # [H/dp/tp] + state["exp_avg_res_row"] = torch.zeros( + grad_shape[0] // self.dp_size, device=p.device, dtype=p.dtype + ) # [H/dp/tp] + + state["exp_avg_sq_col"] = torch.zeros( + grad_shape[1], device=p.device, dtype=p.dtype + ) # [W] + state["exp_avg_res_col"] = torch.zeros( + grad_shape[1], device=p.device, dtype=p.dtype + ) # [W] + else: + if self.use_zero: + if grad_shape[0] % self.dp_size != 0: + # save all exp_avg_sq_row [H] + state["exp_avg_sq_row"] = 
torch.zeros( + grad_shape[0], device=grad.device, dtype=p.dtype + ) + state["exp_avg_res_row"] = torch.zeros( + grad_shape[0], device=grad.device, dtype=p.dtype + ) + else: + # exp_avg_sq_row [H // dp] + state["exp_avg_sq_row"] = torch.zeros( + grad_shape[0] // self.dp_size, device=grad.device, dtype=p.dtype + ) + state["exp_avg_res_row"] = torch.zeros( + grad_shape[0] // self.dp_size, device=grad.device, dtype=p.dtype + ) + else: + # exp_avg_sq_row [H] + state["exp_avg_sq_row"] = torch.zeros(grad_shape[0], device=grad.device, dtype=p.dtype) + state["exp_avg_res_row"] = torch.zeros(grad_shape[0], device=grad.device, dtype=p.dtype) + # exp_avg_sq_col always [W] + state["exp_avg_sq_col"] = torch.zeros(grad_shape[1], device=grad.device, dtype=p.dtype) + state["exp_avg_res_col"] = torch.zeros(grad_shape[1], device=grad.device, dtype=p.dtype) + else: + state["exp_avg_sq"] = torch.zeros_like(p) + state["RMS"] = 0 + else: + if factored: + state["exp_avg_sq_row"] = state["exp_avg_sq_row"] + state["exp_avg_sq_col"] = state["exp_avg_sq_col"] + state["exp_avg_res_row"] = state["exp_avg_sq_row"] + state["exp_avg_res_col"] = state["exp_avg_sq_col"] + else: + state["exp_avg_sq"] = state["exp_avg_sq"] + + state["step"] += 1 + + update = (grad**2) + group["eps"][0] + if factored: + if param_is_dtensor: + # ============================== + # First Dim is R, Last Dim is S{} means split dim -1 ---> + # Column Parallel ---> sq_row needs a (col) reduce + # ============================== + if shard_spec.sharding_sequence[0] == "R": + update = self._col_parallel_factor( + update, + grad, + state["exp_avg_sq_row"], + state["exp_avg_sq_col"], + grad_shape, + group["betas"][1], + ) + # ============================== + # Last Dim is R, First Dim is S{} means split dim 0 ---> + # Row Parallel ---> sq_col needs a (row) reduce + # ============================== + elif shard_spec.sharding_sequence[-1] == "R": + update = self._row_parallel_factor( + update, + grad, + state["exp_avg_sq_row"], + state["exp_avg_sq_col"], + grad_shape, + group["betas"][1], + ) + else: + update = self._base_factor( + update, + grad, + state["exp_avg_sq_row"], + state["exp_avg_sq_col"], + grad_shape, + group["betas"][1], + ) + else: + exp_avg_sq = state["exp_avg_sq"] + exp_avg_sq.mul_(group["betas"][1]).add_(update, alpha=(1.0 - group["betas"][1])) + update = exp_avg_sq.rsqrt().mul_(grad) + rms = self._rms( + update, + param_is_dtensor, + self.use_zero, + self.tp_size, + self.dp_size, + self.tp_group, + self.dp_group, + ) + + update.div_((rms / group["clip_threshold"]).clamp_(min=1.0)) + + exp_avg = state["exp_avg"] + exp_avg.mul_(group["betas"][0]).add_(update, alpha=1 - group["betas"][0]) + # Confidence-guided strategy + # Calculation of instability + res = (update - exp_avg) ** 2 + group["eps"][1] + if factored: + if param_is_dtensor: + # ============================== + # First Dim is R, Last Dim is S{} means split dim -1 ---> + # Column Parallel ---> sq_row needs a (col) reduce + # ============================== + if shard_spec.sharding_sequence[0] == "R": + update = self._col_parallel_factor( + res, + exp_avg, + state["exp_avg_res_row"], + state["exp_avg_res_col"], + grad_shape, + group["betas"][2], + ) + # ============================== + # Last Dim is R, First Dim is S{} means split dim 0 ---> + # Row Parallel ---> sq_col needs a (row) reduce + # ============================== + elif shard_spec.sharding_sequence[-1] == "R": + update = self._row_parallel_factor( + res, + exp_avg, + state["exp_avg_res_row"], + state["exp_avg_res_col"], 
+ grad_shape, + group["betas"][2], + ) + else: + update = self._base_res_factor( + res, + exp_avg, + state["exp_avg_res_row"], + state["exp_avg_res_col"], + grad_shape, + group["betas"][2], + ) + else: + update = exp_avg + + if group["weight_decay"] != 0: + p.add_(p, alpha=-group["weight_decay"] * group["lr"]) + update.mul_(group["lr"]) + p.add_(-update) + return loss diff --git a/colossalai/nn/optimizer/distributed_galore.py b/colossalai/nn/optimizer/distributed_galore.py new file mode 100644 index 000000000..3f42dd5b9 --- /dev/null +++ b/colossalai/nn/optimizer/distributed_galore.py @@ -0,0 +1,279 @@ +""" adapted from https://github.com/jiaweizzhao/GaLore/blob/master/galore_torch/adamw8bit.py""" + +import warnings +from collections import defaultdict +from typing import Dict, Optional + +import torch +import torch.distributed as dist +import torch.nn.functional as F +from bitsandbytes.optim.optimizer import Optimizer2State + +from colossalai.interface.optimizer import DistributedOptim +from colossalai.tensor.d_tensor import get_shard_dim_1d, is_distributed_tensor + +from .galore import GaLoreProjector, make_low_rank_buffer + +__all__ = ["DistGaloreAwamW"] +# Mark sharded dimension + + +class DistGaloreAwamW(DistributedOptim, Optimizer2State): + r"""Implements GaLore, an optimizer-agnostic gradient compression technique, on 8-bit AdamW. + It largely compresses gradients via low-rank projection and is claimed to be insensitive to hyperparameters such as the learning rate. + Supports Tensor Parallel and ZeRO stage 1 and 2 via booster and plugin. + Proposed in `GaLore: Memory-Efficient LLM Training by Gradient Low-Rank Projection` + https://arxiv.org/abs/2403.03507 + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its norm. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0.01) + nbits: Number of bits for quantizing optimizer states. Only 32 and 8 are supported. + min_8bit_size (`int`, defaults to 4096): + The minimum number of elements of the parameter tensors for 8-bit optimization. + percentile_clipping (`int`, defaults to 100): + Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability. + block_wise (`bool`, defaults to `True`): + Whether to independently quantize each block of tensors to reduce outlier effects and improve stability. + is_paged (`bool`, defaults to `False`): + Whether the optimizer is a paged optimizer (handles memory spikes via CPU-GPU transfer) or not. + """ + + def __init__( + self, + params, + lr=1e-3, + betas=(0.9, 0.999), + eps=1e-8, + weight_decay=1e-2, + nbits=8, + min_8bit_size=4096, + percentile_clipping=100, + block_wise=True, + is_paged=False, + ): + super().__init__( + "adam", + params, + lr, + betas, + eps, + weight_decay, + nbits, + None, + min_8bit_size, + percentile_clipping, + block_wise, + is_paged=is_paged, + ) + self.tp_size = 1 + self.dp_size = 1 + self.is_dist = {} + proj_none = all(["rank" not in group for group in self.param_groups]) + if proj_none: + warnings.warn( + "Will not apply GaLore as no rank is specified in any param group. 
Did you forget to? Try get_galore_param_groups" + ) + + # Defaults from the paper + for group in self.param_groups: + if "rank" in group: + group["update_proj_gap"] = group.get("update_proj_gap", 200) + group["proj_type"] = group.get("proj_type", "std") + group["scale"] = group.get("scale", 0.25) + + def setup_distributed( + self, + tp_group: Optional[dist.ProcessGroup] = None, + dp_group: Optional[dist.ProcessGroup] = None, + shard_to_working_param: Optional[Dict] = None, + padding_map: Optional[Dict] = None, + is_zero: Optional[bool] = False, + ): + """Setup process groups for TP and ZeRO 2. + Arguments: + tp_group (dist.ProcessGroup): Tensor Parallel process group + dp_group (dist.ProcessGroup): ZeRO 2 process group + shard_to_working_param (Dict): ZeRO 2 feeds the optimizer a sharded param view as grads are sharded. + This maps from id(view) to working params used in forward & backward. + padding_map (Dict): Padding size of each param from ZeRO's param store. Required if ZeRO is used. + is_zero (bool): Whether to use ZeRO 2. + """ + assert dist.is_initialized(), "You forgot to initialize the distributed backend..." + + self.tp_group = tp_group + self.dp_group = dp_group + if tp_group is not None: + self.tp_size = dist.get_world_size(tp_group) + if dp_group is not None: + self.dp_size = dist.get_world_size(dp_group) + + self.shard_to_working_param = shard_to_working_param if shard_to_working_param is not None else {} + self.is_zero = is_zero and self.dp_size > 1 + self.padding_map = padding_map if padding_map is not None else defaultdict(int) + if is_zero: + assert len(self.padding_map) > 0, "We can't do SVD without knowing ZeRO's per-param padding size" + self.distributed_on = self.tp_size > 1 or self.dp_size > 1 + + # Cache working param layout + self.shard_dim = {} + for group in self.param_groups: + for p in group["params"]: + # w/o ZeRO: master param = working param + self.shard_to_working_param[id(p)] = self.shard_to_working_param.get(id(p), p) + if id(p) not in self.padding_map: + self.padding_map[id(p)] = 0 + + self.is_dist[id(p)] = is_distributed_tensor(self.shard_to_working_param[id(p)]) + if is_distributed_tensor(self.shard_to_working_param[id(p)]): + self.shard_dim[id(p)] = get_shard_dim_1d(self.shard_to_working_param[id(p)]) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + if not self.initialized: + self.check_overrides() + self.to_gpu() + self.initialized = True + + for gindex, group in enumerate(self.param_groups): + for pindex, p in enumerate(group["params"]): + if p.grad is None: + continue + state = self.state[p] + + if "step" not in state: + state["step"] = 0 + + # GaLore Projection + if "rank" in group: + if "projector" not in state: + state["projector"] = GaLoreProjector( + group["rank"], + scale=group["scale"], + update_proj_gap=group["update_proj_gap"], + proj_type=group["proj_type"], + ) + # decoupled weight decay + if "weight_decay" in group and group["weight_decay"] > 0: + group["weight_decay_saved"] = group["weight_decay"] + group["weight_decay"] = 0 + + grad = p.grad + working_shape = list(self.shard_to_working_param[id(p)].shape) + padding = self.padding_map[id(p)] + + # All-gather grads for projection step + if self.distributed_on: + # Gather for ZeRO 1 & 2 implementation don't retain full grads + if self.is_zero: + # (m, n).flatten().chunk(dp_size) equals to (m / dp_size, n).flatten() + working_shape[0] //= self.dp_size + # Gather grads for projection + if state["step"] % group["update_proj_gap"] == 0: + all_grads = [ + torch.empty_like(grad, dtype=p.grad.dtype, device=p.grad.device) + for _ in range(self.dp_size) + ] + dist.all_gather(all_grads, grad, self.dp_group) + grad = torch.cat(all_grads) + # To working param shape + if padding > 0: + grad = grad[:-padding] + working_shape[0] *= self.dp_size + grad = grad.reshape(working_shape) # unflatten + + # Gather TP grads + if self.is_dist[id(p)] and state["step"] % group["update_proj_gap"] == 0: + all_grads = [ + torch.empty_like(grad, dtype=p.grad.dtype, device=p.grad.device) + for _ in range(self.tp_size) + ] + dist.all_gather(all_grads, grad.contiguous(), self.tp_group) + grad = torch.cat(all_grads, dim=self.shard_dim[id(p)]) + + # Compute SVD. Will use a subset of singular vectors when grads are sharded. + grad = state["projector"].project(grad, state["step"]) + + # Re-shard gathered grads after SVD + if self.distributed_on and state["step"] % group["update_proj_gap"] == 0: + # TP + if self.is_dist[id(p)]: + grad = grad.chunk(self.tp_size, dim=self.shard_dim[id(p)])[dist.get_rank(self.tp_group)] + # ZeRO + # TODO: this might not work with padding, e.g. 
(3, 3) with dp size 2 + # Need extra logic in ZeRO to pad nRows/nCols to be divisible by dp_size + if self.is_zero: + grad = grad.chunk(self.dp_size)[dist.get_rank(self.dp_group)] + grad = grad.contiguous() # avoid bitsandbytes update error + + working_shape = grad.shape + # To flattened master param shape + grad = self.to_master_shape(grad, padding) + make_low_rank_buffer(p, grad) + + if "state1" not in state: + self.init_state(group, p, gindex, pindex) + + self.prefetch_state(p) + self.update_step(group, p, gindex, pindex) + torch.cuda.synchronize() + + # Project back to working param shape + if "rank" in group: + # Unpad + if self.is_zero: + if padding > 0: + p.data = p.data[:-padding] + p.data = p.data.reshape(working_shape) + + p.data = state["projector"].project_back(p.data) + # Re-flatten grads for ZeRO + p.data = self.to_master_shape(p.data, padding) + p.data = p.saved_data.add_(p.data) + + # apply decoupled weight decay + if "weight_decay_saved" in group: + p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay_saved"]) + group["weight_decay"] = group["weight_decay_saved"] + del group["weight_decay_saved"] + + if self.is_paged: + # all paged operations are asynchronous; we need + # to sync to make sure all tensors are in the right state + torch.cuda.synchronize() + return loss + + def to_master_shape(self, data, padding): + """Pad to the flattened master (optimizer) param shape""" + if not self.is_zero: + return data + data = data.view(-1) + if padding > 0: + data = F.pad(data, [0, padding]) + return data + + def __del__(self): + """Avoid buffer memory leak""" + for group in self.param_groups: + for p in group["params"]: + if hasattr(p, "saved_data"): + del p.saved_data diff --git a/colossalai/nn/optimizer/distributed_lamb.py b/colossalai/nn/optimizer/distributed_lamb.py new file mode 100644 index 000000000..c9ab8feab --- /dev/null +++ b/colossalai/nn/optimizer/distributed_lamb.py @@ -0,0 +1,181 @@ +# Disclaimer: Modified from https://github.com/NUS-HPC-AI-Lab/pytorch-lamb/blob/master/optim/lamb.py + + +from typing import Dict, Optional + +import torch +import torch.distributed as dist + +from colossalai.interface.optimizer import DistributedOptim +from colossalai.tensor.d_tensor import is_distributed_tensor + +__all__ = ["DistributedLamb"] + + +class DistributedLamb(DistributedOptim): + r"""Implements the Lamb algorithm, with extra support for ZeRO 2 and Tensor Parallel. + Proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_. + It's recommended to use this with HybridParallelPlugin/ZeRO plugin and booster, + which will take care of setup_distributed. + Example with 4 devices: + >>> optim = DistributedLamb(model.parameters(), lr=1e-3) + >>> proc_mesh = ProcessGroupMesh(tp_size, zero_size) + >>> tp_group = proc_mesh.get_group_along_axis(0) + >>> dp_group = proc_mesh.get_group_along_axis(1) + >>> optim.setup_distributed(tp_group, dp_group) + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-6) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + .. 
_Large Batch Optimization for Deep Learning: Training BERT in 76 minutes: + https://arxiv.org/abs/1904.00962 + """ + + def __init__( + self, + params, + lr=1e-3, + betas=(0.9, 0.999), + eps=1e-6, + weight_decay=0, + bias_correction=True, + ): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + + # Process groups are assigned later via setup_distributed + self.shard_to_working_param = {} + self.tp_size = self.dp_size = 1 + self.is_zero = False + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, bias_correction=bias_correction) + super().__init__(params, defaults) + + def setup_distributed( + self, + tp_group: Optional[dist.ProcessGroup] = None, + dp_group: Optional[dist.ProcessGroup] = None, + shard_to_working_param: Optional[Dict] = None, + padding_map=None, + is_zero: Optional[bool] = False, + ): + """Assign process groups for TP and ZeRO 2. + Arguments: + tp_group (dist.ProcessGroup): Tensor Parallel process group + dp_group (dist.ProcessGroup): ZeRO 2 process group + shard_to_working_param (Dict): ZeRO 2 feeds the optimizer a sharded param view as grads are sharded. + This maps from id(view) to working params used in forward & backward. + padding_map: Unused; kept to match the DistributedOptim interface + is_zero (bool): Whether to use ZeRO 2. + """ + self.tp_group = tp_group + self.dp_group = dp_group + if tp_group is not None: + self.tp_size = dist.get_world_size(tp_group) + if dp_group is not None: + self.dp_size = dist.get_world_size(dp_group) + + self.shard_to_working_param = shard_to_working_param if shard_to_working_param is not None else {} + self.is_zero = is_zero + self.is_dist = {} + # Cache parameter layout + for group in self.param_groups: + for p in group["params"]: + # w/o ZeRO: master param = working param + self.shard_to_working_param[id(p)] = self.shard_to_working_param.get(id(p), p) + self.is_dist[p] = ( + is_distributed_tensor(p) + if self.dp_size <= 1 + else is_distributed_tensor(self.shard_to_working_param.get(id(p), None)) + ) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError("Lamb does not support sparse gradients, consider SparseAdam instad.") + + state = self.state[p] + # State initialization + if len(state) == 0: + state["step"] = 0 + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like(p.data) + + exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] + beta1, beta2 = group["betas"] + + state["step"] += 1 + + # Decay the first and second moment running average coefficient + # m_t + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + # v_t + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + + scaled_lr = group["lr"] + if group["bias_correction"]: + bias_correction1 = 1 - beta1 ** state["step"] + bias_correction2 = 1 - beta2 ** state["step"] + # Apply debiasing to lr to avoid broadcast + scaled_lr *= (bias_correction2**0.5) / bias_correction1 + # exp_avg.div_(bias_correction1) + # exp_avg_sq.div_(bias_correction2) + + update = exp_avg / exp_avg_sq.sqrt().add(group["eps"]) + if group["weight_decay"] != 0: + update.add_(p.data, alpha=group["weight_decay"]) + + # Compute global layer-wise trust ratio + if self.is_dist[p] or self.is_zero: + p_local = p + g_sum = (update**2).sum() + if self.dp_size > 1 and self.is_zero: + # ZeRO 2 doesn't shard param. Compute full param norm w/o communication. + dist.all_reduce(g_sum, group=self.dp_group) + p_local = self.shard_to_working_param[id(p)] + + w_sum = (p_local**2).sum() + sums = torch.stack([w_sum, g_sum]) + + # Get global l2 norms + if self.tp_size > 1: + dist.all_reduce(sums, group=self.tp_group) + w_norm, g_norm = sums.sqrt().chunk(2) + else: + # Fall back to vanilla Lamb + w_norm = torch.norm(p) + g_norm = torch.norm(update) + + trust_ratio = torch.where(w_norm > 0 and g_norm > 0, (w_norm / g_norm), 1.0).item() + + scaled_lr *= trust_ratio + p.data.add_(update, alpha=-scaled_lr) + + return loss diff --git a/colossalai/nn/optimizer/galore.py b/colossalai/nn/optimizer/galore.py new file mode 100644 index 000000000..f7556fe61 --- /dev/null +++ b/colossalai/nn/optimizer/galore.py @@ -0,0 +1,315 @@ +""" adapted from https://github.com/jiaweizzhao/GaLore/blob/master/galore_torch/adamw8bit.py""" + +import warnings +from typing import List + +import torch +from bitsandbytes.optim.optimizer import Optimizer2State +from torch._C import _LinAlgError + + +def get_galore_param_groups( + model, weight_decay, rank=256, update_proj_gap=200, scale=0.25, proj_type="std" +) -> List[dict]: + """ + It's advised to use this instead of manually specifying which param groups + to apply GaLore on. + """ + galore_params = [] + non_galore = [] + no_decay_params = [] + no_decay = ["bias", "LayerNorm.weight"] + + for name, param in model.named_parameters(): + # Only make sense to do SVD on 2d gradient matrices + # e.g. nn.Linear, VocabEmbedding, etc. 
+ if any(nd in name for nd in no_decay): + no_decay_params.append(param) + elif param.dim() == 2: + galore_params.append(param) + else: + non_galore.append(param) + + param_groups = [ + { + "params": galore_params, + "rank": rank, + "update_proj_gap": update_proj_gap, + "scale": scale, + "proj_type": proj_type, + "weight_decay": weight_decay, + }, + {"params": non_galore, "weight_decay": weight_decay}, + {"params": no_decay_params, "weight_decay": 0.0}, + ] + + return param_groups + + +def make_low_rank_buffer(p, grad): + """For compatibility with bitsandbytes's update_step, we need an empty low-rank + param update buffer to avoid mutating original params. + TODO: optimize by reusing the memory for p.grad? Need to modify bitsandbytes? + """ + p.saved_data = p.data.clone() + # p.data = grad.clone().to(p.data.dtype).to(p.data.device) + p.data = torch.zeros_like(grad, device=grad.device, dtype=grad.dtype) + # p.data.zero_() + p.grad = grad + + +class GaLoreProjector: + def __init__(self, rank, verbose=False, update_proj_gap=200, scale=1.0, proj_type="std"): + self.rank = rank + self.verbose = verbose + self.update_proj_gap = update_proj_gap + self.scale = scale + self.ortho_matrix = None + self.proj_type = proj_type + self.svd_type = None + + def project(self, full_rank_grad, iter): + dim = full_rank_grad.dim() + if dim != 2: + warnings.warn( + f"You shouldn't specify a projection rank for {dim}D params in param_groups. Skipping SVD." + ) + return full_rank_grad + + m, n = full_rank_grad.shape # For ZeRO sharded grads + if self.proj_type == "std": + # Project along the lower dim to minimize information loss + if self.svd_type is None: + self.svd_type = "right" if m >= n else "left" + # SVD step + if self.ortho_matrix is None or iter % self.update_proj_gap == 0: + self.ortho_matrix = self.get_orthogonal_matrix(full_rank_grad, self.rank, type=self.svd_type) + if self.svd_type == "right": + low_rank_grad = torch.matmul(full_rank_grad, self.ortho_matrix.t()[:n]) + else: + low_rank_grad = torch.matmul(self.ortho_matrix.t()[:, :m], full_rank_grad) + + elif self.proj_type == "reverse_std": + if self.svd_type is None: + self.svd_type = "left" if m >= n else "right" + # SVD step + if self.ortho_matrix is None or iter % self.update_proj_gap == 0: + self.ortho_matrix = self.get_orthogonal_matrix(full_rank_grad, self.rank, type=self.svd_type) + + if self.svd_type == "left": + low_rank_grad = torch.matmul(self.ortho_matrix.t()[:, :m], full_rank_grad) + else: + low_rank_grad = torch.matmul(full_rank_grad, self.ortho_matrix.t()[:n]) + else: + raise ValueError(f"Unsupported projection type: {self.proj_type}") + return low_rank_grad + + def project_back(self, low_rank_grad): + if low_rank_grad.dim() != 2: + return + + m, n = low_rank_grad.shape + if self.svd_type == "right": + full_rank_grad = torch.matmul(low_rank_grad, self.ortho_matrix[:n]) + else: + full_rank_grad = torch.matmul(self.ortho_matrix[:, :m], low_rank_grad) + + return full_rank_grad * self.scale + + # SVD decomposition + def get_orthogonal_matrix(self, weights, rank, type): + module_params = weights + + if module_params.data.dtype != torch.float: + float_data = False + original_type = module_params.data.dtype + original_device = module_params.data.device + matrix = module_params.data.float() + else: + float_data = True + matrix = module_params.data + + # TODO: redo SVD in the next step. 
+ if matrix.isnan().any(): + print(f"{__file__}: skipping SVD due to NaN matrix") + return self.ortho_matrix + try: + U, s, Vh = torch.linalg.svd(matrix, full_matrices=False) + except _LinAlgError as e: + print(f"{__file__}: skipping SVD due to {e}") + return self.ortho_matrix + + # always make the smaller matrix the orthogonal one + if type == "right": + B = Vh[:rank, :] + + if not float_data: + B = B.to(original_device).type(original_type) + return B + elif type == "left": + A = U[:, :rank] + if not float_data: + A = A.to(original_device).type(original_type) + return A + elif type == "full": + A = U[:, :rank] + B = Vh[:rank, :] + if not float_data: + A = A.to(original_device).type(original_type) + B = B.to(original_device).type(original_type) + return [A, B] + else: + raise ValueError("type should be left, right or full") + + +class GaLoreAdamW8bit(Optimizer2State): + r"""Implements GaLore, an optimizer-agnostic gradient compression technique, on 8-bit AdamW. + Proposed in `GaLore: Memory-Efficient LLM Training by Gradient Low-Rank Projection`. It compresses + gradients via low-rank projection and is claimed to be insensitive to hyperparameters such as the learning rate. + https://arxiv.org/abs/2403.03507 + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups. + lr (float, optional): learning rate. (default: 1e-2) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its norm. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0.01) + nbits (int): The number of bits of optim states. Only 32 and 8 are supported. + min_8bit_size (`int`, defaults to 4096): + The minimum number of elements of the parameter tensors for 8-bit optimization. + percentile_clipping (`int`, defaults to 100): + Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability. + block_wise (`bool`, defaults to `True`): + Whether to independently quantize each block of tensors to reduce outlier effects and improve stability. + is_paged (`bool`, defaults to `False`): + Whether the optimizer is a paged optimizer (handles memory spikes via CPU-GPU transfer) or not. + """ + + def __init__( + self, + params, + lr=1e-2, + betas=(0.9, 0.999), + eps=1e-8, + weight_decay=1e-2, + nbits=8, + min_8bit_size=4096, + percentile_clipping=100, + block_wise=True, + is_paged=False, + ): + super().__init__( + "adam", + params, + lr, + betas, + eps, + weight_decay, + nbits, + None, + min_8bit_size, + percentile_clipping, + block_wise, + is_paged=is_paged, + ) + + proj_none = all(["rank" not in group for group in self.param_groups]) + if proj_none: + warnings.warn( + "Will not apply GaLore as no rank is specified in any param group. Did you forget to? Try get_galore_param_groups" + ) + + # Defaults from the paper + for group in self.param_groups: + if "rank" in group: + group["update_proj_gap"] = group.get("update_proj_gap", 200) + group["proj_type"] = group.get("proj_type", "std") + group["scale"] = group.get("scale", 0.25) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + if not self.initialized: + self.check_overrides() + self.to_gpu() # needed for fairseq pure fp16 training + self.initialized = True + + for gindex, group in enumerate(self.param_groups): + for pindex, p in enumerate(group["params"]): + if p.grad is None: + continue + if p is self.param_groups[0]["params"][0]: + torch.save(p.grad, "grad.pt") + state = self.state[p] + + if "step" not in state: + state["step"] = 0 + + # GaLore Projection + if "rank" in group: + if "projector" not in state: + state["projector"] = GaLoreProjector( + group["rank"], + scale=group["scale"], + update_proj_gap=group["update_proj_gap"], + proj_type=group["proj_type"], + ) + + if "weight_decay" in group and group["weight_decay"] > 0: + # ensure that the weight decay is not applied to the norm grad + group["weight_decay_saved"] = group["weight_decay"] + group["weight_decay"] = 0 + + grad = state["projector"].project(p.grad, state["step"]) + make_low_rank_buffer(p, grad) + + if "state1" not in state: + self.init_state(group, p, gindex, pindex) + + # p.grad = p.grad.contiguous() # avoid bitsandbytes update error + # Prefetch if paged + self.prefetch_state(p) + # Adam update step using the buffer + self.update_step(group, p, gindex, pindex) + torch.cuda.synchronize() + + # GaLore Projection Back + if "rank" in group: + if p is self.param_groups[0]["params"][1]: + pass + update = state["projector"].project_back(p.data) + p.data = p.saved_data.add_(update) + + # apply weight decay + if "weight_decay_saved" in group: + p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay_saved"]) + group["weight_decay"] = group["weight_decay_saved"] + del group["weight_decay_saved"] + + if self.is_paged: + # all paged operation are asynchronous, we need + # to sync to make sure all tensors are in the right state + torch.cuda.synchronize() + + return loss + + def __del__(self): + """Avoid buffer memory leak""" + for group in self.param_groups: + for p in group["params"]: + if hasattr(p, "saved_data"): + del p.saved_data diff --git a/colossalai/nn/optimizer/lamb.py b/colossalai/nn/optimizer/lamb.py index 0d742487f..eee93fb69 100644 --- a/colossalai/nn/optimizer/lamb.py +++ b/colossalai/nn/optimizer/lamb.py @@ -26,7 +26,9 @@ class Lamb(Optimizer): https://arxiv.org/abs/1904.00962 """ - def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0, adam=False): + def __init__( + self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0, adam=False, bias_correction=False + ): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: @@ -35,7 +37,7 @@ class Lamb(Optimizer): raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) - defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, bias_correction=bias_correction) self.adam = adam super(Lamb, self).__init__(params, defaults) @@ -79,12 +81,15 @@ class Lamb(Optimizer): # v_t exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) - # Paper v3 does not use debiasing. - # bias_correction1 = 1 - beta1 ** state['step'] - # bias_correction2 = 1 - beta2 ** state['step'] - # Apply bias to lr to avoid broadcast. 
- # * math.sqrt(bias_correction2) / bias_correction1 - step_size = group["lr"] + # NOTE: Paper v3 does not use debiasing. + scaled_lr = group["lr"] + if group["bias_correction"]: + bias_correction1 = 1 - beta1 ** state["step"] + bias_correction2 = 1 - beta2 ** state["step"] + # Apply debiasing to lr to avoid broadcast + scaled_lr *= (bias_correction2**0.5) / bias_correction1 + # exp_avg.div_(bias_correction1) + # exp_avg_sq.div_(bias_correction2) weight_norm = p.data.pow(2).sum().sqrt() @@ -97,12 +102,10 @@ class Lamb(Optimizer): trust_ratio = 1 else: trust_ratio = weight_norm / adam_norm - state["weight_norm"] = weight_norm - state["adam_norm"] = adam_norm - state["trust_ratio"] = trust_ratio + if self.adam: trust_ratio = 1 - p.data.add_(adam_step, alpha=-step_size * trust_ratio) + p.data.add_(adam_step, alpha=-scaled_lr * trust_ratio) return loss diff --git a/colossalai/tensor/d_tensor/__init__.py b/colossalai/tensor/d_tensor/__init__.py index 4129ec62e..26290c7ba 100644 --- a/colossalai/tensor/d_tensor/__init__.py +++ b/colossalai/tensor/d_tensor/__init__.py @@ -6,6 +6,7 @@ from .api import ( get_device_mesh, get_global_shape, get_layout, + get_shard_dim_1d, get_sharding_spec, init_as_dtensor, init_tensor_as_customization_distributed, @@ -37,6 +38,7 @@ __all__ = [ "get_device_mesh", "redistribute", "get_layout", + "get_shard_dim_1d", "is_customized_distributed_tensor", "distribute_tensor_with_customization", "init_tensor_as_customization_distributed", diff --git a/colossalai/tensor/d_tensor/api.py b/colossalai/tensor/d_tensor/api.py index 725817088..ede59ee89 100644 --- a/colossalai/tensor/d_tensor/api.py +++ b/colossalai/tensor/d_tensor/api.py @@ -8,6 +8,7 @@ import torch.distributed as dist from torch.distributed import ProcessGroup from colossalai.device.device_mesh import DeviceMesh +from colossalai.tensor.d_tensor.sharding_spec import DimSpec from .layout import Layout from .layout_converter import LayoutConverter @@ -15,6 +16,22 @@ from .sharding_spec import ShardingSpec layout_converter = LayoutConverter() +_SHARD_DIM = DimSpec([0]) + + +def get_shard_dim_1d(p: torch.Tensor): + """ + Get the dimension along which the tensor is sharded, for example in 1D Tensor Parallel. + Args: + p (torch.Tensor): the input tensor + Returns: + int: the dimension along which the tensor is sharded + """ + if not is_distributed_tensor(p): + raise ValueError("p is not a distributed tensor") + sharding = p.dist_layout.sharding_spec.sharding_sequence + return sharding.index(_SHARD_DIM) + def clear_layout_converter(): global layout_converter diff --git a/colossalai/tensor/d_tensor/sharding_spec.py b/colossalai/tensor/d_tensor/sharding_spec.py index 2ac0ca73e..16a4f248b 100644 --- a/colossalai/tensor/d_tensor/sharding_spec.py +++ b/colossalai/tensor/d_tensor/sharding_spec.py @@ -140,8 +140,9 @@ class DimSpec: class ShardingSpec: """ - Sharding spec describes how to shard a tensor with dim_size dimensions. The sharding sequence looks like - [R, R, S0, S1], which means + Sharding spec describes how to shard a tensor with dim_size dimensions. For example for a 3D tensor, the sharding sequence + [R, S0, S1] means not sharding the first dim, sharding the second dim along the 1st device mesh axis (process group) + and sharding the third dim along the 2nd device mesh axis. Useful for, say, 2D Tensor Parallel. 
Argument: + dim_partition_dict(Dict[int, List[int]], optional): The key is the dimension of tensor to be sharded, diff --git a/colossalai/zero/low_level/bookkeeping/parameter_store.py b/colossalai/zero/low_level/bookkeeping/parameter_store.py index e94fb4de9..c03231f5f 100644 --- a/colossalai/zero/low_level/bookkeeping/parameter_store.py +++ b/colossalai/zero/low_level/bookkeeping/parameter_store.py @@ -1,3 +1,5 @@ +from typing import Dict + from torch import Tensor from torch.distributed import ProcessGroup @@ -47,3 +49,12 @@ class ParameterStore(BaseStore): self.master_to_working_param[id(master_param)] = working_param self.working_to_master_param[id(working_param)] = master_param + + def get_padding_map(self) -> Dict[int, int]: + """Return the padding map + + Returns: + Dict[int, int]: Mapping from id(param) to its padding size + """ + + return self._padding_map diff --git a/colossalai/zero/low_level/low_level_optim.py b/colossalai/zero/low_level/low_level_optim.py index 1b856cafd..5f7f2a4e2 100644 --- a/colossalai/zero/low_level/low_level_optim.py +++ b/colossalai/zero/low_level/low_level_optim.py @@ -249,6 +249,7 @@ class LowLevelZeroOptimizer(OptimizerWrapper): else: splited_param_current_rank = splited_params + # Send the split view to the optimizer to match ZeRO 2's grad shape params_current_rank.append(splited_param_current_rank) self._param_store.link_master_and_working_param(splited_param_current_rank, param) @@ -395,15 +396,15 @@ class LowLevelZeroOptimizer(OptimizerWrapper): else: if bucket_store.moe_extra_dp_pg is None: flat_grads_list = list(flat_grads.split(len(flat_grads) // bucket_store.zero_world_size)) - recieved_grad = torch.zeros_like(flat_grads_list[0]) - dist.reduce_scatter(recieved_grad, flat_grads_list, group=bucket_store.torch_pg) + received_grad = torch.zeros_like(flat_grads_list[0]) + dist.reduce_scatter(received_grad, flat_grads_list, group=bucket_store.torch_pg) - if recieved_grad.dtype != grad_dtype: - recieved_grad = recieved_grad.to(grad_dtype) + if received_grad.dtype != grad_dtype: + received_grad = received_grad.to(grad_dtype) grad_in_bucket_current_rank = bucket_store.get_grad()[bucket_store.zero_local_rank] LowLevelZeroOptimizer.update_partitoned_grad( - bucket_store, grad_store, grad_in_bucket_current_rank, recieved_grad, group_id, 1 + bucket_store, grad_store, grad_in_bucket_current_rank, received_grad, group_id, 1 ) else: # categorize moe and non moe param @@ -420,13 +421,13 @@ class LowLevelZeroOptimizer(OptimizerWrapper): flat_grads_list = list( non_moe_flat_grads.split(len(non_moe_flat_grads) // bucket_store.zero_world_size) ) - recieved_grad = torch.zeros_like(flat_grads_list[0]) - dist.reduce_scatter(recieved_grad, flat_grads_list, group=bucket_store.torch_pg) + received_grad = torch.zeros_like(flat_grads_list[0]) + dist.reduce_scatter(received_grad, flat_grads_list, group=bucket_store.torch_pg) LowLevelZeroOptimizer.update_partitoned_grad( bucket_store, grad_store, non_moe_grad_in_bucket_current_rank, - recieved_grad, + received_grad, group_id, 1, ) @@ -435,15 +436,15 @@ class LowLevelZeroOptimizer(OptimizerWrapper): flat_grads_list = list( moe_flat_grads.split(len(moe_flat_grads) // bucket_store.moe_extra_dp_pg_size) ) - recieved_grad = torch.zeros_like(flat_grads_list[0]) + received_grad = torch.zeros_like(flat_grads_list[0]) + dist.reduce_scatter( - recieved_grad, + received_grad, flat_grads_list, group=bucket_store.moe_extra_dp_pg, ) param_slice = bucket_store.zero_world_size // bucket_store.moe_extra_dp_pg_size - recieved_grad = 
list(recieved_grad.split(len(recieved_grad) // param_slice)) - for split_recieved_grad in recieved_grad: + received_grad = list(received_grad.split(len(received_grad) // param_slice)) + for split_recieved_grad in received_grad: split_recieved_grad = _unflatten_dense_tensors( split_recieved_grad, moe_grad_in_bucket_current_rank ) @@ -1019,3 +1020,6 @@ class LowLevelZeroOptimizer(OptimizerWrapper): **self.moe_master_to_working_map, } return self._param_store.master_to_working_param + + def get_param_padding_map(self) -> Dict[int, int]: + return self._param_store.get_padding_map() diff --git a/docs/source/en/features/distributed_optimizers.md b/docs/source/en/features/distributed_optimizers.md new file mode 100644 index 000000000..7590669df --- /dev/null +++ b/docs/source/en/features/distributed_optimizers.md @@ -0,0 +1,141 @@
+# Distributed Optimizers
+
+Author: [Wenxuan Tan](https://github.com/Edenzzzz), [Junwen Duan](https://github.com/duanjunwen), [Renjie Mao](https://github.com/chongqichuizi875)
+
+**Related Papers**
+- [Adafactor: Adaptive Learning Rates with Sublinear Memory Cost](https://arxiv.org/abs/1804.04235)
+- [CAME: Confidence-guided Adaptive Memory Efficient Optimization](https://arxiv.org/abs/2307.02047)
+- [GaLore: Memory-Efficient LLM Training by Gradient Low-Rank Projection](https://arxiv.org/abs/2403.03507)
+- [Large Batch Optimization for Deep Learning: Training BERT in 76 minutes](https://arxiv.org/pdf/1904.00962)
+
+## Introduction
+Apart from the widely adopted Adam and SGD, many modern optimizers require layer-wise statistics to update parameters efficiently, and are thus not directly applicable to parallel settings where model layers are sharded across multiple devices. We provide optimized distributed implementations with minimal extra communication, and seamless integration with Tensor Parallel, DDP and ZeRO via plugins.
+
+## Optimizers
+Adafactor is a first-order Adam variant that uses Non-negative Matrix Factorization (NMF) to reduce the memory footprint. CAME improves on it by introducing a confidence matrix to correct NMF. GaLore further reduces memory by projecting gradients into a low-rank space and applying 8-bit block-wise quantization. Lamb allows huge batch sizes without losing accuracy via layer-wise adaptive updates bounded by the inverse of its Lipschitz constant.
+
+## API Reference
+
+{{ autodoc:colossalai.nn.optimizer.distributed_adafactor.DistributedAdaFactor }}
+{{ autodoc:colossalai.nn.optimizer.distributed_lamb.DistributedLamb }}
+{{ autodoc:colossalai.nn.optimizer.distributed_galore.DistGaloreAwamW }}
+{{ autodoc:colossalai.nn.optimizer.distributed_came.DistributedCAME }}
+
+## Hands-On Practice
+We now demonstrate how to use Distributed Adafactor with the booster API, combining Tensor Parallel and ZeRO 2 on 4 GPUs.
+### step 1. Import libraries
+
+```python
+from transformers import LlamaModel, LlamaConfig
+from colossalai.nn.optimizer.distributed_adafactor import DistributedAdaFactor
+from colossalai.booster import Booster
+from colossalai.booster.plugin import HybridParallelPlugin
+import colossalai
+import torch
+```
+
+### step 2. Initialize Distributed Environment and Parallelism Group
+We need to initialize the distributed environment. For demonstration purposes, we use `colossal run --nproc_per_node 4`. You can refer to [Launch Colossal-AI](../basics/launch_colossalai.md).
+
+```python
+colossalai.launch_from_torch()
+```
+
+### step 3. Initialize Module and Optimizer
+Build our model. Here we initialize a Llama model from Hugging Face.
+
+```python
+# Init Llama from huggingface
+configuration = LlamaConfig()
+model = LlamaModel(configuration).cuda()
+criterion = lambda x: x.mean()
+dist_optim = DistributedAdaFactor(model.parameters())
+```
+
+### step 4. Init Booster
+
+```python
+plugin = HybridParallelPlugin(tp_size=2, zero_stage=2, pp_size=1, enable_all_optimization=True)
+booster = Booster(plugin=plugin)
+# You should also pass in your own dataset.
+model, dist_optim, criterion, dataloader, _ = booster.boost(model, dist_optim, criterion)
+```
+
+### step 5. Train Your Model
+
+```python
+steps = 10
+for step in range(steps):
+    input_ids = torch.ones(1, 100, device="cuda", dtype=torch.int)
+    attention_mask = input_ids.clone()
+    outputs = model(input_ids.cuda(), attention_mask.cuda())
+    loss = criterion(outputs.last_hidden_state)
+    booster.backward(loss, dist_optim)
+    dist_optim.step()
+    dist_optim.zero_grad()
+```
+
+### GaLore special handling
+For GaLore, we need to specify the projection rank for each parameter group, along with quantization and paged optimizer parameters. Please refer to bitsandbytes for quantization details. Support for ZeRO is underway.
+
+```python
+from colossalai.nn.optimizer.galore import get_galore_param_groups
+from colossalai.nn.optimizer import DistGaloreAwamW
+optim = DistGaloreAwamW(
+    get_galore_param_groups(model, weight_decay=1e-2, rank=8),
+    lr=lr,
+    betas=(beta1, beta2),
+    eps=eps,
+    nbits=8,
+    percentile_clipping=100,
+    block_wise=True,
+    min_8bit_size=4096,
+)
+```
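+
+The booster wires up the process groups for you. For reference, the distributed optimizers can also be set up without a plugin by passing process groups manually. Below is a minimal sketch for `DistributedLamb` on 4 devices, adapted from its class docstring (`model` as defined above; the mesh sizes are illustrative):
+
+```python
+import colossalai
+from colossalai.cluster import ProcessGroupMesh
+from colossalai.nn.optimizer.distributed_lamb import DistributedLamb
+
+colossalai.launch_from_torch()
+tp_size = zero_size = 2  # hypothetical 2x2 device mesh on 4 GPUs
+
+# Axis 0 holds the Tensor Parallel group, axis 1 the ZeRO/DP group
+proc_mesh = ProcessGroupMesh(tp_size, zero_size)
+tp_group = proc_mesh.get_group_along_axis(0)
+dp_group = proc_mesh.get_group_along_axis(1)
+
+optim = DistributedLamb(model.parameters(), lr=1e-3)
+# Hand the process groups to the optimizer; booster plugins call this automatically
+optim.setup_distributed(tp_group, dp_group)
+```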
Model/Feature | +Lamb | +GaLore | +Adafactor | +CAME | +||||||||||||||||||||||||||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Hybrid Parallel Plugin |
+ ✔️ | +✔️ | +✔️ | +✔️ | +||||||||||||||||||||||||||||||||||
Low Level Zero Plugin |
+ ✔️ | +❌ | +✔️ | +✔️ | +||||||||||||||||||||||||||||||||||
Torch DDP Plugin |
+ ✔️ | +✔️ | +✔️ | +✔️ | +||||||||||||||||||||||||||||||||||
Gemini Plugin |
+ ❌ | +❌ | +❌ | +❌ | +||||||||||||||||||||||||||||||||||
Moe Hybrid Plugin |
+ ❌ | +❌ | +❌ | +❌ | +||||||||||||||||||||||||||||||||||
+ |
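+
+For runs without parallelism, the non-distributed `GaLoreAdamW8bit` in `colossalai.nn.optimizer.galore` offers the same quantization options. A minimal sketch (hyperparameter values are illustrative):
+
+```python
+from colossalai.nn.optimizer.galore import GaLoreAdamW8bit, get_galore_param_groups
+
+optim = GaLoreAdamW8bit(
+    get_galore_param_groups(model, weight_decay=1e-2, rank=8),
+    lr=1e-2,  # class default
+    nbits=8,  # 8-bit quantized optimizer states
+)
+```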
Model/Feature | +Lamb | +GaLore | +Adafactor | +CAME | +||||||||||||||||||||||||||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Hybrid Parallel Plugin |
+ ✔️ | +✔️ | +✔️ | +✔️ | +||||||||||||||||||||||||||||||||||
Low Level Zero Plugin |
+ ✔️ | +❌ | +✔️ | +✔️ | +||||||||||||||||||||||||||||||||||
Torch DDP Plugin |
+ ✔️ | +✔️ | +✔️ | +✔️ | +||||||||||||||||||||||||||||||||||
Gemini Plugin |
+ ❌ | +❌ | +❌ | +❌ | +||||||||||||||||||||||||||||||||||
Moe Hybrid Plugin |
+ ❌ | +❌ | +❌ | +❌ | +||||||||||||||||||||||||||||||||||
+ |