mirror of
https://github.com/hpcaitech/ColossalAI.git
synced 2025-04-28 11:45:23 +00:00
* [feat] Add distributed lamb; minor fixes in DeviceMesh (#5476) * init: add dist lamb; add debiasing for lamb * dist lamb tester mostly done * all tests passed * add comments * all tests passed. Removed debugging statements * moved setup_distributed inside plugin. Added dist layout caching * organize better --------- Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [hotfix] Improve tester precision by removing ZeRO on vanilla lamb (#5576) Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [optim] add distributed came (#5526) * test CAME under LowLevelZeroOptimizer wrapper * test CAME TP row and col pass * test CAME zero pass * came zero add master and worker param id convert * came zero test pass * came zero test pass * test distributed came passed * reform code, Modify some expressions and add comments * minor fix of test came * minor fix of dist_came and test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * minor fix of dist_came and test * rebase dist-optim * rebase dist-optim * fix remaining comments * add test dist came using booster api --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [optim] Distributed Adafactor (#5484) * [feature] solve conflict; update optimizer readme; * [feature] update optimize readme; * [fix] fix testcase; * [feature] Add transformer-bert to testcase;solve a bug related to indivisible shape (induction in use_zero and tp is row parallel); * [feature] Add transformers_bert model zoo in testcase; * [feature] add user documentation to docs/source/feature. * [feature] add API Reference & Sample to optimizer Readme; add state check for bert exam; * [feature] modify user documentation; * [fix] fix readme format issue; * [fix] add zero=0 in testcase; cached augment in dict; * [fix] fix percision issue; * [feature] add distributed rms; * [feature] remove useless comment in testcase; * [fix] Remove useless test; open zero test; remove fp16 test in bert exam; * [feature] Extract distributed rms function; * [feature] add booster + lowlevelzeroPlugin in test; * [feature] add Start_with_booster_API case in md; add Supporting Information in md; * [fix] Also remove state movement in base adafactor; * [feature] extract factor function; * [feature] add LowLevelZeroPlugin test; * [fix] add tp=False and zero=True in logic; * [fix] fix use zero logic; * [feature] add row residue logic in column parallel factor; * [feature] add check optim state func; * [feature] Remove duplicate logic; * [feature] update optim state check func and percision test bug; * [fix] update/fix optim state; Still exist percision issue; * [fix] Add use_zero check in _rms; Add plugin support info in Readme; Add Dist Adafactor init Info; * [feature] removed print & comments in utils; * [feature] uodate Readme; * [feature] add LowLevelZeroPlugin test with Bert model zoo; * [fix] fix logic in _rms; * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [fix] remove comments in testcase; * [feature] add zh-Han Readme; --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Feature] refractor dist came; fix percision error; add low level zero test with bert model zoo; (#5676) * [feature] daily update; * [fix] fix dist came; * [feature] refractor dist came; fix percision error; add low level zero test with bert model zoo; * [fix] open rms; fix low level zero test; fix dist came test function name; * [fix] remove redundant test; * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Feature] Add Galore (Adam, Adafactor) and distributed GaloreAdamW8bit (#5570) * init: add dist lamb; add debiasing for lamb * dist lamb tester mostly done * all tests passed * add comments * all tests passed. Removed debugging statements * moved setup_distributed inside plugin. Added dist layout caching * organize better * update comments * add initial distributed galore * add initial distributed galore * add galore set param utils; change setup_distributed interface * projected grad precision passed * basic precision tests passed * tests passed; located svd precision issue in fwd-bwd; banned these tests * Plugin DP + TP tests passed * move get_shard_dim to d_tensor * add comments * remove useless files * remove useless files * fix zero typo * improve interface * remove moe changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix import * fix deepcopy * update came & adafactor to main * fix param map * fix typo --------- Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Hotfix] Remove one buggy test case from dist_adafactor for now (#5692) Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --------- Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: chongqichuizi875 <107315010+chongqichuizi875@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: duanjunwen <54985467+duanjunwen@users.noreply.github.com> Co-authored-by: Hongxin Liu <lhx0217@gmail.com>
202 lines
7.0 KiB
Python
202 lines
7.0 KiB
Python
# coding=utf-8
|
|
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
# you may not use this file except in compliance with the License.
|
|
# You may obtain a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
# See the License for the specific language governing permissions and
|
|
# limitations under the License.
|
|
|
|
import math
|
|
|
|
import torch
|
|
from torch.optim import Optimizer
|
|
|
|
__all__ = ["Adafactor"]
|
|
|
|
|
|
# Adafactor
|
|
class Adafactor(Optimizer):
|
|
def __init__(
|
|
self,
|
|
params,
|
|
lr=None,
|
|
eps=(1e-30, 1e-3),
|
|
clip_threshold=1.0,
|
|
decay_rate=-0.8,
|
|
beta1=None,
|
|
weight_decay=0.0,
|
|
scale_parameter=True,
|
|
relative_step=True,
|
|
warmup_init=False,
|
|
):
|
|
lr = None
|
|
if lr is not None and relative_step:
|
|
raise ValueError("Cannot combine manual `lr` and `relative_step=True` options")
|
|
if warmup_init and not relative_step:
|
|
raise ValueError("`warmup_init=True` requires `relative_step=True`")
|
|
|
|
defaults = {
|
|
"lr": lr,
|
|
"eps": eps,
|
|
"clip_threshold": clip_threshold,
|
|
"decay_rate": decay_rate,
|
|
"beta1": beta1,
|
|
"weight_decay": weight_decay,
|
|
"scale_parameter": scale_parameter,
|
|
"relative_step": relative_step,
|
|
"warmup_init": warmup_init,
|
|
}
|
|
super().__init__(params, defaults)
|
|
|
|
@staticmethod
|
|
def _get_lr(param_group, param_state):
|
|
rel_step_sz = param_group["lr"]
|
|
if param_group["relative_step"]:
|
|
min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2
|
|
rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"]))
|
|
param_scale = 1.0
|
|
if param_group["scale_parameter"]:
|
|
param_scale = max(param_group["eps"][1], param_state["RMS"])
|
|
return param_scale * rel_step_sz
|
|
|
|
@staticmethod
|
|
def _get_options(param_group, param_shape):
|
|
factored = len(param_shape) >= 2
|
|
use_first_moment = param_group["beta1"] is not None
|
|
return factored, use_first_moment
|
|
|
|
@staticmethod
|
|
def _rms(tensor):
|
|
return tensor.norm(2) / (tensor.numel() ** 0.5)
|
|
|
|
@staticmethod
|
|
def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col):
|
|
r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
|
|
c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
|
|
return torch.mul(r_factor, c_factor)
|
|
|
|
@torch.no_grad()
|
|
def step(self, closure=None):
|
|
"""
|
|
Performs a single optimization step
|
|
|
|
Arguments:
|
|
closure (callable, optional): A closure that reevaluates the model
|
|
and returns the loss.
|
|
"""
|
|
loss = None
|
|
if closure is not None:
|
|
loss = closure()
|
|
|
|
"""
|
|
param_groups: Dict
|
|
{
|
|
"params":[weight, bias]
|
|
"lr"
|
|
"eps"
|
|
"clip_threshold"
|
|
"decay_rate"
|
|
"beta1"
|
|
"weight_decay"
|
|
"scale_parameter"
|
|
"relative_step"
|
|
"warmup_init"
|
|
}
|
|
"""
|
|
|
|
for group in self.param_groups:
|
|
# update weight & bias
|
|
for p in group["params"]:
|
|
if p.grad is None:
|
|
continue
|
|
"""
|
|
# grad shape is same as weigh / bias
|
|
"""
|
|
grad = p.grad
|
|
if grad.is_sparse:
|
|
raise RuntimeError("Adafactor does not support sparse gradients.")
|
|
|
|
"""
|
|
p is weight
|
|
state
|
|
{'step',
|
|
'exp_avg_sq_row',
|
|
'exp_avg_sq_col',
|
|
'RMS'
|
|
}
|
|
|
|
p is bias
|
|
state
|
|
{'step',
|
|
'exp_avg_sq',
|
|
'RMS'
|
|
}
|
|
"""
|
|
|
|
state = self.state[p]
|
|
grad_shape = grad.shape
|
|
|
|
factored, use_first_moment = self._get_options(group, grad_shape)
|
|
# State Initialization
|
|
if len(state) == 0:
|
|
state["step"] = 0
|
|
if use_first_moment:
|
|
# Exponential moving average of gradient values
|
|
state["exp_avg"] = torch.zeros_like(grad)
|
|
if factored:
|
|
state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1], device=grad.device)
|
|
state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:], device=grad.device)
|
|
else:
|
|
state["exp_avg_sq"] = torch.zeros_like(grad)
|
|
|
|
state["RMS"] = 0
|
|
else:
|
|
if use_first_moment:
|
|
state["exp_avg"] = state["exp_avg"]
|
|
if factored:
|
|
state["exp_avg_sq_row"] = state["exp_avg_sq_row"]
|
|
state["exp_avg_sq_col"] = state["exp_avg_sq_col"]
|
|
else:
|
|
state["exp_avg_sq"] = state["exp_avg_sq"]
|
|
|
|
state["step"] += 1
|
|
# state["RMS"] = self._rms(p_data_fp32)
|
|
lr = self._get_lr(group, state)
|
|
beta2t = 1.0 - math.pow(state["step"], group["decay_rate"])
|
|
update = (grad**2) + group["eps"][0]
|
|
if factored:
|
|
exp_avg_sq_row = state["exp_avg_sq_row"]
|
|
exp_avg_sq_col = state["exp_avg_sq_col"]
|
|
# Exponential average of row indexes
|
|
exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=(1.0 - beta2t))
|
|
# Exponential average of columns indexes
|
|
exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=(1.0 - beta2t))
|
|
# Approximation of exponential moving average of square of gradient
|
|
update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
|
|
update.mul_(grad)
|
|
else:
|
|
exp_avg_sq = state["exp_avg_sq"]
|
|
exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t))
|
|
update = exp_avg_sq.rsqrt().mul_(grad)
|
|
# RMS
|
|
update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0))
|
|
update.mul_(lr)
|
|
|
|
if use_first_moment:
|
|
exp_avg = state["exp_avg"]
|
|
exp_avg.mul_(group["beta1"]).add_(update, alpha=(1 - group["beta1"]))
|
|
update = exp_avg
|
|
|
|
if group["weight_decay"] != 0:
|
|
p.add_(p, alpha=(-group["weight_decay"] * lr))
|
|
p.add_(-update)
|
|
|
|
return loss
|