[legacy] move builder and registry to legacy (#4603)

Hongxin Liu
2023-09-04 19:56:42 +08:00
parent 8accecd55b
commit ac178ca5c1
65 changed files with 353 additions and 332 deletions
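Every import hunk below makes the same one-line substitution: the OPTIMIZERS registry is now imported from colossalai.legacy.registry instead of colossalai.registry. A minimal sketch of the registration pattern under the new path, assuming the decorator API itself is unchanged by the move (ToyOptimizer is a hypothetical example, not a class touched by this commit):

import torch
from torch.optim import Optimizer

from colossalai.legacy.registry import OPTIMIZERS  # was: from colossalai.registry import OPTIMIZERS


@OPTIMIZERS.register_module
class ToyOptimizer(Optimizer):
    """Hypothetical optimizer registered through the relocated OPTIMIZERS registry."""

    def __init__(self, params, lr: float = 1e-3) -> None:
        super().__init__(params, dict(lr=lr))

    @torch.no_grad()
    def step(self, closure=None):
        # Plain SGD update, only here to make the example self-contained.
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is not None:
                    p.add_(p.grad, alpha=-group['lr'])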

View File

@@ -4,7 +4,7 @@ from typing import Optional
 import torch
 from colossalai.kernel.op_builder import CPUAdamBuilder
-from colossalai.registry import OPTIMIZERS
+from colossalai.legacy.registry import OPTIMIZERS
 from .nvme_optimizer import NVMeOptimizer

View File

@@ -8,7 +8,7 @@ Licensed under the MIT License.
 '''
 import torch
-from colossalai.registry import OPTIMIZERS
+from colossalai.legacy.registry import OPTIMIZERS
 from colossalai.utils import multi_tensor_applier

View File

@@ -1,7 +1,7 @@
 # modified from https://github.com/NVIDIA/apex/blob/master/apex/optimizers/fused_lamb.py
 import torch
-from colossalai.registry import OPTIMIZERS
+from colossalai.legacy.registry import OPTIMIZERS
 from colossalai.utils import multi_tensor_applier

View File

@@ -2,7 +2,7 @@
 import torch
 from torch.optim.optimizer import Optimizer, required
-from colossalai.registry import OPTIMIZERS
+from colossalai.legacy.registry import OPTIMIZERS
 from colossalai.utils import multi_tensor_applier

View File

@@ -4,7 +4,7 @@ import torch
 from torch.optim import Adam
 from colossalai.kernel.op_builder import FusedOptimBuilder
-from colossalai.registry import OPTIMIZERS
+from colossalai.legacy.registry import OPTIMIZERS
 from colossalai.utils import multi_tensor_applier
 from .cpu_adam import CPUAdam

View File

@@ -5,7 +5,7 @@ Adapted from the pytorch-lamb library at https://github.com/cybertronai/pytorch-
 import torch
 from torch.optim import Optimizer
-from colossalai.registry import OPTIMIZERS
+from colossalai.legacy.registry import OPTIMIZERS
 @OPTIMIZERS.register_module

View File

@@ -5,7 +5,7 @@ from typing import Iterable
 import torch
 from torch.optim import Optimizer
-from colossalai.registry import OPTIMIZERS
+from colossalai.legacy.registry import OPTIMIZERS
 @OPTIMIZERS.register_module
@@ -22,28 +22,24 @@ class Lars(Optimizer):
         weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
     """
-    def __init__(
-        self,
-        params: Iterable[torch.nn.Parameter],
-        lr=1e-3,
-        momentum=0,
-        eeta=1e-3,
-        weight_decay=0,
-        epsilon=0.0
-    ) -> None:
+    def __init__(self,
+                 params: Iterable[torch.nn.Parameter],
+                 lr=1e-3,
+                 momentum=0,
+                 eeta=1e-3,
+                 weight_decay=0,
+                 epsilon=0.0) -> None:
         if not isinstance(lr, float) or lr < 0.0:
             raise ValueError("Invalid learning rate: {}".format(lr))
         if momentum < 0.0:
             raise ValueError("Invalid momentum value: {}".format(momentum))
         if weight_decay < 0.0:
-            raise ValueError(
-                "Invalid weight_decay value: {}".format(weight_decay))
+            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
         if eeta <= 0 or eeta > 1:
             raise ValueError("Invalid eeta value: {}".format(eeta))
         if epsilon < 0:
             raise ValueError("Invalid epsilon value: {}".format(epsilon))
-        defaults = dict(lr=lr, momentum=momentum,
-                        weight_decay=weight_decay, eeta=eeta, epsilon=epsilon, lars=True)
+        defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay, eeta=eeta, epsilon=epsilon, lars=True)
         super().__init__(params, defaults)
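A usage sketch for the constructor reformatted above, assuming the Lars class from this file is already in scope (its module path is not shown in the diff); the hyperparameter values are illustrative:

import torch

model = torch.nn.Linear(16, 4)
optimizer = Lars(model.parameters(), lr=1e-2, momentum=0.9, eeta=1e-3, weight_decay=1e-4)

loss = model(torch.randn(8, 16)).sum()  # dummy forward/backward pass
loss.backward()
optimizer.step()
optimizer.zero_grad()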
@@ -76,11 +72,9 @@ class Lars(Optimizer):
                 if lars:
                     w_norm = torch.norm(p)
                     g_norm = torch.norm(p.grad)
-                    trust_ratio = torch.where(
-                        w_norm > 0 and g_norm > 0,
-                        eeta * w_norm / (g_norm + weight_decay * w_norm + eps),
-                        torch.ones_like(w_norm)
-                    )
+                    trust_ratio = torch.where(w_norm > 0 and g_norm > 0,
+                                              eeta * w_norm / (g_norm + weight_decay * w_norm + eps),
+                                              torch.ones_like(w_norm))
                     trust_ratio.clamp_(0.0, 50)
                     scaled_lr *= trust_ratio.item()
                     if weight_decay != 0:
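The hunk above only re-wraps the torch.where call; the trust-ratio computation is unchanged. For reference, the same rule written as a standalone helper (a sketch with a hypothetical name, mirroring the hyperparameters used in the diff):

import torch

def lars_trust_ratio(param: torch.Tensor,
                     grad: torch.Tensor,
                     eeta: float = 1e-3,
                     weight_decay: float = 0.0,
                     eps: float = 0.0) -> float:
    # LARS layer-wise scaling: eeta * ||w|| / (||g|| + weight_decay * ||w|| + eps),
    # falling back to 1.0 when either norm is zero, then clamped to [0, 50] as in the diff.
    w_norm = torch.norm(param)
    g_norm = torch.norm(grad)
    if w_norm > 0 and g_norm > 0:
        ratio = eeta * w_norm / (g_norm + weight_decay * w_norm + eps)
    else:
        ratio = torch.ones_like(w_norm)
    return float(ratio.clamp(0.0, 50.0))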
@@ -90,8 +84,7 @@ class Lars(Optimizer):
                 if momentum != 0:
                     param_state = self.state[p]
                     if 'momentum_buffer' not in param_state:
-                        buf = param_state['momentum_buffer'] = torch.clone(
-                            decayed_grad).detach()
+                        buf = param_state['momentum_buffer'] = torch.clone(decayed_grad).detach()
                     else:
                         buf = param_state['momentum_buffer']
                         buf.mul_(momentum).add_(decayed_grad)
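The final hunk only re-wraps the torch.clone line; the momentum-buffer logic is unchanged. A sketch of that branch for a single parameter, with illustrative names (state stands in for self.state[p]):

import torch

def apply_momentum(state: dict, decayed_grad: torch.Tensor, momentum: float) -> torch.Tensor:
    if 'momentum_buffer' not in state:
        # First step: the buffer starts as a detached copy of the decayed gradient.
        buf = state['momentum_buffer'] = torch.clone(decayed_grad).detach()
    else:
        # Later steps: buf <- momentum * buf + decayed_grad.
        buf = state['momentum_buffer']
        buf.mul_(momentum).add_(decayed_grad)
    return buf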