Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-13 05:01:44 +00:00
[legacy] move communication and nn to legacy and refactor logger (#4671)
* [legacy] move communication to legacy (#4640)
* [legacy] refactor logger and clean up legacy codes (#4654)
* [legacy] make logger independent of gpc
* [legacy] make optim independent of registry
* [legacy] move test engine to legacy
* [legacy] move nn to legacy (#4656)
* [legacy] move nn to legacy
* [checkpointio] fix save hf config
* [test] remove useless rpc pp test
* [legacy] fix nn init
* [example] skip tutorial hybrid parallel example
* [devops] test doc check
* [devops] test doc check
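The mechanical change repeated across the hunks below is the import path of the optimizer registry, which now lives under the colossalai.legacy namespace. A minimal sketch of the migration, assuming the pre-refactor import came from the top-level colossalai.registry (the removed lines of the diff did not survive extraction):

# Assumed pre-refactor import (not visible in the hunks below):
# from colossalai.registry import OPTIMIZERS

# Post-refactor import, as it appears in every hunk below:
from colossalai.legacy.registry import OPTIMIZERS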
@@ -4,12 +4,10 @@ from typing import Optional
import torch

from colossalai.kernel.op_builder import CPUAdamBuilder
from colossalai.legacy.registry import OPTIMIZERS

from .nvme_optimizer import NVMeOptimizer


@OPTIMIZERS.register_module
class CPUAdam(NVMeOptimizer):
    """Implements Adam algorithm.
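For readers unfamiliar with the @OPTIMIZERS.register_module decorator seen above, the sketch below shows the general registry pattern it follows: a mapping from class names to classes so optimizers can later be built from configuration strings. This is a hypothetical stand-in, not ColossalAI's actual Registry class; the get_module helper in particular is an assumption.

from typing import Dict, Type


class Registry:
    """Hypothetical registry: maps class names to classes for config-driven construction."""

    def __init__(self, name: str) -> None:
        self.name = name
        self._modules: Dict[str, Type] = {}

    def register_module(self, cls: Type) -> Type:
        # Used as a bare decorator, matching the @OPTIMIZERS.register_module usage above.
        self._modules[cls.__name__] = cls
        return cls

    def get_module(self, name: str) -> Type:  # assumed lookup helper
        return self._modules[name]


OPTIMIZERS = Registry("optimizers")


@OPTIMIZERS.register_module
class DummyOptimizer:
    pass


assert OPTIMIZERS.get_module("DummyOptimizer") is DummyOptimizer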
@@ -8,11 +8,9 @@ Licensed under the MIT License.
'''
import torch

from colossalai.legacy.registry import OPTIMIZERS
from colossalai.utils import multi_tensor_applier


@OPTIMIZERS.register_module
class FusedAdam(torch.optim.Optimizer):
    """Implements Adam algorithm.
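FusedAdam relies on colossalai.utils.multi_tensor_applier together with fused CUDA kernels: instead of launching one kernel per parameter tensor, whole lists of tensors (params, grads, optimizer states) are handed to a single fused op. The helper below is a pure-Python stand-in that only illustrates the calling convention, not the real CUDA-backed implementation.

from typing import Callable, List

import torch


def apply_to_tensor_lists(op: Callable[..., None], tensor_lists: List[List[torch.Tensor]], *args) -> None:
    # A real fused kernel consumes the lists in one launch; here we simply
    # iterate to show the shape of the call (parallel lists of tensors).
    for tensors in zip(*tensor_lists):
        op(*tensors, *args)


params = [torch.zeros(4), torch.zeros(8)]
grads = [torch.ones(4), torch.ones(8)]
# Toy SGD-style update applied across both tensor lists at once.
apply_to_tensor_lists(lambda p, g, lr: p.add_(g, alpha=-lr), [params, grads], 1e-3)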
@@ -1,11 +1,9 @@
# modified from https://github.com/NVIDIA/apex/blob/master/apex/optimizers/fused_lamb.py
import torch

from colossalai.legacy.registry import OPTIMIZERS
from colossalai.utils import multi_tensor_applier


@OPTIMIZERS.register_module
class FusedLAMB(torch.optim.Optimizer):
    """Implements LAMB algorithm.
@@ -2,11 +2,9 @@
import torch
from torch.optim.optimizer import Optimizer, required

from colossalai.legacy.registry import OPTIMIZERS
from colossalai.utils import multi_tensor_applier


@OPTIMIZERS.register_module
class FusedSGD(Optimizer):
    r"""Implements stochastic gradient descent (optionally with momentum).
@@ -4,13 +4,11 @@ import torch
from torch.optim import Adam

from colossalai.kernel.op_builder import FusedOptimBuilder
from colossalai.legacy.registry import OPTIMIZERS
from colossalai.utils import multi_tensor_applier

from .cpu_adam import CPUAdam


@OPTIMIZERS.register_module
class HybridAdam(CPUAdam):
    """Implements Adam algorithm.
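HybridAdam extends CPUAdam and additionally loads a fused GPU kernel via FusedOptimBuilder, so parameters resident on the GPU and parameters offloaded to the CPU can each take the faster update path. The dispatcher below is a hypothetical sketch of that routing idea only; the real class reuses CPUAdam's vectorized CPU kernel and the fused multi-tensor CUDA kernel.

from typing import Callable, Iterable, List

import torch


def hybrid_update(
    params: Iterable[torch.Tensor],
    cpu_update: Callable[[List[torch.Tensor]], None],
    gpu_update: Callable[[List[torch.Tensor]], None],
) -> None:
    params = list(params)
    cpu_params = [p for p in params if p.device.type == "cpu"]
    cuda_params = [p for p in params if p.device.type == "cuda"]
    if cpu_params:
        cpu_update(cpu_params)   # e.g. the vectorized CPU Adam path
    if cuda_params:
        gpu_update(cuda_params)  # e.g. the fused multi-tensor CUDA path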
@@ -5,10 +5,7 @@ Adapted from the pytorch-lamb library at https://github.com/cybertronai/pytorch-
import torch
from torch.optim import Optimizer

from colossalai.legacy.registry import OPTIMIZERS


@OPTIMIZERS.register_module
class Lamb(Optimizer):
    r"""Implements Lamb algorithm.
    It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
@@ -5,10 +5,7 @@ from typing import Iterable
import torch
from torch.optim import Optimizer

from colossalai.legacy.registry import OPTIMIZERS


@OPTIMIZERS.register_module
class Lars(Optimizer):
    r"""Implements the LARS optimizer from `"Large batch training of convolutional networks"
    <https://arxiv.org/pdf/1708.03888.pdf>`_.
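Taken together, every optimizer touched above keeps the standard torch.optim constructor shape (an iterable of parameters followed by hyperparameters), so using one looks like using any PyTorch optimizer. The snippet below is a hedged usage sketch; the import path and keyword arguments are assumptions to check against your installed version.

import torch

from colossalai.nn.optimizer import Lamb  # import path assumed; adjust to your version

model = torch.nn.Linear(32, 32)
optimizer = Lamb(model.parameters(), lr=1e-3, weight_decay=0.01)

loss = model(torch.randn(8, 32)).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()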