[legacy] move communication and nn to legacy and refactor logger (#4671)

* [legacy] move communication to legacy (#4640)

* [legacy] refactor logger and clean up legacy codes (#4654)

* [legacy] make logger independent of gpc

* [legacy] make optim independent of registry

* [legacy] move test engine to legacy

* [legacy] move nn to legacy (#4656)

* [legacy] move nn to legacy

* [checkpointio] fix save hf config

* [test] remove useless rpc pp test

* [legacy] fix nn init

* [example] skip tutorial hybrid parallel example

* [devops] test doc check

* [devops] test doc check
Hongxin Liu
2023-09-11 16:24:28 +08:00
committed by GitHub
parent 536397cc95
commit 554aa9592e
170 changed files with 781 additions and 758 deletions
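
Every hunk below makes the same change from the sub-commit that makes optim independent of the registry: the colossalai.legacy.registry import and the @OPTIMIZERS.register_module decorator are deleted from each optimizer module, so these classes are no longer resolved by name through the legacy registry. A minimal sketch of what that means for callers, assuming the colossalai.nn.optimizer import path and illustrative hyperparameters (neither is taken from this diff):

import torch

from colossalai.nn.optimizer import CPUAdam  # import path assumed, not shown in this diff

# With the registry decorator removed, the optimizer is just a regular class:
# import it and construct it directly instead of looking it up through the
# legacy OPTIMIZERS registry. Constructing it assumes the CPU Adam extension
# referenced by CPUAdamBuilder in the hunk below can be built on the host.
model = torch.nn.Linear(8, 8)
optimizer = CPUAdam(model.parameters(), lr=1e-3)  # lr value is illustrative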

View File

@@ -4,12 +4,10 @@ from typing import Optional
 import torch
 from colossalai.kernel.op_builder import CPUAdamBuilder
-from colossalai.legacy.registry import OPTIMIZERS
 from .nvme_optimizer import NVMeOptimizer
-@OPTIMIZERS.register_module
 class CPUAdam(NVMeOptimizer):
     """Implements Adam algorithm.

View File

@@ -8,11 +8,9 @@ Licensed under the MIT License.
 '''
 import torch
-from colossalai.legacy.registry import OPTIMIZERS
 from colossalai.utils import multi_tensor_applier
-@OPTIMIZERS.register_module
 class FusedAdam(torch.optim.Optimizer):
     """Implements Adam algorithm.

View File

@@ -1,11 +1,9 @@
 # modified from https://github.com/NVIDIA/apex/blob/master/apex/optimizers/fused_lamb.py
 import torch
-from colossalai.legacy.registry import OPTIMIZERS
 from colossalai.utils import multi_tensor_applier
-@OPTIMIZERS.register_module
 class FusedLAMB(torch.optim.Optimizer):
     """Implements LAMB algorithm.

View File

@@ -2,11 +2,9 @@
 import torch
 from torch.optim.optimizer import Optimizer, required
-from colossalai.legacy.registry import OPTIMIZERS
 from colossalai.utils import multi_tensor_applier
-@OPTIMIZERS.register_module
 class FusedSGD(Optimizer):
     r"""Implements stochastic gradient descent (optionally with momentum).

View File

@@ -4,13 +4,11 @@ import torch
 from torch.optim import Adam
 from colossalai.kernel.op_builder import FusedOptimBuilder
-from colossalai.legacy.registry import OPTIMIZERS
 from colossalai.utils import multi_tensor_applier
 from .cpu_adam import CPUAdam
-@OPTIMIZERS.register_module
 class HybridAdam(CPUAdam):
     """Implements Adam algorithm.

View File

@@ -5,10 +5,7 @@ Adapted from the pytorch-lamb library at https://github.com/cybertronai/pytorch-
 import torch
 from torch.optim import Optimizer
-from colossalai.legacy.registry import OPTIMIZERS
-@OPTIMIZERS.register_module
 class Lamb(Optimizer):
     r"""Implements Lamb algorithm.
     It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.

View File

@@ -5,10 +5,7 @@ from typing import Iterable
 import torch
 from torch.optim import Optimizer
-from colossalai.legacy.registry import OPTIMIZERS
-@OPTIMIZERS.register_module
 class Lars(Optimizer):
     r"""Implements the LARS optimizer from `"Large batch training of convolutional networks"
     <https://arxiv.org/pdf/1708.03888.pdf>`_.
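
The Lars hunk above only touches the registry lines, but since the docstring cites "Large batch training of convolutional networks", here is an illustrative sketch of the layer-wise trust ratio that paper defines. This is not the code from this file; the eeta default and the zero-norm fallback are assumptions modeled on common LARS implementations:

import torch

def lars_local_lr(weight: torch.Tensor, grad: torch.Tensor,
                  eeta: float = 1e-3, weight_decay: float = 0.0) -> float:
    # LARS scales each layer's step by ||w|| / (||grad|| + weight_decay * ||w||),
    # multiplied by a small trust coefficient eeta.
    w_norm = weight.norm()
    g_norm = grad.norm()
    if w_norm == 0 or g_norm == 0:
        return 1.0  # common fallback when either norm is zero
    return float(eeta * w_norm / (g_norm + weight_decay * w_norm))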