Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-07 12:01:39 +00:00)
polish optimizer docstring (#619)
@@ -11,7 +11,7 @@ class FusedSGD(Optimizer):
     r"""Implements stochastic gradient descent (optionally with momentum).
 
     Currently GPU-only. Requires ColossalAI to be installed via
-    ``pip install -v --no-cache-dir --global-option="--cuda_ext" ./``.
+    ``pip install .``.
 
     This version of fused SGD implements 2 fusions.
 
@@ -57,8 +57,13 @@ class FusedSGD(Optimizer):
     The Nesterov version is analogously modified.
     """
 
-    def __init__(self, params, lr=required, momentum=0, dampening=0,
-                 weight_decay=0, nesterov=False,
+    def __init__(self,
+                 params,
+                 lr=required,
+                 momentum=0,
+                 dampening=0,
+                 weight_decay=0,
+                 nesterov=False,
                  wd_after_momentum=False,
                  materialize_master_grads=True,
                  set_grad_none=False):
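For orientation, a minimal usage sketch of the reformatted constructor above. The import path `colossalai.nn.optimizer.FusedSGD`, the toy model, and the hyperparameter values are illustrative assumptions; FusedSGD is GPU-only and needs the fused CUDA extension installed.

    import torch
    import torch.nn as nn

    from colossalai.nn.optimizer import FusedSGD   # assumed import path

    model = nn.Linear(16, 4).half().cuda()         # fp16 parameters on the GPU
    optimizer = FusedSGD(model.parameters(),
                         lr=0.01,
                         momentum=0.9,
                         weight_decay=1e-4,
                         nesterov=True)

    x = torch.randn(8, 16, dtype=torch.float16, device='cuda')
    loss = model(x).float().sum()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()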
@@ -67,14 +72,11 @@ class FusedSGD(Optimizer):
         if momentum < 0.0:
             raise ValueError("Invalid momentum value: {}".format(momentum))
         if weight_decay < 0.0:
-            raise ValueError(
-                "Invalid weight_decay value: {}".format(weight_decay))
+            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
 
-        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
-                        weight_decay=weight_decay, nesterov=nesterov)
+        defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov)
         if nesterov and (momentum <= 0 or dampening != 0):
-            raise ValueError(
-                "Nesterov momentum requires a momentum and zero dampening")
+            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
         super(FusedSGD, self).__init__(params, defaults)
 
         self.wd_after_momentum = wd_after_momentum
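The `defaults` dict built here is passed to the base `torch.optim.Optimizer`, which fills any option missing from an individual parameter group with these values. A small sketch of that mechanism, using stock `torch.optim.SGD` so it runs without the CUDA extension; the two-layer model and the per-group override are illustrative only.

    import torch
    import torch.nn as nn

    backbone, head = nn.Linear(8, 8), nn.Linear(8, 2)

    optimizer = torch.optim.SGD(
        [
            {'params': backbone.parameters()},          # inherits lr/momentum from defaults
            {'params': head.parameters(), 'lr': 0.1},   # per-group override
        ],
        lr=0.01, momentum=0.9, weight_decay=1e-4)

    for group in optimizer.param_groups:
        print(group['lr'], group['momentum'], group['weight_decay'])
    # lr is 0.01 for the first group and 0.1 for the second;
    # momentum and weight_decay come from the defaults in both.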
@@ -86,8 +88,9 @@ class FusedSGD(Optimizer):
         if multi_tensor_applier.available:
             import colossal_C
             # Skip buffer
-            self._dummy_overflow_buf = torch.tensor(
-                [0], dtype=torch.int, device=self.param_groups[0]["params"][0].device)
+            self._dummy_overflow_buf = torch.tensor([0],
+                                                    dtype=torch.int,
+                                                    device=self.param_groups[0]["params"][0].device)
             self.multi_tensor_sgd = colossal_C.multi_tensor_sgd
         else:
             raise RuntimeError('FusedSGD requires cuda extensions')
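Because the constructor raises a `RuntimeError` when the fused kernels are missing, callers may want a graceful fallback. A hedged sketch; the import path is assumed, and stock `torch.optim.SGD` is only a functional stand-in, not a performance equivalent.

    import torch
    import torch.nn as nn

    model = nn.Linear(16, 4)

    try:
        from colossalai.nn.optimizer import FusedSGD   # assumed import path
        model = model.cuda()                           # FusedSGD is GPU-only
        optimizer = FusedSGD(model.parameters(), lr=0.01, momentum=0.9)
    except (ImportError, RuntimeError):
        # Extension (or GPU) unavailable: fall back to the stock optimizer.
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)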
@@ -133,8 +136,7 @@ class FusedSGD(Optimizer):
         if closure is not None:
             loss = closure()
 
-        explicit_master_params = (hasattr(self, "_amp_stash") and
-                                  hasattr(self._amp_stash, "fp32_from_fp16_groups"))
+        explicit_master_params = (hasattr(self, "_amp_stash") and hasattr(self._amp_stash, "fp32_from_fp16_groups"))
 
         for gid, group in enumerate(self.param_groups):
             weight_decay = group['weight_decay']
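Two things are visible in this hunk: `step()` follows the standard `torch.optim` closure protocol, and the joined `hasattr` probe simply detects whether an apex-style AMP wrapper has attached fp32 master copies of the fp16 parameters via `_amp_stash`. A minimal closure example with stock SGD (names are illustrative):

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    x, y = torch.randn(8, 4), torch.randn(8, 1)

    def closure():
        # Re-evaluates the loss so step() can return it.
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(model(x), y)
        loss.backward()
        return loss

    loss = optimizer.step(closure)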
@@ -154,71 +156,52 @@ class FusedSGD(Optimizer):
             if explicit_master_params:
                 stash = self._amp_stash
 
-                fp32_params = [
-                    p for p in stash.fp32_from_fp32_groups[gid] if p.grad is not None]
-                fp32_grads = [
-                    p.grad for p in stash.fp32_from_fp32_groups[gid] if p.grad is not None]
+                fp32_params = [p for p in stash.fp32_from_fp32_groups[gid] if p.grad is not None]
+                fp32_grads = [p.grad for p in stash.fp32_from_fp32_groups[gid] if p.grad is not None]
                 fp32_momentums, first_runs[1] = self.get_momentums(fp32_params)
 
                 if self.materialize_master_grads:
-                    fp16_model_params = [p for i, p in enumerate(
-                        stash.fp16_groups[gid]) if stash.fp32_from_fp16_groups[gid][i].grad is not None]
-                    fp32_from_fp16_grads = [
-                        p.grad for p in stash.fp32_from_fp16_groups[gid] if p.grad is not None]
-                    fp32_from_fp16_params = [
-                        p for p in stash.fp32_from_fp16_groups[gid] if p.grad is not None]
-                    fp32_from_fp16_momentums, first_runs[0] = self.get_momentums(
-                        fp32_from_fp16_params)
-
-                    fp16_set = [fp32_from_fp16_grads, fp32_from_fp16_params,
-                                fp32_from_fp16_momentums, fp16_model_params]
-                else:
-                    fp16_model_params = [
-                        p for p in stash.fp16_groups[gid] if p.grad is not None]
-                    fp16_model_grads = [
-                        p.grad for p in stash.fp16_groups[gid] if p.grad is not None]
-                    fp32_from_fp16_params = [p for i, p in enumerate(
-                        stash.fp32_from_fp16_groups[gid]) if stash.fp16_groups[gid][i].grad is not None]
-                    fp32_from_fp16_momentums, first_runs[0] = self.get_momentums(
-                        fp32_from_fp16_params)
-
-                    fp16_set = [fp16_model_grads, fp32_from_fp16_params,
-                                fp32_from_fp16_momentums, fp16_model_params]
-
-                launch_sets = [fp16_set, [
-                    fp32_grads, fp32_params, fp32_momentums]]
+                    fp16_model_params = [
+                        p for i, p in enumerate(stash.fp16_groups[gid])
+                        if stash.fp32_from_fp16_groups[gid][i].grad is not None
+                    ]
+                    fp32_from_fp16_grads = [p.grad for p in stash.fp32_from_fp16_groups[gid] if p.grad is not None]
+                    fp32_from_fp16_params = [p for p in stash.fp32_from_fp16_groups[gid] if p.grad is not None]
+                    fp32_from_fp16_momentums, first_runs[0] = self.get_momentums(fp32_from_fp16_params)
+
+                    fp16_set = [
+                        fp32_from_fp16_grads, fp32_from_fp16_params, fp32_from_fp16_momentums, fp16_model_params
+                    ]
+                else:
+                    fp16_model_params = [p for p in stash.fp16_groups[gid] if p.grad is not None]
+                    fp16_model_grads = [p.grad for p in stash.fp16_groups[gid] if p.grad is not None]
+                    fp32_from_fp16_params = [
+                        p for i, p in enumerate(stash.fp32_from_fp16_groups[gid])
+                        if stash.fp16_groups[gid][i].grad is not None
+                    ]
+                    fp32_from_fp16_momentums, first_runs[0] = self.get_momentums(fp32_from_fp16_params)
+
+                    fp16_set = [fp16_model_grads, fp32_from_fp16_params, fp32_from_fp16_momentums, fp16_model_params]
+
+                launch_sets = [fp16_set, [fp32_grads, fp32_params, fp32_momentums]]
             else:
-                fp16_params = [p for p in group['params'] if (
-                    p.dtype == torch.float16 and p.grad is not None)]
-                fp16_grads = [p.grad for p in group['params'] if (
-                    p.dtype == torch.float16 and p.grad is not None)]
+                fp16_params = [p for p in group['params'] if (p.dtype == torch.float16 and p.grad is not None)]
+                fp16_grads = [p.grad for p in group['params'] if (p.dtype == torch.float16 and p.grad is not None)]
                 fp16_momentums, first_runs[0] = self.get_momentums(fp16_params)
 
-                fp32_params = [p for p in group['params'] if (
-                    p.dtype == torch.float32 and p.grad is not None)]
-                fp32_grads = [p.grad for p in group['params'] if (
-                    p.dtype == torch.float32 and p.grad is not None)]
+                fp32_params = [p for p in group['params'] if (p.dtype == torch.float32 and p.grad is not None)]
+                fp32_grads = [p.grad for p in group['params'] if (p.dtype == torch.float32 and p.grad is not None)]
                 fp32_momentums, first_runs[1] = self.get_momentums(fp32_params)
 
-                launch_sets = [[fp16_grads, fp16_params, fp16_momentums],
-                               [fp32_grads, fp32_params, fp32_momentums]]
+                launch_sets = [[fp16_grads, fp16_params, fp16_momentums], [fp32_grads, fp32_params, fp32_momentums]]
 
             for s, (launch_set, first_run) in enumerate(zip(launch_sets, first_runs)):
                 assert len(launch_set[0]) == len(launch_set[1])
                 assert len(launch_set[0]) == len(launch_set[2])
                 if len(launch_set[0]) > 0:
-                    multi_tensor_applier(
-                        self.multi_tensor_sgd,
-                        self._dummy_overflow_buf,
-                        launch_set,
-                        weight_decay,
-                        momentum,
-                        dampening,
-                        group['lr'],
-                        nesterov,
-                        first_run,
-                        self.wd_after_momentum,
-                        1.0 / self.most_recent_scale)
+                    multi_tensor_applier(self.multi_tensor_sgd, self._dummy_overflow_buf, launch_set, weight_decay,
+                                         momentum, dampening, group['lr'], nesterov, first_run, self.wd_after_momentum,
+                                         1.0 / self.most_recent_scale)
 
         self.most_recent_scale = 1.0
         self.scale_set_by_backward = False
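In the non-AMP branch above, parameters and gradients are bucketed by dtype so that each bucket can be handed to a single fused launch. A self-contained sketch of that bucketing on a toy CPU model (names and sizes are illustrative; no forward pass is run, gradients are faked):

    import torch
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(8, 8).half(), nn.Linear(8, 2))
    for p in model.parameters():
        p.grad = torch.zeros_like(p)        # pretend backward() has run

    params = [p for p in model.parameters() if p.grad is not None]
    fp16_params = [p for p in params if p.dtype == torch.float16]
    fp16_grads = [p.grad for p in fp16_params]
    fp32_params = [p for p in params if p.dtype == torch.float32]
    fp32_grads = [p.grad for p in fp32_params]

    # Each (grads, params, momentums) triple feeds one multi_tensor_sgd launch.
    print(len(fp16_params), len(fp32_params))   # -> 2 2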
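For reference, the per-tensor math that `multi_tensor_sgd` batches across a bucket is ordinary SGD with momentum, as described in the class docstring. A hedged, unfused sketch under simplifying assumptions: all tensors fp32, `wd_after_momentum` and `first_run` semantics not modeled, and the `1.0 / self.most_recent_scale` argument treated as a plain gradient un-scaling factor.

    import torch

    def unfused_sgd(grads, params, momentums, weight_decay, momentum,
                    dampening, lr, nesterov, grad_scale=1.0):
        # Unfused reference for one (grads, params, momentums) bucket.
        for g, p, buf in zip(grads, params, momentums):
            d_p = g * grad_scale                       # undo loss scaling
            if weight_decay != 0:
                d_p = d_p.add(p, alpha=weight_decay)   # classic L2 term folded into the grad
            if momentum != 0:
                buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                d_p = d_p.add(buf, alpha=momentum) if nesterov else buf
            p.add_(d_p, alpha=-lr)

    # Tiny check with plain tensors standing in for parameters.
    p, g, buf = torch.ones(4), torch.full((4,), 0.5), torch.zeros(4)
    unfused_sgd([g], [p], [buf], weight_decay=0.0, momentum=0.9,
                dampening=0.0, lr=0.1, nesterov=False)
    print(p)   # each entry moves from 1.00 to 0.95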