[misc] update pre-commit and run all files (#4752)
* [misc] update pre-commit
* [misc] run pre-commit
* [misc] remove useless configuration files
* [misc] ignore cuda for clang-format
@@ -1,5 +1,3 @@
-import time
-
 import datasets
 import torch
 import transformers
@@ -12,7 +10,6 @@ from transformers.utils.versions import require_version
 import colossalai
 from colossalai.booster import Booster
 from colossalai.booster.plugin import GeminiPlugin, HybridParallelPlugin, LowLevelZeroPlugin, TorchDDPPlugin
-from colossalai.booster.plugin.hybrid_parallel_plugin import HybridParallelModule
 from colossalai.cluster import DistCoordinator
 from colossalai.logging import disable_existing_loggers, get_dist_logger
 from colossalai.nn.optimizer import HybridAdam
@@ -29,7 +26,6 @@ def move_to_cuda(batch, device):
 
 
 def train_epoch(epoch, model, optimizer, _criterion, lr_scheduler, dataloader, booster, coordinator):
-
     torch.cuda.synchronize()
 
     use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1
@@ -39,22 +35,19 @@ def train_epoch(epoch, model, optimizer, _criterion, lr_scheduler, dataloader, booster, coordinator):
     model.train()
     optimizer.zero_grad()
     dataloader = iter(dataloader)
-    with tqdm(range(total_step), desc=f'Epoch [{epoch + 1}]',
-              disable=not (coordinator.is_master() or is_pp_last_stage)) as pbar:
+    with tqdm(
+        range(total_step), desc=f"Epoch [{epoch + 1}]", disable=not (coordinator.is_master() or is_pp_last_stage)
+    ) as pbar:
         # Forward pass
         for _ in pbar:
             if use_pipeline:
-                outputs = booster.execute_pipeline(dataloader,
-                                                   model,
-                                                   _criterion,
-                                                   optimizer,
-                                                   return_loss=True,
-                                                   return_outputs=True)
+                outputs = booster.execute_pipeline(
+                    dataloader, model, _criterion, optimizer, return_loss=True, return_outputs=True
+                )
                 # Backward and optimize
                 if is_pp_last_stage:
-                    loss = outputs['loss']
-                    pbar.set_postfix({'loss': loss.item()})
+                    loss = outputs["loss"]
+                    pbar.set_postfix({"loss": loss.item()})
             else:
                 data = next(dataloader)
                 data = move_to_cuda(data)
@@ -62,7 +55,7 @@ def train_epoch(epoch, model, optimizer, _criterion, lr_scheduler, dataloader, booster, coordinator):
                 loss = _criterion(outputs, None)
                 # Backward
                 booster.backward(loss, optimizer)
-                pbar.set_postfix({'loss': loss.item()})
+                pbar.set_postfix({"loss": loss.item()})
 
             optimizer.step()
             optimizer.zero_grad()
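Taken together, the two hunks above only reflow the training step; the control flow is unchanged. As a reading aid, here is a condensed sketch of that step in one place, using the names from the diff (booster, move_to_cuda, _criterion) and dropping the tqdm bookkeeping; the function itself is illustrative, not the file's exact code:

def train_step(batch_iter, model, _criterion, optimizer, booster, use_pipeline, is_pp_last_stage):
    # Condensed sketch of the loop body above, not the file's exact code.
    if use_pipeline:
        # The pipeline schedule pulls batches from the iterator itself and runs
        # forward and backward across stages; the returned loss is only
        # meaningful on the last pipeline stage.
        outputs = booster.execute_pipeline(
            batch_iter, model, _criterion, optimizer, return_loss=True, return_outputs=True
        )
        loss = outputs["loss"] if is_pp_last_stage else None
    else:
        data = next(batch_iter)
        data = move_to_cuda(data)
        outputs = model(**data)
        loss = _criterion(outputs, None)
        booster.backward(loss, optimizer)  # plugin-aware backward (AMP scaling, ZeRO hooks, ...)
    optimizer.step()
    optimizer.zero_grad()
    return loss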
@@ -70,7 +63,6 @@ def train_epoch(epoch, model, optimizer, _criterion, lr_scheduler, dataloader, booster, coordinator):
 
 
 def main():
-
     args = parse_demo_args()
 
     # Launch ColossalAI
@@ -98,34 +90,34 @@ def main():
 
     # Set plugin
     booster_kwargs = {}
-    if args.plugin == 'torch_ddp_fp16':
-        booster_kwargs['mixed_precision'] = 'fp16'
-    if args.plugin.startswith('torch_ddp'):
+    if args.plugin == "torch_ddp_fp16":
+        booster_kwargs["mixed_precision"] = "fp16"
+    if args.plugin.startswith("torch_ddp"):
         plugin = TorchDDPPlugin()
-    elif args.plugin == 'gemini':
+    elif args.plugin == "gemini":
         plugin = GeminiPlugin(offload_optim_frac=1.0, pin_memory=True, initial_scale=2**5)
-    elif args.plugin == 'low_level_zero':
+    elif args.plugin == "low_level_zero":
         plugin = LowLevelZeroPlugin(initial_scale=2**5)
-    elif args.plugin == 'hybrid_parallel':
+    elif args.plugin == "hybrid_parallel":
         # modify the param accordingly for finetuning test cases
-        plugin = HybridParallelPlugin(tp_size=2,
-                                      pp_size=2,
-                                      num_microbatches=2,
-                                      enable_all_optimization=True,
-                                      zero_stage=0,
-                                      precision='fp16',
-                                      initial_scale=1)
+        plugin = HybridParallelPlugin(
+            tp_size=2,
+            pp_size=2,
+            num_microbatches=2,
+            enable_all_optimization=True,
+            zero_stage=0,
+            precision="fp16",
+            initial_scale=1,
+        )
 
     logger.info(f"Set plugin as {args.plugin}", ranks=[0])
 
     # Prepare tokenizer and dataloader
     tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
     dataset = NetflixDataset(tokenizer)
-    dataloader = plugin.prepare_dataloader(dataset,
-                                           batch_size=args.batch_size,
-                                           shuffle=True,
-                                           drop_last=True,
-                                           collate_fn=netflix_collator)
+    dataloader = plugin.prepare_dataloader(
+        dataset, batch_size=args.batch_size, shuffle=True, drop_last=True, collate_fn=netflix_collator
+    )
 
     # Set optimizer
     optimizer = HybridAdam(model.parameters(), lr=(args.learning_rate * world_size), weight_decay=args.weight_decay)
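The plugin block above is where the example picks its parallelism strategy; everything downstream (dataloader, optimizer, boost) is plugin-agnostic. A minimal sketch of that dispatch as a standalone function, using only the constructors and arguments visible in the diff (the helper name build_plugin is hypothetical):

def build_plugin(name: str):
    # Hypothetical helper mirroring the if/elif chain above.
    if name.startswith("torch_ddp"):
        # "torch_ddp_fp16" additionally sets booster_kwargs["mixed_precision"] = "fp16"
        return TorchDDPPlugin()
    if name == "gemini":
        return GeminiPlugin(offload_optim_frac=1.0, pin_memory=True, initial_scale=2**5)
    if name == "low_level_zero":
        return LowLevelZeroPlugin(initial_scale=2**5)
    if name == "hybrid_parallel":
        return HybridParallelPlugin(
            tp_size=2,
            pp_size=2,
            num_microbatches=2,
            enable_all_optimization=True,
            zero_stage=0,
            precision="fp16",
            initial_scale=1,
        )
    raise ValueError(f"unknown plugin: {name}")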
@@ -133,9 +125,9 @@ def main():
     # Set lr scheduler
     total_steps = len(dataloader) * args.num_epoch
     num_warmup_steps = int(args.warmup_ratio * total_steps)
-    lr_scheduler = get_linear_schedule_with_warmup(optimizer,
-                                                   num_warmup_steps=num_warmup_steps,
-                                                   num_training_steps=len(dataloader) * args.num_epoch)
+    lr_scheduler = get_linear_schedule_with_warmup(
+        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=len(dataloader) * args.num_epoch
+    )
 
     # Define criterion
     def _criterion(outputs, inputs):
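The scheduler hunk is again a pure reflow, but the warmup arithmetic is worth a concrete reading. With illustrative numbers (the batch count, epoch count, and ratio below are assumptions, not values from the commit):

# Illustrative values only, standing in for len(dataloader), args.num_epoch, args.warmup_ratio.
steps_per_epoch = 100
num_epoch = 3
warmup_ratio = 0.1

total_steps = steps_per_epoch * num_epoch            # 300, mirrors len(dataloader) * args.num_epoch
num_warmup_steps = int(warmup_ratio * total_steps)   # 30
# get_linear_schedule_with_warmup then ramps the LR from 0 to its base value over
# the first 30 steps and decays it linearly back to 0 by step 300.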
@@ -145,11 +137,9 @@ def main():
 
     # Set booster
     booster = Booster(plugin=plugin, **booster_kwargs)
-    model, optimizer, _criterion, dataloader, lr_scheduler = booster.boost(model=model,
-                                                                           optimizer=optimizer,
-                                                                           dataloader=dataloader,
-                                                                           criterion=_criterion,
-                                                                           lr_scheduler=lr_scheduler)
+    model, optimizer, _criterion, dataloader, lr_scheduler = booster.boost(
+        model=model, optimizer=optimizer, dataloader=dataloader, criterion=_criterion, lr_scheduler=lr_scheduler
+    )
 
     # Start finetuning
     logger.info(f"Start finetuning", ranks=[0])
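Zooming out, the boost call above is the last piece of Booster setup this file performs. A condensed, sketch-level view of the whole flow, restricted to calls that appear in this diff (model construction and argument parsing are elided; the epoch loop is an assumption based on train_epoch's signature, not shown in the hunks):

# Sketch of the overall flow; only the boost/train_epoch calls are taken from the diff.
booster = Booster(plugin=plugin, **booster_kwargs)
model, optimizer, _criterion, dataloader, lr_scheduler = booster.boost(
    model=model, optimizer=optimizer, dataloader=dataloader, criterion=_criterion, lr_scheduler=lr_scheduler
)
for epoch in range(args.num_epoch):  # assumed driver loop
    train_epoch(epoch, model, optimizer, _criterion, lr_scheduler, dataloader, booster, coordinator)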