Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-03 10:06:44 +00:00
[shardformer] update llama2/opt finetune example and fix llama2 policy (#4645)
* [shardformer] update shardformer readme
* [shardformer] update llama2/opt finetune example and shardformer update to llama2
* [shardformer] change dataset
* [shardformer] fix CI
* [shardformer] fix
* [example] update opt example
* [example] resolve comments
* fix
@@ -58,25 +58,24 @@ def evaluate_model(
     model.eval()

     def evaluate_subset(dataloader: DataLoader):
+        use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1
+        is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage()
+
         accum_loss = torch.zeros(1, device=get_current_device())
         for batch in dataloader:
             batch = move_to_cuda(batch)
             labels = batch["labels"]
-            batch_size = batch["input_ids"].shape[0]
-            if hasattr(booster.plugin, "stage_manager") and booster.plugin.stage_manager is not None:
+            if use_pipeline:
                 pg_mesh = booster.plugin.pg_mesh
                 pp_group = booster.plugin.pp_group
                 current_pp_group_ranks = pg_mesh.get_ranks_in_group(pp_group)
                 current_rank = dist.get_rank()
+                #TODO pass dataloader to execute_pipeline directly
                 batch = iter([batch])
                 outputs = booster.execute_pipeline(batch, model, criterion, return_loss=True, return_outputs=True)

-                if booster.plugin.stage_manager.is_last_stage():
-                    val_loss = outputs["loss"]
+                if is_pp_last_stage:
                     logits = outputs["outputs"]["logits"]
+                    val_loss = outputs["loss"]
                     accum_loss.add_(val_loss)

                     if num_labels > 1:
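The `iter([batch])` wrapper exists because `booster.execute_pipeline` consumes an iterator of batches rather than a single batch dict, as the TODO in the hunk above notes. A minimal sketch of the workaround in isolation, with an illustrative helper name that is not part of the Booster API:

def run_pipeline_step(execute_pipeline, batch, model, criterion):
    # execute_pipeline expects something it can call next() on, so a lone
    # batch is wrapped in a one-element iterator before every call.
    batch_iter = iter([batch])
    return execute_pipeline(batch_iter, model, criterion, return_loss=True, return_outputs=True)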
@@ -84,19 +83,15 @@ def evaluate_model(
                 elif num_labels == 1:
                     preds = logits.squeeze()

-                    dist.broadcast(preds, src=current_rank, group=pp_group)
-                    dist.broadcast(val_loss, src=current_rank, group=pp_group)
+                    dist.broadcast_object_list([preds, val_loss], src=current_pp_group_ranks[-1], group=pp_group)

                     metric.add_batch(predictions=preds, references=labels)
                 elif current_rank in current_pp_group_ranks:
-                    val_loss = torch.empty((1,), device=get_current_device())
-                    preds = torch.empty((batch_size,), dtype=torch.int64, device=get_current_device())
-
-                    dist.broadcast(preds, src=current_pp_group_ranks[-1], group=pp_group)
-                    dist.broadcast(val_loss, src=current_pp_group_ranks[-1], group=pp_group)
-
-                    accum_loss.add_(val_loss)
-                    metric.add_batch(predictions=preds, references=labels)
+                    object_list = [None, None]
+                    dist.broadcast_object_list(object_list, src=current_pp_group_ranks[-1], group=pp_group)
+
+                    metric.add_batch(predictions=object_list[0].to(get_current_device()), references=labels)
+                    accum_loss.add_(object_list[1].to(get_current_device()))

             else:
                 batch = move_to_cuda(batch)
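Replacing the paired `dist.broadcast` calls with `dist.broadcast_object_list` means non-last stages no longer have to preallocate receive tensors of a known shape, which is why the old `batch_size = batch["input_ids"].shape[0]` line could be dropped. A self-contained sketch of the pattern (values are made up; run with `torchrun --nproc_per_node=2 demo.py`):

import torch
import torch.distributed as dist

def main():
    dist.init_process_group(backend="gloo")  # gloo so the demo runs on CPU
    rank = dist.get_rank()
    src = dist.get_world_size() - 1  # stands in for current_pp_group_ranks[-1]

    if rank == src:
        # The source rank (the last pipeline stage) owns the real objects.
        object_list = [torch.tensor([1, 0, 1]), torch.tensor([0.42])]
    else:
        # Receivers pass placeholders; broadcast_object_list pickles the
        # source's objects and fills these slots in, no shape bookkeeping.
        object_list = [None, None]

    dist.broadcast_object_list(object_list, src=src)
    preds, val_loss = object_list
    print(f"rank {rank}: preds={preds.tolist()}, val_loss={val_loss.item():.2f}")
    dist.destroy_process_group()

if __name__ == "__main__":
    main()

The trade-off is that object broadcasting serializes through pickle, which is slower than a raw tensor broadcast but acceptable at evaluation frequency.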
@@ -132,31 +127,33 @@ def evaluate_model(
 def train_epoch(epoch: int, model: nn.Module, optimizer: Optimizer, _criterion: Callable, lr_scheduler: LRScheduler,
                 train_dataloader: DataLoader, booster: Booster, coordinator: DistCoordinator):

+    use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1
+    is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage()
+    total_step = len(train_dataloader)
+
     model.train()
-    is_pp_last_stage = hasattr(
-        booster.plugin,
-        "stage_manager") and booster.plugin.stage_manager is not None and booster.plugin.stage_manager.is_last_stage()
-    with tqdm(train_dataloader,
+    optimizer.zero_grad()
+    train_dataloader_iter = iter(train_dataloader)
+    with tqdm(range(total_step),
               desc=f'Epoch [{epoch + 1}/{NUM_EPOCHS}]',
               disable=not (coordinator.is_master() or is_pp_last_stage)) as pbar:
-        for batch in pbar:
-            # Forward pass
-            batch = move_to_cuda(batch)
-            if hasattr(booster.plugin, "stage_manager") and booster.plugin.stage_manager is not None:
-                #TODO pass train_dataloader to execute_pipeline directly
-                batch = iter([batch])
-                outputs = booster.execute_pipeline(batch,
+        # Forward pass
+        for _ in pbar:
+            if use_pipeline:
+                outputs = booster.execute_pipeline(train_dataloader_iter,
                                                    model,
                                                    _criterion,
                                                    optimizer,
                                                    return_loss=True,
                                                    return_outputs=True)
                 # Backward and optimize
-                if booster.plugin.stage_manager.is_last_stage():
+                if is_pp_last_stage:
                     loss = outputs['loss']
                     pbar.set_postfix({'loss': loss.item()})
             else:
-                outputs = model(**batch)
+                data = next(train_dataloader_iter)
+                data = move_to_cuda(data)
+                outputs = model(**data)
                 loss = _criterion(outputs, None)
                 # Backward
                 booster.backward(loss, optimizer)
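The rewritten loop iterates over a step counter and shares one `train_dataloader_iter` between both paths: the pipeline schedule drains micro-batches from the iterator internally (resolving the old TODO), while the non-pipeline path advances the same iterator manually with `next()`. A minimal sketch of that control flow, using stand-in callables rather than the real Booster objects:

from typing import Callable, Iterator

def train_steps(dataloader_iter: Iterator, total_step: int, use_pipeline: bool,
                pipeline_step: Callable, plain_step: Callable) -> None:
    for _ in range(total_step):
        if use_pipeline:
            # The schedule pulls its own micro-batches from the iterator,
            # so the loop body never calls next() on the pipeline path.
            pipeline_step(dataloader_iter)
        else:
            # The plain data-parallel path advances the shared iterator
            # one batch at a time.
            plain_step(next(dataloader_iter))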