Mirror of https://github.com/hpcaitech/ColossalAI.git (synced 2025-09-01 09:07:51 +00:00)
replace the customized dataloader setup with the built-in one
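This commit drops the custom setup_distributed_dataloader helper from the coati training scripts (DPO, PPO, reward model, and SFT) and instead builds the train dataloaders with the booster plugin's built-in prepare_dataloader, passing StatefulDistributedSampler through distributed_sampler_cls rather than threading tp_size/sp_size/pp_size by hand. The example hostfile's hard-coded IP is also replaced with masked placeholders.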
@@ -1 +1,5 @@
-172.27.183.199
+XXX.XX.XXX.XXX # Your master IP
+XXX.XX.XXX.XXX # Your slave IPs
+XXX.XX.XXX.XXX # Your slave IPs
+XXX.XX.XXX.XXX # Your slave IPs
+XXX.XX.XXX.XXX # Your slave IPs
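For context, this is the example hostfile consumed by the ColossalAI launcher, one reachable node IP per line with the master listed first; it is typically passed on the command line, e.g. colossalai run --hostfile ./hostfile --nproc_per_node <GPUs per node> <training script>. That invocation is illustrative and not part of this commit.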
DPO training script:

@@ -5,12 +5,7 @@ import resource
 from contextlib import nullcontext

 import torch
-from coati.dataset import (
-    DataCollatorForPreferenceDataset,
-    StatefulDistributedSampler,
-    load_tokenized_dataset,
-    setup_distributed_dataloader,
-)
+from coati.dataset import DataCollatorForPreferenceDataset, StatefulDistributedSampler, load_tokenized_dataset
 from coati.models import convert_to_lora_module, disable_dropout
 from coati.trainer import DPOTrainer
 from coati.utils import load_checkpoint
@@ -174,15 +169,14 @@ def train(args):
     mode_map = {"train": "train", "valid": "validation", "test": "test"}
     train_dataset = load_tokenized_dataset(dataset_paths=args.dataset, mode="train", mode_map=mode_map)
     data_collator = DataCollatorForPreferenceDataset(tokenizer=tokenizer, max_length=args.max_length)
-    train_dataloader = setup_distributed_dataloader(
+
+    train_dataloader = plugin.prepare_dataloader(
         dataset=train_dataset,
         batch_size=args.batch_size,
         shuffle=True,
         drop_last=True,
         collate_fn=data_collator,
-        tp_size=plugin.tp_size if hasattr(plugin, "tp_size") else 1,
-        sp_size=plugin.sp_size if hasattr(plugin, "sp_size") else 1,
-        pp_size=plugin.pp_size if hasattr(plugin, "pp_size") else 1,
+        distributed_sampler_cls=StatefulDistributedSampler,
     )

     num_update_steps_per_epoch = len(train_dataloader) // args.accumulation_steps
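Why the tp_size/sp_size/pp_size arguments disappear: the plugin already knows its own data-parallel process group, so its prepare_dataloader can size the sampler itself and the caller only supplies the sampler class. A hedged sketch of the idea (simplified, not the actual ColossalAI implementation; dp_group stands in for the plugin's internal data-parallel group):

import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler


def prepare_dataloader_sketch(dataset, batch_size, shuffle=False, drop_last=False,
                              collate_fn=None, distributed_sampler_cls=None,
                              dp_group=None, **kwargs):
    # Rank/world size come from the plugin's data-parallel group instead of
    # being passed in as tp_size/sp_size/pp_size by the training script.
    sampler_cls = distributed_sampler_cls or DistributedSampler
    sampler = sampler_cls(
        dataset,
        num_replicas=dist.get_world_size(dp_group),  # data-parallel world size
        rank=dist.get_rank(dp_group),                # rank within the DP group
        shuffle=shuffle,
    )
    return DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        drop_last=drop_last,
        collate_fn=collate_fn,
        **kwargs,
    )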
PPO training script:

@@ -12,7 +12,6 @@ from coati.dataset import (
     StatefulDistributedSampler,
     load_tokenized_dataset,
     setup_conversation_template,
-    setup_distributed_dataloader,
 )
 from coati.models import Critic, RewardModel, convert_to_lora_module, disable_dropout
 from coati.trainer import PPOTrainer
@@ -209,6 +208,9 @@ def train(args):
             max_norm=args.grad_clip,
         )
     elif args.plugin == "3d":
+        if args.use_flash_attn and (args.tp > 1 or args.pp > 1 or args.sp > 1 or args.enable_sequence_parallelism):
+            logger.warning("Flash attention cannot be used with 3D parallelism for PPO training. Disabling it.")
+            args.use_flash_attn = False
         plugin = HybridParallelPlugin(
             tp_size=args.tp,
             pp_size=args.pp,
@@ -247,29 +249,26 @@ def train(args):
     mode_map = {"train": "train", "valid": "validation", "test": "test"}
     train_prompt_dataset = load_tokenized_dataset(dataset_paths=args.prompt_dataset, mode="train", mode_map=mode_map)
     data_collator = DataCollatorForPromptDataset(tokenizer=tokenizer, max_length=args.max_length - args.max_seq_len)
-    train_prompt_dataloader = setup_distributed_dataloader(
+
+    train_prompt_dataloader = plugin.prepare_dataloader(
         dataset=train_prompt_dataset,
         batch_size=args.experience_batch_size,
         shuffle=True,
         drop_last=True,
         collate_fn=data_collator,
-        tp_size=plugin.tp_size if hasattr(plugin, "tp_size") else 1,
-        sp_size=plugin.sp_size if hasattr(plugin, "sp_size") else 1,
-        pp_size=plugin.pp_size if hasattr(plugin, "pp_size") else 1,
+        distributed_sampler_cls=StatefulDistributedSampler,
     )

     if len(args.ptx_dataset) > 0:
         train_ptx_dataset = load_tokenized_dataset(dataset_paths=args.ptx_dataset, mode="train", mode_map=mode_map)
         data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer, max_length=args.max_length)
-        train_pretrain_dataloader = setup_distributed_dataloader(
+        train_pretrain_dataloader = plugin.prepare_dataloader(
             dataset=train_ptx_dataset,
             batch_size=args.ptx_batch_size,
             shuffle=True,
             drop_last=True,
             collate_fn=data_collator,
-            tp_size=plugin.tp_size if hasattr(plugin, "tp_size") else 1,
-            sp_size=plugin.sp_size if hasattr(plugin, "sp_size") else 1,
-            pp_size=plugin.pp_size if hasattr(plugin, "pp_size") else 1,
+            distributed_sampler_cls=StatefulDistributedSampler,
         )
     else:
         train_pretrain_dataloader = None
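The class passed through distributed_sampler_cls matters for resumable training: a stateful sampler can skip samples that were already consumed before a checkpointed interruption. A toy illustration of the idea follows; it is not the coati StatefulDistributedSampler implementation, and set_start_index is named here only for this sketch:

from torch.utils.data.distributed import DistributedSampler


class ToyStatefulSampler(DistributedSampler):
    """A DistributedSampler that can resume an epoch from a given offset."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.start_index = 0  # samples of the current epoch to skip on resume

    def set_start_index(self, start_index: int) -> None:
        self.start_index = start_index

    def __iter__(self):
        # Reuse the parent's deterministic, rank-sharded ordering, then drop
        # the indices that were already consumed before the checkpoint.
        indices = list(super().__iter__())
        return iter(indices[self.start_index:])

    def __len__(self) -> int:
        return self.num_samples - self.start_index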
Reward model training script:

@@ -6,12 +6,7 @@ import resource
 from contextlib import nullcontext

 import torch
-from coati.dataset import (
-    DataCollatorForPreferenceDataset,
-    StatefulDistributedSampler,
-    load_tokenized_dataset,
-    setup_distributed_dataloader,
-)
+from coati.dataset import DataCollatorForPreferenceDataset, StatefulDistributedSampler, load_tokenized_dataset
 from coati.models import LogExpLoss, LogSigLoss, RewardModel, convert_to_lora_module
 from coati.trainer import RewardModelTrainer
 from coati.utils import load_checkpoint
@@ -169,17 +164,15 @@ def train(args):
     mode_map = {"train": "train", "valid": "validation", "test": "test"}
     train_dataset = load_tokenized_dataset(dataset_paths=args.dataset, mode="train", mode_map=mode_map)
     data_collator = DataCollatorForPreferenceDataset(tokenizer=tokenizer, max_length=args.max_length)
-    train_dataloader = setup_distributed_dataloader(
+    train_dataloader = plugin.prepare_dataloader(
         dataset=train_dataset,
         batch_size=args.batch_size,
         shuffle=True,
         drop_last=True,
         collate_fn=data_collator,
-        tp_size=plugin.tp_size if hasattr(plugin, "tp_size") else 1,
-        sp_size=plugin.sp_size if hasattr(plugin, "sp_size") else 1,
-        pp_size=plugin.pp_size if hasattr(plugin, "pp_size") else 1,
+        distributed_sampler_cls=StatefulDistributedSampler,
     )

     num_update_steps_per_epoch = len(train_dataloader) // args.accumulation_steps
     math.ceil(args.max_epochs * num_update_steps_per_epoch)
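A small worked example for the step bookkeeping at the end of this hunk (all numbers are made up for illustration):

import math

num_samples, dp_ranks = 10_000, 8                # dataset size, data-parallel ranks
batch_size, accumulation_steps, epochs = 8, 4, 3

batches_per_rank = (num_samples // dp_ranks) // batch_size   # 156 with drop_last=True
updates_per_epoch = batches_per_rank // accumulation_steps   # 156 // 4 = 39
total_steps = math.ceil(epochs * updates_per_epoch)          # 117
print(batches_per_rank, updates_per_epoch, total_steps)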
SFT training script:

@@ -8,7 +8,7 @@ import sys
 from contextlib import nullcontext

 import torch
-from coati.dataset import DataCollatorForSupervisedDataset, load_tokenized_dataset, setup_distributed_dataloader
+from coati.dataset import DataCollatorForSupervisedDataset, StatefulDistributedSampler, load_tokenized_dataset
 from coati.models import convert_to_lora_module
 from coati.trainer import SFTTrainer
 from coati.utils import load_checkpoint
@@ -189,21 +189,15 @@ def train(args):
     )
     dataset = load_tokenized_dataset(dataset_paths=args.dataset, mode="train")
     data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer, max_length=args.max_len)
-    train_dataloader = setup_distributed_dataloader(
+    train_dataloader = plugin.prepare_dataloader(
         dataset=dataset,
         batch_size=args.batch_size,
         shuffle=True,
         drop_last=True,
         collate_fn=data_collator,
-        tp_size=plugin.tp_size if hasattr(plugin, "tp_size") else 1,
-        sp_size=plugin.sp_size if hasattr(plugin, "sp_size") else 1,
-        pp_size=plugin.pp_size if hasattr(plugin, "pp_size") else 1,
+        distributed_sampler_cls=StatefulDistributedSampler,
     )
-    # print(len(train_dataloader))
-    # for batch in train_dataloader:
-    #     print(dist.get_rank(), tokenizer.batch_decode(batch["input_ids"]))
-    #     break

     coordinator.print_on_master(
         f"Max CUDA memory after data loader: {torch.cuda.max_memory_allocated() / 1024 ** 2:.2f} MB"
     )
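Putting the pieces together, a minimal, hedged sketch of the built-in flow these scripts now rely on, runnable under a distributed launcher; the toy dataset, model, and plugin choice are placeholders rather than code from this commit, and older ColossalAI releases may still require colossalai.launch_from_torch(config={}):

import colossalai
import torch
import torch.nn as nn
from colossalai.booster import Booster
from colossalai.booster.plugin import TorchDDPPlugin
from torch.utils.data import TensorDataset

colossalai.launch_from_torch()  # initializes torch.distributed from the env set by the launcher

plugin = TorchDDPPlugin()
booster = Booster(plugin=plugin)

# Toy dataset; the real scripts use load_tokenized_dataset plus a coati collator.
dataset = TensorDataset(torch.randn(64, 16), torch.randn(64, 1))
train_dataloader = plugin.prepare_dataloader(
    dataset=dataset,
    batch_size=8,
    shuffle=True,
    drop_last=True,
)

model = nn.Linear(16, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
# booster.boost wraps model/optimizer/dataloader for the chosen plugin.
model, optimizer, _, train_dataloader, _ = booster.boost(
    model=model, optimizer=optimizer, dataloader=train_dataloader
)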