Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-11 05:49:55 +00:00
polish code (#3194)
Co-authored-by: YuliangLiu0306 <72588413+YuliangLiu0306@users.noreply.github.com>
@@ -44,14 +44,18 @@ from ldm.util import instantiate_from_config
class DataLoaderX(DataLoader):
    # A custom data loader class that inherits from DataLoader

    def __iter__(self):
        # Overriding the __iter__ method of DataLoader to return a BackgroundGenerator
        # This is to enable data loading in the background to improve training performance
        return BackgroundGenerator(super().__iter__())

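For readers unfamiliar with the prefetching trick: BackgroundGenerator (from the third-party prefetch_generator package) wraps any iterator and pulls items in a background thread, so the training loop does not stall waiting on data. A minimal, self-contained sketch of the same idea, assuming prefetch_generator is installed; the dataset here is synthetic:

import torch
from prefetch_generator import BackgroundGenerator
from torch.utils.data import DataLoader, TensorDataset

class PrefetchLoader(DataLoader):
    # Same idea as DataLoaderX: hand the underlying iterator to a background thread
    def __iter__(self):
        # max_prefetch bounds how many batches the background thread may buffer
        return BackgroundGenerator(super().__iter__(), max_prefetch=2)

loader = PrefetchLoader(TensorDataset(torch.randn(64, 3)), batch_size=8)
for (batch,) in loader:    # batches arrive from the prefetching thread
    pass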

def get_parser(**parser_kwargs):
    # A function to create an ArgumentParser object and add arguments to it

    def str2bool(v):
        # A helper function to parse boolean values from command line arguments
        if isinstance(v, bool):
            return v
        if v.lower() in ("yes", "true", "t", "y", "1"):
@@ -60,8 +64,10 @@ def get_parser(**parser_kwargs):
            return False
        else:
            raise argparse.ArgumentTypeError("Boolean value expected.")

    # Create an ArgumentParser object with the specified kwargs
    parser = argparse.ArgumentParser(**parser_kwargs)

    # Add various command line arguments with their default values and descriptions
    parser.add_argument(
        "-n",
        "--name",
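As an aside, this str2bool helper is the standard argparse idiom for flags that accept explicit yes/no values. A quick standalone sketch (the --train flag here is illustrative):

import argparse

def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if v.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")

parser = argparse.ArgumentParser()
# nargs="?" with const=True lets a bare --train mean --train=True
parser.add_argument("--train", type=str2bool, nargs="?", const=True, default=False)
print(parser.parse_args(["--train", "yes"]).train)    # True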
@@ -161,14 +167,18 @@ def get_parser(**parser_kwargs):
    return parser


# A function that returns the non-default arguments between two objects
def nondefault_trainer_args(opt):
    # Create an argument parser
    parser = argparse.ArgumentParser()
    # Add the PyTorch Lightning trainer's default arguments
    parser = Trainer.add_argparse_args(parser)
    # Parse the empty arguments to obtain the default values
    args = parser.parse_args([])
    # Return all non-default arguments
    return sorted(k for k in vars(args) if getattr(opt, k) != getattr(args, k))

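The trick in nondefault_trainer_args is general: parse an empty argv to recover every argparse default, then diff the real namespace against it. A standalone sketch with hypothetical flags:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--epochs", type=int, default=10)

defaults = parser.parse_args([])             # empty argv -> all defaults
opt = parser.parse_args(["--lr", "3e-4"])    # simulated command line
print(sorted(k for k in vars(opt) if getattr(opt, k) != getattr(defaults, k)))    # ['lr']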

# A dataset wrapper class to create a PyTorch dataset from an arbitrary object
class WrappedDataset(Dataset):
    """Wraps an arbitrary object with __len__ and __getitem__ into a pytorch dataset"""

@@ -181,7 +191,7 @@ class WrappedDataset(Dataset):
    def __getitem__(self, idx):
        return self.data[idx]


# A function to initialize worker processes
def worker_init_fn(_):
    worker_info = torch.utils.data.get_worker_info()

@@ -189,15 +199,18 @@ def worker_init_fn(_):
    worker_id = worker_info.id

    if isinstance(dataset, Txt2ImgIterableBaseDataset):
        # Divide the dataset into equal parts for each worker
        split_size = dataset.num_records // worker_info.num_workers
        # Set the sample IDs for the current worker
        # reset num_records to the true number to retain reliable length information
        dataset.sample_ids = dataset.valid_ids[worker_id * split_size:(worker_id + 1) * split_size]
        # Set the seed for the current worker
        current_id = np.random.choice(len(np.random.get_state()[1]), 1)
        return np.random.seed(np.random.get_state()[1][current_id] + worker_id)
    else:
        return np.random.seed(np.random.get_state()[1][0] + worker_id)

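The sharding above gives each DataLoader worker a contiguous slice of an iterable dataset so workers do not emit duplicate samples. A self-contained sketch that mirrors the splitting idea, with a hypothetical RangeStream dataset:

import math
from torch.utils.data import DataLoader, IterableDataset, get_worker_info

class RangeStream(IterableDataset):
    # Hypothetical iterable dataset over [0, n)
    def __init__(self, n):
        self.n = n

    def __iter__(self):
        info = get_worker_info()
        if info is None:    # single-process loading: yield everything
            return iter(range(self.n))
        # Contiguous shard per worker, mirroring worker_init_fn's split_size logic
        per_worker = math.ceil(self.n / info.num_workers)
        start = info.id * per_worker
        return iter(range(start, min(start + per_worker, self.n)))

loader = DataLoader(RangeStream(10), num_workers=2, batch_size=None)
print(sorted(int(x) for x in loader))    # [0..9], each exactly once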

# Provide functionality for creating data loaders based on provided dataset configurations
class DataModuleFromConfig(pl.LightningDataModule):

    def __init__(self,
@@ -212,10 +225,12 @@ class DataModuleFromConfig(pl.LightningDataModule):
                 use_worker_init_fn=False,
                 shuffle_val_dataloader=False):
        super().__init__()
        # Set data module attributes
        self.batch_size = batch_size
        self.dataset_configs = dict()
        self.num_workers = num_workers if num_workers is not None else batch_size * 2
        self.use_worker_init_fn = use_worker_init_fn
        # If a dataset is passed, add it to the dataset configs and create a corresponding dataloader method
        if train is not None:
            self.dataset_configs["train"] = train
            self.train_dataloader = self._train_dataloader
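One subtlety worth making explicit: the constructor binds a private method to the public dataloader hook name only for the splits that were actually configured, so a loader is never requested for an absent split. A minimal sketch of the same pattern (class and data hypothetical):

import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader, TensorDataset

class TinyDataModule(pl.LightningDataModule):
    def __init__(self, train=None, validation=None, batch_size=4):
        super().__init__()
        self.batch_size = batch_size
        self.splits = {}
        # Bind the public hook only when the corresponding split is configured
        if train is not None:
            self.splits["train"] = train
            self.train_dataloader = self._train_dataloader
        if validation is not None:
            self.splits["validation"] = validation
            self.val_dataloader = self._val_dataloader

    def _train_dataloader(self):
        return DataLoader(self.splits["train"], batch_size=self.batch_size)

    def _val_dataloader(self):
        return DataLoader(self.splits["validation"], batch_size=self.batch_size)

dm = TinyDataModule(train=TensorDataset(torch.zeros(8, 2)))
(batch,) = next(iter(dm.train_dataloader()))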
@@ -231,21 +246,28 @@ class DataModuleFromConfig(pl.LightningDataModule):
        self.wrap = wrap

    def prepare_data(self):
        # Instantiate datasets
        for data_cfg in self.dataset_configs.values():
            instantiate_from_config(data_cfg)

    def setup(self, stage=None):
        # Instantiate datasets from the dataset configs
        self.datasets = dict((k, instantiate_from_config(self.dataset_configs[k])) for k in self.dataset_configs)

        # If wrap is true, create a WrappedDataset for each dataset
        if self.wrap:
            for k in self.datasets:
                self.datasets[k] = WrappedDataset(self.datasets[k])

    def _train_dataloader(self):
        # Check if the train dataset is iterable
        is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset)
        # Set the worker initialization function if the dataset is iterable or use_worker_init_fn is True
        if is_iterable_dataset or self.use_worker_init_fn:
            init_fn = worker_init_fn
        else:
            init_fn = None
        # Return a DataLoaderX object for the train dataset
        return DataLoaderX(self.datasets["train"],
                           batch_size=self.batch_size,
                           num_workers=self.num_workers,
@@ -253,10 +275,12 @@ class DataModuleFromConfig(pl.LightningDataModule):
                           worker_init_fn=init_fn)

    def _val_dataloader(self, shuffle=False):
        # Check if the validation dataset is iterable
        if isinstance(self.datasets['validation'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn:
            init_fn = worker_init_fn
        else:
            init_fn = None
        # Return a DataLoaderX object for the validation dataset
        return DataLoaderX(self.datasets["validation"],
                           batch_size=self.batch_size,
                           num_workers=self.num_workers,
@@ -264,7 +288,9 @@ class DataModuleFromConfig(pl.LightningDataModule):
                           shuffle=shuffle)

    def _test_dataloader(self, shuffle=False):
        # Check if the test dataset is iterable
        is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset)
        # Set the worker initialization function if the dataset is iterable or use_worker_init_fn is True
        if is_iterable_dataset or self.use_worker_init_fn:
            init_fn = worker_init_fn
        else:
@@ -291,6 +317,7 @@ class DataModuleFromConfig(pl.LightningDataModule):

class SetupCallback(Callback):
    # Initialize the callback with the necessary parameters

    def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config):
        super().__init__()
@@ -302,12 +329,14 @@ class SetupCallback(Callback):
        self.config = config
        self.lightning_config = lightning_config

    # Save a checkpoint if training is interrupted with a keyboard interrupt
    def on_keyboard_interrupt(self, trainer, pl_module):
        if trainer.global_rank == 0:
            print("Summoning checkpoint.")
            ckpt_path = os.path.join(self.ckptdir, "last.ckpt")
            trainer.save_checkpoint(ckpt_path)

    # Create necessary directories and save configuration files before training starts
    # def on_pretrain_routine_start(self, trainer, pl_module):
    def on_fit_start(self, trainer, pl_module):
        if trainer.global_rank == 0:
@@ -316,6 +345,7 @@ class SetupCallback(Callback):
            os.makedirs(self.ckptdir, exist_ok=True)
            os.makedirs(self.cfgdir, exist_ok=True)

            # Create the trainstep checkpoint directory if necessary
            if "callbacks" in self.lightning_config:
                if 'metrics_over_trainsteps_checkpoint' in self.lightning_config['callbacks']:
                    os.makedirs(os.path.join(self.ckptdir, 'trainstep_checkpoints'), exist_ok=True)
@@ -323,11 +353,13 @@ class SetupCallback(Callback):
            print(OmegaConf.to_yaml(self.config))
            OmegaConf.save(self.config, os.path.join(self.cfgdir, "{}-project.yaml".format(self.now)))

            # Save the project config and lightning config as YAML files
            print("Lightning config")
            print(OmegaConf.to_yaml(self.lightning_config))
            OmegaConf.save(OmegaConf.create({"lightning": self.lightning_config}),
                           os.path.join(self.cfgdir, "{}-lightning.yaml".format(self.now)))

        # Remove the log directory if not resuming training and the directory already exists
        else:
            # ModelCheckpoint callback created log directory --- remove it
            if not self.resume and os.path.exists(self.logdir):
@@ -346,25 +378,28 @@ class SetupCallback(Callback):
        # trainer.save_checkpoint(ckpt_path)


# PyTorch Lightning callback for logging images during training and validation of a deep learning model
class ImageLogger(Callback):

    def __init__(self,
                 batch_frequency,    # Frequency of batches on which to log images
                 max_images,    # Maximum number of images to log
                 clamp=True,    # Whether to clamp pixel values to [-1,1]
                 increase_log_steps=True,    # Whether to increase the frequency of log steps exponentially
                 rescale=True,    # Whether to rescale pixel values to [0,1]
                 disabled=False,    # Whether to disable logging
                 log_on_batch_idx=False,    # Whether to log on batch index instead of global step
                 log_first_step=False,    # Whether to log on the first step
                 log_images_kwargs=None):    # Additional keyword arguments to pass to the log_images method
        super().__init__()
        self.rescale = rescale
        self.batch_freq = batch_frequency
        self.max_images = max_images
        self.logger_log_images = {
            # Dictionary of logger classes and their corresponding logging methods
            pl.loggers.CSVLogger: self._testtube,
        }
        # Create a list of exponentially increasing log steps, starting from 1 and ending at batch_frequency
        self.log_steps = [2**n for n in range(int(np.log2(self.batch_freq)) + 1)]
        if not increase_log_steps:
            self.log_steps = [self.batch_freq]
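To make the schedule concrete: with the default batch_frequency of 750, the exponential list covers powers of two up to 512, after which the regular modulo check takes over. A quick check of the arithmetic:

import numpy as np

batch_freq = 750
log_steps = [2**n for n in range(int(np.log2(batch_freq)) + 1)]
print(log_steps)    # [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]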
@@ -374,17 +409,32 @@ class ImageLogger(Callback):
        self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {}
        self.log_first_step = log_first_step

    @rank_zero_only    # Ensure that only the first process in distributed training executes this method
    def _testtube(self,
                  pl_module,    # The PyTorch Lightning module
                  images,    # A dictionary of images to log
                  batch_idx,    # The batch index
                  split    # The split (train/val) on which to log the images
                  ):
        # Method for logging images using the test-tube logger
        for k in images:
            grid = torchvision.utils.make_grid(images[k])
            grid = (grid + 1.0) / 2.0    # -1,1 -> 0,1; c,h,w

            tag = f"{split}/{k}"
            # Add the image grid to the logger's experiment
            pl_module.logger.experiment.add_image(tag, grid, global_step=pl_module.global_step)

    @rank_zero_only
    def log_local(self,
                  save_dir,    # The directory in which to save the images
                  split,    # The split (train/val) on which to log the images
                  images,    # A dictionary of images to log
                  global_step,    # The global step
                  current_epoch,    # The current epoch
                  batch_idx    # The batch index
                  ):
        # Method for saving image grids to the local file system
        root = os.path.join(save_dir, "images", split)
        for k in images:
            grid = torchvision.utils.make_grid(images[k], nrow=4)
@@ -396,12 +446,16 @@ class ImageLogger(Callback):
            filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(k, global_step, current_epoch, batch_idx)
            path = os.path.join(root, filename)
            os.makedirs(os.path.split(path)[0], exist_ok=True)
            # Save the image grid as a PNG file
            Image.fromarray(grid).save(path)

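The save path above relies on torchvision's make_grid plus a [-1,1] to [0,255] conversion before handing the array to PIL. A self-contained sketch of that pipeline with a synthetic batch (file name hypothetical):

import numpy as np
import torch
import torchvision
from PIL import Image

images = torch.rand(8, 3, 32, 32) * 2 - 1             # synthetic batch in [-1, 1]
grid = torchvision.utils.make_grid(images, nrow=4)    # c,h,w mosaic
grid = (grid + 1.0) / 2.0                             # [-1,1] -> [0,1]
array = (grid.permute(1, 2, 0).numpy() * 255).astype(np.uint8)    # h,w,c uint8
Image.fromarray(array).save("grid.png")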
    def log_img(self, pl_module, batch, batch_idx, split="train"):
        # Function for logging images to both the logger and the local file system
        check_idx = batch_idx if self.log_on_batch_idx else pl_module.global_step
        # Check if it's time to log an image batch
        if (self.check_frequency(check_idx) and    # batch_idx % self.batch_freq == 0
                hasattr(pl_module, "log_images") and callable(pl_module.log_images) and self.max_images > 0):
            # Get the logger type and check if training mode is on
            logger = type(pl_module.logger)

            is_train = pl_module.training
@@ -409,8 +463,10 @@ class ImageLogger(Callback):
                pl_module.eval()

            with torch.no_grad():
                # Get images from the log_images method of the pl_module
                images = pl_module.log_images(batch, split=split, **self.log_images_kwargs)

            # Clip images if specified and convert to CPU tensors
            for k in images:
                N = min(images[k].shape[0], self.max_images)
                images[k] = images[k][:N]
@@ -419,15 +475,19 @@ class ImageLogger(Callback):
                if self.clamp:
                    images[k] = torch.clamp(images[k], -1., 1.)

            # Log images locally to the file system
            self.log_local(pl_module.logger.save_dir, split, images, pl_module.global_step, pl_module.current_epoch,
                           batch_idx)

            # Log the images using the logger
            logger_log_images = self.logger_log_images.get(logger, lambda *args, **kwargs: None)
            logger_log_images(pl_module, images, pl_module.global_step, split)

            # Switch back to training mode if necessary
            if is_train:
                pl_module.train()

    # Check whether it's time to log an image batch
    def check_frequency(self, check_idx):
        if ((check_idx % self.batch_freq) == 0 or
            (check_idx in self.log_steps)) and (check_idx > 0 or self.log_first_step):
@@ -439,14 +499,17 @@ class ImageLogger(Callback):
            return True
        return False

    # Log images on train batch end if logging is not disabled
    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
        # if not self.disabled and (pl_module.global_step > 0 or self.log_first_step):
        #     self.log_img(pl_module, batch, batch_idx, split="train")
        pass

    # Log images on validation batch end if logging is not disabled and in validation mode
    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
        if not self.disabled and pl_module.global_step > 0:
            self.log_img(pl_module, batch, batch_idx, split="val")
        # Log gradients during calibration if necessary
        if hasattr(pl_module, 'calibrate_grad_norm'):
            if (pl_module.calibrate_grad_norm and batch_idx % 25 == 0) and batch_idx > 0:
                self.log_gradients(trainer, pl_module, batch_idx=batch_idx)
@@ -458,6 +521,7 @@ class CUDACallback(Callback):
    def on_train_start(self, trainer, pl_module):
        rank_zero_info("Training is starting")

    # This method is called when training ends
    def on_train_end(self, trainer, pl_module):
        rank_zero_info("Training is ending")

@@ -524,6 +588,7 @@ if __name__ == "__main__":
    #   params:
    #       key: value

    # Get the current time to create a new logging directory
    now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")

    # add cwd for convenience and to make classes in this file available when
@@ -535,11 +600,13 @@ if __name__ == "__main__":
    parser = Trainer.add_argparse_args(parser)

    opt, unknown = parser.parse_known_args()
    # Verify that the name and resume arguments are not both specified
    if opt.name and opt.resume:
        raise ValueError("-n/--name and -r/--resume cannot be specified both."
                         "If you want to resume training in a new log folder, "
                         "use -n/--name in combination with --resume_from_checkpoint")

    # If the "resume" option is specified, resume training from that checkpoint
    ckpt = None
    if opt.resume:
        rank_zero_info("Resuming from {}".format(opt.resume))
@@ -557,8 +624,10 @@ if __name__ == "__main__":
            logdir = opt.resume.rstrip("/")
            ckpt = os.path.join(logdir, "checkpoints", "last.ckpt")

        # Find all ".yaml" configuration files in the log directory and add them to the list of base configurations
        base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*.yaml")))
        opt.base = base_configs + opt.base
        # Get the name of the current log directory by splitting the path and taking the last element
        _tmp = logdir.split("/")
        nowname = _tmp[-1]
    else:
@@ -574,13 +643,17 @@ if __name__ == "__main__":
        nowname = now + name + opt.postfix
        logdir = os.path.join(opt.logdir, nowname)

    # Set the checkpoint path if the 'ckpt' option is specified
    if opt.ckpt:
        ckpt = opt.ckpt

    # Create the checkpoint and configuration directories within the log directory
    ckptdir = os.path.join(logdir, "checkpoints")
    cfgdir = os.path.join(logdir, "configs")
    # Set the seed for the random number generator to ensure reproducibility
    seed_everything(opt.seed)

    # Initialize and save the configuration using the OmegaConf library
    try:
        # init and save configs
        configs = [OmegaConf.load(cfg) for cfg in opt.base]
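For orientation, the OmegaConf pipeline here loads each YAML in opt.base and later merges in dotlist-style overrides from the unknown CLI arguments. A compact sketch of that merge behavior (values hypothetical; OmegaConf.create stands in for OmegaConf.load):

from omegaconf import OmegaConf

base = OmegaConf.create({"model": {"base_learning_rate": 1.0e-4}})    # as if loaded from a YAML file
cli = OmegaConf.from_dotlist(["model.base_learning_rate=5.0e-5"])     # as if parsed from unknown args
config = OmegaConf.merge(base, cli)
print(config.model.base_learning_rate)    # 5e-05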
@@ -593,6 +666,7 @@ if __name__ == "__main__":
        for k in nondefault_trainer_args(opt):
            trainer_config[k] = getattr(opt, k)

        # Check whether the accelerator is gpu
        if not trainer_config["accelerator"] == "gpu":
            del trainer_config["accelerator"]
            cpu = True
@@ -609,6 +683,7 @@ if __name__ == "__main__":
            config.model["params"].update({"use_fp16": False})

        if ckpt is not None:
            # If a checkpoint path is specified, update the "ckpt" key in the "params" dict of the model config
            config.model["params"].update({"ckpt": ckpt})
            rank_zero_info("Using ckpt_path = {}".format(config.model["params"]["ckpt"]))

@@ -617,7 +692,8 @@ if __name__ == "__main__":
        trainer_kwargs = dict()

        # Default logger configs to log training metrics during the training process.
        # These loggers are specified as targets in the dictionary, along with the configuration settings specific to each logger.
        default_logger_cfgs = {
            "wandb": {
                "target": LIGHTNING_PACK_NAME + "loggers.WandbLogger",
@@ -638,6 +714,7 @@ if __name__ == "__main__":
            }
        }

        # Set up the logger for TensorBoard
        default_logger_cfg = default_logger_cfgs["tensorboard"]
        if "logger" in lightning_config:
            logger_cfg = lightning_config.logger
@@ -660,6 +737,7 @@ if __name__ == "__main__":
        trainer_kwargs["strategy"] = instantiate_from_config(strategy_cfg)

        # Set up the ModelCheckpoint callback to save the best models
        # modelcheckpoint - use TrainResult/EvalResult(checkpoint_on=metric) to
        # specify which metric is used to determine best models
        default_modelckpt_cfg = {
@@ -683,45 +761,50 @@ if __name__ == "__main__":
        if version.parse(pl.__version__) < version.parse('1.4.0'):
            trainer_kwargs["checkpoint_callback"] = instantiate_from_config(modelckpt_cfg)

        # Set up various callbacks, including logging, learning rate monitoring, and CUDA management
        # add callback which sets up log directory
        default_callbacks_cfg = {
            "setup_callback": {    # callback to set up the training
                "target": "main.SetupCallback",
                "params": {
                    "resume": opt.resume,    # resume training if applicable
                    "now": now,
                    "logdir": logdir,    # directory to save the log file
                    "ckptdir": ckptdir,    # directory to save the checkpoint file
                    "cfgdir": cfgdir,    # directory to save the configuration file
                    "config": config,    # configuration dictionary
                    "lightning_config": lightning_config,    # LightningModule configuration
                }
            },
            "image_logger": {    # callback to log image data
                "target": "main.ImageLogger",
                "params": {
                    "batch_frequency": 750,    # how frequently to log images
                    "max_images": 4,    # maximum number of images to log
                    "clamp": True    # whether to clamp pixel values to [-1,1]
                }
            },
            "learning_rate_logger": {    # callback to log the learning rate
                "target": "main.LearningRateMonitor",
                "params": {
                    "logging_interval": "step",    # logging frequency (either 'step' or 'epoch')
                    # "log_momentum": True    # whether to log momentum (currently commented out)
                }
            },
            "cuda_callback": {    # callback to handle CUDA-related operations
                "target": "main.CUDACallback"
            },
        }

        # If the LightningModule configuration has specified callbacks, use those;
        # otherwise, create an empty OmegaConf configuration object
        if "callbacks" in lightning_config:
            callbacks_cfg = lightning_config.callbacks
        else:
            callbacks_cfg = OmegaConf.create()

        # If the 'metrics_over_trainsteps_checkpoint' callback is specified in the
        # LightningModule configuration, update the default callbacks configuration
        if 'metrics_over_trainsteps_checkpoint' in callbacks_cfg:
            print(
                'Caution: Saving checkpoints every n train steps without deleting. This might require some free space.')
@@ -739,15 +822,17 @@ if __name__ == "__main__":
                }
            }
            default_callbacks_cfg.update(default_metrics_over_trainsteps_ckpt_dict)

        # Merge the default callbacks configuration with the specified callbacks configuration, and instantiate the callbacks
        callbacks_cfg = OmegaConf.merge(default_callbacks_cfg, callbacks_cfg)

        trainer_kwargs["callbacks"] = [instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg]

        # Create a Trainer object with the specified command-line arguments and keyword arguments, and set the log directory
        trainer = Trainer.from_argparse_args(trainer_opt, **trainer_kwargs)
        trainer.logdir = logdir

        # Create a data module based on the configuration file
        data = instantiate_from_config(config.data)
        # NOTE according to https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html
        # calling these ourselves should not be necessary but it is.
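instantiate_from_config (from ldm.util) is the glue used throughout: it takes a {'target': 'module.Class', 'params': {...}} mapping and constructs the object. A minimal reimplementation sketch of the same idea, not the original function:

import importlib

def instantiate_from_config(config):
    # Resolve "pkg.mod.Class" to a class, then construct it with the given params
    module_path, cls_name = config["target"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_path), cls_name)
    return cls(**config.get("params", {}))

obj = instantiate_from_config({"target": "datetime.timedelta", "params": {"hours": 1, "minutes": 30}})
print(obj)    # 1:30:00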
@@ -755,10 +840,12 @@ if __name__ == "__main__":
        data.prepare_data()
        data.setup()

        # Print some information about the datasets in the data module
        for k in data.datasets:
            rank_zero_info(f"{k}, {data.datasets[k].__class__.__name__}, {len(data.datasets[k])}")

        # Configure the learning rate based on the batch size, base learning rate and number of GPUs
        # If scale_lr is true, calculate the learning rate based on additional factors
        bs, base_lr = config.data.params.batch_size, config.model.base_learning_rate
        if not cpu:
            ngpu = trainer_config["devices"]
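The scaled value itself is not visible in this hunk; in the upstream stable-diffusion script this file derives from, the linear scaling rule is learning_rate = accumulate_grad_batches * ngpu * bs * base_lr. A sketch with hypothetical values, assuming that formula:

# Hypothetical values; linear scaling grows base_lr with the effective batch size
accumulate_grad_batches = 2
ngpu = 4
bs = 8
base_lr = 1.0e-4
learning_rate = accumulate_grad_batches * ngpu * bs * base_lr
print(f"{learning_rate:.2e}")    # 6.40e-03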
@@ -780,7 +867,7 @@ if __name__ == "__main__":
            rank_zero_info("++++ NOT USING LR SCALING ++++")
            rank_zero_info(f"Setting learning rate to {model.learning_rate:.2e}")

        # Allow checkpointing via USR1
        def melk(*args, **kwargs):
            # run all checkpoint hooks
            if trainer.global_rank == 0:
@@ -794,20 +881,23 @@ if __name__ == "__main__":
                pudb.set_trace()

        import signal

        # Assign melk to the SIGUSR1 signal and divein to the SIGUSR2 signal
        signal.signal(signal.SIGUSR1, melk)
        signal.signal(signal.SIGUSR2, divein)

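This signal wiring means a cluster scheduler (or the user) can send SIGUSR1 to checkpoint a running job without killing it. A runnable sketch of the mechanism, with a stand-in handler:

import os
import signal
import time

def melk(*args, **kwargs):
    # Stand-in: the real handler calls trainer.save_checkpoint(...) on rank 0
    print("SIGUSR1 received, saving checkpoint")

signal.signal(signal.SIGUSR1, melk)    # Unix-only signal
print(f"Try: kill -USR1 {os.getpid()}")
time.sleep(30)    # the handler fires asynchronously while the process works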
        # Run the training and validation
        if opt.train:
            try:
                trainer.fit(model, data)
            except Exception:
                melk()
                raise
        # Print the maximum GPU memory allocated during training
        print(f"GPU memory usage: {torch.cuda.max_memory_allocated() / 1024**2:.0f} MB")
        # if not opt.no_test and not trainer.interrupted:
        #     trainer.test(model, data)
    except Exception:
        # If there's an exception, debug it if opt.debug is true and the trainer's global rank is 0
        if opt.debug and trainer.global_rank == 0:
            try:
                import pudb as debugger
@@ -816,7 +906,7 @@ if __name__ == "__main__":
            debugger.post_mortem()
        raise
    finally:
        # Move the log directory to debug_runs if opt.debug is true and the trainer's global rank is 0
        if opt.debug and not opt.resume and trainer.global_rank == 0:
            dst, name = os.path.split(logdir)
            dst = os.path.join(dst, "debug_runs", name)