[misc] update pre-commit and run all files (#4752)

* [misc] update pre-commit

* [misc] run pre-commit

* [misc] remove useless configuration files

* [misc] ignore cuda for clang-format
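Note: the updated .pre-commit-config.yaml itself is not part of this excerpt. Purely as a hypothetical sketch of what "ignore cuda for clang-format" usually looks like (the hook repositories, revisions, and the file pattern below are assumptions, not the commit's actual configuration), the clang-format hook gets an exclude pattern for CUDA sources:

    # Illustrative sketch only; not the configuration shipped in this commit.
    repos:
      - repo: https://github.com/psf/black            # assumed formatter; would produce the quote changes below
        rev: 23.9.1                                   # assumed revision
        hooks:
          - id: black
      - repo: https://github.com/pre-commit/mirrors-clang-format
        rev: v16.0.6                                  # assumed revision
        hooks:
          - id: clang-format
            exclude: \.cu$                            # "ignore cuda for clang-format": skip CUDA kernels

Running "pre-commit run --all-files" then applies every hook to the whole tree, which is what produces the sweeping reformatting in the diffs below.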
Hongxin Liu
2023-09-19 14:20:26 +08:00
committed by GitHub
parent 3c6b831c26
commit 079bf3cb26
1268 changed files with 50037 additions and 38444 deletions

View File

@@ -6,7 +6,6 @@ from torch.utils.tensorboard import SummaryWriter
 class WandbLog:
     @classmethod
     def init_wandb(cls, project, notes=None, name=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), config=None):
         wandb.init(project=project, notes=notes, name=name, config=config)
@@ -23,7 +22,6 @@ class WandbLog:
 class TensorboardLog:
     def __init__(self, location, name=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), config=None):
         if not os.path.exists(location):
             os.mkdir(location)
@@ -31,12 +29,12 @@ class TensorboardLog:
     def log_train(self, result, step):
         for k, v in result.items():
-            self.writer.add_scalar(f'{k}/train', v, step)
+            self.writer.add_scalar(f"{k}/train", v, step)

     def log_eval(self, result, step):
         for k, v in result.items():
-            self.writer.add_scalar(f'{k}/eval', v, step)
+            self.writer.add_scalar(f"{k}/eval", v, step)

     def log_zeroshot(self, result, step):
         for k, v in result.items():
-            self.writer.add_scalar(f'{k}_acc/eval', v, step)
+            self.writer.add_scalar(f"{k}_acc/eval", v, step)

View File

@@ -12,8 +12,8 @@ def logging(s, log_path, print_=True, log_=True):
     if print_:
         print(s)
     if log_:
-        with open(log_path, 'a+') as f_log:
-            f_log.write(s + '\n')
+        with open(log_path, "a+") as f_log:
+            f_log.write(s + "\n")


 def get_logger(log_path, **kwargs):
@@ -22,22 +22,22 @@ def get_logger(log_path, **kwargs):
 def create_exp_dir(dir_path, scripts_to_save=None, debug=False):
     if debug:
-        print('Debug Mode : no experiment dir created')
+        print("Debug Mode : no experiment dir created")
         return functools.partial(logging, log_path=None, log_=False)

     if not os.path.exists(dir_path):
         os.makedirs(dir_path)
-    print('Experiment dir : {}'.format(dir_path))
+    print("Experiment dir : {}".format(dir_path))
     if scripts_to_save is not None:
-        script_path = os.path.join(dir_path, 'scripts')
+        script_path = os.path.join(dir_path, "scripts")
         if not os.path.exists(script_path):
             os.makedirs(script_path)
         for script in scripts_to_save:
-            dst_file = os.path.join(dir_path, 'scripts', os.path.basename(script))
+            dst_file = os.path.join(dir_path, "scripts", os.path.basename(script))
             shutil.copyfile(script, dst_file)

-    return get_logger(log_path=os.path.join(dir_path, 'log.txt'))
+    return get_logger(log_path=os.path.join(dir_path, "log.txt"))


 def get_cpu_mem():
@@ -48,8 +48,8 @@ def get_gpu_mem():
     return torch.cuda.memory_allocated() / 1024**2


-def get_mem_info(prefix=''):
-    return f'{prefix}GPU memory usage: {get_gpu_mem():.2f} MB, CPU memory usage: {get_cpu_mem():.2f} MB'
+def get_mem_info(prefix=""):
+    return f"{prefix}GPU memory usage: {get_gpu_mem():.2f} MB, CPU memory usage: {get_cpu_mem():.2f} MB"


 def get_tflops(model_numel, batch_size, seq_len, step_time):
@@ -59,11 +59,12 @@ def get_tflops(model_numel, batch_size, seq_len, step_time):
 def get_parameters_in_billions(model, world_size=1):
     gpus_per_model = world_size

-    approx_parameters_in_billions = sum([
-        sum([p.ds_numel if hasattr(p, 'ds_id') else p.nelement()
-             for p in model_module.parameters()])
-        for model_module in model
-    ])
+    approx_parameters_in_billions = sum(
+        [
+            sum([p.ds_numel if hasattr(p, "ds_id") else p.nelement() for p in model_module.parameters()])
+            for model_module in model
+        ]
+    )

     return approx_parameters_in_billions * gpus_per_model / (1e9)
@@ -71,13 +72,13 @@ def get_parameters_in_billions(model, world_size=1):
 def throughput_calculator(numel, args, config, iteration_time, total_iterations, world_size=1):
     gpus_per_model = 1
     batch_size = args.train_micro_batch_size_per_gpu
-    samples_per_model = batch_size * args.max_seq_length
-    model_replica_count = world_size / gpus_per_model
+    batch_size * args.max_seq_length
+    world_size / gpus_per_model
     approx_parameters_in_billions = numel
     elapsed_time_per_iter = iteration_time / total_iterations
     samples_per_second = batch_size / elapsed_time_per_iter

-    #flops calculator
+    # flops calculator
     hidden_size = config.hidden_size
     num_layers = config.num_hidden_layers
     vocab_size = config.vocab_size
@@ -87,9 +88,9 @@ def throughput_calculator(numel, args, config, iteration_time, total_iterations,
     # The factor of 4 is when used with activation check-pointing,
     # otherwise it will be 3.
     checkpoint_activations_factor = 4 if args.checkpoint_activations else 3
-    flops_per_iteration = (24 * checkpoint_activations_factor * batch_size * args.max_seq_length * num_layers *
-                           (hidden_size**2)) * (1. + (args.max_seq_length / (6. * hidden_size)) +
-                           (vocab_size / (16. * num_layers * hidden_size)))
+    flops_per_iteration = (
+        24 * checkpoint_activations_factor * batch_size * args.max_seq_length * num_layers * (hidden_size**2)
+    ) * (1.0 + (args.max_seq_length / (6.0 * hidden_size)) + (vocab_size / (16.0 * num_layers * hidden_size)))
     tflops = flops_per_iteration / (elapsed_time_per_iter * (10**12))
     return samples_per_second, tflops, approx_parameters_in_billions
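Side note on the hunk above: the reformatting only reflows the expression; its value is unchanged. Written out (with c = checkpoint_activations_factor, which is 4 with activation checkpointing and 3 otherwise, B = batch_size, s = args.max_seq_length, L = num_layers, h = hidden_size, V = vocab_size, and t = elapsed_time_per_iter), the code computes

    \text{flops\_per\_iteration} = 24\, c\, B\, s\, L\, h^{2}\left(1 + \frac{s}{6h} + \frac{V}{16\, L\, h}\right),
    \qquad \text{tflops} = \frac{\text{flops\_per\_iteration}}{t \cdot 10^{12}}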
@@ -106,9 +107,9 @@ def synchronize():
 def log_args(logger, args):
-    logger.info('--------args----------')
-    message = '\n'.join([f'{k:<30}: {v}' for k, v in vars(args).items()])
-    message += '\n'
-    message += '\n'.join([f'{k:<30}: {v}' for k, v in gpc.config.items()])
+    logger.info("--------args----------")
+    message = "\n".join([f"{k:<30}: {v}" for k, v in vars(args).items()])
+    message += "\n"
+    message += "\n".join([f"{k:<30}: {v}" for k, v in gpc.config.items()])
     logger.info(message)
-    logger.info('--------args----------\n')
+    logger.info("--------args----------\n")

View File

@@ -16,21 +16,21 @@ def set_global_variables(launch_time, tensorboard_path):
 def _set_timers():
     """Initialize timers."""
     global _GLOBAL_TIMERS
-    _ensure_var_is_not_initialized(_GLOBAL_TIMERS, 'timers')
+    _ensure_var_is_not_initialized(_GLOBAL_TIMERS, "timers")
     _GLOBAL_TIMERS = Timers()


 def _set_tensorboard_writer(launch_time, tensorboard_path):
     """Set tensorboard writer."""
     global _GLOBAL_TENSORBOARD_WRITER
-    _ensure_var_is_not_initialized(_GLOBAL_TENSORBOARD_WRITER, 'tensorboard writer')
+    _ensure_var_is_not_initialized(_GLOBAL_TENSORBOARD_WRITER, "tensorboard writer")
     if torch.distributed.get_rank() == 0:
-        _GLOBAL_TENSORBOARD_WRITER = TensorboardLog(tensorboard_path + f'/{launch_time}', launch_time)
+        _GLOBAL_TENSORBOARD_WRITER = TensorboardLog(tensorboard_path + f"/{launch_time}", launch_time)


 def get_timers():
     """Return timers."""
-    _ensure_var_is_initialized(_GLOBAL_TIMERS, 'timers')
+    _ensure_var_is_initialized(_GLOBAL_TIMERS, "timers")
     return _GLOBAL_TIMERS
@@ -42,12 +42,12 @@ def get_tensorboard_writer():
 def _ensure_var_is_initialized(var, name):
     """Make sure the input variable is not None."""
-    assert var is not None, '{} is not initialized.'.format(name)
+    assert var is not None, "{} is not initialized.".format(name)


 def _ensure_var_is_not_initialized(var, name):
     """Make sure the input variable is not None."""
-    assert var is None, '{} is already initialized.'.format(name)
+    assert var is None, "{} is already initialized.".format(name)


 class _Timer:
@@ -68,9 +68,9 @@ class _Timer:
     def stop(self):
         """Stop the timer."""
-        assert self.started_, 'timer is not started'
+        assert self.started_, "timer is not started"
         torch.cuda.synchronize()
-        self.elapsed_ += (time.time() - self.start_time)
+        self.elapsed_ += time.time() - self.start_time
         self.started_ = False

     def reset(self):
@@ -114,15 +114,15 @@ class Timers:
         assert normalizer > 0.0
         for name in names:
             value = self.timers[name].elapsed(reset=reset) / normalizer
-            writer.add_scalar(name + '-time', value, iteration)
+            writer.add_scalar(name + "-time", value, iteration)

     def log(self, names, normalizer=1.0, reset=True):
         """Log a group of timers."""
         assert normalizer > 0.0
-        string = 'time (ms)'
+        string = "time (ms)"
         for name in names:
             elapsed_time = self.timers[name].elapsed(reset=reset) * 1000.0 / normalizer
-            string += ' | {}: {:.2f}'.format(name, elapsed_time)
+            string += " | {}: {:.2f}".format(name, elapsed_time)
         if torch.distributed.is_initialized():
             if torch.distributed.get_rank() == (torch.distributed.get_world_size() - 1):
                 print(string, flush=True)

View File

@@ -1,16 +1,14 @@
 import logging
 import os

 import torch.distributed as dist

-logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
-                    datefmt='%m/%d/%Y %H:%M:%S',
-                    level=logging.INFO)
+logging.basicConfig(
+    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
+)
 logger = logging.getLogger(__name__)


-class Logger():
+class Logger:
     def __init__(self, log_path, cuda=False, debug=False):
         self.logger = logging.getLogger(__name__)
         self.cuda = cuda
@@ -23,8 +21,8 @@ class Logger():
         self.logger.info(message, *args, **kwargs)
         if log_:
-            with open(self.log_path, 'a+') as f_log:
-                f_log.write(message + '\n')
+            with open(self.log_path, "a+") as f_log:
+                f_log.write(message + "\n")

     def error(self, message, *args, **kwargs):
         self.logger.error(message, *args, **kwargs)