[misc] update pre-commit and run all files (#4752)

* [misc] update pre-commit

* [misc] run pre-commit

* [misc] remove useless configuration files

* [misc] ignore cuda for clang-format
Author: Hongxin Liu
Date: 2023-09-19 14:20:26 +08:00
Committed by: GitHub
Parent: 3c6b831c26
Commit: 079bf3cb26
1268 changed files with 50037 additions and 38444 deletions


@@ -2,36 +2,35 @@ from colossalai import get_default_parser
 def parse_demo_args():
     parser = get_default_parser()
-    parser.add_argument("--model_name_or_path",
-                        type=str,
-                        default="facebook/opt-350m",
-                        help="Path to pretrained model or model identifier from huggingface.co/models.")
-    parser.add_argument("--output_path",
-                        type=str,
-                        default="./output_model.bin",
-                        help="The path of your saved model after finetuning.")
+    parser.add_argument(
+        "--model_name_or_path",
+        type=str,
+        default="facebook/opt-350m",
+        help="Path to pretrained model or model identifier from huggingface.co/models.",
+    )
+    parser.add_argument(
+        "--output_path", type=str, default="./output_model.bin", help="The path of your saved model after finetuning."
+    )
     parser.add_argument(
         "--plugin",
         type=str,
         default="gemini",
-        help=
-        "Plugin to use. Valid plugins include 'torch_ddp','torch_ddp_fp16','gemini','low_level_zero', 'hybrid_parallel'."
+        help="Plugin to use. Valid plugins include 'torch_ddp','torch_ddp_fp16','gemini','low_level_zero', 'hybrid_parallel'.",
     )
     parser.add_argument("--num_epoch", type=int, default=10, help="Number of epochs.")
-    parser.add_argument("--batch_size",
-                        type=int,
-                        default=32,
-                        help="Batch size (per dp group) for the training dataloader.")
-    parser.add_argument("--learning_rate",
-                        type=float,
-                        default=5e-5,
-                        help="Initial learning rate (after the potential warmup period) to use.")
-    parser.add_argument("--warmup_ratio",
-                        type=float,
-                        default=0.1,
-                        help="Ratio of warmup steps against total training steps.")
+    parser.add_argument(
+        "--batch_size", type=int, default=32, help="Batch size (per dp group) for the training dataloader."
+    )
+    parser.add_argument(
+        "--learning_rate",
+        type=float,
+        default=5e-5,
+        help="Initial learning rate (after the potential warmup period) to use.",
+    )
+    parser.add_argument(
+        "--warmup_ratio", type=float, default=0.1, help="Ratio of warmup steps against total training steps."
+    )
     parser.add_argument("--weight_decay", type=float, default=0.01, help="Weight decay to use.")
     parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
@@ -40,25 +39,28 @@ def parse_demo_args():
 def parse_benchmark_args():
     parser = get_default_parser()
-    parser.add_argument("--model_name_or_path",
-                        type=str,
-                        default="facebook/opt-125m",
-                        help="Path to pretrained model or model identifier from huggingface.co/models.")
+    parser.add_argument(
+        "--model_name_or_path",
+        type=str,
+        default="facebook/opt-125m",
+        help="Path to pretrained model or model identifier from huggingface.co/models.",
+    )
     parser.add_argument(
         "--plugin",
         type=str,
         default="gemini",
-        help="Plugin to use. Valid plugins include 'torch_ddp','torch_ddp_fp16','gemini','low_level_zero'.")
-    parser.add_argument("--batch_size",
-                        type=int,
-                        default=32,
-                        help="Batch size (per dp group) for the training dataloader.")
-    parser.add_argument("--learning_rate",
-                        type=float,
-                        default=5e-5,
-                        help="Initial learning rate (after the potential warmup period) to use.")
+        help="Plugin to use. Valid plugins include 'torch_ddp','torch_ddp_fp16','gemini','low_level_zero'.",
+    )
+    parser.add_argument(
+        "--batch_size", type=int, default=32, help="Batch size (per dp group) for the training dataloader."
+    )
+    parser.add_argument(
+        "--learning_rate",
+        type=float,
+        default=5e-5,
+        help="Initial learning rate (after the potential warmup period) to use.",
+    )
     parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
     parser.add_argument("--max_train_steps", type=int, default=20, help="Total number of training steps to perform.")
     parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")