[hotfix] quick fixes to make legacy tutorials runnable (#5559)

Co-authored-by: Edenzzzz <wtan45@wisc.edu>

Author:    Edenzzzz
Date:      2024-04-07 12:06:27 +08:00
Committed: GitHub
Commit:    15055f9a36
Parent:    8e412a548e

8 changed files with 20 additions and 12 deletions


@@ -17,7 +17,7 @@ def synthesize_data():
 def main():
-    colossalai.launch_from_torch(config="./config.py")
+    colossalai.legacy.launch_from_torch(config="./config.py")
     logger = get_dist_logger()
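For context, these tutorials are started through torchrun, and `launch_from_torch` reads the rank/world-size environment variables that torchrun sets. A minimal sketch of the updated launch pattern (script and config names here are illustrative, not taken from this commit):

```python
import colossalai
import colossalai.legacy
from colossalai.logging import get_dist_logger


def main():
    # reads RANK, WORLD_SIZE, MASTER_ADDR, MASTER_PORT set by torchrun
    colossalai.legacy.launch_from_torch(config="./config.py")

    # distributed logger; ranks=[0] restricts this message to rank 0
    logger = get_dist_logger()
    logger.info("distributed environment initialized", ranks=[0])


if __name__ == "__main__":
    main()
```

Typically run as `torchrun --nproc_per_node=<num_gpus> train.py`.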


@@ -41,9 +41,9 @@ class DummyDataloader:
 def main():
     # launch from torch
-    parser = colossalai.get_default_parser()
+    parser = colossalai.legacy.get_default_parser()
     args = parser.parse_args()
-    colossalai.launch_from_torch(config=args.config)
+    colossalai.legacy.launch_from_torch(config=args.config)
     # get logger
     logger = get_dist_logger()


@@ -37,14 +37,14 @@ class DummyDataloader:
 def main():
     # initialize distributed setting
-    parser = colossalai.get_default_parser()
+    parser = colossalai.legacy.get_default_parser()
     parser.add_argument(
         "--optimizer", choices=["lars", "lamb"], help="Choose your large-batch optimizer", required=True
     )
     args = parser.parse_args()
     # launch from torch
-    colossalai.launch_from_torch(config=args.config)
+    colossalai.legacy.launch_from_torch(config=args.config)
     # get logger
     logger = get_dist_logger()
@@ -73,7 +73,7 @@ def main():
     )
     # initialize
-    engine, train_dataloader, test_dataloader, _ = colossalai.initialize(
+    engine, train_dataloader, test_dataloader, _ = colossalai.legacy.initialize(
         model=model,
         optimizer=optimizer,
         criterion=criterion,
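The `engine` returned by `colossalai.legacy.initialize` wraps the model, optimizer, and criterion; the tutorial's training loop drives it roughly as in the sketch below (a simplified outline, assuming the model, optimizer, criterion, and dataloaders are built as in the surrounding tutorial code):

```python
# simplified sketch of the legacy engine loop; objects come from the tutorial setup above
engine, train_dataloader, test_dataloader, _ = colossalai.legacy.initialize(
    model=model,
    optimizer=optimizer,
    criterion=criterion,
    train_dataloader=train_dataloader,
    test_dataloader=test_dataloader,
)

engine.train()
for img, label in train_dataloader:
    img, label = img.cuda(), label.cuda()
    engine.zero_grad()
    output = engine(img)                    # forward through the wrapped model
    loss = engine.criterion(output, label)  # loss via the wrapped criterion
    engine.backward(loss)                   # backward with the engine's grad handling
    engine.step()                           # optimizer update
```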

examples/tutorial/opt/opt/run_clm.py (4 changes; executable file → normal file)

@@ -72,7 +72,7 @@ def get_time_stamp():
 def parse_args():
-    parser = colossalai.get_default_parser()
+    parser = colossalai.legacy.get_default_parser()
     parser.add_argument("-s", "--synthetic", action="store_true")
     parser.add_argument(
         "--dataset_name",
@@ -289,7 +289,7 @@ class DummyDataloader:
 def main():
     args = parse_args()
     disable_existing_loggers()
-    colossalai.launch_from_torch(config=dict())
+    colossalai.legacy.launch_from_torch(config=dict())
     logger = get_dist_logger()
     is_main_process = dist.get_rank() == 0


@@ -1,9 +1,9 @@
 import torch
 import torch.nn as nn
-from colossalai.kernel.cuda_native import LayerNorm
 from colossalai.kernel.jit import bias_dropout_add_fused_inference, bias_dropout_add_fused_train
 from colossalai.legacy.nn.layer.parallel_sequence import TransformerSelfAttentionRing
+from colossalai.nn.layer.layernorm import MixedFusedLayerNorm as LayerNorm
 from .dropout import get_bias_dropout_add
 from .mlp import TransformerMLP
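The swap above re-exposes `MixedFusedLayerNorm` under the old `LayerNorm` name, so the transformer-layer code that follows does not need to change. A rough sketch of the intent, assuming the class follows the usual `torch.nn.LayerNorm`-style constructor (not verified against this commit):

```python
import torch

# aliasing keeps downstream call sites identical to the old cuda_native import
from colossalai.nn.layer.layernorm import MixedFusedLayerNorm as LayerNorm

hidden_size = 1024
norm = LayerNorm(hidden_size, eps=1e-5).cuda()   # assumed LayerNorm-style signature
x = torch.randn(4, 128, hidden_size, device="cuda")
y = norm(x)                                      # same call pattern as torch.nn.LayerNorm
```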


@@ -48,7 +48,7 @@ def pipeline_data_process_func(stage_output, micro_batch_data):
 def main():
     # initialize
     parse_args()
-    colossalai.launch_from_torch(config="./config.py", seed=1234, backend="nccl")
+    colossalai.legacy.launch_from_torch(config="./config.py", seed=1234, backend="nccl")
     logger = get_dist_logger()
@@ -136,7 +136,7 @@ def main():
     logger.info(f"LR Scheduler is built with {warmup_steps} warmup steps and {gpc.config.DECAY_ITERS} decay steps")
     # # init
-    engine, *dummy = colossalai.initialize(model, optimizer, criterion, verbose=True)
+    engine, *dummy = colossalai.legacy.initialize(model, optimizer, criterion, verbose=True)
     # build timer
     timer = MultiTimer()
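For completeness, several of these tutorials hand an engine and a `MultiTimer` to the legacy `Trainer`; a condensed sketch of that wiring (the `colossalai.legacy.trainer` / `colossalai.legacy.utils` paths and hook names are assumed from the older tutorial code, not shown in this diff):

```python
from colossalai.legacy.trainer import Trainer, hooks  # assumed legacy location of Trainer
from colossalai.legacy.utils import MultiTimer        # assumed legacy location of MultiTimer

timer = MultiTimer()
trainer = Trainer(engine=engine, timer=timer, logger=logger)

hook_list = [
    hooks.LossHook(),
    hooks.LRSchedulerHook(lr_scheduler=lr_scheduler, by_epoch=False),
    hooks.LogMetricByEpochHook(logger),
]

trainer.fit(
    train_dataloader=train_dataloader,
    epochs=10,  # illustrative; the tutorials take this from their config
    hooks=hook_list,
    display_progress=True,
)
```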