mirror of
https://github.com/hpcaitech/ColossalAI.git
synced 2025-09-11 05:49:55 +00:00
[misc] update pre-commit and run all files (#4752)
* [misc] update pre-commit
* [misc] run pre-commit
* [misc] remove useless configuration files
* [misc] ignore cuda for clang-format
@@ -5,7 +5,6 @@ import torch
 from torch.optim import Adam
 from torchvision.models import resnet18
 
-from colossalai.booster.plugin.gemini_plugin import GeminiCheckpointIO
 from colossalai.checkpoint_io import GeneralCheckpointIO
 from colossalai.testing import check_state_dict_equal, clear_cache_before_run, parameterize
 
@@ -18,7 +17,7 @@ from colossalai.testing import check_state_dict_equal, clear_cache_before_run, parameterize
 
 
 @clear_cache_before_run()
-@parameterize('use_safetensors', [True, False])
+@parameterize("use_safetensors", [True, False])
 def test_unsharded_checkpoint(use_safetensors: bool):
     # create a model and optimizer
     model = resnet18()
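The only substantive edit in this hunk is quote normalization on the `@parameterize` line. `colossalai.testing.parameterize` plays a role similar to `pytest.mark.parametrize`, re-running the decorated function once per value. A rough sketch of how such a decorator can be built (a hypothetical reimplementation for illustration, not ColossalAI's actual code):

```python
import functools


def parameterize(name, values):
    # Hypothetical sketch of a parameterize-style decorator; the real
    # colossalai.testing.parameterize may differ in signature and features.
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            # Run the wrapped function once per value, passed as keyword `name`.
            for value in values:
                fn(*args, **{**kwargs, name: value})

        return wrapper

    return decorator


@parameterize("use_safetensors", [True, False])
def demo(use_safetensors: bool):
    print("use_safetensors =", use_safetensors)


demo()  # runs twice: True, then False
```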
@@ -59,7 +58,7 @@ def test_unsharded_checkpoint(use_safetensors: bool):
     check_state_dict_equal(optimizer.state_dict(), new_optimizer.state_dict())
 
 
-@pytest.mark.parametrize('use_safetensors', [True, False])
+@pytest.mark.parametrize("use_safetensors", [True, False])
 def test_sharded_model_checkpoint(use_safetensors: bool):
     # create a model and optimizer
     model = resnet18()
@@ -75,11 +74,9 @@ def test_sharded_model_checkpoint(use_safetensors: bool):
 
     # create a temp file for checkpoint
     if use_safetensors:
         suffix = ".safetensors"
         SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
-        pass
     else:
         suffix = ".bin"
         WEIGHTS_INDEX_NAME = "model.bin.index.json"
-        pass
 
     model_ckpt_dir = tempfile.TemporaryDirectory()
     optimizer_ckpt_tempfile = tempfile.NamedTemporaryFile()
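The `SAFE_WEIGHTS_INDEX_NAME` / `WEIGHTS_INDEX_NAME` constants name the index file of a sharded checkpoint: weights are split across several files, and a JSON index maps each tensor name to the file holding it. A minimal sketch of that layout, using only plain `torch` and an illustrative one-tensor-per-shard split (not ColossalAI's exact on-disk schema):

```python
import json
import os
import tempfile

import torch
from torchvision.models import resnet18

# Illustrative split: one tensor per shard file. Real sharders pack many
# tensors per file up to a size budget; the index name matches the test's constant.
model = resnet18()
ckpt_dir = tempfile.TemporaryDirectory()

weight_map = {}
for i, (name, tensor) in enumerate(model.state_dict().items()):
    shard_file = f"model-{i:05d}.bin"
    torch.save({name: tensor}, os.path.join(ckpt_dir.name, shard_file))
    weight_map[name] = shard_file

with open(os.path.join(ckpt_dir.name, "model.bin.index.json"), "w") as f:
    json.dump({"weight_map": weight_map}, f)

# Loading walks the index and fetches each tensor from its shard.
with open(os.path.join(ckpt_dir.name, "model.bin.index.json")) as f:
    index = json.load(f)
state_dict = {
    name: torch.load(os.path.join(ckpt_dir.name, file))[name]
    for name, file in index["weight_map"].items()
}
resnet18().load_state_dict(state_dict)
```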
@@ -103,7 +100,6 @@ def test_sharded_model_checkpoint(use_safetensors: bool):
 
 
 def test_sharded_optimizer_checkpoint():
-
     # create a model and optimizer
     model = resnet18()
     optimizer = Adam(model.parameters(), lr=0.001)
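`test_sharded_optimizer_checkpoint` exercises the same save/load round trip for optimizer state. Stripped of sharding, the core idea looks like this in plain PyTorch (Adam only has per-parameter state to checkpoint after at least one `step()`):

```python
import tempfile

import torch
from torch.optim import Adam
from torchvision.models import resnet18

model = resnet18()
optimizer = Adam(model.parameters(), lr=0.001)

# Adam's per-parameter state (exp_avg, exp_avg_sq) exists only after a step.
model(torch.randn(1, 3, 224, 224)).sum().backward()
optimizer.step()

ckpt = tempfile.NamedTemporaryFile()
torch.save(optimizer.state_dict(), ckpt.name)

# A fresh optimizer restores the saved state.
new_optimizer = Adam(model.parameters(), lr=0.001)
new_optimizer.load_state_dict(torch.load(ckpt.name))
```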
@@ -162,16 +158,11 @@ def test_sharded_optimizer_checkpoint():
 
 
 def test_sharded_optimizer_multiple_param_groups():
-
     # create a model and optimizer
     model = resnet18()
-    optimizer = Adam([{
-        'params': model.layer1.parameters()
-    }, {
-        'params': model.layer2.parameters(),
-        'lr': 0.002
-    }],
-                     lr=0.001)
+    optimizer = Adam(
+        [{"params": model.layer1.parameters()}, {"params": model.layer2.parameters(), "lr": 0.002}], lr=0.001
+    )
 
     # create test data sample
     x = torch.randn(1, 3, 224, 224)
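The reformatted `Adam` call is behaviorally identical to the old multi-line version: one optimizer with two parameter groups, where the second group overrides the default learning rate. A standalone check:

```python
from torch.optim import Adam
from torchvision.models import resnet18

model = resnet18()
# Two parameter groups: the default lr (0.001) applies to layer1's group,
# while layer2's group overrides it with 0.002.
optimizer = Adam(
    [{"params": model.layer1.parameters()}, {"params": model.layer2.parameters(), "lr": 0.002}], lr=0.001
)
print([group["lr"] for group in optimizer.param_groups])  # [0.001, 0.002]
```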
@@ -194,13 +185,9 @@ def test_sharded_optimizer_multiple_param_groups():
 
     # create new model
     new_model = resnet18()
-    new_optimizer = Adam([{
-        'params': new_model.layer1.parameters()
-    }, {
-        'params': new_model.layer2.parameters(),
-        'lr': 0.002
-    }],
-                         lr=0.001)
+    new_optimizer = Adam(
+        [{"params": new_model.layer1.parameters()}, {"params": new_model.layer2.parameters(), "lr": 0.002}], lr=0.001
+    )
 
     ckpt_io.load_model(new_model, str(model_ckpt_dir.name), strict=True)
     ckpt_io.load_optimizer(new_optimizer, str(optimizer_ckpt_dir.name))
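For context, the two `load_*` calls close a save/load round trip that starts earlier in the test. A sketch of the whole pattern, assuming `GeneralCheckpointIO` exposes `save_model`/`save_optimizer` counterparts with a `shard` flag (only the load side appears in this excerpt):

```python
import tempfile

from torch.optim import Adam
from torchvision.models import resnet18

from colossalai.checkpoint_io import GeneralCheckpointIO
from colossalai.testing import check_state_dict_equal

model, new_model = resnet18(), resnet18()
optimizer = Adam(model.parameters(), lr=0.001)
new_optimizer = Adam(new_model.parameters(), lr=0.001)

ckpt_io = GeneralCheckpointIO()
model_ckpt_dir = tempfile.TemporaryDirectory()
optimizer_ckpt_dir = tempfile.TemporaryDirectory()

# Assumed save-side API, mirroring the load calls shown in the diff above.
ckpt_io.save_model(model, model_ckpt_dir.name, shard=True)
ckpt_io.save_optimizer(optimizer, optimizer_ckpt_dir.name, shard=True)

# The load side and the equality check are verbatim from the test.
ckpt_io.load_model(new_model, str(model_ckpt_dir.name), strict=True)
ckpt_io.load_optimizer(new_optimizer, str(optimizer_ckpt_dir.name))
check_state_dict_equal(model.state_dict(), new_model.state_dict())
```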