Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-10-25 01:40:08 +00:00
[zero] sharded model supports the reuse of fp16 shard (#495)
* sharded model supports reuse of fp16 shard
* rename variable
* polish code
* polish code
* polish code
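For context, the sketch below illustrates what "reusing the fp16 shard" can mean in ZeRO-style sharded training: after backward, the reduced fp16 gradient shard is written into the storage already held by the fp16 parameter shard, so no separate fp16 gradient buffer is allocated. This is a minimal conceptual sketch with hypothetical names (TinyFP16Shard, reuse_for_grad); it is not the ColossalAI implementation or API.

# Conceptual sketch only (hypothetical names, not ColossalAI's implementation):
# each rank keeps an fp16 shard of a flattened parameter; after backward, the
# reduced gradient shard is written into the same storage, reusing the fp16 shard.
import torch


class TinyFP16Shard:
    """Hypothetical holder for one rank's fp16 slice of a flattened parameter."""

    def __init__(self, full_param: torch.Tensor, rank: int, world_size: int):
        shard_size = full_param.numel() // world_size
        start = rank * shard_size
        flat = full_param.detach().flatten()
        # Keep only this rank's slice, cast to fp16.
        self.fp16_shard = flat[start:start + shard_size].to(torch.float16).clone()

    def reuse_for_grad(self, grad_shard: torch.Tensor) -> torch.Tensor:
        # Overwrite the fp16 parameter shard in place with the gradient shard,
        # so no extra fp16 buffer is allocated for gradients.
        self.fp16_shard.copy_(grad_shard.to(torch.float16))
        return self.fp16_shard


if __name__ == "__main__":
    param = torch.randn(8)
    shard = TinyFP16Shard(param, rank=0, world_size=2)
    grad = torch.randn(4)
    reused = shard.reuse_for_grad(grad)
    # The same storage now holds the gradient shard.
    assert reused.data_ptr() == shard.fp16_shard.data_ptr()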
@@ -16,7 +16,7 @@ from colossalai.zero.sharded_optim._utils import has_inf_or_nan
 from tests.components_to_test.registry import non_distributed_component_funcs
 from torch.nn.parallel import DistributedDataParallel as DDP
 
-from common import (MP_PARALLEL_CONFIG, ZERO_PARALLEL_CONFIG, check_params, check_sharded_params_padding)
+from common import (MP_PARALLEL_CONFIG, ZERO_PARALLEL_CONFIG, check_params, check_sharded_model_params)
 
 
 def run_dist(rank, world_size, port, parallel_config):
@@ -87,7 +87,7 @@ def run_dist(rank, world_size, port, parallel_config):
     if parallel_config == MP_PARALLEL_CONFIG:
         check_params(torch_model, colo_model, loose=True)
     elif parallel_config == ZERO_PARALLEL_CONFIG:
-        check_sharded_params_padding(torch_model, colo_model, loose=True)
+        check_sharded_model_params(torch_model, colo_model, loose=True)
 
 
 # FIXME: enable this test in next PR
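The renamed helper check_sharded_model_params lives in the tests' common.py, which is not part of this diff. The sketch below shows the kind of parameter comparison such a helper typically performs; the function name suffix, tolerances, and the assumption that each sharded parameter already exposes its full payload via .data are illustrative assumptions, not the actual ColossalAI test code.

# Hypothetical sketch (not the real common.check_sharded_model_params):
# compare a torch reference model's parameters against a sharded ZeRO model's
# parameters, with looser tolerances when loose=True.
import torch


def check_sharded_model_params_sketch(torch_model, colo_model, loose: bool = False):
    rtol, atol = (1e-3, 1e-3) if loose else (1e-5, 1e-5)
    for (name, torch_p), colo_p in zip(torch_model.named_parameters(), colo_model.parameters()):
        # Assumption: colo_p.data already holds the gathered full parameter payload.
        same = torch.allclose(torch_p.float(), colo_p.data.float(), rtol=rtol, atol=atol)
        assert same, f"parameter {name} mismatch under rtol={rtol}, atol={atol}"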