[refactor] moving memtracer to gemini (#801)

Jiarui Fang
2022-04-19 10:13:08 +08:00
committed by GitHub
parent 8711c706f4
commit 4d9332b4c5
24 changed files with 102 additions and 87 deletions


@@ -78,6 +78,7 @@ def run_data_sampler(rank, world_size, port):
    torch.cuda.empty_cache()
+@pytest.mark.skip
@pytest.mark.cpu
@rerun_if_address_is_in_use()
def test_data_sampler():
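
For reference, a minimal sketch of the launch pattern these decorators sit on, as used throughout the test suite (the world_size value, the spawn body, and the elided worker body are assumptions; only run_data_sampler, free_port, and rerun_if_address_is_in_use come from the diff):

```python
import pytest
import torch.multiprocessing as mp
from functools import partial

from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port


def run_data_sampler(rank, world_size, port):
    ...  # worker body elided; the hunk above only shows its tail


@pytest.mark.cpu
@rerun_if_address_is_in_use()
def test_data_sampler():
    world_size = 4  # assumed; not visible in the hunk
    # mp.spawn passes the rank as the first positional argument,
    # so the remaining worker parameters are bound with partial().
    run_func = partial(run_data_sampler, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)
```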


@@ -3,8 +3,8 @@ import colossalai
import pytest
import torch.multiprocessing as mp
from colossalai.utils.cuda import get_current_device
-from colossalai.utils.memory_tracer import MemStatsCollector
-from colossalai.utils.memory_tracer.model_data_memtracer import GLOBAL_MODEL_DATA_TRACER
+from colossalai.gemini.memory_tracer import MemStatsCollector
+from colossalai.gemini.memory_tracer import GLOBAL_MODEL_DATA_TRACER
from colossalai.utils.memory import colo_set_process_memory_fraction
from colossalai.gemini import StatefulTensorMgr
from colossalai.zero.sharded_param.sharded_param import ShardedParamV2
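
The mechanical change in this and the remaining hunks is the import path: the tracer utilities move from colossalai.utils.memory_tracer to colossalai.gemini.memory_tracer while keeping their public names, so callers migrate like this (combined one-line import shown for brevity):

```python
# Old location, before #801:
# from colossalai.utils.memory_tracer import MemStatsCollector
# from colossalai.utils.memory_tracer.model_data_memtracer import GLOBAL_MODEL_DATA_TRACER

# New location, after #801 (both names exposed by the gemini package):
from colossalai.gemini.memory_tracer import MemStatsCollector, GLOBAL_MODEL_DATA_TRACER
```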


@@ -11,7 +11,7 @@ from colossalai.logging import get_dist_logger
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.utils.cuda import get_current_device
-from colossalai.utils.memory_tracer.model_data_memtracer import \
+from colossalai.gemini.memory_tracer.model_data_memtracer import \
    colo_model_mem_usage
from colossalai.utils.memory import colo_device_memory_used
from colossalai.zero.init_ctx import ZeroInitContext
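
The two memory helpers imported here let a test compare total device usage with the model-data share seen by the tracer. A rough sketch, assuming both helpers take a device and return a byte count (their exact signatures are not shown in the diff):

```python
from colossalai.utils.cuda import get_current_device
from colossalai.utils.memory import colo_device_memory_used
from colossalai.gemini.memory_tracer.model_data_memtracer import \
    colo_model_mem_usage

device = get_current_device()
total_bytes = colo_device_memory_used(device)  # all allocations on the device
model_bytes = colo_model_mem_usage(device)     # model-data portion (signature assumed)
print(f'model data uses {model_bytes} of {total_bytes} bytes')
```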


@@ -14,7 +14,7 @@ from colossalai.testing import rerun_if_address_is_in_use
from functools import partial
-class TestModel(torch.nn.Module):
+class MyTestModel(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
@@ -37,7 +37,7 @@ def run_mem_collector_testing():
    colo_set_process_memory_fraction(fraction)
    shard_strategy = BucketTensorShardStrategy()
    with ZeroInitContext(target_device=get_current_device(), shard_strategy=shard_strategy, shard_param=True):
-        model = TestModel()
+        model = MyTestModel()
    model = ShardedModelV2(module=model,
                           shard_strategy=shard_strategy,
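
The TestModel -> MyTestModel rename is presumably to keep pytest quiet: pytest collects classes whose names start with Test as test containers and emits a PytestCollectionWarning when such a class defines __init__, so helper models in test files avoid the Test prefix. A minimal illustration (the layer is an assumption; the hunk truncates the class body):

```python
import torch


class MyTestModel(torch.nn.Module):
    """Helper model; not named Test* so pytest does not try to collect it."""

    def __init__(self) -> None:
        super().__init__()
        self.proj = torch.nn.Linear(4, 4)  # assumed layer; real body not shown

    def forward(self, x):
        return self.proj(x)
```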


@@ -91,8 +91,6 @@ def run_dist(rank, world_size, port, parallel_config):
-# FIXME: enable this test in next PR
-@pytest.mark.skip
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2, 4])