From 5fc60a3a04b9f445e1f43d0247987439989ec8a5 Mon Sep 17 00:00:00 2001
From: ver217
Date: Wed, 5 Jul 2023 14:49:05 +0800
Subject: [PATCH] [test] add shard util tests

---
 tests/test_shardformer/test_shard_utils.py | 32 ++++++++++++++++++++++
 1 file changed, 32 insertions(+)
 create mode 100644 tests/test_shardformer/test_shard_utils.py

diff --git a/tests/test_shardformer/test_shard_utils.py b/tests/test_shardformer/test_shard_utils.py
new file mode 100644
index 000000000..220b8291c
--- /dev/null
+++ b/tests/test_shardformer/test_shard_utils.py
@@ -0,0 +1,32 @@
+import pytest
+import torch
+import torch.nn as nn
+
+from colossalai.shardformer.shard.utils import set_tensors_to_none
+
+
+class Net(nn.Module):
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.layers = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 3))
+        self.out = nn.Linear(3, 1)
+
+
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires CUDA")
+def test_release_layer():
+    orig_cuda_allocated = torch.cuda.memory_allocated()
+    model = Net().cuda()
+    set_tensors_to_none(model, exclude={model.layers[0]})
+    # the excluded submodule must keep its parameters
+    assert model.layers[0].weight is not None
+    assert model.layers[0].bias is not None
+    assert model.layers[1].weight is None
+    assert model.layers[1].bias is None
+    assert model.out.weight is None
+    assert model.out.bias is None
+    set_tensors_to_none(model)
+    assert model.layers[0].weight is None
+    assert model.layers[0].bias is None
+    assert len(list(model.parameters())) == 0
+    assert torch.cuda.memory_allocated() == orig_cuda_allocated