From afe07a63aceee8f0d9dfe27dd1763acc8fd26386 Mon Sep 17 00:00:00 2001
From: flybird11111 <1829166702@qq.com>
Date: Thu, 17 Apr 2025 17:53:48 +0800
Subject: [PATCH] fix

---
 tests/test_booster/test_mixed_precision/test_fp16_torch.py | 2 --
 .../test_kernels/cuda/test_flash_decoding_attention.py     | 2 --
 2 files changed, 4 deletions(-)

diff --git a/tests/test_booster/test_mixed_precision/test_fp16_torch.py b/tests/test_booster/test_mixed_precision/test_fp16_torch.py
index 1d4a5c0d8..f6d6e8303 100644
--- a/tests/test_booster/test_mixed_precision/test_fp16_torch.py
+++ b/tests/test_booster/test_mixed_precision/test_fp16_torch.py
@@ -1,4 +1,3 @@
-import pytest
 import torch
 from torch.optim import Adam

@@ -36,7 +35,6 @@ def run_torch_amp(rank, world_size, port):
     del model, optimizer, criterion, data, output, mixed_precision


-@pytest.mark.skip(reason="Skip because assertion may fail for CI devices")
 @rerun_if_address_is_in_use()
 def test_torch_ddp_plugin():
     spawn(run_torch_amp, 1)
diff --git a/tests/test_infer/test_kernels/cuda/test_flash_decoding_attention.py b/tests/test_infer/test_kernels/cuda/test_flash_decoding_attention.py
index d656c4834..c93055fec 100644
--- a/tests/test_infer/test_kernels/cuda/test_flash_decoding_attention.py
+++ b/tests/test_infer/test_kernels/cuda/test_flash_decoding_attention.py
@@ -11,7 +11,6 @@ from tests.test_infer.test_kernels.triton.test_context_attn_unpad import generat
 inference_ops = InferenceOpsLoader().load()


-from colossalai.testing import clear_cache_before_run
 from tests.test_infer.test_kernels.triton.kernel_utils import (
     convert_kv_unpad_to_padded,
     create_attention_mask,
@@ -57,7 +56,6 @@ def numpy_allclose(x, y, rtol, atol):
     np.testing.assert_allclose(x_numpy, y_numpy, rtol=rtol, atol=atol)


-@clear_cache_before_run()
 @pytest.mark.parametrize("BATCH_SIZE", [1, 4, 7, 32])
 @pytest.mark.parametrize("BLOCK_SIZE", [8, 16, 32])
 @pytest.mark.parametrize("MAX_NUM_BLOCKS_PER_SEQ", [1, 8, 32, 256, 512])