Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-09-01 09:07:51 +00:00
[hotfix/hybridengine] fix bug when tp*pp size = 1 (#5069)
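The change below is applied identically to three test files: the multi-GPU entry points are renamed to check_tp_pp_inference and check_tp_or_pp_inference, a dedicated single-device path (run_single_inference_test, parameterized with tp_size = pp_size = 1) is added, and test_pipeline_inference now spawns it with a single worker process. A minimal sketch of the process-count arithmetic behind the spawn calls, assuming, as the test names suggest, one worker per tensor-parallel rank per pipeline stage (the helper name below is made up for illustration, not part of the patch):

def required_nprocs(tp_size: int, pp_size: int) -> int:
    # one process per tensor-parallel rank in each pipeline stage
    return tp_size * pp_size

assert required_nprocs(1, 1) == 1  # run_single_inference_test -> spawn(check_single_inference, nprocs=1)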
@@ -78,17 +78,32 @@ def run_tp_inference_test(tp_size, pp_size, max_output_len, micro_batch_size):
     torch.cuda.empty_cache()
 
 
-def check_tp_pipeline_inference(rank, world_size, port):
+@parameterize("tp_size", [1])
+@parameterize("pp_size", [1])
+@parameterize("max_output_len", [2])
+@parameterize("micro_batch_size", [1])
+@clear_cache_before_run()
+def run_single_inference_test(tp_size, pp_size, max_output_len, micro_batch_size):
+    pipeline_inference_test(tp_size, pp_size, max_output_len, micro_batch_size)
+    torch.cuda.empty_cache()
+
+
+def check_tp_pp_inference(rank, world_size, port):
     colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
     run_tp_pipeline_inference_test()
 
 
-def check_single_inference(rank, world_size, port):
+def check_tp_or_pp_inference(rank, world_size, port):
     colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
     run_tp_inference_test()
     run_pipeline_inference_test()
 
 
+def check_single_inference(rank, world_size, port):
+    colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
+    run_single_inference_test()
+
+
 @pytest.mark.skipif(
     not CUDA_SUPPORT or not HAS_LIGHTLLM_KERNEL,
     reason="kv-cache manager engine requires cuda version to be higher than 11.5",
@@ -97,8 +112,9 @@ def check_single_inference(rank, world_size, port):
 @rerun_if_address_is_in_use()
 @clear_cache_before_run()
 def test_pipeline_inference():
-    spawn(check_tp_pipeline_inference, nprocs=4)
-    spawn(check_single_inference, nprocs=2)
+    spawn(check_tp_pp_inference, nprocs=4)
+    spawn(check_tp_or_pp_inference, nprocs=2)
+    spawn(check_single_inference, nprocs=1)
 
 
 if __name__ == "__main__":
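The stacked @parameterize decorators fan run_single_inference_test out over the listed values, so calling it with no arguments ends up invoking pipeline_inference_test(tp_size=1, pp_size=1, max_output_len=2, micro_batch_size=1). A hypothetical reimplementation of that fan-out, for illustration only (this is not ColossalAI's parameterize):

import functools

def parameterize_sketch(name, values):
    # Stand-in decorator: call the wrapped function once per listed value,
    # injecting the value as a keyword argument.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for value in values:
                func(*args, **{**kwargs, name: value})
        return wrapper
    return decorator

@parameterize_sketch("tp_size", [1])
@parameterize_sketch("pp_size", [1])
def demo(tp_size, pp_size):
    print(f"would run with tp_size={tp_size}, pp_size={pp_size}")

demo()  # prints: would run with tp_size=1, pp_size=1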
The same change in the second file touched by the commit:

@@ -86,17 +86,32 @@ def run_tp_inference_test(tp_size, pp_size, max_output_len, micro_batch_size):
     torch.cuda.empty_cache()
 
 
-def check_tp_pipeline_inference(rank, world_size, port):
+@parameterize("tp_size", [1])
+@parameterize("pp_size", [1])
+@parameterize("max_output_len", [2])
+@parameterize("micro_batch_size", [1])
+@clear_cache_before_run()
+def run_single_inference_test(tp_size, pp_size, max_output_len, micro_batch_size):
+    pipeline_inference_test(tp_size, pp_size, max_output_len, micro_batch_size)
+    torch.cuda.empty_cache()
+
+
+def check_tp_pp_inference(rank, world_size, port):
     colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
     run_tp_pipeline_inference_test()
 
 
-def check_single_inference(rank, world_size, port):
+def check_tp_or_pp_inference(rank, world_size, port):
     colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
     run_tp_inference_test()
     run_pipeline_inference_test()
 
 
+def check_single_inference(rank, world_size, port):
+    colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
+    run_single_inference_test()
+
+
 @pytest.mark.skipif(
     not CUDA_SUPPORT or not HAS_LIGHTLLM_KERNEL,
     reason="kv-cache manager engine requires cuda version to be higher than 11.5",
@@ -105,8 +120,9 @@ def check_single_inference(rank, world_size, port):
 @rerun_if_address_is_in_use()
 @clear_cache_before_run()
 def test_pipeline_inference():
-    spawn(check_tp_pipeline_inference, nprocs=4)
-    spawn(check_single_inference, nprocs=2)
+    spawn(check_tp_pp_inference, nprocs=4)
+    spawn(check_tp_or_pp_inference, nprocs=2)
+    spawn(check_single_inference, nprocs=1)
 
 
 if __name__ == "__main__":
And in the third file:

@@ -83,17 +83,32 @@ def run_tp_inference_test(tp_size, pp_size, max_output_len, micro_batch_size):
     torch.cuda.empty_cache()
 
 
-def check_tp_pipeline_inference(rank, world_size, port):
+@parameterize("tp_size", [1])
+@parameterize("pp_size", [1])
+@parameterize("max_output_len", [2])
+@parameterize("micro_batch_size", [1])
+@clear_cache_before_run()
+def run_single_inference_test(tp_size, pp_size, max_output_len, micro_batch_size):
+    pipeline_inference_test(tp_size, pp_size, max_output_len, micro_batch_size)
+    torch.cuda.empty_cache()
+
+
+def check_tp_pp_inference(rank, world_size, port):
     colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
     run_tp_pipeline_inference_test()
 
 
-def check_single_inference(rank, world_size, port):
+def check_tp_or_pp_inference(rank, world_size, port):
     colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
     run_tp_inference_test()
     run_pipeline_inference_test()
 
 
+def check_single_inference(rank, world_size, port):
+    colossalai.launch(config={}, rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
+    run_single_inference_test()
+
+
 @pytest.mark.skipif(
     not CUDA_SUPPORT or not HAS_LIGHTLLM_KERNEL,
     reason="kv-cache manager engine requires cuda version to be higher than 11.5",
@@ -102,8 +117,9 @@ def check_single_inference(rank, world_size, port):
 @rerun_if_address_is_in_use()
 @clear_cache_before_run()
 def test_pipeline_inference():
-    spawn(check_tp_pipeline_inference, nprocs=4)
-    spawn(check_single_inference, nprocs=2)
+    spawn(check_tp_pp_inference, nprocs=4)
+    spawn(check_tp_or_pp_inference, nprocs=2)
+    spawn(check_single_inference, nprocs=1)
 
 
 if __name__ == "__main__":
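Each spawn(check_*, nprocs=N) call in test_pipeline_inference launches N worker processes and passes each one its rank, the total world size, and a communication port, matching the (rank, world_size, port) signature of the check functions above. A rough standard-library sketch of that fan-out, for illustration only (this is not colossalai.testing.spawn):

import multiprocessing

def spawn_sketch(check_fn, nprocs, port=29500):
    # Illustration: start nprocs workers, each receiving (rank, world_size, port)
    # just as the check_* functions in the diff expect.
    workers = []
    for rank in range(nprocs):
        p = multiprocessing.Process(target=check_fn, args=(rank, nprocs, port))
        p.start()
        workers.append(p)
    for p in workers:
        p.join()

# With this fix, the tp_size * pp_size == 1 case runs as a single worker:
#   spawn_sketch(check_single_inference, nprocs=1)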