[Inference/Kernel] Optimize paged attention: Refactor key cache layout (#5643)

* Optimize flash_decoding_attention: refactor the code to use a different key cache layout, changing it from [num_blocks, num_kv_heads, block_size, head_size] to [num_blocks, num_kv_heads, head_size/x, block_size, x].

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Author: Steve Luo
Date: 2024-04-25 14:24:02 +08:00
Committed by: GitHub
Parent commit: 90cd5227a3
Commit: a8fd3b0342
8 changed files with 152 additions and 49 deletions
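
To make the layout change concrete, here is a minimal sketch (not part of this PR) of how a key cache in the old [num_blocks, num_kv_heads, block_size, head_size] layout maps onto the new [num_blocks, num_kv_heads, head_size/x, block_size, x] layout. The tensor names, example sizes, and the choice of x = 16 bytes / element size (the vLLM-style vectorization factor) are assumptions for illustration, not values taken from the kernels.

# Illustrative sketch only: names, sizes, and the value of x are assumptions.
import torch

num_blocks, num_kv_heads, block_size, head_size = 4, 2, 32, 128
dtype = torch.float16
x = 16 // torch.tensor([], dtype=dtype).element_size()  # assumed: 16-byte vectors, so x = 8 for fp16

# Old layout: [num_blocks, num_kv_heads, block_size, head_size]
k_cache_old = torch.randn(num_blocks, num_kv_heads, block_size, head_size, dtype=dtype)

# New layout: [num_blocks, num_kv_heads, head_size // x, block_size, x]
# Split head_size into (head_size // x) chunks of x elements, then move block_size
# inward so the x key elements of one chunk for one token stay contiguous in memory.
k_cache_new = (
    k_cache_old.view(num_blocks, num_kv_heads, block_size, head_size // x, x)
    .permute(0, 1, 3, 2, 4)
    .contiguous()
)
assert k_cache_new.shape == (num_blocks, num_kv_heads, head_size // x, block_size, x)

If x is chosen so that x elements fill a 16-byte segment, each chunk the kernel reads is a single aligned vector load, which is presumably the motivation for this split.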

@@ -5,6 +5,7 @@ from colossalai.kernel.triton import flash_decoding_attention
 from colossalai.utils import get_current_device
 from tests.test_infer.test_ops.triton.kernel_utils import (
     generate_caches_and_block_tables_v2,
+    generate_caches_and_block_tables_v3,
     generate_caches_and_block_tables_vllm,
 )
@@ -95,7 +96,11 @@ def benchmark_flash_decoding_attention(
         BATCH_SIZE, HEAD_SIZE, NUM_ATTN_HEADS, NUM_KV_HEADS, MAX_SEQ_LEN, dtype, device
     )
-    k_cache, v_cache, block_tables = generate_caches_and_block_tables_v2(
+    triton_k_cache, triton_v_cache, _ = generate_caches_and_block_tables_v2(
         k_unpad, v_unpad, kv_seq_lengths, BATCH_SIZE, MAX_NUM_BLOCKS_PER_SEQ, BLOCK_SIZE, dtype, device
     )
+    k_cache, v_cache, block_tables = generate_caches_and_block_tables_v3(
+        k_unpad, v_unpad, kv_seq_lengths, BATCH_SIZE, MAX_NUM_BLOCKS_PER_SEQ, BLOCK_SIZE, dtype, device
+    )
@@ -135,8 +140,8 @@ def benchmark_flash_decoding_attention(
     elif provider == "triton_flash_decoding_attention":
         fn = lambda: flash_decoding_attention(
             q.squeeze(2),
-            k_cache,
-            v_cache,
+            triton_k_cache,
+            triton_v_cache,
             kv_seq_lengths,
             block_tables,
             BLOCK_SIZE,
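
In the hunk above, the Triton kernel keeps consuming the v2-layout caches, while the v3-layout caches built earlier presumably feed the refactored kernel in a branch of the benchmark not shown here. A quick sketch for checking that the two layouts hold the same data, assuming the indexing convention implied by the shapes in the commit message (not taken from the kernels themselves):

import torch

def same_key_element(k_cache_v2: torch.Tensor, k_cache_v3: torch.Tensor,
                     block: int, head: int, slot: int, dim: int) -> bool:
    # Assumed layouts:
    #   v2: [num_blocks, num_kv_heads, block_size, head_size]
    #   v3: [num_blocks, num_kv_heads, head_size // x, block_size, x]
    x = k_cache_v3.shape[-1]
    elem_v2 = k_cache_v2[block, head, slot, dim]
    elem_v3 = k_cache_v3[block, head, dim // x, slot, dim % x]
    return bool(torch.equal(elem_v2, elem_v3))

# Example (using the hypothetical tensors from the earlier sketch):
# assert same_key_element(k_cache_old, k_cache_new, block=0, head=1, slot=3, dim=21)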