[Inference/Kernel] Optimize paged attention: Refactor key cache layout (#5643)
* optimize flash_decoding_attention: refactor code with a different key cache layout, from [num_blocks, num_kv_heads, block_size, head_size] to [num_blocks, num_kv_heads, head_size/x, block_size, x]

* [pre-commit.ci] auto fixes from pre-commit.com hooks

  for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
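For context, here is a minimal PyTorch sketch of the layout change described in the commit message (not the CUDA kernel itself): the last dimension of the key cache is split into contiguous chunks of x elements so the decoding kernel can issue vectorized, coalesced loads along the block dimension. The value x = 8 is an assumption here (a common choice is 16 bytes divided by the element size, i.e. 8 for fp16); all tensor names and sizes are illustrative.

import torch

# Illustrative sizes; x = 8 assumes fp16 (16-byte vector loads / 2 bytes per element).
num_blocks, num_kv_heads, block_size, head_size = 4, 2, 16, 64
x = 8

# Old layout: [num_blocks, num_kv_heads, block_size, head_size]
k_cache_old = torch.randn(num_blocks, num_kv_heads, block_size, head_size, dtype=torch.float16)

# New layout: [num_blocks, num_kv_heads, head_size // x, block_size, x]
# Split head_size into (head_size // x, x), then move block_size between the two
# pieces so that each x-sized chunk of a head is contiguous for every token slot.
k_cache_new = (
    k_cache_old.view(num_blocks, num_kv_heads, block_size, head_size // x, x)
    .permute(0, 1, 3, 2, 4)
    .contiguous()
)
assert k_cache_new.shape == (num_blocks, num_kv_heads, head_size // x, block_size, x)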
@@ -593,7 +593,7 @@ class NopadLlamaAttention(ParallelModule, LlamaAttention):
                    high_precision,
                )
                # inference_ops.flash_decoding_attention(
                #     attn_output,
                #     output_tensor,
                #     query_states,
                #     k_cache,
                #     v_cache,
@@ -605,6 +605,7 @@ class NopadLlamaAttention(ParallelModule, LlamaAttention):
                #     fd_inter_tensor.mid_output_lse,
                #     sm_scale,
                # )
                # attn_output = output_tensor
            else:
                if is_verifier:
                    rotary_embedding(query_states, key_states, cos_sin[0], cos_sin[1])
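For intuition about what the (here commented-out) flash_decoding_attention call computes over the paged cache, the following is a hypothetical pure-PyTorch reference showing how one sequence's keys can be gathered back out of the new blocked layout and turned into attention logits for a single decode query. This is not the CUDA kernel or its argument list; block_table, seq_len, sm_scale, and the toy shapes are illustrative assumptions, and MQA/GQA head grouping is omitted.

import torch

def gather_keys(k_cache_new: torch.Tensor, block_table: torch.Tensor, seq_len: int) -> torch.Tensor:
    """[num_blocks, num_kv_heads, head_size//x, block_size, x] -> [num_kv_heads, seq_len, head_size]."""
    _, num_kv_heads, head_size_div_x, block_size, x = k_cache_new.shape
    head_size = head_size_div_x * x
    k = k_cache_new[block_table]                                        # pick this sequence's physical blocks
    k = k.permute(1, 0, 3, 2, 4).reshape(num_kv_heads, -1, head_size)   # undo the x-packing
    return k[:, :seq_len]                                               # drop the padded tail of the last block

# Toy decode step: one query per kv head.
k_cache_new = torch.randn(8, 2, 8, 16, 8, dtype=torch.float16)   # 8 blocks, 2 kv heads, head_size 64, x = 8
block_table = torch.tensor([3, 5])                               # logical -> physical block ids for one sequence
q = torch.randn(2, 64, dtype=torch.float16)
keys = gather_keys(k_cache_new, block_table, seq_len=20)
sm_scale = 1.0 / (64 ** 0.5)
logits = torch.einsum("hd,hsd->hs", q.float(), keys.float()) * sm_scale   # [num_kv_heads, seq_len]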