Mirror of https://github.com/hpcaitech/ColossalAI.git
[Infer] Optimize Blocked KVCache And Kernels Using It (#5325)
* revise shape of kvcache (context attn kernel)
* revise shape of kvcache (flash decoding kernel)
* revise shape of kvcache (kvcache copy) and attn func
* init of kvcache in kvcache manager
* revise llama modeling
* revise block size retrieval
* use torch for rms_norm benchmarking
* revise block size retrieval
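The core change is the blocked KV cache layout: the last two dimensions are swapped, from (num_blocks, num_attention_heads, head_size, block_size) to (num_blocks, num_attention_heads, block_size, head_size), so each token slot of a block holds a contiguous head_size vector. Below is a minimal PyTorch sketch of that layout; the sizes and the plain list-of-tensors-per-layer structure are illustrative assumptions, not the actual kvcache manager code in ColossalAI.

import torch

# Illustrative sizes only; in ColossalAI these come from the model and inference configs.
num_layers = 2
num_blocks = 16          # number of physical cache blocks
num_attention_heads = 8  # KV heads
block_size = 16          # token slots per block
head_size = 64           # hidden size per head

# Revised layout: (num_blocks, num_attention_heads, block_size, head_size)
k_caches = [
    torch.zeros(num_blocks, num_attention_heads, block_size, head_size, dtype=torch.float16)
    for _ in range(num_layers)
]
v_caches = [
    torch.zeros(num_blocks, num_attention_heads, block_size, head_size, dtype=torch.float16)
    for _ in range(num_layers)
]

# Writing one token's key vectors into slot `slot` of block `block_id` at layer 0:
block_id, slot = 3, 5
new_key = torch.randn(num_attention_heads, head_size).half()
k_caches[0][block_id, :, slot, :] = new_key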
@@ -93,7 +93,7 @@ def check_cache_manager(test_config):
     assert len(cache_manager._cache_blocks) == num_blocks
     key_caches = cache_manager._kv_caches[0]  # key caches for all the blocks in all the layers
     assert len(key_caches) == num_layers
-    expected_kv_shape = (num_blocks, num_attention_heads, head_size, block_size)
+    expected_kv_shape = (num_blocks, num_attention_heads, block_size, head_size)
     assert key_caches[0].shape == expected_kv_shape
     k_cache_block0, v_cache_block0 = cache_manager.get_physical_cache(0, 0)
     expected_kv_block_shape = expected_kv_shape[1:]
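The updated assertion can be approximated with the sketch above using plain tensor indexing; get_physical_cache's real signature and internals live in ColossalAI's kvcache manager, so this only mirrors the shapes the test expects.

# Full per-layer cache keeps the leading num_blocks dimension:
expected_kv_shape = (num_blocks, num_attention_heads, block_size, head_size)
assert k_caches[0].shape == expected_kv_shape

# A single physical block of layer 0 drops that leading dimension,
# matching expected_kv_block_shape = expected_kv_shape[1:] in the test.
k_cache_block0 = k_caches[0][0]
v_cache_block0 = v_caches[0][0]
expected_kv_block_shape = expected_kv_shape[1:]  # (num_attention_heads, block_size, head_size)
assert k_cache_block0.shape == expected_kv_block_shape
assert v_cache_block0.shape == expected_kv_block_shape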