[NFC] polish colossalai/kernel/cuda_native/csrc/scaled_masked_softmax.h code style (#1263)

shenggan 2022-07-12 18:10:08 +08:00 committed by Frank Lee
parent 197a2c89e2
commit 5d7366b144


@@ -4,12 +4,12 @@
#pragma once

#include <assert.h>
#include <c10/macros/Macros.h>
#include <cuda_fp16.h>
#include <stdint.h>

#include <cfloat>
#include <limits>

namespace {
@@ -17,37 +17,53 @@ template <typename Datatype, int ELEMENTS_PER_LDG>
__device__ __inline__ void copy_vector(Datatype *dst, const Datatype *src);

template <>
__device__ __inline__ void copy_vector<c10::BFloat16, 1>(
    c10::BFloat16 *dst, const c10::BFloat16 *src) {
  *dst = *src;
}

template <>
__device__ __inline__ void copy_vector<c10::BFloat16, 4>(
    c10::BFloat16 *dst, const c10::BFloat16 *src) {
  *((float2 *)dst) = *((float2 *)src);
}

template <>
__device__ __inline__ void copy_vector<c10::Half, 1>(c10::Half *dst,
                                                     const c10::Half *src) {
  *dst = *src;
}

template <>
__device__ __inline__ void copy_vector<c10::Half, 4>(c10::Half *dst,
                                                     const c10::Half *src) {
  *((float2 *)dst) = *((float2 *)src);
}

template <>
__device__ __inline__ void copy_vector<uint8_t, 1>(uint8_t *dst,
                                                   const uint8_t *src) {
  *dst = *src;
}

template <>
__device__ __inline__ void copy_vector<uint8_t, 4>(uint8_t *dst,
                                                   const uint8_t *src) {
  *((half2 *)dst) = *((half2 *)src);
}

int log2_ceil(int value) {
  int log2_value = 0;
  while ((1 << log2_value) < value) ++log2_value;
  return log2_value;
}

template <typename T>
struct Add {
  __device__ __forceinline__ T operator()(T a, T b) const { return a + b; }
};

template <typename T>
struct Max {
  __device__ __forceinline__ T operator()(T a, T b) const {
    return a < b ? b : a;
@@ -55,438 +71,468 @@ struct Max {
};

template <typename T>
__device__ __forceinline__ T
WARP_SHFL_XOR_NATIVE(T value, int laneMask, int width = warpSize,
                     unsigned int mask = 0xffffffff) {
#if CUDA_VERSION >= 9000
  return __shfl_xor_sync(mask, value, laneMask, width);
#else
  return __shfl_xor(value, laneMask, width);
#endif
}

template <typename acc_t, int WARP_BATCH, int WARP_SIZE,
          template <typename> class ReduceOp>
__device__ __forceinline__ void warp_reduce(acc_t *sum) {
  ReduceOp<acc_t> r;
#pragma unroll
  for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
#pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
      acc_t b = WARP_SHFL_XOR_NATIVE(sum[i], offset, WARP_SIZE);
      sum[i] = r(sum[i], b);
    }
  }
}
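
// ---------------------------------------------------------------------------
// Illustrative sketch added for this write-up; it is NOT part of the original
// scaled_masked_softmax.h. warp_reduce above is a butterfly reduction: each
// __shfl_xor round pairs lane l with lane l ^ offset, so after
// log2(WARP_SIZE) rounds every lane holds the reduction of all WARP_SIZE
// lanes. The host-side equivalent below (hypothetical helper) makes the data
// flow explicit; `prev` models the fact that a shuffle reads the peers'
// pre-round register values.
// ---------------------------------------------------------------------------
template <typename acc_t, int WARP_SIZE, template <typename> class ReduceOp>
void host_butterfly_reduce_sketch(acc_t (&lanes)[WARP_SIZE]) {
  ReduceOp<acc_t> r;
  for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
    acc_t prev[WARP_SIZE];
    for (int l = 0; l < WARP_SIZE; ++l) prev[l] = lanes[l];
    for (int l = 0; l < WARP_SIZE; ++l) lanes[l] = r(prev[l], prev[l ^ offset]);
  }
}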
/*
 * Extended softmax (from native aten pytorch) with following additional
 * features 1) input scaling 2) Explicit masking
 */
template <typename input_t, typename output_t, typename acc_t,
          int log2_elements>
__global__ void scaled_masked_softmax_warp_forward(
    output_t *dst, const input_t *src, const uint8_t *mask, const acc_t scale,
    int micro_batch_size, int element_count, int pad_batches) {
  // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and
  // warp_size of method warp_softmax_forward_kernel.
  constexpr int next_power_of_two = 1 << log2_elements;
  constexpr int WARP_SIZE =
      (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
  constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
  constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
  constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4;

  // blockDim/threadIdx = (WARP_SIZE, WARPS_PER_BLOCK, )
  // gridDim/blockIdx = (seq_len, attn_heads, batches)
  int first_batch =
      (blockDim.y *
           (blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z)) +
       threadIdx.y) *
      WARP_BATCH;
  int pad_first_batch = 0;
  if (pad_batches != 1) {  // bert style
    pad_first_batch =
        (blockDim.y * (blockIdx.x + gridDim.x * blockIdx.z) + threadIdx.y) *
        WARP_BATCH;
  } else {  // gpt2 style
    pad_first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;
  }

  // micro_batch_size might not be a multiple of WARP_BATCH. Check how
  // many batches have to computed within this WARP.
  int local_batches = micro_batch_size - first_batch;
  if (local_batches > WARP_BATCH) local_batches = WARP_BATCH;

  // there might be multiple batches per warp. compute the index within the
  // batch
  int local_idx = threadIdx.x;

  src += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx;
  dst += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx;
  mask += pad_first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx;

  // load data from global memory
  acc_t elements[WARP_BATCH][WARP_ITERATIONS];
  input_t temp_data[ELEMENTS_PER_LDG_STG];
  uint8_t temp_mask[ELEMENTS_PER_LDG_STG];
#pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
    int batch_element_count = (i >= local_batches) ? 0 : element_count;

#pragma unroll
    for (int it = 0; it < WARP_ITERATIONS; it += ELEMENTS_PER_LDG_STG) {
      int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;

      if (element_index < batch_element_count) {
        int itr_idx = i * element_count + it * WARP_SIZE;
        copy_vector<input_t, ELEMENTS_PER_LDG_STG>(temp_data, src + itr_idx);
        copy_vector<uint8_t, ELEMENTS_PER_LDG_STG>(temp_mask, mask + itr_idx);

#pragma unroll
        for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
          if (temp_mask[element] != 1) {
            elements[i][it + element] = (acc_t)temp_data[element] * scale;
          } else {
            elements[i][it + element] = -10000.0;
          }
        }
      } else {
#pragma unroll
        for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
          elements[i][it + element] = -std::numeric_limits<acc_t>::infinity();
        }
      }
    }
  }

  // compute max_value
  acc_t max_value[WARP_BATCH];
#pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
    max_value[i] = elements[i][0];
#pragma unroll
    for (int it = 1; it < WARP_ITERATIONS; ++it) {
      max_value[i] =
          (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it];
    }
  }
  warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Max>(max_value);

  acc_t sum[WARP_BATCH]{0.0f};
#pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
#pragma unroll
    for (int it = 0; it < WARP_ITERATIONS; ++it) {
      elements[i][it] = std::exp((elements[i][it] - max_value[i]));
      sum[i] += elements[i][it];
    }
  }
  warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);

  // store result
  output_t out[ELEMENTS_PER_LDG_STG];
#pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
    if (i >= local_batches) break;
#pragma unroll
    for (int it = 0; it < WARP_ITERATIONS; it += ELEMENTS_PER_LDG_STG) {
      int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
      if (element_index < element_count) {
#pragma unroll
        for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
          out[element] = elements[i][it + element] / sum[i];
        }
        copy_vector<output_t, ELEMENTS_PER_LDG_STG>(
            dst + i * element_count + it * WARP_SIZE, out);
      } else {
        break;
      }
    }
  }
}
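
// ---------------------------------------------------------------------------
// Illustrative sketch added for this write-up; it is NOT part of the original
// file. A scalar reference for what scaled_masked_softmax_warp_forward
// computes for one row of n logits (acc_t assumed to be float): masked
// positions (mask == 1) are forced to -10000 before a numerically stable
// softmax, matching the kernel above. A host build of this sketch also needs
// <cmath> for std::exp.
// ---------------------------------------------------------------------------
template <typename input_t, typename output_t>
void reference_scaled_masked_softmax_row_sketch(output_t *dst,
                                                const input_t *src,
                                                const uint8_t *mask,
                                                float scale, int n) {
  float max_value = -std::numeric_limits<float>::infinity();
  for (int j = 0; j < n; ++j) {
    float v = (mask[j] != 1) ? (float)src[j] * scale : -10000.0f;
    max_value = (v > max_value) ? v : max_value;
  }
  float sum = 0.0f;
  for (int j = 0; j < n; ++j) {
    float v = (mask[j] != 1) ? (float)src[j] * scale : -10000.0f;
    sum += std::exp(v - max_value);
  }
  for (int j = 0; j < n; ++j) {
    float v = (mask[j] != 1) ? (float)src[j] * scale : -10000.0f;
    dst[j] = (output_t)(std::exp(v - max_value) / sum);
  }
}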
template <typename input_t, typename output_t, typename acc_t,
          int log2_elements>
__global__ void scaled_masked_softmax_warp_backward(
    output_t *gradInput, input_t *grad, const input_t *output, acc_t scale,
    int micro_batch_size, int element_count) {
  // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and
  // warp_size of method warp_softmax_backward_kernel.
  constexpr int next_power_of_two = 1 << log2_elements;
  constexpr int WARP_SIZE =
      (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
  constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
  constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
  constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4;

  // blockDim/threadIdx = (WARP_SIZE, WARPS_PER_BLOCK, )
  // gridDim/blockIdx = (seq_len, attn_heads, batches)
  int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;

  // micro_batch_size might not be a multiple of WARP_BATCH. Check how
  // many batches have to computed within this WARP.
  int local_batches = micro_batch_size - first_batch;
  if (local_batches > WARP_BATCH) local_batches = WARP_BATCH;

  // there might be multiple batches per warp. compute the index within the
  // batch
  int local_idx = threadIdx.x;

  // the first element to process by the current thread
  int thread_offset =
      first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx;
  grad += thread_offset;
  output += thread_offset;
  gradInput += thread_offset;

  // load data from global memory
  acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS]{0.0f};
  acc_t output_reg[WARP_BATCH][WARP_ITERATIONS]{0.0f};
  input_t temp_grad[ELEMENTS_PER_LDG_STG];
  input_t temp_output[ELEMENTS_PER_LDG_STG];
#pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
    int batch_element_count = (i >= local_batches) ? 0 : element_count;

#pragma unroll
    for (int it = 0; it < WARP_ITERATIONS; it += ELEMENTS_PER_LDG_STG) {
      int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
      if (element_index < batch_element_count) {
        copy_vector<input_t, ELEMENTS_PER_LDG_STG>(
            temp_grad, grad + i * element_count + it * WARP_SIZE);
        copy_vector<input_t, ELEMENTS_PER_LDG_STG>(
            temp_output, output + i * element_count + it * WARP_SIZE);

#pragma unroll
        for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
          output_reg[i][it + element] = (acc_t)temp_output[element];
        }
#pragma unroll
        for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
          grad_reg[i][it + element] =
              (acc_t)temp_grad[element] * output_reg[i][it + element];
        }
      }
    }
  }

  acc_t sum[WARP_BATCH];
#pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
    sum[i] = grad_reg[i][0];
#pragma unroll
    for (int it = 1; it < WARP_ITERATIONS; ++it) {
      sum[i] += grad_reg[i][it];
    }
  }
  warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);

  // store result
#pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
    if (i >= local_batches) break;
#pragma unroll
    for (int it = 0; it < WARP_ITERATIONS; it += ELEMENTS_PER_LDG_STG) {
      int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
      if (element_index < element_count) {
        // compute gradients
        output_t out[ELEMENTS_PER_LDG_STG];
#pragma unroll
        for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
          out[element] =
              (output_t)(scale * (grad_reg[i][it + element] -
                                  output_reg[i][it + element] * sum[i]));
        }
        copy_vector<output_t, ELEMENTS_PER_LDG_STG>(
            gradInput + i * element_count + it * WARP_SIZE, out);
      }
    }
  }
}
}  // end of anonymous namespace
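
// ---------------------------------------------------------------------------
// Illustrative sketch added for this write-up; it is NOT part of the original
// file. Per row, scaled_masked_softmax_warp_backward implements the softmax
// Jacobian-vector product with the extra scale factor:
//   grad_input[j] = scale * (grad[j] * y[j] - y[j] * sum_k(grad[k] * y[k]))
// where y is the saved softmax output. A scalar reference (acc_t assumed to
// be float):
// ---------------------------------------------------------------------------
template <typename input_t, typename output_t>
void reference_scaled_masked_softmax_backward_row_sketch(
    output_t *grad_input, const input_t *grad, const input_t *output,
    float scale, int n) {
  float dot = 0.0f;
  for (int j = 0; j < n; ++j) dot += (float)grad[j] * (float)output[j];
  for (int j = 0; j < n; ++j)
    grad_input[j] = (output_t)(scale * ((float)grad[j] * (float)output[j] -
                                        (float)output[j] * dot));
}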
int get_batch_per_block(int query_seq_len, int key_seq_len, int batches,
                        int attn_heads) {
  int log2_elements = log2_ceil(key_seq_len);
  const int next_power_of_two = 1 << log2_elements;

  int warp_size =
      (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
  int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;

  constexpr int threads_per_block = 128;

  int warps_per_block = (threads_per_block / warp_size);
  int batches_per_block = warps_per_block * batches_per_warp;

  return batches_per_block;
}
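
// Worked example (added for this write-up, not in the original file),
// assuming C10_WARP_SIZE == 32 (CUDA): for key_seq_len = 1024,
// log2_ceil(1024) = 10, so next_power_of_two = 1024, warp_size = 32,
// batches_per_warp = 1 (1024 > 128), warps_per_block = 128 / 32 = 4,
// and get_batch_per_block returns 4 * 1 = 4. For key_seq_len = 64:
// next_power_of_two = 64, warp_size = 32, batches_per_warp = 2,
// so it returns 4 * 2 = 8.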
template <typename input_t, typename output_t, typename acc_t>
void dispatch_scaled_masked_softmax_forward(output_t *dst, const input_t *src,
                                            const uint8_t *mask,
                                            const input_t scale,
                                            int query_seq_len, int key_seq_len,
                                            int batches, int attn_heads,
                                            int pad_batches) {
  TORCH_INTERNAL_ASSERT(key_seq_len >= 0 && key_seq_len <= 2048);
  if (key_seq_len == 0) {
    return;
  } else {
    int log2_elements = log2_ceil(key_seq_len);
    const int next_power_of_two = 1 << log2_elements;
    int batch_count = batches * attn_heads * query_seq_len;

    // This value must match the WARP_SIZE constexpr value computed inside
    // softmax_warp_forward.
    int warp_size =
        (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;

    // This value must match the WARP_BATCH constexpr value computed inside
    // softmax_warp_forward.
    int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;

    // use 128 threads per block to maximimize gpu utilization
    constexpr int threads_per_block = 128;

    int warps_per_block = (threads_per_block / warp_size);
    int batches_per_block = warps_per_block * batches_per_warp;
    TORCH_INTERNAL_ASSERT(query_seq_len % batches_per_block == 0);
    dim3 blocks(query_seq_len / batches_per_block, attn_heads, batches);
    dim3 threads(warp_size, warps_per_block, 1);
    // Launch code would be more elegant if C++ supported FOR CONSTEXPR
    switch (log2_elements) {
      case 0:  // 1
        scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 0>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
        break;
      case 1:  // 2
        scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 1>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
        break;
      case 2:  // 4
        scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 2>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
        break;
      case 3:  // 8
        scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 3>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
        break;
      case 4:  // 16
        scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 4>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
        break;
      case 5:  // 32
        scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 5>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
        break;
      case 6:  // 64
        scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 6>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
        break;
      case 7:  // 128
        scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 7>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
        break;
      case 8:  // 256
        scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 8>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
        break;
      case 9:  // 512
        scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 9>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
        break;
      case 10:  // 1024
        scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 10>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
        break;
      case 11:  // 2048
        scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 11>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
        break;
      default:
        break;
    }
  }
}
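
// Illustrative usage sketch added for this write-up; the real call sites live
// in the extension's .cu wrappers, and the tensor names below (scores, mask,
// softmax_results) are hypothetical. Launching the fp16 forward path for
// scores of shape (batches, attn_heads, query_seq_len, key_seq_len) with a
// uint8 padding mask might look like:
//
//   dispatch_scaled_masked_softmax_forward<at::Half, at::Half, float>(
//       softmax_results.data_ptr<at::Half>(), scores.data_ptr<at::Half>(),
//       mask.data_ptr<uint8_t>(), scale_factor, query_seq_len, key_seq_len,
//       batches, attn_heads, pad_batches);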
template <typename input_t, typename output_t, typename acc_t>
void dispatch_scaled_masked_softmax_backward(output_t *grad_input,
                                             input_t *grad,
                                             const input_t *output,
                                             const acc_t scale,
                                             int query_seq_len, int key_seq_len,
                                             int batches, int attn_heads) {
  TORCH_INTERNAL_ASSERT(key_seq_len >= 0 && key_seq_len <= 2048);
  if (key_seq_len == 0) {
    return;
  } else {
    int log2_elements = log2_ceil(key_seq_len);
    const int next_power_of_two = 1 << log2_elements;
    int batch_count = batches * attn_heads * query_seq_len;

    // This value must match the WARP_SIZE constexpr value computed inside
    // softmax_warp_backward.
    int warp_size =
        (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;

    // This value must match the WARP_BATCH constexpr value computed inside
    // softmax_warp_backward.
    int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;

    // use 128 threads per block to maximimize gpu utilization
    constexpr int threads_per_block = 128;

    int warps_per_block = (threads_per_block / warp_size);
    int batches_per_block = warps_per_block * batches_per_warp;
    int blocks = batch_count / batches_per_block;
    dim3 threads(warp_size, warps_per_block, 1);
    // Launch code would be more elegant if C++ supported FOR CONSTEXPR
    switch (log2_elements) {
      case 0:  // 1
        scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 0>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count, key_seq_len);
        break;
      case 1:  // 2
        scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 1>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count, key_seq_len);
        break;
      case 2:  // 4
        scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 2>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count, key_seq_len);
        break;
      case 3:  // 8
        scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 3>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count, key_seq_len);
        break;
      case 4:  // 16
        scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 4>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count, key_seq_len);
        break;
      case 5:  // 32
        scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 5>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count, key_seq_len);
        break;
      case 6:  // 64
        scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 6>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count, key_seq_len);
        break;
      case 7:  // 128
        scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 7>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count, key_seq_len);
        break;
      case 8:  // 256
        scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 8>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count, key_seq_len);
        break;
      case 9:  // 512
        scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 9>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count, key_seq_len);
        break;
      case 10:  // 1024
        scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 10>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count, key_seq_len);
        break;
      case 11:  // 2048
        scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 11>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count, key_seq_len);
        break;
      default:
        break;
    }
  }
}