Mirror of https://github.com/hpcaitech/ColossalAI.git, synced 2025-06-22 13:41:43 +00:00
[NFC] polish colossalai/kernel/cuda_native/csrc/kernels/include/block_reduce.h code style (#938)
parent: b6cc9313ef
commit: 7aa35eae6a
--- a/colossalai/kernel/cuda_native/csrc/kernels/include/block_reduce.h
+++ b/colossalai/kernel/cuda_native/csrc/kernels/include/block_reduce.h
@@ -13,22 +13,23 @@ const float REDUCE_FLOAT_INF_NEG = -100000000.f;
 const float REDUCE_FLOAT_INF_POS = 100000000.f;
 const unsigned int WARP_REDUCE_SIZE = 32;
 
-template <typename T> __forceinline__ __device__ T warpReduceSum(T val) {
+template <typename T>
+__forceinline__ __device__ T warpReduceSum(T val) {
   for (int mask = (WARP_REDUCE_SIZE >> 1); mask > 0; mask >>= 1)
     val += __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, WARP_REDUCE_SIZE);
   return val;
 }
 
 /* Calculate the sum of all elements in a block */
-template <typename T> __forceinline__ __device__ T blockReduceSum(T val) {
+template <typename T>
+__forceinline__ __device__ T blockReduceSum(T val) {
   static __shared__ T shared[32];
   int lane = threadIdx.x & 0x1f;
   int wid = threadIdx.x >> 5;
 
   val = warpReduceSum<T>(val);
 
-  if (lane == 0)
-    shared[wid] = val;
+  if (lane == 0) shared[wid] = val;
   __syncthreads();
 
   val = (threadIdx.x < (blockDim.x >> 5)) ? shared[lane] : (T)0.0f;
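As the [NFC] tag indicates, the diff is formatting-only: "template <typename T>" now sits on its own line and the single-statement if body is folded onto one line; the reduction logic is untouched. The two functions in the hunk implement the usual two-level CUDA sum reduction: a butterfly exchange with __shfl_xor_sync inside each warp, then a shared-memory pass that combines the per-warp partial sums. Below is a minimal, self-contained sketch of that pattern. The value of WARP_REDUCE_MASK (taken here as the full-warp mask 0xffffffff), the tail of blockReduceSum past the point where the hunk cuts off, and the sumKernel/main driver are assumptions added for illustration, not part of the file.

// Sketch of the block_reduce.h sum-reduction pattern (illustrative only).
// Assumed, not shown in the hunk: WARP_REDUCE_MASK = 0xffffffff and the
// second warp-level pass that finishes blockReduceSum; sumKernel and main
// are hypothetical callers.
#include <cstdio>
#include <cuda_runtime.h>

const unsigned int WARP_REDUCE_SIZE = 32;
const unsigned int WARP_REDUCE_MASK = 0xffffffff;  // assumed full-warp mask

template <typename T>
__forceinline__ __device__ T warpReduceSum(T val) {
  // Butterfly exchange: after log2(32) = 5 steps every lane holds the warp sum.
  for (int mask = (WARP_REDUCE_SIZE >> 1); mask > 0; mask >>= 1)
    val += __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, WARP_REDUCE_SIZE);
  return val;
}

template <typename T>
__forceinline__ __device__ T blockReduceSum(T val) {
  static __shared__ T shared[32];    // one slot per warp (at most 32 warps/block)
  int lane = threadIdx.x & 0x1f;     // lane index inside the warp
  int wid = threadIdx.x >> 5;        // warp index inside the block

  val = warpReduceSum<T>(val);       // reduce within each warp

  if (lane == 0) shared[wid] = val;  // lane 0 publishes its warp's partial sum
  __syncthreads();

  // First warp gathers the per-warp partials and reduces them once more
  // (this tail lies outside the hunk above and is reconstructed here).
  val = (threadIdx.x < (blockDim.x >> 5)) ? shared[lane] : (T)0.0f;
  if (wid == 0) val = warpReduceSum<T>(val);
  return val;
}

// Hypothetical kernel: each block writes the sum of its slice of in[] to out[blockIdx.x].
__global__ void sumKernel(const float *in, float *out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  float v = (i < n) ? in[i] : 0.0f;
  v = blockReduceSum<float>(v);
  if (threadIdx.x == 0) out[blockIdx.x] = v;
}

int main() {
  const int n = 1024, blocks = 4, threads = 256;
  float host[n];
  for (int i = 0; i < n; ++i) host[i] = 1.0f;  // each block should sum to 256

  float *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, blocks * sizeof(float));
  cudaMemcpy(d_in, host, n * sizeof(float), cudaMemcpyHostToDevice);

  sumKernel<<<blocks, threads>>>(d_in, d_out, n);

  float result[blocks];
  cudaMemcpy(result, d_out, blocks * sizeof(float), cudaMemcpyDeviceToHost);
  printf("block sums: %.0f %.0f %.0f %.0f\n", result[0], result[1], result[2], result[3]);

  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}

The shared[32] scratch array assumes at most 1024 threads per block (32 warps), which matches the hard per-block thread limit on current NVIDIA GPUs.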