[NFC] polish colossalai/kernel/cuda_native/csrc/layer_norm_cuda_kernel.cu code style (#661)

shenggan 2022-04-02 21:28:47 +08:00 committed by binmakeswell
parent c336cd3066
commit 331683bf82

--- a/colossalai/kernel/cuda_native/csrc/layer_norm_cuda_kernel.cu
+++ b/colossalai/kernel/cuda_native/csrc/layer_norm_cuda_kernel.cu

@@ -2,23 +2,17 @@
  * https://github.com/NVIDIA/apex
  * with minor changes. */
+#include <cuda.h>
+#include <cuda_runtime.h>
 #include "ATen/ATen.h"
 #include "ATen/AccumulateType.h"
 #include "ATen/cuda/CUDAContext.h"
 #include "ATen/cuda/DeviceUtils.cuh"
-#include <cuda.h>
-#include <cuda_runtime.h>
 #include "type_shim.h"
-template<typename U> __device__
-void cuWelfordOnlineSum(
-  const U curr,
-  U& mu,
-  U& sigma2,
-  U& count)
-{
+template <typename U>
+__device__ void cuWelfordOnlineSum(const U curr, U& mu, U& sigma2, U& count) {
   count = count + U(1);
   U delta = curr - mu;
   U lmean = mu + delta / count;
@@ -27,15 +21,9 @@ void cuWelfordOnlineSum(
   sigma2 = sigma2 + delta * delta2;
 }
-template<typename U> __device__
-void cuChanOnlineSum(
-  const U muB,
-  const U sigma2B,
-  const U countB,
-  U& mu,
-  U& sigma2,
-  U& count)
-{
+template <typename U>
+__device__ void cuChanOnlineSum(const U muB, const U sigma2B, const U countB,
+                                U& mu, U& sigma2, U& count) {
   U delta = muB - mu;
   U nA = count;
   U nB = countB;
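
For context on what the reformatted helpers above compute, here is a minimal host-side C++ sketch of the same Welford one-pass update and Chan merge rule (the struct and the standalone program are illustrative only, not part of this commit):

// Illustrative sketch of the update rules behind cuWelfordOnlineSum and
// cuChanOnlineSum (hypothetical host-side names, not part of the diff).
#include <cstdio>
#include <initializer_list>

struct RunningMoments {
  double mu = 0.0;      // running mean
  double sigma2 = 0.0;  // running sum of squared deviations (M2)
  double count = 0.0;   // number of samples seen

  // Welford one-pass update with a new sample.
  void add(double curr) {
    count += 1.0;
    double delta = curr - mu;
    mu += delta / count;
    double delta2 = curr - mu;
    sigma2 += delta * delta2;
  }

  // Chan et al. combine of two partial results (what the warp/block
  // reductions in the kernel do with per-thread partials).
  void merge(const RunningMoments& b) {
    double delta = b.mu - mu;
    double nA = count, nB = b.count, nX = nA + nB;
    if (nX > 0.0) {
      mu = mu * (nA / nX) + b.mu * (nB / nX);
      sigma2 += b.sigma2 + delta * delta * nA * nB / nX;
      count = nX;
    }
  }
};

int main() {
  RunningMoments a, b;
  for (double x : {1.0, 2.0, 3.0}) a.add(x);
  for (double x : {4.0, 5.0}) b.add(x);
  a.merge(b);
  std::printf("mean=%g var=%g\n", a.mu, a.sigma2 / a.count);  // mean=3 var=2
  return 0;
}
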
@@ -52,16 +40,10 @@ void cuChanOnlineSum(
   }
 }
-template<typename T, typename U> __device__
-void cuWelfordMuSigma2(
-  const T* __restrict__ vals,
-  const int n1,
-  const int n2,
-  const int i1,
-  U& mu,
-  U& sigma2,
-  U* buf)
-{
+template <typename T, typename U>
+__device__ void cuWelfordMuSigma2(const T* __restrict__ vals, const int n1,
+                                  const int n2, const int i1, U& mu, U& sigma2,
+                                  U* buf) {
   // Assumptions:
   // 1) blockDim.x == warpSize
   // 2) Tensor is contiguous
@@ -104,7 +86,8 @@ void cuWelfordMuSigma2(
     U* ibuf = (U*)(ubuf + blockDim.y);
     for (int offset = blockDim.y / 2; offset > 0; offset /= 2) {
       // upper half of warps write to shared
-      if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2*offset) {
+      if (threadIdx.x == 0 && threadIdx.y >= offset &&
+          threadIdx.y < 2 * offset) {
         const int wrt_y = threadIdx.y - offset;
         ubuf[2 * wrt_y] = mu;
         ubuf[2 * wrt_y + 1] = sigma2;
@@ -136,16 +119,10 @@ void cuWelfordMuSigma2(
   }
 }
-template<> __device__
-void cuWelfordMuSigma2(
-  const at::Half* __restrict__ vals,
-  const int n1,
-  const int n2,
-  const int i1,
-  float& mu,
-  float& sigma2,
-  float* buf)
-{
+template <>
+__device__ void cuWelfordMuSigma2(const at::Half* __restrict__ vals,
+                                  const int n1, const int n2, const int i1,
+                                  float& mu, float& sigma2, float* buf) {
   // Assumptions:
   // 1) blockDim.x == warpSize
   // 2) Tensor is contiguous
@@ -199,7 +176,8 @@ void cuWelfordMuSigma2(
     float* ibuf = (float*)(ubuf + blockDim.y);
     for (int offset = blockDim.y / 2; offset > 0; offset /= 2) {
       // upper half of warps write to shared
-      if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2*offset) {
+      if (threadIdx.x == 0 && threadIdx.y >= offset &&
+          threadIdx.y < 2 * offset) {
         const int wrt_y = threadIdx.y - offset;
         ubuf[2 * wrt_y] = mu;
         ubuf[2 * wrt_y + 1] = sigma2;
@@ -231,19 +209,23 @@ void cuWelfordMuSigma2(
   }
 }
-template<typename U> U rsqrt(U v) {
+template <typename U>
+U rsqrt(U v) {
   return U(1) / sqrt(v);
 }
-template<> float rsqrt(float v) {
+template <>
+float rsqrt(float v) {
   return rsqrtf(v);
 }
-template<> double rsqrt(double v) {
+template <>
+double rsqrt(double v) {
   return rsqrt(v);
 }
 namespace {
-// This is the un-specialized struct. Note that we prevent instantiation of this
-// struct by putting an undefined symbol in the function body so it won't compile.
+// This is the un-specialized struct. Note that we prevent instantiation of
+// this struct by putting an undefined symbol in the function body so it won't
+// compile.
 // template <typename T>
 // struct SharedMemory
 // {
@@ -260,30 +242,22 @@ template <typename T>
 struct SharedMemory;
 template <>
-struct SharedMemory <float>
-{
-    __device__ float *getPointer()
-    {
+struct SharedMemory<float> {
+  __device__ float* getPointer() {
     extern __shared__ float s_float[];
     return s_float;
   }
 };
-}
-template<typename T, typename U, typename V> __global__
-void cuApplyLayerNorm(
-  V* __restrict__ output_vals,
-  U* __restrict__ mean,
-  U* __restrict__ invvar,
-  const T* __restrict__ vals,
-  const int n1,
-  const int n2,
-  const U epsilon,
-  const V* __restrict__ gamma,
-  const V* __restrict__ beta
-  )
-{
+}  // namespace
+template <typename T, typename U, typename V>
+__global__ void cuApplyLayerNorm(V* __restrict__ output_vals,
+                                 U* __restrict__ mean, U* __restrict__ invvar,
+                                 const T* __restrict__ vals, const int n1,
+                                 const int n2, const U epsilon,
+                                 const V* __restrict__ gamma,
+                                 const V* __restrict__ beta) {
   // Assumptions:
   // 1) blockDim.x == warpSize
   // 2) Tensors are contiguous
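
The SharedMemory<T> specialization kept in the hunk above is the usual workaround for declaring dynamic shared memory inside a templated kernel, since a plain `extern __shared__ T buf[];` would redeclare the same symbol with conflicting types across instantiations. A hedged, self-contained sketch of how such a helper is typically consumed (the block_sum kernel and the launch below are hypothetical, not part of this file):

// Illustrative CUDA sketch: consuming a SharedMemory<T>-style helper.
#include <cstdio>
#include <cuda_runtime.h>

template <typename T>
struct SharedMemory;  // undefined primary template: only specializations link

template <>
struct SharedMemory<float> {
  __device__ float* getPointer() {
    extern __shared__ float s_float[];  // sized by the 3rd launch parameter
    return s_float;
  }
};

template <typename T>
__global__ void block_sum(const T* in, T* out, int n) {
  SharedMemory<T> shared;
  T* buf = shared.getPointer();
  int tid = threadIdx.x;
  buf[tid] = (tid < n) ? in[tid] : T(0);
  __syncthreads();
  // power-of-two tree reduction within the block
  for (int stride = blockDim.x / 2; stride > 0; stride /= 2) {
    if (tid < stride) buf[tid] += buf[tid + stride];
    __syncthreads();
  }
  if (tid == 0) out[0] = buf[0];
}

int main() {
  const int n = 256;
  float h_in[n], h_out = 0.f;
  for (int i = 0; i < n; ++i) h_in[i] = 1.f;
  float *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, sizeof(float));
  cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
  // third launch argument = dynamic shared memory bytes read via getPointer()
  block_sum<float><<<1, n, n * sizeof(float)>>>(d_in, d_out, n);
  cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  std::printf("sum = %f\n", h_out);  // expected 256
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}
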
@@ -316,23 +290,12 @@ void cuApplyLayerNorm(
   }
 }
-template<typename T, typename U, typename V> __device__
-void cuLoadWriteStridedInputs(
-    const int i1_block,
-    const int thr_load_row_off,
-    const int thr_load_col_off,
-    const int i2_off,
-    const int row_stride,
-    U* warp_buf1,
-    U* warp_buf2,
-    const T* input,
-    const V* dout,
-    const int i1_end,
-    const int n2,
-    const U* __restrict__ mean,
-    const U* __restrict__ invvar
-    )
-{
+template <typename T, typename U, typename V>
+__device__ void cuLoadWriteStridedInputs(
+    const int i1_block, const int thr_load_row_off, const int thr_load_col_off,
+    const int i2_off, const int row_stride, U* warp_buf1, U* warp_buf2,
+    const T* input, const V* dout, const int i1_end, const int n2,
+    const U* __restrict__ mean, const U* __restrict__ invvar) {
   int i1 = i1_block + thr_load_row_off;
   if (i1 < i1_end) {
     U curr_mean = mean[i1];
@@ -345,7 +308,8 @@ void cuLoadWriteStridedInputs(
       U curr_input = static_cast<U>(input[load_idx]);
       U curr_dout = static_cast<U>(dout[load_idx]);
       warp_buf1[write_idx] = curr_dout;
-      warp_buf2[write_idx] = curr_dout * (curr_input - curr_mean) * curr_invvar;
+      warp_buf2[write_idx] =
+          curr_dout * (curr_input - curr_mean) * curr_invvar;
     } else {
       warp_buf1[write_idx] = U(0);
       warp_buf2[write_idx] = U(0);
@@ -360,23 +324,12 @@ void cuLoadWriteStridedInputs(
   }
 }
-template<typename T, typename U, typename V> __device__
-void cuLoadAddStridedInputs(
-    const int i1_block,
-    const int thr_load_row_off,
-    const int thr_load_col_off,
-    const int i2_off,
-    const int row_stride,
-    U* warp_buf1,
-    U* warp_buf2,
-    const T* input,
-    const V* dout,
-    const int i1_end,
-    const int n2,
-    const U* __restrict__ mean,
-    const U* __restrict__ invvar
-    )
-{
+template <typename T, typename U, typename V>
+__device__ void cuLoadAddStridedInputs(
+    const int i1_block, const int thr_load_row_off, const int thr_load_col_off,
+    const int i2_off, const int row_stride, U* warp_buf1, U* warp_buf2,
+    const T* input, const V* dout, const int i1_end, const int n2,
+    const U* __restrict__ mean, const U* __restrict__ invvar) {
   int i1 = i1_block + thr_load_row_off;
   if (i1 < i1_end) {
     U curr_mean = mean[i1];
@@ -389,42 +342,46 @@ void cuLoadAddStridedInputs(
       U curr_input = static_cast<U>(input[load_idx]);
       U curr_dout = static_cast<U>(dout[load_idx]);
       warp_buf1[write_idx] += curr_dout;
-      warp_buf2[write_idx] += curr_dout * (curr_input - curr_mean) * curr_invvar;
+      warp_buf2[write_idx] +=
+          curr_dout * (curr_input - curr_mean) * curr_invvar;
       }
     }
   }
 }
-template<typename T, typename U, typename V> __global__
-void cuComputePartGradGammaBeta(
-    const V* __restrict__ dout,
-    const T* __restrict__ input,
-    const int n1,
-    const int n2,
-    const U* __restrict__ mean,
-    const U* __restrict__ invvar,
-    U epsilon,
-    U* part_grad_gamma,
-    U* part_grad_beta)
-{
-  const int numsegs_n1 = (n1+blockDim.y*blockDim.y-1) / (blockDim.y*blockDim.y);
+template <typename T, typename U, typename V>
+__global__ void cuComputePartGradGammaBeta(
+    const V* __restrict__ dout, const T* __restrict__ input, const int n1,
+    const int n2, const U* __restrict__ mean, const U* __restrict__ invvar,
+    U epsilon, U* part_grad_gamma, U* part_grad_beta) {
+  const int numsegs_n1 =
+      (n1 + blockDim.y * blockDim.y - 1) / (blockDim.y * blockDim.y);
   const int segs_per_block = (numsegs_n1 + gridDim.y - 1) / gridDim.y;
   const int i1_beg = blockIdx.y * segs_per_block * blockDim.y * blockDim.y;
-  const int i1_beg_plus_one = (blockIdx.y+1) * segs_per_block * blockDim.y*blockDim.y;
+  const int i1_beg_plus_one =
+      (blockIdx.y + 1) * segs_per_block * blockDim.y * blockDim.y;
   const int i1_end = i1_beg_plus_one < n1 ? i1_beg_plus_one : n1;
   const int row_stride = blockDim.x + 1;
   const int thr_load_col_off = (threadIdx.x * blockDim.y) & (blockDim.x - 1);
-  const int thr_load_row_off = (threadIdx.x*blockDim.y)/blockDim.x + threadIdx.y*blockDim.y;
+  const int thr_load_row_off =
+      (threadIdx.x * blockDim.y) / blockDim.x + threadIdx.y * blockDim.y;
   const int i2_off = blockIdx.x * blockDim.x + thr_load_col_off;
   SharedMemory<U> shared;
-  U* buf = shared.getPointer(); // buf has at least blockDim.x * blockDim.y * blockDim.y + (blockDim.y - 1)*(blockDim.x/blockDim.y) elements
+  U* buf = shared.getPointer();  // buf has at least blockDim.x * blockDim.y *
+                                 // blockDim.y + (blockDim.y -
+                                 // 1)*(blockDim.x/blockDim.y) elements
   U* warp_buf1 = (U*)buf;
   U* warp_buf2 = warp_buf1 + blockDim.y * blockDim.y * row_stride;
   // compute partial sums from strided inputs
   // do this to increase number of loads in flight
-  cuLoadWriteStridedInputs(i1_beg,thr_load_row_off,thr_load_col_off,i2_off,row_stride,warp_buf1,warp_buf2,input,dout,i1_end,n2,mean,invvar);
-  for (int i1_block = i1_beg+blockDim.y*blockDim.y; i1_block < i1_end; i1_block+=blockDim.y*blockDim.y) {
-    cuLoadAddStridedInputs(i1_block,thr_load_row_off,thr_load_col_off,i2_off,row_stride,warp_buf1,warp_buf2,input,dout,i1_end,n2,mean,invvar);
+  cuLoadWriteStridedInputs(i1_beg, thr_load_row_off, thr_load_col_off, i2_off,
+                           row_stride, warp_buf1, warp_buf2, input, dout,
+                           i1_end, n2, mean, invvar);
+  for (int i1_block = i1_beg + blockDim.y * blockDim.y; i1_block < i1_end;
+       i1_block += blockDim.y * blockDim.y) {
+    cuLoadAddStridedInputs(i1_block, thr_load_row_off, thr_load_col_off, i2_off,
+                           row_stride, warp_buf1, warp_buf2, input, dout,
+                           i1_end, n2, mean, invvar);
   }
   __syncthreads();
   // inter-warp reductions
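
The quantity that cuComputePartGradGammaBeta accumulates per block (warp_buf1 for beta, warp_buf2 for gamma) and that cuComputeGradGammaBeta finishes reducing is the standard LayerNorm weight gradient. A minimal CPU reference sketch, assuming plain std::vector storage (illustrative only, not part of this commit):

// Illustrative CPU reference: grad_gamma[j] = sum_i dout[i,j]*(x[i,j]-mean[i])*invvar[i],
// grad_beta[j] = sum_i dout[i,j].
#include <cstdio>
#include <vector>

void ref_grad_gamma_beta(const std::vector<float>& dout,
                         const std::vector<float>& input,
                         const std::vector<float>& mean,
                         const std::vector<float>& invvar, int n1, int n2,
                         std::vector<float>& grad_gamma,
                         std::vector<float>& grad_beta) {
  grad_gamma.assign(n2, 0.f);
  grad_beta.assign(n2, 0.f);
  for (int i1 = 0; i1 < n1; ++i1) {    // reduce over the n1 rows
    for (int i2 = 0; i2 < n2; ++i2) {  // one partial sum per feature column
      const float d = dout[i1 * n2 + i2];
      grad_gamma[i2] += d * (input[i1 * n2 + i2] - mean[i1]) * invvar[i1];
      grad_beta[i2] += d;
    }
  }
}

int main() {
  // Two rows, three features; statistics are made-up example values.
  std::vector<float> dout = {1, 1, 1, 1, 1, 1};
  std::vector<float> input = {0, 1, 2, 1, 2, 3};
  std::vector<float> mean = {1.f, 2.f};
  std::vector<float> invvar = {1.f, 1.f};
  std::vector<float> gg, gb;
  ref_grad_gamma_beta(dout, input, mean, invvar, 2, 3, gg, gb);
  std::printf("grad_gamma: %g %g %g\n", gg[0], gg[1], gg[2]);  // -2 0 2
  std::printf("grad_beta:  %g %g %g\n", gb[0], gb[1], gb[2]);  // 2 2 2
  return 0;
}
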
@@ -463,16 +420,12 @@ void cuComputePartGradGammaBeta(
   }
 }
-template<typename U, typename V> __global__
-void cuComputeGradGammaBeta(
-    const U* part_grad_gamma,
-    const U* part_grad_beta,
-    const int part_size,
-    const int n1,
-    const int n2,
-    V* grad_gamma,
-    V* grad_beta)
-{
+template <typename U, typename V>
+__global__ void cuComputeGradGammaBeta(const U* part_grad_gamma,
+                                       const U* part_grad_beta,
+                                       const int part_size, const int n1,
+                                       const int n2, V* grad_gamma,
+                                       V* grad_beta) {
   // sum partial gradients for gamma and beta
   SharedMemory<U> shared;
   U* buf = shared.getPointer();
@@ -482,9 +435,12 @@ void cuComputeGradGammaBeta(
     int num_warp_reductions = part_size / blockDim.y;
     U sum_gamma = U(0);
     U sum_beta = U(0);
-    const U* part_grad_gamma_ptr = part_grad_gamma + threadIdx.y * num_warp_reductions * n2 + i2;
-    const U* part_grad_beta_ptr = part_grad_beta + threadIdx.y * num_warp_reductions * n2 + i2;
-    for (int warp_offset = 0; warp_offset < num_warp_reductions; ++warp_offset) {
+    const U* part_grad_gamma_ptr =
+        part_grad_gamma + threadIdx.y * num_warp_reductions * n2 + i2;
+    const U* part_grad_beta_ptr =
+        part_grad_beta + threadIdx.y * num_warp_reductions * n2 + i2;
+    for (int warp_offset = 0; warp_offset < num_warp_reductions;
+         ++warp_offset) {
       sum_gamma += part_grad_gamma_ptr[warp_offset * n2];
       sum_beta += part_grad_beta_ptr[warp_offset * n2];
     }
@@ -514,18 +470,12 @@ void cuComputeGradGammaBeta(
   }
 }
-template<typename T, typename U, typename V> __global__
-void cuComputeGradInput(
-    const V* __restrict__ dout,
-    const T* __restrict__ input,
-    const int n1,
-    const int n2,
-    const U* __restrict__ mean,
-    const U* __restrict__ invvar,
-    U epsilon,
-    const V* gamma,
-    T* grad_input)
-{
+template <typename T, typename U, typename V>
+__global__ void cuComputeGradInput(const V* __restrict__ dout,
+                                   const T* __restrict__ input, const int n1,
+                                   const int n2, const U* __restrict__ mean,
+                                   const U* __restrict__ invvar, U epsilon,
+                                   const V* gamma, T* grad_input) {
   for (auto i1 = blockIdx.y; i1 < n1; i1 += gridDim.y) {
     U sum_loss1 = U(0);
     U sum_loss2 = U(0);
@@ -631,90 +581,43 @@ void cuComputeGradInput(
   }
 }
 template <typename T, typename U, typename V>
-void HostApplyLayerNorm(
-    V* output,
-    U* mean,
-    U* invvar,
-    const T* input,
-    int n1,
-    int n2,
-    double epsilon,
-    const V* gamma,
-    const V* beta
-    )
-{
+void HostApplyLayerNorm(V* output, U* mean, U* invvar, const T* input, int n1,
+                        int n2, double epsilon, const V* gamma, const V* beta) {
   auto stream = at::cuda::getCurrentCUDAStream().stream();
   const dim3 threads(32, 4, 1);
   const uint64_t maxGridY =
       at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
   const dim3 blocks(1, std::min((uint64_t)n1, maxGridY), 1);
   int nshared =
-      threads.y > 1 ?
-          threads.y*sizeof(U)+(threads.y/2)*sizeof(U) :
-          0;
+      threads.y > 1 ? threads.y * sizeof(U) + (threads.y / 2) * sizeof(U) : 0;
   cuApplyLayerNorm<<<blocks, threads, nshared, stream>>>(
-      output,
-      mean,
-      invvar,
-      input,
-      n1,n2,
-      U(epsilon),
-      gamma,beta);
+      output, mean, invvar, input, n1, n2, U(epsilon), gamma, beta);
 }
-void cuda_layer_norm(
-    at::Tensor* output,
-    at::Tensor* mean,
-    at::Tensor* invvar,
-    at::Tensor* input,
-    int n1,
-    int n2,
+void cuda_layer_norm(at::Tensor* output, at::Tensor* mean, at::Tensor* invvar,
+                     at::Tensor* input, int n1, int n2,
 #ifdef VERSION_GE_1_1
     at::IntArrayRef normalized_shape,
 #else
    at::IntList normalized_shape,
 #endif
-    at::Tensor* gamma,
-    at::Tensor* beta,
-    double epsilon)
-{
+    at::Tensor* gamma, at::Tensor* beta, double epsilon) {
   using namespace at;
   DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES(
       input->scalar_type(), output->scalar_type(), "cuda_layer_norm_kernel",
-      HostApplyLayerNorm(
-          output->DATA_PTR<scalar_t_out>(),
-          mean->DATA_PTR<float>(),
-          invvar->DATA_PTR<float>(),
-          input->DATA_PTR<scalar_t_in>(),
-          n1,n2,
-          epsilon,
+      HostApplyLayerNorm(output->DATA_PTR<scalar_t_out>(),
+                         mean->DATA_PTR<float>(), invvar->DATA_PTR<float>(),
+                         input->DATA_PTR<scalar_t_in>(), n1, n2, epsilon,
           gamma != NULL ? gamma->DATA_PTR<scalar_t_out>() : NULL,
-          beta != NULL ? beta->DATA_PTR<scalar_t_out>() : NULL);
-  )
+                         beta != NULL ? beta->DATA_PTR<scalar_t_out>() : NULL);)
 }
 template <typename T, typename U, typename V>
-void HostLayerNormGradient(
-    const V* dout,
-    const U* mean,
-    const U* invvar,
-    at::Tensor* input,
-    int n1,
-    int n2,
-    const V* gamma,
-    const V* beta,
-    double epsilon,
-    T* grad_input,
-    V* grad_gamma,
-    V* grad_beta
-    )
-{
+void HostLayerNormGradient(const V* dout, const U* mean, const U* invvar,
+                           at::Tensor* input, int n1, int n2, const V* gamma,
+                           const V* beta, double epsilon, T* grad_input,
+                           V* grad_gamma, V* grad_beta) {
   auto stream = at::cuda::getCurrentCUDAStream().stream();
   if (gamma != NULL && beta != NULL) {
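
The forward launch configuration reformatted above (a 32x4 block per row, grid.y capped by the device limit, and a small dynamic shared-memory buffer for the inter-warp Welford reduction) can be sketched as a standalone host program; n1 and maxGridY below are example values standing in for the real tensor size and device property, not part of this commit:

// Illustrative host-only sketch of the launch geometry arithmetic.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const int n1 = 65536;             // rows to normalize (example value)
  const uint64_t maxGridY = 65535;  // stand-in for maxGridSize[1]
  const unsigned threads_x = 32, threads_y = 4;  // one warp wide, 4 warps tall
  const uint64_t blocks_y = std::min((uint64_t)n1, maxGridY);
  // Dynamic shared memory for the inter-warp reduction: room for the
  // interleaved mu/sigma2 values plus the counts, only needed when >1 warp.
  const int nshared =
      threads_y > 1
          ? (int)(threads_y * sizeof(float) + (threads_y / 2) * sizeof(float))
          : 0;
  std::printf("grid=(1,%llu,1) block=(%u,%u,1) dynamic smem=%d bytes\n",
              (unsigned long long)blocks_y, threads_x, threads_y, nshared);
  return 0;
}
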
@@ -722,33 +625,23 @@ void HostLayerNormGradient(
     const int part_size = 16;
     const dim3 threads2(32, 4, 1);
     const dim3 blocks2((n2 + threads2.x - 1) / threads2.x, part_size, 1);
-    const int nshared2_a = 2 * sizeof(U) * threads2.y * threads2.y *
-        (threads2.x + 1);
+    const int nshared2_a =
+        2 * sizeof(U) * threads2.y * threads2.y * (threads2.x + 1);
     const int nshared2_b = threads2.x * threads2.y * sizeof(U);
     const int nshared2 = nshared2_a > nshared2_b ? nshared2_a : nshared2_b;
     at::Tensor part_grad_gamma = at::empty(
         {part_size, n2}, input->options().dtype(at::ScalarType::Float));
     at::Tensor part_grad_beta = at::empty_like(part_grad_gamma);
     cuComputePartGradGammaBeta<<<blocks2, threads2, nshared2, stream>>>(
-        dout,
-        input->DATA_PTR<T>(),
-        n1,n2,
-        mean,
-        invvar,
-        U(epsilon),
-        part_grad_gamma.DATA_PTR<U>(),
-        part_grad_beta.DATA_PTR<U>());
+        dout, input->DATA_PTR<T>(), n1, n2, mean, invvar, U(epsilon),
+        part_grad_gamma.DATA_PTR<U>(), part_grad_beta.DATA_PTR<U>());
     const dim3 threads3(32, 8, 1);
     const dim3 blocks3((n2 + threads2.x - 1) / threads2.x, 1, 1);
     const int nshared3 = threads3.x * threads3.y * sizeof(U);
     cuComputeGradGammaBeta<<<blocks3, threads3, nshared3, stream>>>(
-        part_grad_gamma.DATA_PTR<U>(),
-        part_grad_beta.DATA_PTR<U>(),
-        part_size,
-        n1,n2,
-        grad_gamma,
-        grad_beta);
+        part_grad_gamma.DATA_PTR<U>(), part_grad_beta.DATA_PTR<U>(), part_size,
+        n1, n2, grad_gamma, grad_beta);
   }
   // compute grad_input
@@ -756,58 +649,35 @@ void HostLayerNormGradient(
       at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
   const dim3 blocks1(1, std::min((uint64_t)n1, maxGridY), 1);
   const dim3 threads1(32, 4, 1);
-  int nshared =
-      threads1.y > 1 ?
-          threads1.y*threads1.x*sizeof(U) :
-          0;
+  int nshared = threads1.y > 1 ? threads1.y * threads1.x * sizeof(U) : 0;
   cuComputeGradInput<<<blocks1, threads1, nshared, stream>>>(
-      dout,
-      input->DATA_PTR<T>(),
-      n1,n2,
-      mean,
-      invvar,
-      U(epsilon),
-      gamma,
+      dout, input->DATA_PTR<T>(), n1, n2, mean, invvar, U(epsilon), gamma,
      grad_input);
 }
-void cuda_layer_norm_gradient(
-    at::Tensor* dout,
-    at::Tensor* mean,
-    at::Tensor* invvar,
-    at::Tensor* input,
-    int n1,
-    int n2,
+void cuda_layer_norm_gradient(at::Tensor* dout, at::Tensor* mean,
+                              at::Tensor* invvar, at::Tensor* input, int n1,
+                              int n2,
 #ifdef VERSION_GE_1_1
     at::IntArrayRef normalized_shape,
 #else
    at::IntList normalized_shape,
 #endif
-    at::Tensor* gamma,
-    at::Tensor* beta,
-    double epsilon,
-    at::Tensor* grad_input,
-    at::Tensor* grad_gamma,
-    at::Tensor* grad_beta)
-{
+                              at::Tensor* gamma, at::Tensor* beta,
+                              double epsilon, at::Tensor* grad_input,
+                              at::Tensor* grad_gamma, at::Tensor* grad_beta) {
   using namespace at;
   DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES(
      input->scalar_type(), gamma->scalar_type(),
      "cuda_layer_norm_gradient_kernel",
      HostLayerNormGradient(
-          dout->DATA_PTR<scalar_t_out>(),
-          mean->DATA_PTR<float>(),
-          invvar->DATA_PTR<float>(),
-          input,
-          n1,n2,
+          dout->DATA_PTR<scalar_t_out>(), mean->DATA_PTR<float>(),
+          invvar->DATA_PTR<float>(), input, n1, n2,
          // TMJ pass NULL argument for gamma, beta, grad_gamma and grad_beta
          // if gamma Tensor is NULL on input.
          gamma != NULL ? gamma->DATA_PTR<scalar_t_out>() : NULL,
-          gamma != NULL ? beta->DATA_PTR<scalar_t_out>() : NULL,
-          epsilon,
+          gamma != NULL ? beta->DATA_PTR<scalar_t_out>() : NULL, epsilon,
          grad_input->DATA_PTR<scalar_t_in>(),
          gamma != NULL ? grad_gamma->DATA_PTR<scalar_t_out>() : NULL,
-          gamma != NULL ? grad_beta->DATA_PTR<scalar_t_out>() : NULL);
-  )
+          gamma != NULL ? grad_beta->DATA_PTR<scalar_t_out>() : NULL);)
 }