pytorch/caffe2/operators/cbrt_op.h
Xiaomeng Yang 03e7953a98 Use FixedDivisor in Reduce and Broadcast CUDA kernels (#9072)
Summary:
Closes https://github.com/pytorch/pytorch/pull/9072

Use FixedDivisor in Reduce and Broadcast CUDA kernels

Reviewed By: houseroad

Differential Revision: D8710243

fbshipit-source-id: 6f1da12234898594a1be8c979d942aa515832aeb
2018-07-01 00:25:34 -07:00

#ifndef CAFFE2_OPERATORS_CBRT_OP_H_
#define CAFFE2_OPERATORS_CBRT_OP_H_

#include <vector>

#include "caffe2/operators/elementwise_ops.h"
#include "caffe2/utils/math.h"

namespace caffe2 {

// Elementwise forward functor: Y = cbrt(X), delegating to math::Cbrt.
template <class Context>
struct CbrtFunctor {
  template <typename T>
  bool operator()(const int N, const T* X, T* Y, Context* context) const {
    math::Cbrt<T, Context>(N, X, Y, context);
    return true;
  }
};

// Gradient functor: given the upstream gradient dY and the forward output Y,
// computes dX. Only declared here; the Context-specific (e.g. CPU/CUDA)
// definitions live in the corresponding source files.
template <class Context>
struct CbrtGradientFunctor {
  template <typename T>
  bool Forward(
      const std::vector<int>& dY_dims,
      const std::vector<int>& Y_dims,
      const T* dY,
      const T* Y,
      T* dX,
      Context* context) const;
};

} // namespace caffe2

#endif // CAFFE2_OPERATORS_CBRT_OP_H_
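
The header only declares the gradient functor; the per-Context definitions are elsewhere in caffe2. The underlying math is straightforward: since Y = X^(1/3), dY/dX = (1/3) X^(-2/3) = 1 / (3 * Y^2), so dX = dY / (3 * Y^2). Below is a minimal standalone C++ sketch of that computation. It is not the caffe2 implementation: it assumes flat, same-shape arrays (ignoring the dims arguments, which matter for broadcasting), and the helper names CbrtForward and CbrtGradient are hypothetical.

#include <cmath>
#include <cstdio>
#include <vector>

// Hypothetical standalone sketch, not caffe2 code.
// Forward: Y[i] = cbrt(X[i]).
void CbrtForward(const int N, const float* X, float* Y) {
  for (int i = 0; i < N; ++i) {
    Y[i] = std::cbrt(X[i]);
  }
}

// Gradient: since Y = X^(1/3), dX[i] = dY[i] / (3 * Y[i]^2).
void CbrtGradient(const int N, const float* dY, const float* Y, float* dX) {
  for (int i = 0; i < N; ++i) {
    dX[i] = dY[i] / (3.0f * Y[i] * Y[i]);
  }
}

int main() {
  const std::vector<float> X = {8.0f, 27.0f, 64.0f};
  const std::vector<float> dY = {1.0f, 1.0f, 1.0f};
  std::vector<float> Y(X.size());
  std::vector<float> dX(X.size());
  const int N = static_cast<int>(X.size());
  CbrtForward(N, X.data(), Y.data());
  CbrtGradient(N, dY.data(), Y.data(), dX.data());
  for (int i = 0; i < N; ++i) {
    // e.g. cbrt(8) = 2, so dX = 1 / (3 * 4) = 0.0833...
    std::printf("Y=%g dX=%g\n", Y[i], dX[i]);
  }
  return 0;
}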