pytorch/caffe2/operators/sigmoid_gradient_op.cc
Xiaomeng Yang bb9ff58c6d Add cudnn activation ops (#9379)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/9379

Add cudnn activation ops

Reviewed By: houseroad

Differential Revision: D8818013

fbshipit-source-id: d3881c634a46578b9331da07f9fdf7e1f31d7e8a
2018-07-12 23:18:56 -07:00

#include "caffe2/operators/sigmoid_op.h"
#include "caffe2/utils/eigen_utils.h"
#include <algorithm>
#include <functional>
#include <string>
#include <vector>
namespace caffe2 {
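
// SigmoidGradient computes dX = dY * Y * (1 - Y), using the identity
// sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)) with Y = sigmoid(X).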
template <>
template <typename T>
bool SigmoidGradientFunctor<CPUContext>::Forward(
    const std::vector<int>& Y_dims,
    const std::vector<int>& /* dY_dims */,
    const T* Y,
    const T* dY,
    T* dX,
    CPUContext* /* context */) const {
  const int size = std::accumulate(
      Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
  ConstEigenVectorArrayMap<T> dY_arr(dY, size);
  ConstEigenVectorArrayMap<T> Y_arr(Y, size);
  EigenVectorArrayMap<T>(dX, size) = dY_arr * Y_arr * (T(1) - Y_arr);
  return true;
}
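
// The gradient op is a binary elementwise op over the forward output Y and
// the incoming gradient dY; only float is supported on CPU.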
REGISTER_CPU_OPERATOR(
    SigmoidGradient,
    BinaryElementwiseOp<
        TensorTypes<float>,
        CPUContext,
        SigmoidGradientFunctor<CPUContext>>);

namespace {
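
// GetSigmoidGradient wires the forward output O(0) and its gradient GO(0)
// into SigmoidGradient, which produces the input gradient GI(0).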
class GetSigmoidGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  std::vector<OperatorDef> GetGradientDefs() override {
    return SingleGradientDef(
        "SigmoidGradient",
        "",
        std::vector<std::string>{O(0), GO(0)},
        std::vector<std::string>{GI(0)});
  }
};

} // namespace

REGISTER_GRADIENT(Sigmoid, GetSigmoidGradient);

} // namespace caffe2