mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 12:21:27 +01:00
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/9379 Add cudnn activation ops Reviewed By: houseroad Differential Revision: D8818013 fbshipit-source-id: d3881c634a46578b9331da07f9fdf7e1f31d7e8a
54 lines
1.3 KiB
C++
54 lines
1.3 KiB
C++
#include "caffe2/operators/sigmoid_op.h"

#include "caffe2/utils/eigen_utils.h"

#include <algorithm>
#include <functional>
#include <numeric>
#include <string>
#include <vector>
|
|
|
|
namespace caffe2 {
|
|
|
|
template <>
|
|
template <typename T>
|
|
bool SigmoidGradientFunctor<CPUContext>::Forward(
|
|
const std::vector<int>& Y_dims,
|
|
const std::vector<int>& /* dY_dims */,
|
|
const T* Y,
|
|
const T* dY,
|
|
T* dX,
|
|
CPUContext* /* context */) const {
|
|
const int size = std::accumulate(
|
|
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
|
|
ConstEigenVectorArrayMap<T> dY_arr(dY, size);
|
|
ConstEigenVectorArrayMap<T> Y_arr(Y, size);
|
|
EigenVectorArrayMap<T>(dX, size) = dY_arr * Y_arr * (T(1) - Y_arr);
|
|
return true;
|
|
}
|
|
|
|
// Registers the CPU kernel for SigmoidGradient: a binary elementwise op
// (inputs Y and dY, per the gradient maker below) over float tensors,
// dispatching to SigmoidGradientFunctor<CPUContext>.
REGISTER_CPU_OPERATOR(
    SigmoidGradient,
    BinaryElementwiseOp<
        TensorTypes<float>,
        CPUContext,
        SigmoidGradientFunctor<CPUContext>>);
|
|
|
|
namespace {
|
|
|
|
class GetSigmoidGradient : public GradientMakerBase {
|
|
using GradientMakerBase::GradientMakerBase;
|
|
std::vector<OperatorDef> GetGradientDefs() override {
|
|
return SingleGradientDef(
|
|
"SigmoidGradient",
|
|
"",
|
|
std::vector<std::string>{O(0), GO(0)},
|
|
std::vector<std::string>{GI(0)});
|
|
}
|
|
};
|
|
|
|
} // namespace
|
|
|
|
// Wires GetSigmoidGradient in as the gradient maker for the Sigmoid op.
REGISTER_GRADIENT(Sigmoid, GetSigmoidGradient);
|
|
|
|
} // namespace caffe2
|