pytorch/caffe2/operators/elementwise_add_gradient_op.cc
Stephen Macke 27cc11226d make broadcast fastpath the default for currently rolled-out ops (#68365)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/68365

As stated in the title: the broadcast fastpath has been running fine for the enabled ops for a while now, so make it the default for these ops.

Test Plan: diff is a no-op, so sandcastle

Differential Revision: D32107847

fbshipit-source-id: b239b127b219985bf7df6a0eea2d879b8e9c79a4
2021-11-15 21:41:57 -08:00

34 lines
683 B
C++

#include "caffe2/operators/elementwise_add_op.h"
#include <string>
#include <vector>
namespace caffe2 {
// Register the CPU implementation of the AddGradient operator.
// The kernel is the generic BinaryElementwiseGradientOp instantiated with
// AddFunctor over all of Caffe2's NumericTypes.
// NOTE(review): the commit message says the broadcast fastpath is now the
// default for this op — confirm against elementwise_add_op.h.
REGISTER_CPU_OPERATOR(
AddGradient,
BinaryElementwiseGradientOp<
NumericTypes,
CPUContext,
AddFunctor<CPUContext>>);
namespace {

// Gradient maker for the Add operator. Emits a single "AddGradient" op whose
// inputs are the output gradient plus both original operands, and whose
// outputs are the gradients of the two operands.
class GetAddGradient final : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;

  std::vector<OperatorDef> GetGradientDefs() override {
    const std::vector<std::string> grad_inputs{GO(0), I(0), I(1)};
    const std::vector<std::string> grad_outputs{GI(0), GI(1)};
    return SingleGradientDef("AddGradient", "", grad_inputs, grad_outputs);
  }
};

} // namespace
// Register GetAddGradient as the gradient maker for the "Add" operator, so
// gradient construction for Add produces the AddGradient op registered above.
REGISTER_GRADIENT(Add, GetAddGradient);
} // namespace caffe2