pytorch/caffe2/operators/expand_op.cc
Nikita Shulga a9b0a921d5 Disable avoid-non-const-global-variables lint check (#62008)
Summary:
The GoogleTest `TEST` macro is non-compliant with this check, as is `DEFINE_DISPATCH`.

All changes except the ones to `.clang-tidy` were generated using the following script:
```
for i in `find . -type f -iname "*.c*" -or -iname "*.h"|xargs grep cppcoreguidelines-avoid-non-const-global-variables|cut -f1 -d:|sort|uniq`;  do sed -i "/\/\/ NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)/d" $i; done
```
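
For reference, a minimal sketch of the pattern the script removes (the test name below is hypothetical, not from the PyTorch tree): each use of such a macro previously carried a `NOLINTNEXTLINE` comment to silence the warning; with the check disabled in `.clang-tidy`, the bare macro suffices.
```
#include <gtest/gtest.h>

// Previously, each such line was preceded by a suppression comment:
//
//   // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
//   TEST(ExpandOpTest, Broadcast) { ... }
//
// With cppcoreguidelines-avoid-non-const-global-variables disabled in
// .clang-tidy, the macro can be used without the suppression:
TEST(ExpandOpTest, Broadcast) {
  EXPECT_EQ(1 + 1, 2);
}
```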

Pull Request resolved: https://github.com/pytorch/pytorch/pull/62008

Reviewed By: driazati, r-barnes

Differential Revision: D29838584

Pulled By: malfet

fbshipit-source-id: 1b2f8602c945bd4ce50a9bfdd204755556e31d13
2021-07-22 18:04:40 -07:00

#include "caffe2/operators/expand_op.h"
#include <algorithm>
#include <functional>
#include <vector>
#include <caffe2/utils/math.h>
namespace caffe2 {
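// Register the CPU implementations of Expand and its gradient for the
// supported numeric element types (int32, int64, float, double).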
REGISTER_CPU_OPERATOR(
    Expand,
    ExpandOp<
        TensorTypes<std::int32_t, std::int64_t, float, double>,
        CPUContext>);

REGISTER_CPU_OPERATOR(
    ExpandGradient,
    ExpandGradientOp<
        TensorTypes<std::int32_t, std::int64_t, float, double>,
        CPUContext>);

OPERATOR_SCHEMA(Expand)
    .NumInputs(2)
    .NumOutputs(1)
    .SetDoc(R"DOC(
Broadcast the input tensor to a materialized new tensor using the given shape.
The broadcast rule is similar to "numpy.array(input) * numpy.ones(shape)":
Dimensions are right-aligned;
Two corresponding dimensions must have the same value, or one of them
must be 1.
In order to align with PyTorch's `expand`, `shape` is allowed to have entries
equal to -1, which means to preserve the size of the corresponding dimension
in `X` (so it is effectively equivalent to specifying 1).
)DOC")
    .Input(0, "X", "(*Tensor`<NumericType>`*): input tensor")
    .Input(1, "shape", "(*Tensor`<int>`*): expand shape")
    .Output(0, "Y", "(*Tensor`<NumericType>`*): expanded tensor");
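
// Expand example: X with shape (3, 1) and shape = (2, -1, 4) produce Y with
// shape (2, 3, 4): the -1 entry preserves X's first dimension (size 3), the
// trailing size-1 dimension is broadcast to 4, and a leading dimension of
// size 2 is added.
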
OPERATOR_SCHEMA(ExpandGradient).NumInputs(2).NumOutputs(1);
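
// The gradient of Expand reduces the output gradient back to the input's
// shape: ExpandGradient receives dY = GO(0) and the original input X = I(0),
// and sums dY over the broadcast dimensions to produce dX = GI(0).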
namespace {

class GetExpandGradient final : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  std::vector<OperatorDef> GetGradientDefs() override {
    return SingleGradientDef(
        "ExpandGradient",
        "",
        std::vector<string>{GO(0), I(0)},
        std::vector<string>{GI(0)});
  }
};

} // namespace

REGISTER_GRADIENT(Expand, GetExpandGradient);
} // namespace caffe2