38 lines
1004 B
C++
#include "caffe2/operators/quantized/int8_relu_op.h"

namespace caffe2 {

namespace {
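// Cost inference lets the Caffe2 planner estimate the expense of this op.
// PointwiseCostInference<0> models a purely elementwise op, charging the
// template argument's FLOPs per element (here zero); ReLU carries no learned
// parameters, so params_bytes is cleared as well.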
OpSchema::Cost CostInferenceForRelu(
    const OperatorDef& def,
    const vector<TensorShape>& in) {
  struct OpSchema::Cost cost = PointwiseCostInference<0>(def, in);
  cost.params_bytes = 0;
  return cost;
}

} // namespace

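// Register the CPU implementation under the operator name "Int8Relu".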
REGISTER_CPU_OPERATOR(Int8Relu, int8::Int8ReluOp);

// Input: X, output: Y
OPERATOR_SCHEMA(Int8Relu)
    .NumInputs(1)
    .NumOutputs(1)
    .Arg("Y_scale", "Output tensor quantization scale")
    .Arg("Y_zero_point", "Output tensor quantization offset")
    .AllowInplace({{0, 0}})
    .CostInferenceFunction(CostInferenceForRelu)
    .IdenticalTypeAndShape()
    .SetDoc(R"DOC(
Relu takes one input data (Tensor<T>) and produces one output data
(Tensor<T>) where the rectified linear function, y = max(0, x), is applied to
the tensor elementwise.
)DOC")
    .Input(0, "X", "1D input tensor")
    .Output(0, "Y", "1D output tensor")
    .InheritOnnxSchema("Relu");

} // namespace caffe2
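For context on what the kernel underneath this schema computes: with affine uint8 quantization, a real value r is stored as q = round(r / scale) + zero_point, so y = max(0, x) in real space reduces to q_y = max(q_x, zero_point) in quantized space whenever the output reuses the input's scale and zero point. The sketch below is a hypothetical reference helper, not part of this file and not necessarily how the actual Int8ReluOp kernel is written, under that shared-quantization-parameters assumption:

```
#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical reference for quantized ReLU, assuming the output tensor
// keeps the input's scale and zero point. Values below zero_point encode
// negative reals, so clamping at zero_point implements y = max(0, x).
std::vector<uint8_t> int8_relu_reference(
    const std::vector<uint8_t>& x,
    uint8_t zero_point) {
  std::vector<uint8_t> y(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    y[i] = std::max(x[i], zero_point);
  }
  return y;
}
```

Since each output element depends only on the input element at the same index, running the op in place is safe, which is what AllowInplace({{0, 0}}) advertises to the scheduler.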
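As a usage illustration, a net would instantiate this operator through a protobuf OperatorDef whose type string matches the registration above. This is a hedged sketch: the blob name "X" and the quantization values are invented for the example, and the header path assumes the PyTorch/Caffe2 source layout:

```
#include "caffe2/proto/caffe2_pb.h"

// Build an Int8Relu OperatorDef by hand; names and values are illustrative.
caffe2::OperatorDef MakeInt8ReluDef() {
  caffe2::OperatorDef def;
  def.set_type("Int8Relu"); // must match REGISTER_CPU_OPERATOR above
  def.add_input("X");
  def.add_output("X"); // in-place output is legal per AllowInplace({{0, 0}})
  auto* scale = def.add_arg();
  scale->set_name("Y_scale"); // output tensor quantization scale
  scale->set_f(0.05f);
  auto* zero_point = def.add_arg();
  zero_point->set_name("Y_zero_point"); // output tensor quantization offset
  zero_point->set_i(128);
  return def;
}
```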