pytorch/caffe2/opt/device_test.cc
Nikita Shulga a9b0a921d5 Disable avoid-non-const-global-variables lint check (#62008)
Summary:
The GoogleTest `TEST` macro is non-compliant with this check, as is `DEFINE_DISPATCH`.

All changes except the ones to `.clang-tidy` were generated using the following script:
```
for i in `find . -type f -iname "*.c*" -or -iname "*.h"|xargs grep cppcoreguidelines-avoid-non-const-global-variables|cut -f1 -d:|sort|uniq`;  do sed -i "/\/\/ NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)/d" $i; done
```
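
For illustration (a hypothetical test file, not one taken from this PR), the script only deletes the suppression comment that precedes a flagged declaration; the declaration itself is untouched, since the check is disabled in `.clang-tidy` instead:
```
// Before: a suppression line precedes the global that the TEST macro expands to.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(ExampleSuite, ExampleCase) { /* ... */ }

// After the script runs, only the NOLINTNEXTLINE line is removed:
TEST(ExampleSuite, ExampleCase) { /* ... */ }
```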

Pull Request resolved: https://github.com/pytorch/pytorch/pull/62008

Reviewed By: driazati, r-barnes

Differential Revision: D29838584

Pulled By: malfet

fbshipit-source-id: 1b2f8602c945bd4ce50a9bfdd204755556e31d13
2021-07-22 18:04:40 -07:00


#include "caffe2/core/common.h"
#include "caffe2/opt/converter.h"
#include "caffe2/opt/device.h"
#include <gtest/gtest.h>
using namespace nom::repr;
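
// Helper macro: appends an Argument named _name to the given OperatorDef and
// fills its typed field via the generated protobuf setter (set_i, set_s, ...).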
#define ADD_ARG(_op, _name, _type, _val)      \
  {                                           \
    caffe2::Argument* arg = _op->add_arg();   \
    arg->set_name(_name);                     \
    arg->set_##_type(_val);                   \
  }

TEST(DeviceTest, InsertCopies) {
  caffe2::NetDef net;
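
  // Build a 9-op net on CPU: ops 0, 3, and 6 are Convs, the remaining six are Relus.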
  for (auto i = 0; i < 9; ++i) {
    if (i % 3 == 0) {
      caffe2::OperatorDef* def = net.add_op();
      def->set_type("Conv");
      def->add_input("X");
      def->add_input("W" + c10::to_string(i));
      def->add_input("b" + c10::to_string(i));
      ADD_ARG(def, "kernel", i, 3);
      ADD_ARG(def, "stride", i, 1);
      ADD_ARG(def, "pad", i, 0);
      ADD_ARG(def, "order", s, "NCHW");
      def->add_output("X");
      def->mutable_device_option()->set_device_type(caffe2::PROTO_CPU);
    } else {
      caffe2::OperatorDef* def = net.add_op();
      def->set_type("Relu");
      def->add_input("X");
      def->add_output("X");
      def->mutable_device_option()->set_device_type(caffe2::PROTO_CPU);
    }
  }

  auto nn = caffe2::convertToNNModule(net);
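
  // Re-annotate every Relu to target OpenCL; the Convs stay on CPU, so each
  // Conv -> Relu -> Relu group straddles a device boundary.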
  for (auto node : nn.dataFlow.getMutableNodes()) {
    if (nn::is<Relu>(node)) {
      auto annot = nn::get<NeuralNetOperator>(node)->getMutableAnnotation();
      auto c2_annot = dyn_cast<caffe2::Caffe2Annotation>(annot);
      c2_annot->setDeviceType(caffe2::PROTO_OPENCL);
    }
  }
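
  // insertCopies takes a predicate selecting the OpenCL operators plus two
  // factories for the copy nodes inserted on entry to and exit from each
  // selected region; plain GenericOperators suffice here, since the test only
  // counts ops.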
  caffe2::opt::insertCopies(
      &nn,
      [](NNGraph::NodeRef node) {
        // Ignore all tensors
        if (!nn::is<NeuralNetOperator>(node)) {
          return true;
        }
        auto annot = nn::get<NeuralNetOperator>(node)->getMutableAnnotation();
        NOM_REQUIRE_OR_RET_FALSE(annot);
        auto c2_annot = dyn_cast<caffe2::Caffe2Annotation>(annot);
        NOM_REQUIRE_OR_RET_FALSE(c2_annot);
        return c2_annot->getDeviceType() == caffe2::PROTO_OPENCL;
      },
      [](NNGraph& g) {
        return g.createNode(std::make_unique<GenericOperator>());
      },
      [](NNGraph& g) {
        return g.createNode(std::make_unique<GenericOperator>());
      });
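
  // Lower the annotated graph back to a Caffe2 NetDef so we can count ops.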
  auto proto = caffe2::convertToCaffe2Proto(nn, net);

  // Conv -> Relu -> Relu
  // becomes
  // Conv -> Generic -> Relu -> Relu -> Generic,
  // thus the 9 ops of this pattern become 15.
  EXPECT_EQ(proto.op().size(), 15);
}