Revert D31359010: [pytorch][PR] Fix clang-tidy regressions caused by #65954

Test Plan: revert-hammer

Differential Revision: D31359010 (c269f471f4)

Original commit changeset: dce4b91a9891

fbshipit-source-id: 085417432b6748d3672b9b7141460f47d1c17a7f
Nikita Shulga 2021-10-01 20:34:17 -07:00 committed by Facebook GitHub Bot
parent c269f471f4
commit 5ef350d7cc
6 changed files with 6 additions and 14 deletions

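The hunks below touch the top-level CMake configuration, a benchmark-utils Python test, the torch::deploy loader and tests, and the C++ timer templates, and they all revolve around the same unused-loop-index question for c10::irange loops: the change being reverted had swapped NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) comments for explicit (void)index; casts and added -Wno-unused-variable to the JNI build flags; this revert drops the casts and the flag and restores the comments. A minimal standalone sketch of the two styles, using std::array in place of c10::irange so it builds without PyTorch headers:

#include <array>
#include <cstdio>

int main() {
  std::array<int, 3> reps{};  // stand-in for c10::irange(3)

  // Style restored by this revert: keep the loop line clean and silence the
  // dead-store check with a comment; depending on warning flags,
  // -Wunused-variable may still complain about the unread index.
  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
  for (const auto i : reps) {
    std::puts("iteration");  // body never reads `i`
  }

  // Style being removed: explicitly "use" the index so the compiler and the
  // analyzer both treat it as read.
  for (const auto i : reps) {
    (void)i;
    std::puts("iteration");
  }
  return 0;
}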

@@ -1067,9 +1067,6 @@ endif()
 # ---[ JNI
 if(BUILD_JNI)
-  if(NOT MSVC)
-    string(APPEND CMAKE_CXX_FLAGS " -Wno-unused-variable")
-  endif()
   set(BUILD_LIBTORCH_WITH_JNI 1)
   set(FBJNI_SKIP_TESTS 1)
   add_subdirectory(android/pytorch_android)


@@ -170,7 +170,7 @@ class TestBenchmarkUtils(TestCase):
     @unittest.skipIf(IS_SANDCASTLE, "C++ timing is OSS only.")
     def test_timer_tiny_fast_snippet(self):
         timer = benchmark_utils.Timer(
-            'auto x = 1;(void)x;',
+            'auto x = 1;',
             timer=timeit.default_timer,
             language=benchmark_utils.Language.CPP,
         )


@@ -1046,6 +1046,7 @@ struct __attribute__((visibility("hidden"))) CustomLibraryImpl
   }
   at::optional<TLSIndex> tls_lookup_symbol(Elf64_Xword r_info) {
+    const uint32_t r_type = ELF64_R_TYPE(r_info);
     const uint32_t r_sym = ELF64_R_SYM(r_info);
     if (r_sym == 0) {

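The r_info word of an Elf64 relocation packs the symbol-table index into its high 32 bits and the relocation type into its low 32 bits; the ELF64_R_SYM and ELF64_R_TYPE macros from <elf.h> unpack it. If the surrounding lookup only consults the symbol index, the decoded type is an unused variable, which appears to be what the reverted change had dropped and what this hunk puts back. A tiny standalone illustration of the unpacking:

#include <elf.h>
#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  // Pack a made-up relocation: symbol-table index 7, relocation type 18.
  const Elf64_Xword r_info = ELF64_R_INFO(7, 18);
  const uint32_t r_type = ELF64_R_TYPE(r_info);  // low 32 bits
  const uint32_t r_sym = ELF64_R_SYM(r_info);    // high 32 bits
  std::printf("sym=%" PRIu32 " type=%" PRIu32 "\n", r_sym, r_type);
  return 0;
}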

@@ -107,8 +107,8 @@ TEST(TorchpyTest, MultiSerialSimpleModel) {
   size_t ninterp = 3;
   std::vector<at::Tensor> outputs;
+  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
   for (const auto i : c10::irange(ninterp)) {
-    (void)i;
     outputs.push_back(model({input.alias()}).toTensor());
   }
@@ -151,12 +151,11 @@ TEST(TorchpyTest, ThreadedSimpleModel) {
   std::vector<at::Tensor> outputs;
   std::vector<std::future<at::Tensor>> futures;
+  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
   for (const auto i : c10::irange(nthreads)) {
-    (void)i;
     futures.push_back(std::async(std::launch::async, [&model]() {
       auto input = torch::ones({10, 20});
-      for (const auto j : c10::irange(100)) {
-        (void)j;
+      for (const auto i : c10::irange(100)) {
         model({input.alias()}).toTensor();
       }
       auto result = model({input.alias()}).toTensor();
@@ -231,8 +230,8 @@ TEST(TorchpyTest, TaggingRace) {
   constexpr int64_t trials = 4;
   constexpr int64_t nthreads = 16;
   torch::deploy::InterpreterManager m(nthreads);
+  // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
   for (const auto n : c10::irange(trials)) {
-    (void)n;
     at::Tensor t = torch::empty(2);
     std::atomic<int64_t> success(0);
     std::atomic<int64_t> failed(0);
@@ -284,7 +283,6 @@ TEST(TorchpyTest, FxModule) {
   std::vector<at::Tensor> outputs;
   auto input = torch::ones({5, 10});
   for (const auto i : c10::irange(nthreads)) {
-    (void)i;
     outputs.push_back(model({input.alias()}).toTensor());
   }

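The deploy tests above share a fan-out and join shape: each worker runs the model a fixed number of times, results come back through futures, and atomics tally outcomes, so the loop indices exist only to repeat work. A minimal standalone sketch of that shape, with a comment standing in for the model call:

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <future>
#include <vector>

int main() {
  const int nthreads = 16;          // mirrors the thread counts in the tests
  std::atomic<int64_t> success(0);  // the real tests also count failures
  std::vector<std::future<void>> futures;
  for (int t = 0; t < nthreads; ++t) {
    futures.push_back(std::async(std::launch::async, [&success]() {
      for (int rep = 0; rep < 100; ++rep) {
        // model({input.alias()}).toTensor() runs here in the real tests
      }
      ++success;
    }));
  }
  for (auto& f : futures) {
    f.get();  // join and propagate any exception from the worker
  }
  std::printf("workers finished: %lld\n",
              static_cast<long long>(success.load()));
  return 0;
}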

@@ -31,7 +31,6 @@ double timeit(int n) {
   // Main loop
   auto start_time = std::chrono::high_resolution_clock::now();
   for(const auto loop_idx : c10::irange(n)) {
-    (void)loop_idx;
     // STMT_TEMPLATE_LOCATION
   }
   auto end_time = std::chrono::high_resolution_clock::now();


@@ -44,17 +44,14 @@ int main(int argc, char* argv[]) {
   // Warmup
   for(const auto i : c10::irange(number_warmup)) {
-    (void)i;
     // STMT_TEMPLATE_LOCATION
   }
   // Main loop
   for(const auto repeat : c10::irange(repeats)) {
-    (void)repeat;
     CALLGRIND_TOGGLE_COLLECT;
     for(const auto i : c10::irange(number)) {
-      (void)i;
       // STMT_TEMPLATE_LOCATION
     }
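
As the STMT_TEMPLATE_LOCATION placeholder name suggests, the templates above get the measured statement spliced in at that marker (the Python-side Timer passes snippets such as 'auto x = 1;'), so the surrounding loop index only drives repetition and is never read; that is what the removed (void) casts were silencing. A rough standalone sketch of the timeit shape, with a plain counting loop in place of c10::irange and a hypothetical run_once() standing in for the spliced statement:

#include <chrono>
#include <cstdio>

// Hypothetical stand-in for the statement substituted at STMT_TEMPLATE_LOCATION,
// e.g. the 'auto x = 1;' snippet from the Python benchmark test.
static void run_once() {
  auto x = 1;
  (void)x;  // without a use, -Wunused-variable can flag the snippet variable too
}

static double timeit(int n) {
  auto start_time = std::chrono::high_resolution_clock::now();
  for (int loop_idx = 0; loop_idx < n; ++loop_idx) {  // index only repeats the work
    run_once();
  }
  auto end_time = std::chrono::high_resolution_clock::now();
  return std::chrono::duration<double>(end_time - start_time).count() / n;
}

int main() {
  std::printf("seconds per iteration: %g\n", timeit(1000000));
  return 0;
}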