pytorch/test/cpp/api/init.cpp
Nikita Shulga 4cb534f92e Make PyTorch code-base clang-tidy compliant (#56892)
Summary:
This is an automated change generated by the following script:
```
#!/usr/bin/env python3
from subprocess import check_output, check_call
import os

def get_compiled_files_list():
    import json
    with open("build/compile_commands.json") as f:
        data = json.load(f)
    files = [os.path.relpath(node['file']) for node in data]
    for idx, fname in enumerate(files):
        if fname.startswith('build/') and fname.endswith('.DEFAULT.cpp'):
            files[idx] = fname[len('build/'):-len('.DEFAULT.cpp')]
    return files

def run_clang_tidy(fname):
    check_call(["python3", "tools/clang_tidy.py", "-c", "build", "-x", fname, "-s"])
    changes = check_output(["git", "ls-files", "-m"])
    if len(changes) == 0:
        return
    check_call(["git", "commit", "--all", "-m", f"NOLINT stubs for {fname}"])

def main():
    git_files = check_output(["git", "ls-files"]).decode("ascii").split("\n")
    compiled_files = get_compiled_files_list()
    for idx, fname in enumerate(git_files):
        if fname not in compiled_files:
            continue
        if fname.startswith("caffe2/contrib/aten/"):
            continue
        print(f"[{idx}/{len(git_files)}] Processing {fname}")
        run_clang_tidy(fname)

if __name__ == "__main__":
    main()
```
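
For reference, the per-file step the script wraps can also be run by hand. A minimal sketch, reusing the exact flags from `run_clang_tidy()` above and assuming the repository root as the working directory with `build/compile_commands.json` already generated (the target file here is just an example):
```
from subprocess import check_call
# Same invocation as run_clang_tidy() above, applied to a single file.
check_call(["python3", "tools/clang_tidy.py", "-c", "build", "-x", "test/cpp/api/init.cpp", "-s"])
```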

Pull Request resolved: https://github.com/pytorch/pytorch/pull/56892

Reviewed By: H-Huang

Differential Revision: D27991944

Pulled By: malfet

fbshipit-source-id: 5415e1eb2c1b34319a4f03024bfaa087007d7179
2021-04-28 14:10:25 -07:00

#include <gtest/gtest.h>

#include <torch/torch.h>

#include <test/cpp/api/init_baseline.h>
#include <test/cpp/api/support.h>

#include <cmath>
#include <functional>
#include <vector>

void check_exact_values(
    const std::vector<torch::Tensor>& parameters,
    const std::vector<std::vector<torch::Tensor>>& expected_parameters) {
  ASSERT_EQ(parameters.size(), expected_parameters.size());

  for (size_t i = 0; i < parameters.size(); i++) {
    auto layerParameters = parameters[i];
    auto expectedLayerParameters = expected_parameters[i];

    if (layerParameters.size(0) != expectedLayerParameters.size()) {
      std::cout << "layer #" << i
                << " layerParameters size: " << layerParameters.size(0)
                << " != "
                << " expectedLayerParameters size: "
                << expectedLayerParameters.size() << std::endl;
      ASSERT_TRUE(false);
    }

    for (size_t p = 0; p < layerParameters.size(0); p++) {
      // Always compare using double dtype, regardless of the original dtype
      // of the tensors.
      auto tensor = layerParameters[p].to(torch::kFloat64);
      auto expectedTensor = expectedLayerParameters[p].to(torch::kFloat64);

      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
      if (!tensor.allclose(expectedTensor, /*rtol=*/1e-3, /*atol=*/5e-4)) {
        std::cout << "layer " << i << ": " << tensor << " != " << expectedTensor
                  << " (parameter " << p << ")" << std::endl;
        ASSERT_TRUE(false);
      }
    }
  }
}
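
// Illustrative sketch (not part of the recorded baselines): allclose treats
// two tensors as equal when |actual - expected| <= atol + rtol * |expected|
// holds elementwise, so with the tolerances used above a baseline value of
// 1.0 admits up to ~1.5e-3 of drift. The test name is hypothetical.
TEST(InitTest, AllcloseToleranceSketch) {
  auto expected = torch::ones({3});
  auto actual = expected + 4e-4; // within the absolute tolerance alone
  ASSERT_TRUE(actual.allclose(expected, /*rtol=*/1e-3, /*atol=*/5e-4));
}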

void check_initializer_against_baseline(
    std::function<void(torch::Tensor)> initializer,
    std::vector<std::vector<torch::Tensor>> expected) {
  torch::manual_seed(0);

  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
  auto layer1 = torch::nn::Linear(7, 15);
  initializer(layer1->weight);
  layer1->to(torch::kFloat64);

  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
  auto layer2 = torch::nn::Linear(15, 15);
  initializer(layer2->weight);
  layer2->to(torch::kFloat64);

  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
  auto layer3 = torch::nn::Linear(15, 2);
  initializer(layer3->weight);
  layer3->to(torch::kFloat64);

  auto parameters = std::vector<torch::Tensor>{
      layer1->weight,
      layer2->weight,
      layer3->weight,
  };

  check_exact_values(parameters, expected);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(InitTest, ProducesPyTorchValues_XavierUniform) {
  auto expected = expected_parameters::Xavier_Uniform();
  auto initializer = [](torch::Tensor tensor) {
    torch::nn::init::xavier_uniform_(tensor);
  };
  check_initializer_against_baseline(initializer, expected);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(InitTest, ProducesPyTorchValues_XavierNormal) {
  auto expected = expected_parameters::Xavier_Normal();
  auto initializer = [](torch::Tensor tensor) {
    torch::nn::init::xavier_normal_(tensor);
  };
  check_initializer_against_baseline(initializer, expected);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(InitTest, ProducesPyTorchValues_KaimingNormal) {
  auto expected = expected_parameters::Kaiming_Normal();
  auto initializer = [](torch::Tensor tensor) {
    torch::nn::init::kaiming_normal_(tensor);
  };
  check_initializer_against_baseline(initializer, expected);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(InitTest, ProducesPyTorchValues_KaimingUniform) {
  auto expected = expected_parameters::Kaiming_Uniform();
  auto initializer = [](torch::Tensor tensor) {
    torch::nn::init::kaiming_uniform_(tensor);
  };
  check_initializer_against_baseline(initializer, expected);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(InitTest, CanInitializeTensorThatRequiresGrad) {
  auto tensor = torch::empty({3, 4}, torch::requires_grad());
  ASSERT_THROWS_WITH(
      tensor.fill_(1),
      "a leaf Variable that requires grad "
      "is being used in an in-place operation");
  ASSERT_EQ(torch::nn::init::ones_(tensor).sum().item<int32_t>(), 12);
}
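
// Hedged sketch of why ones_ succeeds where the bare fill_ above throws: the
// init functions suspend autograd around the in-place write (the NoGradGuard
// shown here is an assumption about their implementation). The test name is
// hypothetical.
TEST(InitTest, NoGradGuardAllowsInPlaceFillSketch) {
  auto tensor = torch::empty({3, 4}, torch::requires_grad());
  {
    torch::NoGradGuard no_grad; // in-place ops on a leaf are legal here
    tensor.fill_(1);
  }
  ASSERT_EQ(tensor.sum().item<int32_t>(), 12);
}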

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(InitTest, CalculateGainWithTanh) {
  double gain = torch::nn::init::calculate_gain(torch::kTanh);
  ASSERT_DOUBLE_EQ(gain, 5.0 / 3.0);
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(InitTest, CalculateGainWithRelu) {
  double gain = torch::nn::init::calculate_gain(torch::kReLU);
  ASSERT_DOUBLE_EQ(gain, std::sqrt(2.0));
}

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(InitTest, CalculateGainWithLeakyRelu) {
  double gain = torch::nn::init::calculate_gain(torch::kLeakyReLU);
  ASSERT_DOUBLE_EQ(gain, std::sqrt(2.0 / (1 + std::pow(0.01, 2))));
}
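
// Hedged sketch: the leaky ReLU gain for a non-default negative slope follows
// the same sqrt(2 / (1 + slope^2)) formula as above, assuming calculate_gain's
// optional `param` argument mirrors the Python API. The slope value and test
// name are illustrative.
TEST(InitTest, CalculateGainWithLeakyReluSlopeSketch) {
  double gain =
      torch::nn::init::calculate_gain(torch::kLeakyReLU, /*param=*/0.2);
  ASSERT_DOUBLE_EQ(gain, std::sqrt(2.0 / (1 + std::pow(0.2, 2))));
}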

// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TEST(InitTest, CanInitializeCnnWithOrthogonal) {
  torch::nn::Conv2d conv_layer(torch::nn::Conv2dOptions(3, 2, 3).stride(2));
  torch::nn::init::orthogonal_(conv_layer->named_parameters()["weight"]);
}
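
// Hedged sketch of what orthogonal_ produces here: the weight is treated as a
// matrix of shape out_channels x (in_channels * kH * kW), and with fewer rows
// than columns the rows come out orthonormal, so W * W^T should be close to
// the identity. Illustrative only; the test name is hypothetical.
TEST(InitTest, OrthogonalWeightIsRowOrthonormalSketch) {
  torch::nn::Conv2d conv_layer(torch::nn::Conv2dOptions(3, 2, 3).stride(2));
  torch::nn::init::orthogonal_(conv_layer->weight);
  auto flat = conv_layer->weight.view({conv_layer->weight.size(0), -1});
  ASSERT_TRUE(flat.matmul(flat.t())
                  .allclose(torch::eye(flat.size(0)), /*rtol=*/1e-4, /*atol=*/1e-5));
}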