pytorch/test/cpp/api/misc.cpp
Peter Goldsborough 825181ea9d Rewrite C++ API tests in gtest (#11953)
Summary:
This PR is a large codemod to rewrite all C++ API tests with GoogleTest (gtest) instead of Catch.

You can largely trust me to have correctly code-modded the tests, so it's not required to review every one of the 2000+ changed lines. However, additional things I changed were:

1. Moved the cmake parts for these tests into their own `CMakeLists.txt` under `test/cpp/api` and calling `add_subdirectory` from `torch/CMakeLists.txt`
2. Fixing DataParallel tests which weren't being compiled because `USE_CUDA` wasn't correctly being set at all.
3. Updated README

ezyang ebetica
Pull Request resolved: https://github.com/pytorch/pytorch/pull/11953

Differential Revision: D9998883

Pulled By: goldsborough

fbshipit-source-id: affe3f320b0ca63e7e0019926a59076bb943db80
2018-09-21 21:28:16 -07:00

54 lines
1.4 KiB
C++

#include <gtest/gtest.h>
#include <torch/nn/init.h>
#include <torch/nn/modules/linear.h>
#include <torch/tensor.h>
#include <torch/utils.h>
#include <test/cpp/api/support.h>
// While a `torch::NoGradGuard` is alive, autograd recording is disabled:
// even though the input was created with `requires_grad()`, running a
// forward and backward pass must leave the model's weight gradient
// undefined.
TEST(NoGradTest, SetsGradModeCorrectly) {
  torch::manual_seed(0);
  torch::NoGradGuard guard;
  torch::nn::Linear model(5, 2);
  auto input = torch::randn({10, 5}, torch::requires_grad());
  auto loss = model->forward(input).sum();
  loss.backward();
  // No gradient should have been recorded for the weight parameter.
  ASSERT_FALSE(model->parameters()["weight"].grad().defined());
}
struct AutogradTest : torch::test::SeedingFixture {
AutogradTest() {
x = torch::randn({3, 3}, torch::requires_grad());
y = torch::randn({3, 3});
z = x * y;
}
torch::Tensor x, y, z;
};
// For z = x * y (elementwise), dz/dx == y.
TEST_F(AutogradTest, CanTakeDerivatives) {
  z.backward();
  const auto& dx = x.grad();
  ASSERT_TRUE(dx.allclose(y));
}
// Backward through a zero-dim (scalar) tensor: reducing z with sum()
// and differentiating still yields dz/dx == y.
TEST_F(AutogradTest, CanTakeDerivativesOfZeroDimTensors) {
  auto scalar = z.sum();
  scalar.backward();
  ASSERT_TRUE(x.grad().allclose(y));
}
// Passing an explicit upstream gradient of 2 to backward() should scale
// the resulting gradient accordingly: dx == 2 * y.
TEST_F(AutogradTest, CanPassCustomGradientInputs) {
  const auto upstream = torch::ones({}) * 2;
  z.sum().backward(upstream);
  ASSERT_TRUE(x.grad().allclose(y * 2));
}
// A plain in-place op on a leaf tensor that requires grad must throw,
// whereas the `torch::nn::init` functions are allowed to mutate it
// (they run without recording the operation).
TEST(NNInitTest, CanInitializeTensorThatRequiresGrad) {
  auto tensor = torch::empty({3, 4}, torch::requires_grad());
  // Direct fill_() on a requires-grad leaf is rejected by autograd.
  ASSERT_THROWS_WITH(
      tensor.fill_(1),
      "a leaf Variable that requires grad has been used in an in-place operation");
  // init::ones_ succeeds; a 3x4 tensor of ones sums to 12.
  const auto total = torch::nn::init::ones_(tensor).sum();
  ASSERT_EQ(total.toCInt(), 12);
}