Summary: In TorchScript and C++ extensions we currently advocate a mix of `torch::` and `at::` namespace usage. In the C++ frontend I had instead exported all symbols from `at::` and some from `c10::` into the `torch::` namespace. This is far, far easier for users to understand, and also avoids bugs around creating tensors vs. variables. The same should from now on be true for the TorchScript C++ API (for running and loading models) and all C++ extensions. Note that since we're just talking about typedefs, this change does not break any existing code. Once this lands I will update stuff in `pytorch/tutorials` too.

zdevito ezyang gchanan

Pull Request resolved: https://github.com/pytorch/pytorch/pull/13523
Differential Revision: D12942787
Pulled By: goldsborough
fbshipit-source-id: 76058936bd8707b33d9e5bbc2d0705fc3d820763
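A minimal sketch (not part of the test file below) of what the single-namespace rule looks like in user code, assuming the usual `<torch/torch.h>` umbrella header; the `main` function and variable names are illustrative only, and the comments about `at::` factories restate the tensor-vs-variable concern from the summary rather than behavior verified here:

#include <torch/torch.h>

int main() {
  // Everything the user needs lives in torch::, so code never has to guess
  // whether a symbol comes from at:: or c10::.
  torch::Tensor x = torch::randn({3, 3}, torch::requires_grad());
  torch::Tensor loss = (x * x).sum();
  loss.backward();  // x.grad() is now populated

  // Because the exports are just typedefs, spelling the ATen type explicitly
  // still compiles; torch::Tensor and at::Tensor refer to the same type.
  at::Tensor alias = x;

  // The bug class the summary mentions: reaching for at:: factory functions
  // directly could hand back a plain tensor without autograd metadata, whereas
  // the torch:: factories return variables that track gradients.
  return 0;
}

The test file below exercises exactly this `torch::`-only surface.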
#include <gtest/gtest.h>

#include <torch/csrc/utils/tempfile.h>
#include <torch/nn/init.h>
#include <torch/nn/modules/linear.h>
#include <torch/types.h>
#include <torch/utils.h>

#include <test/cpp/api/support.h>

TEST(NoGradTest, SetsGradModeCorrectly) {
  torch::manual_seed(0);
  torch::NoGradGuard guard;
  torch::nn::Linear model(5, 2);
  auto x = torch::randn({10, 5}, torch::requires_grad());
  auto y = model->forward(x);
  torch::Tensor s = y.sum();

  // With a NoGradGuard in scope, backward() must not populate parameter gradients.
  s.backward();
  ASSERT_FALSE(model->parameters()["weight"].grad().defined());
}

struct AutogradTest : torch::test::SeedingFixture {
  AutogradTest() {
    x = torch::randn({3, 3}, torch::requires_grad());
    y = torch::randn({3, 3});
    z = x * y;
  }
  torch::Tensor x, y, z;
};

TEST_F(AutogradTest, CanTakeDerivatives) {
  z.backward();
  ASSERT_TRUE(x.grad().allclose(y));
}

TEST_F(AutogradTest, CanTakeDerivativesOfZeroDimTensors) {
  z.sum().backward();
  ASSERT_TRUE(x.grad().allclose(y));
}

TEST_F(AutogradTest, CanPassCustomGradientInputs) {
  z.sum().backward(torch::ones({}) * 2);
  ASSERT_TRUE(x.grad().allclose(y * 2));
}

TEST(NNInitTest, CanInitializeTensorThatRequiresGrad) {
  auto tensor = torch::empty({3, 4}, torch::requires_grad());
  // An in-place fill_ on a leaf variable that requires grad must throw...
  ASSERT_THROWS_WITH(
      tensor.fill_(1),
      "a leaf Variable that requires grad "
      "has been used in an in-place operation");
  // ...whereas initializing it through nn::init::ones_ succeeds.
  ASSERT_EQ(torch::nn::init::ones_(tensor).sum().item<int32_t>(), 12);
}

TEST(TempFileTest, MatchesExpectedPattern) {
  torch::utils::TempFile pattern = torch::utils::make_tempfile("test-pattern-");
  ASSERT_NE(pattern.name.find("test-pattern-"), std::string::npos);
}