Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/36984

Follow the LOG(WARNING) format for C++-side warnings so they play well with larger services, especially when using glog. I need to hook into glog internals a bit to override FILE/LINE without converting the whole thing to macros, but this appears stable across glog versions.

Note: this also changes caffe2_log_level to warning by default. I think that's a much better default when compiling without glog (info might be reasonable too).

With glog output, stderr capture no longer works in tests, so we use c10-level warning capture instead.

Test Plan: Run the unit test in both glog and non-glog build modes.

glog:
```
W0416 12:06:49.778215 3311666 exception_test.cpp:23] Warning: I'm a warning (function TestBody)
```

no-glog:
```
[W exception_test.cpp:23] Warning: I'm a warning (function TestBody)
```

Reviewed By: ilia-cher

Differential Revision: D21151351

fbshipit-source-id: fa926d9e480db5ff696990dad3d80f79ef79f24a
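For context, the c10-level capture pattern the tests below rely on works like this: `WarningCapture` scopes a warning handler so `TORCH_WARN` output lands in a buffer instead of stderr/glog, and `count_substr_occurrences` checks the captured text (both come from `test/cpp/api/support.h`). A minimal usage sketch, with a hypothetical test name:

```cpp
#include <gtest/gtest.h>
#include <test/cpp/api/support.h> // WarningCapture, count_substr_occurrences
#include <torch/torch.h>

using namespace torch::test;

TEST(WarningCaptureExample, CapturesInsteadOfStderr) {
  WarningCapture warnings; // installs a c10 warning handler for this scope

  TORCH_WARN("I'm a warning"); // routed to the capture, not stderr/glog

  // The captured text contains the message exactly once.
  ASSERT_EQ(count_substr_occurrences(warnings.str(), "I'm a warning"), 1);
}
```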
#include <gtest/gtest.h>

#include <torch/torch.h>

#include <test/cpp/api/support.h>

#include <functional>

using namespace torch::test;

// Helpers that emit warnings: TORCH_WARN_ONCE should fire only on the first
// call per call site, while TORCH_WARN fires on every call.
void torch_warn_once_A() {
  TORCH_WARN_ONCE("warn once");
}

void torch_warn_once_B() {
  TORCH_WARN_ONCE("warn something else once");
}

void torch_warn() {
  TORCH_WARN("warn multiple times");
}

TEST(UtilsTest, WarnOnce) {
  {
    WarningCapture warnings;

    torch_warn_once_A();
    torch_warn_once_A();
    torch_warn_once_B();
    torch_warn_once_B();

    // Each TORCH_WARN_ONCE call site appears exactly once in the capture.
    ASSERT_EQ(count_substr_occurrences(warnings.str(), "warn once"), 1);
    ASSERT_EQ(
        count_substr_occurrences(warnings.str(), "warn something else once"),
        1);
  }
  {
    WarningCapture warnings;

    torch_warn();
    torch_warn();
    torch_warn();

    // TORCH_WARN is not deduplicated: three calls, three messages.
    ASSERT_EQ(
        count_substr_occurrences(warnings.str(), "warn multiple times"), 3);
  }
}

TEST(NoGradTest, SetsGradModeCorrectly) {
  torch::manual_seed(0);
  torch::NoGradGuard guard;
  torch::nn::Linear model(5, 2);
  auto x = torch::randn({10, 5}, torch::requires_grad());
  auto y = model->forward(x);
  torch::Tensor s = y.sum();

  // Mimicking Python API behavior: backward() fails because no graph was
  // recorded under the NoGradGuard.
  ASSERT_THROWS_WITH(
      s.backward(),
      "element 0 of tensors does not require grad and does not have a grad_fn");
}

// Fixture shared by the autograd tests: z = x * y, with only x requiring grad.
struct AutogradTest : torch::test::SeedingFixture {
  AutogradTest() {
    x = torch::randn({3, 3}, torch::requires_grad());
    y = torch::randn({3, 3});
    z = x * y;
  }
  torch::Tensor x, y, z;
};

TEST_F(AutogradTest, CanTakeDerivatives) {
  z.backward(torch::ones_like(z));
  // d(x * y)/dx == y when the incoming gradient is all ones.
  ASSERT_TRUE(x.grad().allclose(y));
}

TEST_F(AutogradTest, CanTakeDerivativesOfZeroDimTensors) {
  z.sum().backward();
  ASSERT_TRUE(x.grad().allclose(y));
}

TEST_F(AutogradTest, CanPassCustomGradientInputs) {
  // Scaling the incoming gradient by 2 scales dx by 2 as well.
  z.sum().backward(torch::ones({}) * 2);
  ASSERT_TRUE(x.grad().allclose(y * 2));
}