pytorch/test/cpp/api/misc.cpp
Kurt Mohler df7c059428 Throw error if torch.set_deterministic(True) is called with nondeterministic CuBLAS config (#41377)
Summary:
On CUDA >= 10.2, the `CUBLAS_WORKSPACE_CONFIG` environment variable must be set to either `:4096:8` or `:16:8` for cuBLAS to behave deterministically when CUDA streams are used. This PR adds logic to `torch.set_deterministic(True)` to raise an error if CUDA >= 10.2 and this environment variable is not set to one of those values.
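
A minimal sketch of the resulting behavior from the Python side (`torch.set_deterministic` and the accepted values are as described above; the exact error type and message are assumptions):

```python
import os
import torch

# Set one of the two accepted workspace configurations before any cuBLAS
# work happens (":16:8" is the smaller alternative).
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"

torch.set_deterministic(True)  # succeeds on CUDA >= 10.2

# With the variable unset, or set to any other value, the same call is
# expected to raise an error instead (sketch):
#   del os.environ["CUBLAS_WORKSPACE_CONFIG"]
#   torch.set_deterministic(True)  # -> RuntimeError on CUDA >= 10.2
```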

Issue https://github.com/pytorch/pytorch/issues/15359

Pull Request resolved: https://github.com/pytorch/pytorch/pull/41377

Reviewed By: malfet

Differential Revision: D22758459

Pulled By: ezyang

fbshipit-source-id: 4b96f1e9abf85d94ba79140fd927bbd0c05c4522
2020-08-05 12:42:24 -07:00

#include <gtest/gtest.h>

#include <torch/torch.h>

#include <test/cpp/api/support.h>

#include <functional>

using namespace torch::test;

// Helpers for the warning tests below: the first two should each warn at
// most once per call site, the third warns on every call.
void torch_warn_once_A() {
  TORCH_WARN_ONCE("warn once");
}

void torch_warn_once_B() {
  TORCH_WARN_ONCE("warn something else once");
}

void torch_warn() {
  TORCH_WARN("warn multiple times");
}
TEST(UtilsTest, WarnOnce) {
  // TORCH_WARN_ONCE emits each message only once, no matter how many times
  // the call site runs.
  {
    WarningCapture warnings;

    torch_warn_once_A();
    torch_warn_once_A();
    torch_warn_once_B();
    torch_warn_once_B();

    ASSERT_EQ(count_substr_occurrences(warnings.str(), "warn once"), 1);
    ASSERT_EQ(
        count_substr_occurrences(warnings.str(), "warn something else once"),
        1);
  }
  // TORCH_WARN, by contrast, emits the message on every call.
  {
    WarningCapture warnings;

    torch_warn();
    torch_warn();
    torch_warn();

    ASSERT_EQ(
        count_substr_occurrences(warnings.str(), "warn multiple times"), 3);
  }
}
TEST(NoGradTest, SetsGradModeCorrectly) {
  torch::manual_seed(0);
  torch::NoGradGuard guard;
  torch::nn::Linear model(5, 2);
  auto x = torch::randn({10, 5}, torch::requires_grad());
  auto y = model->forward(x);
  torch::Tensor s = y.sum();

  // Mimicking Python API behavior: under NoGradGuard the output has no
  // grad_fn, so backward() must fail.
  ASSERT_THROWS_WITH(
      s.backward(),
      "element 0 of tensors does not require grad and does not have a grad_fn");
}
// Fixture providing z = x * y with x requiring grad, so that dz/dx == y
// elementwise.
struct AutogradTest : torch::test::SeedingFixture {
  AutogradTest() {
    x = torch::randn({3, 3}, torch::requires_grad());
    y = torch::randn({3, 3});
    z = x * y;
  }
  torch::Tensor x, y, z;
};

TEST_F(AutogradTest, CanTakeDerivatives) {
  z.backward(torch::ones_like(z));
  ASSERT_TRUE(x.grad().allclose(y));
}

TEST_F(AutogradTest, CanTakeDerivativesOfZeroDimTensors) {
  z.sum().backward();
  ASSERT_TRUE(x.grad().allclose(y));
}

TEST_F(AutogradTest, CanPassCustomGradientInputs) {
  // Scaling the incoming gradient by 2 scales dz/dx by 2 as well.
  z.sum().backward(torch::ones({}) * 2);
  ASSERT_TRUE(x.grad().allclose(y * 2));
}