Fix //:module_test Conversion_MultiCUDA (#79926)

Fixes #79871

Make the `module.cpp` tests respect the change made in #78436 (no integer types in autograd).

Note that there is still a gap in the CMake test — it's unclear why it didn't fail CI before.

As far as I can tell it should be executed, because it is included in test/cpp/api/CMakeLists.txt at line 17 (as of commit 79507d2a9d).

Pull Request resolved: https://github.com/pytorch/pytorch/pull/79926
Approved by: https://github.com/soulitzer
This commit is contained in:
Sergei Vorobev 2022-06-21 23:32:16 +00:00 committed by PyTorch MergeBot
parent 6c049e62af
commit a8b0988596

View File

@ -342,18 +342,25 @@ TEST_F(ModuleTest, Conversion_MultiCUDA) {
ASSERT_EQ(parameter.device().type(), torch::Device::Type::CPU); ASSERT_EQ(parameter.device().type(), torch::Device::Type::CPU);
} }
} }
{
module->to(torch::kInt32);
for (auto& parameter : module->parameters()) {
ASSERT_EQ(parameter.dtype(), torch::kInt32);
}
}
{ {
module->to(torch::kFloat64); module->to(torch::kFloat64);
for (auto& parameter : module->parameters()) { for (auto& parameter : module->parameters()) {
ASSERT_EQ(parameter.dtype(), torch::kFloat64); ASSERT_EQ(parameter.dtype(), torch::kFloat64);
} }
} }
}
TEST_F(ModuleTest, Conversion_NoGrad_MultiCUDA) {
Linear module(128, 64);
for (auto& parameter : module->parameters()) {
parameter.requires_grad_(false);
}
{
module->to(torch::kInt32);
for (auto& parameter : module->parameters()) {
ASSERT_EQ(parameter.dtype(), torch::kInt32);
}
}
{ {
module->to(torch::Device(torch::kCUDA, 1), torch::kUInt8); module->to(torch::Device(torch::kCUDA, 1), torch::kUInt8);
for (auto& parameter : module->parameters()) { for (auto& parameter : module->parameters()) {