mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-06 12:20:52 +01:00
Fix //:module_test Conversion_MultiCUDA (#79926)
Fixes #79871
Make `module.cpp` tests respect change that was made in #78436 (no int types in autograd).
Note that there is still a gap in the CMake test -- it's unclear why it didn't fail CI before.
As far as I can tell it should be executed, because it's included here 79507d2a9d/test/cpp/api/CMakeLists.txt (L17):L17
Pull Request resolved: https://github.com/pytorch/pytorch/pull/79926
Approved by: https://github.com/soulitzer
This commit is contained in:
parent
6c049e62af
commit
a8b0988596
|
|
@ -342,18 +342,25 @@ TEST_F(ModuleTest, Conversion_MultiCUDA) {
|
|||
ASSERT_EQ(parameter.device().type(), torch::Device::Type::CPU);
|
||||
}
|
||||
}
|
||||
{
|
||||
module->to(torch::kInt32);
|
||||
for (auto& parameter : module->parameters()) {
|
||||
ASSERT_EQ(parameter.dtype(), torch::kInt32);
|
||||
}
|
||||
}
|
||||
{
|
||||
module->to(torch::kFloat64);
|
||||
for (auto& parameter : module->parameters()) {
|
||||
ASSERT_EQ(parameter.dtype(), torch::kFloat64);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(ModuleTest, Conversion_NoGrad_MultiCUDA) {
|
||||
Linear module(128, 64);
|
||||
for (auto& parameter : module->parameters()) {
|
||||
parameter.requires_grad_(false);
|
||||
}
|
||||
{
|
||||
module->to(torch::kInt32);
|
||||
for (auto& parameter : module->parameters()) {
|
||||
ASSERT_EQ(parameter.dtype(), torch::kInt32);
|
||||
}
|
||||
}
|
||||
{
|
||||
module->to(torch::Device(torch::kCUDA, 1), torch::kUInt8);
|
||||
for (auto& parameter : module->parameters()) {
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user