[inductor] enable test_lowmem_dropout1_dynamic_shapes (#94884)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/94884
Approved by: https://github.com/ezyang, https://github.com/albanD
Author: Nikita Karetnikov
Date:   2023-02-15 06:39:50 +01:00
Committed-By: PyTorch MergeBot
Parent: 5e1de31548
Commit: 8c44ae2f5d

3 changed files with 4 additions and 4 deletions

@@ -5621,7 +5621,6 @@ test_skips = {
     "test_cudnn_rnn_dynamic_shapes": ("cuda",),
     "test_grid_sampler_2d_dynamic_shapes": ("cpu", "cuda"),
     "test_kwargs_dynamic_shapes": ("cpu",),
-    "test_lowmem_dropout1_dynamic_shapes": ("cpu", "cuda"),
     "test_lowmem_dropout2_dynamic_shapes": ("cpu", "cuda"),
     "test_nll_loss_forward_dynamic_shapes": ("cpu", "cuda"),
     "test_rand_like_deterministic_dynamic_shapes": ("cpu", "cuda"),

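Only this first hunk touches the test suite: removing the entry takes test_lowmem_dropout1_dynamic_shapes out of the skip table on both CPU and CUDA. The C++ hunks below are presumably what let the test pass, by teaching the custom-autograd VariableInfo to carry symbolic rather than concrete sizes.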

@@ -12,7 +12,7 @@
 VariableInfo::VariableInfo(const Variable& var)
     : layout(var.layout()),
       device(var.device()),
       scalar_type(var.scalar_type()),
-      size(var.sizes().vec()),
+      size(var.sym_sizes().vec()),
       requires_grad(var.requires_grad()),
       is_empty(false) {}
@@ -23,7 +23,7 @@ Variable VariableInfo::zeros(at::OptionalDeviceGuard& device_guard) const {
     // Return undefined tensor.
     return at::Tensor();
   } else {
-    return at::zeros(
+    return at::zeros_symint(
         size, at::TensorOptions(scalar_type).device(device).layout(layout));
   }
 }

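For context, here is a minimal sketch (not part of the commit) of the API difference this change relies on: at::zeros takes concrete int64_t sizes, while the generated at::zeros_symint overload accepts c10::SymInt sizes, so dimensions that are only known symbolically under dynamic shapes survive the call. The helper name below is hypothetical; only at::zeros_symint and the options plumbing mirror the hunk above.

#include <ATen/ATen.h>
#include <c10/core/SymInt.h>
#include <vector>

// Hypothetical helper: build a zeros tensor from possibly-symbolic sizes.
at::Tensor zeros_from_syms(
    const std::vector<c10::SymInt>& size,
    at::ScalarType scalar_type,
    at::Device device,
    at::Layout layout) {
  // at::zeros would need IntArrayRef (concrete int64_t values); the
  // symint variant keeps symbolic dimensions intact instead of forcing
  // a specialization to constants.
  return at::zeros_symint(
      size, at::TensorOptions(scalar_type).device(device).layout(layout));
}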

@@ -1,6 +1,7 @@
 #pragma once

 #include <ATen/core/ivalue.h>
+#include <c10/core/SymInt.h>
 #include <c10/util/flat_hash_map.h>
 #include <c10/util/irange.h>
 #include <torch/csrc/autograd/function.h>
@@ -163,7 +164,7 @@ struct TORCH_API VariableInfo {
   at::Layout layout = at::Layout::Strided;
   at::Device device = at::kCPU;
   at::ScalarType scalar_type = at::kFloat;
-  std::vector<int64_t> size;
+  std::vector<c10::SymInt> size;
   bool requires_grad;
   bool is_empty;
 };
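Finally, a small sketch of what the new member stores: c10::SymInt holds either a plain int64_t or a handle to a symbolic expression produced by dynamic-shape tracing, so std::vector<c10::SymInt> can record shapes that are not compile-time constants. The expect_int() accessor used below is an assumption; exact accessor names vary across releases.

#include <c10/core/SymInt.h>
#include <cassert>
#include <vector>

int main() {
  // SymInt is implicitly constructible from int64_t, so concrete
  // sizes keep working unchanged.
  std::vector<c10::SymInt> size{2, 3};
  // For a concrete SymInt, expect_int() returns the wrapped int64_t
  // (it is an error to call it on a genuinely symbolic value).
  assert(size[0].expect_int() == 2);
  assert(size[1].expect_int() == 3);
  return 0;
}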