Clean up some unused variable warnings (#73151)

Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/73151

Test Plan: Sandcastle

Reviewed By: malfet

Differential Revision: D34365492

fbshipit-source-id: d9eaa2e21aacd8ff0b97152e590d83f682df4667
(cherry picked from commit ca0efc53db)
This commit is contained in:
Richard Barnes 2022-02-22 13:11:44 -08:00 committed by PyTorch MergeBot
parent e9c64168d9
commit 600f4bf20c
3 changed files with 8 additions and 11 deletions

View File

@@ -311,7 +311,7 @@ Tensor _bincount_cuda_template(
weights.options().layout_opt(),
weights.options().device_opt(),
weights.options().pinned_memory_opt());
auto ret = cuda::CUDA_tensor_histogram<weights_t, input_t, true>(
cuda::CUDA_tensor_histogram<weights_t, input_t, true>(
output, self, weights, nbins, minvalue, maxvalue);
} else {
output = native::zeros(
@@ -320,7 +320,7 @@ Tensor _bincount_cuda_template(
c10::nullopt /* layout */,
DeviceType::CUDA,
c10::nullopt /* pin_memory */);
auto ret = cuda::CUDA_tensor_histogram<int64_t, input_t, false>(
cuda::CUDA_tensor_histogram<int64_t, input_t, false>(
output, self, weights, nbins, minvalue, maxvalue);
}
return output;
@@ -374,7 +374,7 @@ Tensor _histc_cuda_template(
#endif
TORCH_CHECK(minvalue < maxvalue, "max must be larger than min");
auto ret = cuda::CUDA_tensor_histogram<input_t, input_t, false>(
cuda::CUDA_tensor_histogram<input_t, input_t, false>(
output, self, Tensor(), nbins, minvalue, maxvalue);
return output;
}

View File

@@ -456,7 +456,6 @@ C10_LAUNCH_BOUNDS_1(256) // 256 performs better then 1024
__global__ void upsample_gen2d_aa_out_frame(
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 4> idata,
PackedTensorAccessor64<scalar_t, 4> odata,
const InterpFilter & interp_filter) {
@@ -550,7 +549,6 @@ C10_LAUNCH_BOUNDS_1(256) // 256 performs better then 1024
__global__ void upsample_gen2d_aa_backward_out_frame(
const accscalar_t height_scale,
const accscalar_t width_scale,
const bool align_corners,
PackedTensorAccessor64<scalar_t, 4> idata,
const PackedTensorAccessor64<scalar_t, 4> odata,
const InterpFilter & interp_filter) {
@@ -672,8 +670,6 @@ static void upsample_gen2d_aa_out_cuda_template(
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input.size(0);
int channels = input.size(1);
int input_height = input.size(2);
int input_width = input.size(3);
@@ -735,7 +731,7 @@ static void upsample_gen2d_aa_out_cuda_template(
<<<grid,
block,
shmem_size,
stream>>>(height_scale, width_scale, align_corners, idata, odata, interp_filter);
stream>>>(height_scale, width_scale, idata, odata, interp_filter);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
@@ -766,8 +762,6 @@ static void upsample_gen2d_aa_backward_out_cuda_template(
int output_height = output_size[0];
int output_width = output_size[1];
int nbatch = input_size[0];
int channels = input_size[1];
int input_height = input_size[2];
int input_width = input_size[3];
@@ -819,7 +813,7 @@ static void upsample_gen2d_aa_backward_out_cuda_template(
<<<grid,
block,
shmem_size,
stream>>>(height_scale, width_scale, align_corners, idata, odata, interp_filter);
stream>>>(height_scale, width_scale, idata, odata, interp_filter);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}

View File

@@ -119,6 +119,9 @@ void device_reduce<at::Half>(
int N,
Tensor* buffer,
CUDAContext* context) {
(void)N; // Suppress unused variable warning
(void)buffer; // Suppress unused variable warning
(void)context; // Suppress unused variable warning
#if TORCH_HIP_VERSION >= 210
auto buffer_size = 1;