Tensor construction codemod(ResizeLike) - 3/7 (#15122)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/15122

Codemod generated with clangr shard mode, 25 files per diff.
Motivation: https://github.com/pytorch/pytorch/pull/12407

Reviewed By: dzhulgakov

Differential Revision: D13419643

fbshipit-source-id: 65b5a037b94d458b944d51f790ba2829db1fb530
Authored by Jerry Zhang on 2018-12-14 02:05:15 -08:00, committed by Facebook Github Bot
parent 78bf1a9065
commit fb8487d708
10 changed files with 35 additions and 44 deletions
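
The change applied throughout this diff is mechanical: the two-step idiom of fetching an output with Output(i) and then calling ResizeLike on it is replaced by the single Output(i, sizes, options) overload, so the output tensor is constructed with its shape and dtype up front. Below is a minimal sketch of the before/after pattern, assuming the Caffe2 operator headers; the CopyLikeOp name and its copy body are illustrative only and are not part of this PR.

// Sketch only (not part of this diff): a hypothetical operator showing the
// construction pattern that this codemod introduces.
#include <utility>
#include "caffe2/core/operator.h"

namespace caffe2 {

template <typename T, class Context>
class CopyLikeOp final : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  template <class... Args>
  explicit CopyLikeOp(Args&&... args)
      : Operator<Context>(std::forward<Args>(args)...) {}

  bool RunOnDevice() override {
    const auto& X = Input(0);
    // Before the codemod: fetch the output, then resize it to match the input.
    //   auto* Y = Output(0);
    //   Y->ResizeLike(X);
    // After the codemod: construct the output with its sizes and dtype in one call.
    auto* Y = Output(0, X.sizes(), at::dtype<T>());
    // Copy the input into the freshly constructed output.
    context_.template CopySameDevice<T>(
        X.numel(), X.template data<T>(), Y->template mutable_data<T>());
    return true;
  }
};

} // namespace caffe2

Both spellings appear in the hunks below; the codemod only touches the construction site and leaves the surrounding logic unchanged.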

View File

@@ -70,8 +70,7 @@ class MPIReduceOp final : public Operator<Context> {
bool RunOnDevice() override {
MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(0).comm();
auto& input = Input(1);
- auto* output = Output(0);
- output->ResizeLike(input);
+ auto* output = Output(0, input.sizes(), at::dtype<T>());
MPI_CHECK(MPI_Reduce(
const_cast<T*>(input.template data<T>()),
output->template mutable_data<T>(),
@@ -123,8 +122,7 @@ class MPIAllreduceOp final : public Operator<Context> {
bool RunOnDevice() override {
MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(0).comm();
auto& input = Input(1);
- auto* output = Output(0);
- output->ResizeLike(input);
+ auto* output = Output(0, input.sizes(), at::dtype<T>());
void* source;
if (output->template mutable_data<T>() == input.template data<T>()) {
// We are doing in-place call. Special case handling.

View File

@@ -68,8 +68,8 @@ class CuDNNActivationOp final : public CuDNNActivationOpBase {
template <typename T>
bool DoRunWithType() {
const auto& X = Input(0);
- auto* Y = Output(0);
- Y->ResizeLike(X);
+ auto* Y = Output(0, X.sizes(), at::dtype<T>());
if (X.numel() == 0) {
Y->template mutable_data<T>();
return true;
@@ -107,8 +107,8 @@ class CuDNNActivationGradientOp final : public CuDNNActivationOpBase {
bool DoRunWithType() {
const auto& Y = Input(0);
const auto& dY = Input(1);
- auto* dX = Output(0);
- dX->ResizeLike(Y);
+ auto* dX = Output(0, Y.sizes(), at::dtype<T>());
if (Y.numel() == 0) {
dX->template mutable_data<T>();
return true;

View File

@@ -54,8 +54,8 @@ template <>
bool AffineChannelGradientOp<float, CPUContext>::RunOnDeviceWithOrderNCHW() {
const auto& dY = Input(0);
const auto& scale = is_learnable_ ? Input(2) : Input(1);
- auto* dX = Output(0);
- dX->ResizeLike(dY);
+ auto* dX = Output(0, dY.sizes(), at::dtype<float>());
const int N = dY.dim32(0);
const int C = dY.dim32(1);
const int HxW = dY.numel() / (N * C);
@@ -75,10 +75,9 @@ bool AffineChannelGradientOp<float, CPUContext>::RunOnDeviceWithOrderNCHW() {
if (is_learnable_) {
const auto& X = Input(1);
const float* X_data = X.data<float>();
- auto* dscale = Output(1);
- auto* dbias = Output(2);
- dscale->ResizeLike(scale);
- dbias->ResizeLike(scale);
+ auto* dscale = Output(1, scale.sizes(), at::dtype<float>());
+ auto* dbias = Output(2, scale.sizes(), at::dtype<float>());
AffineChannelScaleBiasBackwardNCHW<float>(
N,
C,
@@ -95,8 +94,8 @@ template <>
bool AffineChannelGradientOp<float, CPUContext>::RunOnDeviceWithOrderNHWC() {
const auto& dY = Input(0);
const auto& scale = is_learnable_ ? Input(2) : Input(1);
- auto* dX = Output(0);
- dX->ResizeLike(dY);
+ auto* dX = Output(0, dY.sizes(), at::dtype<float>());
const int ndim = dY.dim();
const int C = dY.dim32(ndim - 1);
const int rows = dY.numel() / C;
@@ -115,10 +114,9 @@ bool AffineChannelGradientOp<float, CPUContext>::RunOnDeviceWithOrderNHWC() {
const float* X_data = X.data<float>();
const int N = X.dim32(0);
const int HxW = rows / N;
- auto* dscale = Output(1);
- auto* dbias = Output(2);
- dscale->ResizeLike(scale);
- dbias->ResizeLike(scale);
+ auto* dscale = Output(1, scale.sizes(), at::dtype<float>());
+ auto* dbias = Output(2, scale.sizes(), at::dtype<float>());
AffineChannelScaleBiasBackwardNHWC<float>(
N,
C,

View File

@@ -32,18 +32,17 @@ class AffineChannelOp final : public Operator<Context> {
const auto& X = Input(0);
const auto& scale = Input(1);
const auto& bias = Input(2);
- auto* Y = Output(0);
if (is_learnable_) {
- CAFFE_ENFORCE_NE(
-     Y,
-     &X,
+ CAFFE_ENFORCE(
+     !IsInputOutputAlias(0, 0),
"In-place affine_channel_op is not supported when "
"is_learnable = true.");
}
const int N = X.dim32(0);
const int C = X.dim32(1);
const int HxW = X.numel() / (N * C);
- Y->ResizeLike(X);
+ auto* Y = Output(0, X.sizes(), at::dtype<T>());
math::AffineChannel<T, Context, StorageOrder::NCHW>(
N,
C,
@@ -60,11 +59,10 @@ class AffineChannelOp final : public Operator<Context> {
const auto& X = Input(0);
const auto& scale = Input(1);
const auto& bias = Input(2);
- auto* Y = Output(0);
if (is_learnable_) {
- CAFFE_ENFORCE_NE(
-     Y,
-     &X,
+ CAFFE_ENFORCE(
+     !IsInputOutputAlias(0, 0),
"In-place affine_channel_op is not supported when "
"is_learnable = true.");
}
@@ -72,7 +70,8 @@ class AffineChannelOp final : public Operator<Context> {
const int N = X.dim32(0);
const int C = X.dim32(ndim - 1);
const int HxW = X.numel() / (N * C);
- Y->ResizeLike(X);
+ auto* Y =
+     Output(0, X.sizes(), at::dtype<T>());
math::AffineChannel<T, Context, StorageOrder::NHWC>(
N,
C,

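A side effect of constructing Y later in AffineChannelOp above: the in-place guard can no longer compare the output pointer against &X before Y exists, so CAFFE_ENFORCE_NE(Y, &X, ...) becomes CAFFE_ENFORCE(!IsInputOutputAlias(0, 0), ...), using the OperatorBase helper that reports whether input 0 and output 0 share the same blob. A condensed sketch of the resulting flow, pieced together from the hunks above:

// Reject in-place execution before the output tensor is created.
if (is_learnable_) {
  CAFFE_ENFORCE(
      !IsInputOutputAlias(0, 0),
      "In-place affine_channel_op is not supported when "
      "is_learnable = true.");
}
// Only now is the output constructed with the input's shape and dtype.
auto* Y = Output(0, X.sizes(), at::dtype<T>());
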
View File

@@ -79,8 +79,7 @@ bool BatchBoxCoxOp<CPUContext>::DoRunWithType() {
auto N = data.size(0);
auto D = data.size_from_dim(1);
- auto* output = Output(0);
- output->ResizeLike(Input(DATA));
+ auto* output = Output(0, Input(DATA).sizes(), at::dtype<T>());
auto* output_ptr = output->template mutable_data<T>();
if (data.numel() <= 0) {

View File

@@ -58,7 +58,6 @@ class BatchGatherGradientOp final : public Operator<Context> {
auto& data = Input(DATA);
auto& indices = Input(INDICES);
auto& grad = Input(GRAD);
- auto* output = Output(0);
// ONNX allows negative axis to index from the back, valid range: [-r, r].
int axis = axis_;
@@ -76,7 +75,7 @@ class BatchGatherGradientOp final : public Operator<Context> {
"batch gather outer dimensions should match");
}
- output->ResizeLike(data);
+ auto* output = Output(0, data.sizes(), at::dtype<TData>());
TData* out_data = output->template mutable_data<TData>();
if (data.numel() <= 0) {
return true;

View File

@@ -72,12 +72,12 @@ class BatchMomentsGradientOp final : public Operator<Context> {
const auto& dmu = Input(0);
const auto& dvar = Input(1);
const auto& X = Input(2);
- auto* dX = Output(0);
const int ndim = X.dim();
const int N = X.dim32(0);
const int C = order_ == StorageOrder::NCHW ? X.dim32(1) : X.dim32(ndim - 1);
const int HxW = X.numel() / (N * C);
- dX->ResizeLike(X);
+ auto* dX = Output(0, X.sizes(), at::dtype<T>());
const T* dmu_data = dmu.template data<T>();
const T* dvar_data = dvar.template data<T>();
const T* X_data = X.template data<T>();

View File

@@ -91,7 +91,6 @@ bool BBoxTransformOp<float, CPUContext>::RunOnDevice() {
const auto& roi_in = Input(0);
const auto& delta_in = Input(1);
const auto& iminfo_in = Input(2);
- auto* box_out = Output(0);
const int box_dim = rotated_ ? 5 : 4;
const int N = roi_in.dim32(0);
@@ -132,7 +131,7 @@ bool BBoxTransformOp<float, CPUContext>::RunOnDevice() {
Eigen::Map<const ERArrXXf> iminfo(
iminfo_in.data<float>(), iminfo_in.size(0), iminfo_in.size(1));
- box_out->ResizeLike(delta_in);
+ auto* box_out = Output(0, delta_in.sizes(), at::dtype<float>());
Eigen::Map<ERArrXXf> new_boxes(
box_out->template mutable_data<float>(),
box_out->dim32(0),

View File

@@ -64,8 +64,8 @@ class BisectPercentileOp final : public Operator<Context> {
const float* raw_data = raw.template data<float>();
// Output
- auto* pct = Output(PCT);
- pct->ResizeLike(raw);
+ auto* pct = Output(PCT, raw.sizes(), at::dtype<float>());
float* pct_output = pct->template mutable_data<float>();
// Compute percentile for each raw feature value

View File

@@ -20,7 +20,7 @@ class BooleanMaskLengthsOp final : public Operator<Context> {
bool DoRunWithType() {
auto& lengths = Input(0);
auto& mask = Input(1);
- auto* lengthsOut = Output(0);
CAFFE_ENFORCE(lengths.dim() == 1);
CAFFE_ENFORCE(mask.dim() == 1);
const auto* lengthsPtr = lengths.template data<T>();
@@ -28,7 +28,7 @@ class BooleanMaskLengthsOp final : public Operator<Context> {
auto totalLength =
std::accumulate(lengthsPtr, lengthsPtr + lengths.numel(), 0);
CAFFE_ENFORCE(mask.numel() == totalLength);
- lengthsOut->ResizeLike(lengths);
+ auto* lengthsOut = Output(0, lengths.sizes(), at::dtype<T>());
auto* lengthsOutPtr = lengthsOut->template mutable_data<T>();
int p = 0;
for (int i = 0; i < lengths.numel(); ++i) {
@@ -365,8 +365,7 @@ bool SequenceMaskOp<CPUContext>::DoRunWithType() {
window_centers = &Input(1);
}
- auto* output = Output(0);
- output->ResizeLike(*input);
+ auto* output = Output(0, input->sizes(), at::dtype<T>());
const auto canonical_axis = input->canonical_axis_index(axis_);