pytorch/caffe2/operators/conv_transpose_op_mobile.h
Sebastian Messmer 6706e9af19 Make C10_MOBILE consistent with how feature macros are usually used (#17481)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/17481

Usually, feature macros are either defined or undefined and checked accordingly.
C10_MOBILE was a weird special case: it was always defined, but either to 1 or to 0.

This caused a lot of confusion for me: when I tried to disable something in the mobile build
with an #ifdef check, it was also disabled in the server build, because the macro was defined
there too. I also found a place in the existing code base that made the same wrong assumption
and used the macro incorrectly; see https://fburl.com/y4icohts
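
A minimal illustration of the difference (this snippet is mine, not part of the patch):

// Old convention: C10_MOBILE was always defined, to 1 on mobile and to 0 on
// server builds, so only a value check behaved correctly:
#if C10_MOBILE
// mobile-only code
#endif

// The trap: an #ifdef check is true on both builds under the old convention,
// because the macro is defined (to 0) on server as well:
#ifdef C10_MOBILE
// code intended to be mobile-only, but compiled everywhere
#endif

// New convention (this diff): C10_MOBILE is defined only for mobile builds,
// so the usual feature-macro idiom works as expected:
#ifdef C10_MOBILE
// mobile-only code
#endif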

Reviewed By: dzhulgakov

Differential Revision: D14214825

fbshipit-source-id: f3a155b6d43d334e8839e2b2e3c40ed2c773eab6
2019-02-27 17:57:51 -08:00

#ifndef CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_H_
#define CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_H_

#include "caffe2/core/common.h"

#ifdef C10_MOBILE

#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_transpose_unpool_op_base.h"

namespace caffe2 {
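
// NCHW-only implementation of the ConvTranspose operator specialized for
// mobile CPUs; only built when C10_MOBILE is defined (see the guard above).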
template <typename T, class Context>
class ConvTransposeMobileOp final : public ConvTransposeUnpoolBase<Context> {
 public:
  USE_CONV_TRANSPOSE_UNPOOL_BASE_FUNCTIONS(Context);
  ConvTransposeMobileOp(const OperatorDef& operator_def, Workspace* ws)
      : ConvTransposeUnpoolBase<Context>(operator_def, ws) {
    OPERATOR_NEEDS_FEATURE(
        order_ == StorageOrder::NCHW,
        "Only NCHW order is supported right now.");
    OPERATOR_NEEDS_FEATURE(
        this->pad_l() == 0, "operator does not handle row width padding");
    OPERATOR_NEEDS_FEATURE(
        this->pad_r() == 0, "operator does not handle row width padding");
    OPERATOR_NEEDS_FEATURE(this->stride_w() <= 4, "stride width must be <= 4");
  }

  bool RunOnDeviceWithOrderNCHW() override;
  bool RunOnDeviceWithOrderNHWC() override;

 private:
  // We store numThreads per-worker tiles of Y and numThreads per-worker
  // threadBuffers for the gemm output, laid out in that order.
  Tensor threadBuffer_{CPU};
  // Input: X, W, b
  // Output: Y
  INPUT_TAGS(INPUT, FILTER, BIAS);
};

} // namespace caffe2

#endif // C10_MOBILE

#endif // CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_H_
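
For reference, a minimal hypothetical sketch of driving this operator through the regular Caffe2 operator interface. It assumes the class is registered for the "ConvTranspose" op type under the "MOBILE" engine in the matching .cc file; the file name, blob names, and shapes below are illustrative only, and if the requested engine is unavailable Caffe2 falls back to the default ConvTranspose implementation.

// conv_transpose_mobile_sketch.cc -- illustrative only, not part of the tree.
#include "caffe2/core/blob.h"
#include "caffe2/core/workspace.h"
#include "caffe2/utils/proto_utils.h"

int main() {
  using namespace caffe2;

  Workspace ws;

  // Inputs follow INPUT_TAGS(INPUT, FILTER, BIAS): X is NCHW, the filter is
  // (C_in, C_out, kH, kW), and the bias has one entry per output channel.
  // Values are left uninitialized; this only sketches the wiring.
  auto* X = BlobGetMutableTensor(ws.CreateBlob("X"), CPU);
  X->Resize(1, 8, 16, 16);
  X->mutable_data<float>();
  auto* W = BlobGetMutableTensor(ws.CreateBlob("W"), CPU);
  W->Resize(8, 4, 4, 4);
  W->mutable_data<float>();
  auto* b = BlobGetMutableTensor(ws.CreateBlob("b"), CPU);
  b->Resize(4);
  b->mutable_data<float>();

  // Arguments respect the constraints enforced in the constructor above:
  // NCHW order, zero left/right padding, stride width <= 4.
  OperatorDef def = CreateOperatorDef(
      "ConvTranspose",
      "deconv",
      {"X", "W", "b"},
      {"Y"},
      {MakeArgument<int>("kernel", 4),
       MakeArgument<int>("stride", 2),
       MakeArgument<int>("pad", 0),
       MakeArgument<std::string>("order", "NCHW")});
  // Assumed registration: ConvTransposeMobileOp under the MOBILE engine.
  def.set_engine("MOBILE");

  return ws.RunOperatorOnce(def) ? 0 : 1;
}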