pytorch/modules/detectron/group_spatial_softmax_op.h
Jerry Zhang 890568a018 Tensor reinitialization codemod - 5/5 (#15884)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/15884

Codemod generated with clangr in shard mode, 25 files per diff.
To eliminate partially initialized Tensors, we split the initialization of
local Tensor variables into two steps: first declare an uninitialized Tensor,
then call `ReinitializeTensor` to initialize it.

Motivation: https://github.com/pytorch/pytorch/pull/12407
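
A minimal sketch of the before/after pattern, assuming a hypothetical operator
member `scratch_` (the name and shape are illustrative; `ReinitializeTensor`
itself is the real helper declared in caffe2/core/tensor.h):

  // Before: the member eagerly binds to a device at construction time.
  Tensor scratch_{Context::GetDeviceType()};

  // After: declare the member uninitialized ...
  Tensor scratch_;

  // ... and allocate it at the point of first use, e.g. in RunOnDevice():
  ReinitializeTensor(
      &scratch_, {N, D}, at::dtype<float>().device(Context::GetDeviceType()));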

Reviewed By: hyuen

Differential Revision: D13586737

fbshipit-source-id: dc8e49e9f29505b8898bb19f84c1a983f2d811ab
2019-01-10 16:32:26 -08:00

/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GROUP_SPATIAL_SOFTMAX_OP_H_
#define GROUP_SPATIAL_SOFTMAX_OP_H_

#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"

namespace caffe2 {
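
// Applies a softmax independently to each group of num_classes_ channels at
// every spatial location of an NCHW input. The CPU path is deliberately
// unimplemented; the kernels live in the module's corresponding .cu file.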
template <typename T, class Context>
class GroupSpatialSoftmaxOp final : public Operator<Context> {
 public:
  GroupSpatialSoftmaxOp(const OperatorDef& operator_def, Workspace* ws)
      : Operator<Context>(operator_def, ws),
        num_classes_(this->template GetSingleArgument<int>("num_classes", 81)),
        order_(StringToStorageOrder(
            this->template GetSingleArgument<string>("order", "NCHW"))) {
    CAFFE_ENFORCE_EQ(
        order_, StorageOrder::NCHW, "Only NCHW order is supported right now.");
  }
  USE_OPERATOR_CONTEXT_FUNCTIONS;

  bool RunOnDevice() override {
    // No CPU implementation for now
    CAFFE_NOT_IMPLEMENTED;
  }

 protected:
  int num_classes_;
  StorageOrder order_;
};
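
// Gradient of GroupSpatialSoftmaxOp; as with the forward op, only the device
// (CUDA) implementation is provided.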
template <typename T, class Context>
class GroupSpatialSoftmaxGradientOp final : public Operator<Context> {
 public:
  GroupSpatialSoftmaxGradientOp(const OperatorDef& def, Workspace* ws)
      : Operator<Context>(def, ws),
        num_classes_(this->template GetSingleArgument<int>("num_classes", 81)),
        order_(StringToStorageOrder(
            this->template GetSingleArgument<string>("order", "NCHW"))) {
    CAFFE_ENFORCE_EQ(
        order_, StorageOrder::NCHW, "Only NCHW order is supported right now.");
  }
  USE_OPERATOR_CONTEXT_FUNCTIONS;

  bool RunOnDevice() override {
    // No CPU implementation for now
    CAFFE_NOT_IMPLEMENTED;
  }

 protected:
  int num_classes_;
  StorageOrder order_;
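  // Scratch buffer for intermediate probability sums. Deliberately declared
  // uninitialized here; per the codemod described above, the device
  // implementation is expected to allocate it lazily via ReinitializeTensor.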
  Tensor sum_probs_;
};
} // namespace caffe2

#endif // GROUP_SPATIAL_SOFTMAX_OP_H_