Mirror of https://github.com/zebrajr/pytorch.git — synced 2025-12-07 12:21:27 +01:00.
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/15084. Codemod generated with clangr shard mode, 25 files per diff; motivation: https://github.com/pytorch/pytorch/pull/12407. Reviewed By: ezyang. Differential Revision: D13419711. fbshipit-source-id: dd2b740c3f13d8087085bafc5571aaf908d1af42
40 lines · 1.0 KiB · C++
#ifndef CAFFE2_OPERATORS_NORMALIZE_L1_OP_H_
#define CAFFE2_OPERATORS_NORMALIZE_L1_OP_H_

#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"

namespace caffe2 {
template <typename T, class Context>
|
|
class NormalizeL1Op final : public Operator<Context> {
|
|
public:
|
|
USE_OPERATOR_CONTEXT_FUNCTIONS;
|
|
USE_SIMPLE_CTOR_DTOR(NormalizeL1Op)
|
|
|
|
bool RunOnDevice() override {
|
|
const auto& x = Input(0);
|
|
|
|
const auto* xData = x.template data<T>();
|
|
auto* y = Output(0, x.sizes(), at::dtype<T>());
|
|
auto* yData = y->template mutable_data<T>();
|
|
|
|
const auto canonical_axis = x.canonical_axis_index(
|
|
this->template GetSingleArgument<int>("axis", -1));
|
|
const int m = x.dim32(canonical_axis);
|
|
const int n = x.numel() / m;
|
|
const int sf = x.size_from_dim(canonical_axis + 1);
|
|
DoNormalize(xData, yData, m, n, sf);
|
|
return true;
|
|
}
|
|
|
|
private:
|
|
void
|
|
DoNormalize(const T* xData, T* yData, const int m, const int n, const int sf);
|
|
};
} // namespace caffe2

#endif // CAFFE2_OPERATORS_NORMALIZE_L1_OP_H_