pytorch/caffe2/operators/elementwise_ops_utils.h
Xiaomeng Yang 3a34f443c5 Separate reduce functions from math (#16929)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/16929

Separate CPU reduce functions from math

i-am-not-moving-c2-to-c10

Reviewed By: houseroad

Differential Revision: D13999469

fbshipit-source-id: bd628b15a6e3c1f04cc62aefffb0110690e1c0d1
2019-02-13 17:50:47 -08:00

#ifndef CAFFE2_OPERATORS_ELEMENTWISE_OPS_UTILS_H_
#define CAFFE2_OPERATORS_ELEMENTWISE_OPS_UTILS_H_

#include <tuple>
#include <vector>

#include "caffe2/core/context.h"
#include "caffe2/core/tensor.h"

namespace caffe2 {
namespace elementwise_ops_utils {

// Sizes (pre, n, post) used for legacy axis-based broadcasting of B against A.
CAFFE2_API std::tuple<size_t, size_t, size_t>
ComputeLegacyBroadcastSizes(const Tensor& A, const Tensor& B, int axis);

// Output shape produced by broadcasting A_dims with B_dims.
CAFFE2_API std::vector<int> ComputeBinaryBroadcastForwardDims(
    const std::vector<int>& A_dims,
    const std::vector<int>& B_dims);

// Axes over which each input's gradient must be reduced in the backward pass.
CAFFE2_API void ComputeBinaryBroadcastBackwardAxes(
    const std::vector<int>& A_dims,
    const std::vector<int>& B_dims,
    std::vector<int>* A_axes,
    std::vector<int>* B_axes);

// Dims of each input aligned to the broadcast output rank for the backward pass.
CAFFE2_API void ComputeBinaryBroadcastBackwardDims(
    const std::vector<int>& A_dims,
    const std::vector<int>& B_dims,
    std::vector<int>* A_back_dims,
    std::vector<int>* B_back_dims);

} // namespace elementwise_ops_utils
} // namespace caffe2

#endif // CAFFE2_OPERATORS_ELEMENTWISE_OPS_UTILS_H_
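
For context, below is a minimal standalone sketch of the shape inference that ComputeBinaryBroadcastForwardDims is declared to perform, assuming NumPy-style broadcasting (shapes aligned from the trailing dimension, with size-1 dimensions broadcasting against any size). The helper name BroadcastForwardDims and its body are illustrative only, not the Caffe2 implementation.

#include <algorithm>
#include <cassert>
#include <vector>

// Illustrative only: assumed NumPy-style broadcast shape inference,
// not the Caffe2 implementation declared in the header above.
std::vector<int> BroadcastForwardDims(
    const std::vector<int>& A_dims,
    const std::vector<int>& B_dims) {
  const size_t ndim = std::max(A_dims.size(), B_dims.size());
  std::vector<int> C_dims(ndim);
  for (size_t i = 0; i < ndim; ++i) {
    // Walk both shapes from the trailing dimension; missing leading dims act as 1.
    const int a = i < A_dims.size() ? A_dims[A_dims.size() - 1 - i] : 1;
    const int b = i < B_dims.size() ? B_dims[B_dims.size() - 1 - i] : 1;
    assert(a == b || a == 1 || b == 1);  // dims must be broadcast-compatible
    C_dims[ndim - 1 - i] = std::max(a, b);
  }
  return C_dims;
}

int main() {
  // Broadcasting {2, 3, 4} with {3, 1} yields {2, 3, 4}.
  const std::vector<int> out = BroadcastForwardDims({2, 3, 4}, {3, 1});
  assert((out == std::vector<int>{2, 3, 4}));
  return 0;
}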