Mirror of https://github.com/zebrajr/pytorch.git (synced 2025-12-06 12:20:52 +01:00)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/54229

Because caffe2's add uses Eigen for adds with broadcasting, which is not well supported by OSS PyTorch, it is easier to keep `c2_add_out` internal for now. Caffe2 does use the MKL add when the input dims of A and B are the same and no broadcasting is needed.

Reviewed By: bertmaher

Differential Revision: D27036279

fbshipit-source-id: 49f0ec5407ea1f641896f054cad2283faed81687
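As a rough illustration of the dispatch described in the summary, here is a minimal, self-contained sketch: the fast elementwise path is taken only when A and B have identical dims, and anything else falls back to a broadcasting add. `mkl_style_add` is a hypothetical stand-in for the MKL-backed path, and the Eigen-backed broadcasting fallback is only indicated by a comment; this is not the actual `c2_add_out` code.

#include <cstddef>
#include <vector>

// Hypothetical stand-in for the fast same-shape path (MKL-backed in caffe2).
void mkl_style_add(const float* a, const float* b, float* out, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i) {
    out[i] = a[i] + b[i];
  }
}

// Sketch of the dispatch: use the fast elementwise kernel only when the
// input shapes match exactly, otherwise fall back to a broadcasting add.
void add_dispatch(const std::vector<int>& A_dims,
                  const std::vector<int>& B_dims,
                  const float* A, const float* B, float* out) {
  if (A_dims == B_dims) {
    // Shapes match exactly: no broadcasting needed, take the fast path.
    std::size_t n = 1;
    for (int d : A_dims) {
      n *= static_cast<std::size_t>(d);
    }
    mkl_style_add(A, B, out, n);
  } else {
    // Shapes differ: caffe2 falls back to an Eigen-based broadcasting add
    // here (omitted in this sketch).
  }
}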
36 lines · 1008 B · C++
#ifndef CAFFE2_OPERATORS_ELEMENTWISE_OPS_UTILS_H_
#define CAFFE2_OPERATORS_ELEMENTWISE_OPS_UTILS_H_

#include <tuple>
#include <vector>

#include "caffe2/core/context.h"
#include "caffe2/core/tensor.h"

namespace caffe2 {
namespace elementwise_ops_utils {

// Decomposes the shapes of A and B into (pre, n, post) sizes for caffe2's
// legacy axis-based broadcasting, where B is broadcast against A starting
// at `axis`.
TORCH_API std::tuple<size_t, size_t, size_t>
ComputeLegacyBroadcastSizes(const Tensor& A, const Tensor& B, int axis);

// Returns the output dims of broadcasting A_dims with B_dims
// (right-aligned, NumPy-style).
TORCH_API std::vector<int> ComputeBinaryBroadcastForwardDims(
    const c10::ArrayRef<int>& A_dims,
    const c10::ArrayRef<int>& B_dims);

// Computes which axes of the broadcast output must be reduced over to obtain
// gradients matching A_dims and B_dims, respectively.
TORCH_API void ComputeBinaryBroadcastBackwardAxes(
    const std::vector<int>& A_dims,
    const std::vector<int>& B_dims,
    std::vector<int>* A_axes,
    std::vector<int>* B_axes);

// Computes the dims the broadcast output should be reduced/reshaped to when
// forming gradients for A and B, respectively.
TORCH_API void ComputeBinaryBroadcastBackwardDims(
    const std::vector<int>& A_dims,
    const std::vector<int>& B_dims,
    std::vector<int>* A_back_dims,
    std::vector<int>* B_back_dims);

} // namespace elementwise_ops_utils
} // namespace caffe2

#endif // CAFFE2_OPERATORS_ELEMENTWISE_OPS_UTILS_H_
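For reference, the broadcast shape rule these helpers are built around can be sketched as follows. This is a standalone illustration of right-aligned (NumPy-style) shape inference, assumed to match what ComputeBinaryBroadcastForwardDims returns; it is not the caffe2 implementation.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// Illustrative sketch of binary broadcast shape inference: walk both shapes
// from the trailing dimension, treat missing dims as 1, and require each pair
// of dims to be equal or to contain a 1.
std::vector<int> broadcast_forward_dims(const std::vector<int>& A_dims,
                                        const std::vector<int>& B_dims) {
  const std::size_t ndim = std::max(A_dims.size(), B_dims.size());
  std::vector<int> out(ndim, 1);
  for (std::size_t i = 0; i < ndim; ++i) {
    const int a = i < A_dims.size() ? A_dims[A_dims.size() - 1 - i] : 1;
    const int b = i < B_dims.size() ? B_dims[B_dims.size() - 1 - i] : 1;
    assert(a == b || a == 1 || b == 1);  // otherwise the shapes don't broadcast
    out[ndim - 1 - i] = std::max(a, b);
  }
  return out;
}

// Example: broadcasting {2, 3, 4} with {3, 1} yields {2, 3, 4}.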