Merge pull request #27816 from abhishek-gola:reduce_layer
Extended Reduce layer support in new DNN engine #27816

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There is an accuracy test, performance test and test data in the opencv_extra repository, if applicable. The patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
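For reviewers who want to exercise the new Reduce path end to end, a minimal sketch follows; the model file name and input shape are placeholders, not part of this PR:

```cpp
#include <opencv2/dnn.hpp>
#include <iostream>

int main()
{
    // Hypothetical ONNX model whose graph contains a ReduceSum/ReduceMean/...
    // node; the importer change in this PR maps such nodes to the "Reduce2" layer.
    cv::dnn::Net net = cv::dnn::readNetFromONNX("reduce_model.onnx");

    // Dummy 4D input; the shape is an assumption for this example.
    std::vector<int> inputShape = {1, 3, 4, 5};
    cv::Mat blob(inputShape, CV_32F, cv::Scalar(1.0));
    net.setInput(blob);

    cv::Mat out = net.forward();
    std::cout << "output dims: " << out.dims
              << ", total elements: " << out.total() << std::endl;
    return 0;
}
```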
This commit is contained in:
parent 66c9f569bd
commit 93385c6cdf
@@ -439,6 +439,30 @@ CV__DNN_INLINE_NS_BEGIN
        static Ptr<ReduceLayer> create(const LayerParams& params);
    };

    class CV_EXPORTS Reduce2Layer : public Layer
    {
    public:
        enum class ReduceType
        {
            MAX,
            MIN,
            MEAN,
            SUM,
            L1,
            L2,
            PROD,
            SUM_SQUARE,
            LOG_SUM,
            LOG_SUM_EXP
        };
        ReduceType reduce_type;
        bool keepdims;
        bool noop_with_empty_axes;
        std::vector<int> axes;

        static Ptr<Reduce2Layer> create(const LayerParams& params);
    };

    class CV_EXPORTS SoftmaxLayer : public Layer
    {
    public:
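As a quick illustration of the interface added above (a sketch using only what the declaration exposes; the parameter names match the ones parsed in reduce2_layer.cpp further down):

```cpp
// Sketch: instantiate a standalone Reduce2 layer from LayerParams.
cv::dnn::LayerParams params;
params.set("reduce", "mean");               // one of max/min/mean/sum/l1/l2/...
params.set("keepdims", true);               // keep reduced dims as size 1
params.set("noop_with_empty_axes", false);  // empty axes => reduce over all dims
int axes[] = {1, 2};
params.set("axes", cv::dnn::DictValue::arrayInt(axes, 2));

cv::Ptr<cv::dnn::Reduce2Layer> layer = cv::dnn::Reduce2Layer::create(params);
```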
@@ -782,12 +806,12 @@ CV__DNN_INLINE_NS_BEGIN
    class CV_EXPORTS ActivationLayer : public Layer
    {
    public:
        virtual void forwardSlice(const float* src, float* dst, int len,
                                  size_t outPlaneSize, int cn0, int cn1) const {}
        virtual void forwardSlice(const int* src, const int* lut, int* dst, int len,
                                  size_t outPlaneSize, int cn0, int cn1) const {}
        virtual void forwardSlice(const int8_t* src, const int8_t* lut, int8_t* dst, int len,
                                  size_t outPlaneSize, int cn0, int cn1) const {}
        virtual void forwardSlice(const float*, float*, int,
                                  size_t, int, int) const {}
        virtual void forwardSlice(const int*, const int*, int*, int,
                                  size_t, int, int) const {}
        virtual void forwardSlice(const int8_t*, const int8_t*, int8_t*, int,
                                  size_t, int, int) const {}
    };

    class CV_EXPORTS ReLULayer : public ActivationLayer
@@ -118,6 +118,7 @@ void initializeLayerFactory()
    CV_DNN_REGISTER_LAYER_CLASS(Det, DetLayer);
    CV_DNN_REGISTER_LAYER_CLASS(BitShift, BitShiftLayer);
    CV_DNN_REGISTER_LAYER_CLASS(GridSample, GridSampleLayer);
    CV_DNN_REGISTER_LAYER_CLASS(Reduce2, Reduce2Layer);

    CV_DNN_REGISTER_LAYER_CLASS(Convolution, ConvolutionLayer);
    CV_DNN_REGISTER_LAYER_CLASS(Deconvolution, DeconvolutionLayer);
@@ -269,9 +269,9 @@ void Layer::getTypes(const std::vector<MatType>& inputs,
    if (preferableTarget == DNN_TARGET_CUDA_FP16 || preferableTarget == DNN_TARGET_CUDA)
        CV_CheckTypeEQ(input, CV_32F, "");
    else if (preferableTarget == DNN_TARGET_OPENCL_FP16)
        CV_CheckType(input, input == CV_16F || input == CV_8S, "");
        CV_CheckType(input, input == CV_16F || input == CV_8S || input == CV_64F, "");
    else
        CV_CheckType(input, input == CV_32F || input == CV_8S, "");
        CV_CheckType(input, input == CV_32F || input == CV_64F || input == CV_8S, "");
}

outputs.assign(requiredOutputs, inputs[0]);
@@ -224,12 +224,28 @@ public:
    {
        const Mat &src = inputs[i];
        Mat &dst = outputs[i];
        CV_Assert_N(src.size == dst.size, src.type() == dst.type(),
                    src.isContinuous(), dst.isContinuous(), src.type() == CV_32F);
        CV_Assert_N(src.size == dst.size, src.isContinuous(), dst.isContinuous());

        const int nstripes = getNumThreads();
        PBody body(func, src, dst, nstripes);
        parallel_for_(Range(0, nstripes), body, nstripes);
        if (src.type() == CV_32F && dst.type() == CV_32F)
        {
            const int nstripes = getNumThreads();
            PBody body(func, src, dst, nstripes);
            parallel_for_(Range(0, nstripes), body, nstripes);
            continue;
        }

        if (src.type() == CV_64F && dst.type() == CV_64F)
        {
            Mat src_f, dst_f(dst.size, CV_32F);
            src.convertTo(src_f, CV_32F);
            const int nstripes = getNumThreads();
            PBody body(func, src_f, dst_f, nstripes);
            parallel_for_(Range(0, nstripes), body, nstripes);
            dst_f.convertTo(dst, CV_64F);
            continue;
        }

        CV_Error(Error::StsUnsupportedFormat, "ElementWiseLayer: unsupported input/output type; expected CV_32F or CV_64F.");
    }
}
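A design note on the ElementWiseLayer hunk above: the CV_64F branch does not add a double kernel. It narrows to CV_32F, runs the existing float kernel, and widens the result back, so double inputs are computed at float precision. A standalone sketch of that round-trip (plain OpenCV calls, not the layer internals):

```cpp
// Sketch of the CV_64F path: compute in float, convert back to double.
cv::Mat src64(4, 4, CV_64F, cv::Scalar(0.123456789012345));
cv::Mat src32, dst64;
src64.convertTo(src32, CV_32F);  // narrow: ~7 significant digits survive
// ... the float activation kernel would run on src32 here ...
src32.convertTo(dst64, CV_64F);  // widen the (float-precision) result back
```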
modules/dnn/src/layers/reduce2_layer.cpp (new file, 570 lines)
@@ -0,0 +1,570 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2025, BigVision LLC, all rights reserved.
// Third party copyrights are property of their respective owners.

#include "../precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>
#include "../net_impl.hpp"
#include "../op_cann.hpp"
#include "layers_common.hpp"
#include "../dnn_common.hpp"

namespace cv {
namespace dnn {

template <typename T> struct WorkType { using type = T; };
template <> struct WorkType<hfloat> { using type = float; };
template <> struct WorkType<bfloat> { using type = float; };
template <> struct WorkType<int8_t> { using type = int; };
template <> struct WorkType<uint8_t> { using type = int; };

class Reduce2LayerImpl CV_FINAL : public Reduce2Layer
{
public:
    static const char* reduceTypeToString(ReduceType t)
    {
        switch (t) {
        case ReduceType::MAX: return "MAX";
        case ReduceType::MIN: return "MIN";
        case ReduceType::MEAN: return "MEAN";
        case ReduceType::SUM: return "SUM";
        case ReduceType::L1: return "L1";
        case ReduceType::L2: return "L2";
        case ReduceType::PROD: return "PROD";
        case ReduceType::SUM_SQUARE: return "SUM_SQUARE";
        case ReduceType::LOG_SUM: return "LOG_SUM";
        case ReduceType::LOG_SUM_EXP: return "LOG_SUM_EXP";
        }
        return "UNKNOWN";
    }

    Reduce2LayerImpl(const LayerParams& params) {
        setParamsFrom(params);

        CV_Assert(params.has("reduce"));
        String reduce_type_str = toLowerCase(params.get<String>("reduce"));
        if (reduce_type_str == "max")
            reduce_type = ReduceType::MAX;
        else if (reduce_type_str == "min")
            reduce_type = ReduceType::MIN;
        else if (reduce_type_str == "mean")
            reduce_type = ReduceType::MEAN;
        else if (reduce_type_str == "sum")
            reduce_type = ReduceType::SUM;
        else if (reduce_type_str == "sum_square")
            reduce_type = ReduceType::SUM_SQUARE;
        else if (reduce_type_str == "l1")
            reduce_type = ReduceType::L1;
        else if (reduce_type_str == "l2")
            reduce_type = ReduceType::L2;
        else if (reduce_type_str == "log_sum")
            reduce_type = ReduceType::LOG_SUM;
        else if (reduce_type_str == "log_sum_exp")
            reduce_type = ReduceType::LOG_SUM_EXP;
        else if (reduce_type_str == "prod")
            reduce_type = ReduceType::PROD;
        else
            CV_Error(Error::StsBadArg, "Unknown reduce type \"" + reduce_type_str + "\"");

        keepdims = params.get<bool>("keepdims", true);
        noop_with_empty_axes = params.get<bool>("noop_with_empty_axes", false);

        if (params.has("axes")) {
            auto param_axes = params.get("axes");
            int num_axes = param_axes.size();
            axes.resize(num_axes);
            for (int i = 0; i < num_axes; ++i)
                axes[i] = param_axes.get<int>(i);
        }
    }

    bool dynamicOutputShapes() const CV_OVERRIDE
    {
        if (inputs.size() < 2)
            return false;
        Net::Impl* netimpl_ = getNetImpl(this);
        if (!netimpl_)
            return true;
        return !netimpl_->isConstArg(inputs[1]);
    }

    bool getMemoryShapes(const std::vector<MatShape> &inps,
                         const int /*requiredOutputs*/,
                         std::vector<MatShape> &outs,
                         std::vector<MatShape> &/*internals*/) const CV_OVERRIDE
    {
        CV_Assert(!inps.empty());
        outs.resize(1);
        const MatShape& inp0 = inps[0];
        if (inp0.empty()) {
            outs[0] = MatShape();
            return false;
        }

        std::vector<int> axes;
        if (!this->axes.empty()) {
            axes = this->axes;
        } else if (inps.size() >= 2) {
            Net::Impl* netimpl_ = getNetImpl(this);
            if (netimpl_ && netimpl_->isConstArg(inputs[1])) {
                Mat axesTensor = netimpl_->argTensor(inputs[1]);
                tensorToIntVec(axesTensor, axes);
            }
        }

        if (axes.empty()) {
            if (noop_with_empty_axes) {
                outs[0] = inp0;
            } else {
                if (keepdims) {
                    MatShape shape_out = inp0;
                    std::fill(shape_out.begin(), shape_out.end(), 1);
                    outs[0] = shape_out;
                } else {
                    outs[0] = MatShape(1, 1);
                }
            }
            return false;
        }

        std::vector<int> norm_axes = axes;
        for (size_t i = 0; i < norm_axes.size(); ++i)
            norm_axes[i] = normalize_axis(norm_axes[i], inp0);

        auto shape_output_ = inp0;
        for (int axis : norm_axes) shape_output_[axis] = -1;
        MatShape shape_output;
        for (size_t i = 0; i < shape_output_.size(); ++i) {
            if (shape_output_[i] == -1) {
                if (keepdims) shape_output.push_back(1);
            } else {
                shape_output.push_back(shape_output_[i]);
            }
        }
        if (shape_output.empty()) shape_output.push_back(1);
        outs[0] = shape_output;
        return false;
    }
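To make the shape logic above concrete, here is a standalone sketch of the same rule with worked examples in the comments (an illustration, not the layer code itself):

```cpp
#include <vector>

// Output shape of a reduction, assuming axes are already normalized to [0, rank).
std::vector<int> reducedShape(std::vector<int> inp, const std::vector<int>& axes, bool keepdims)
{
    for (int a : axes)
        inp[a] = -1;                        // mark reduced axes
    std::vector<int> out;
    for (int d : inp) {
        if (d != -1) out.push_back(d);      // unreduced dims pass through
        else if (keepdims) out.push_back(1);
    }
    if (out.empty()) out.push_back(1);      // scalar result is shape [1]
    return out;
}

// reducedShape({2,3,4}, {1}, true)      == {2,1,4}
// reducedShape({2,3,4}, {1}, false)     == {2,4}
// reducedShape({2,3,4}, {0,1,2}, false) == {1}
```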
    virtual bool supportBackend(int backendId) CV_OVERRIDE {
        return backendId == DNN_BACKEND_OPENCV;
    }

    virtual void getTypes(const std::vector<MatType>& inputs,
                          const int requiredOutputs,
                          const int requiredInternals,
                          std::vector<MatType>& outputs,
                          std::vector<MatType>& internals) const CV_OVERRIDE
    {
        CV_CheckType(inputs[0], inputs[0] == CV_32F || inputs[0] == CV_64F || inputs[0] == CV_32S || inputs[0] == CV_64S ||
                     inputs[0] == CV_16F || inputs[0] == CV_16BF || inputs[0] == CV_8U || inputs[0] == CV_8S || inputs[0] == CV_Bool, "");
        outputs.assign(1, inputs[0]);
    }

    template <typename T, typename WT, typename AccT>
    class ReduceBase {
    public:
        using dtype_input = T;
        using work_type = WT;
        using acc_type = AccT;
        ReduceBase(size_t n, const T& init) : n_(n), accumulator_(static_cast<AccT>(static_cast<WT>(init))) {}
        AccT finalize() const { return accumulator_; }
    protected:
        size_t n_;
        AccT accumulator_;
    };

    template <typename T, typename WT, typename AccT>
    class ReduceMin : public ReduceBase<T, WT, AccT> {
    public:
        using Base = ReduceBase<T, WT, AccT>;
        ReduceMin(size_t n, const WT& init) : Base(n, static_cast<T>(init)) { this->accumulator_ = static_cast<AccT>(init); }
        inline void update(const WT& a) { this->accumulator_ = a > static_cast<WT>(this->accumulator_) ? this->accumulator_ : static_cast<AccT>(a); }
    };

    template <typename T, typename WT, typename AccT>
    class ReduceMax : public ReduceBase<T, WT, AccT> {
    public:
        using Base = ReduceBase<T, WT, AccT>;
        ReduceMax(size_t n, const WT& init) : Base(n, static_cast<T>(init)) { this->accumulator_ = static_cast<AccT>(init); }
        inline void update(const WT& a) { this->accumulator_ = a > static_cast<WT>(this->accumulator_) ? static_cast<AccT>(a) : this->accumulator_; }
    };

    template <typename T, typename WT, typename AccT>
    class ReduceSum : public ReduceBase<T, WT, AccT> {
    public:
        using Base = ReduceBase<T, WT, AccT>;
        ReduceSum(size_t n, const WT&) : Base(n, static_cast<T>(0)) { this->accumulator_ = AccT(0); }
        inline void update(const WT& a) { this->accumulator_ += static_cast<AccT>(a); }
    };

    template <typename T, typename WT, typename AccT>
    class ReduceMean : public ReduceSum<T, WT, AccT> {
    public:
        using Base = ReduceSum<T, WT, AccT>;
        ReduceMean(size_t n, const WT& init) : Base(n, init) {}
        inline AccT finalize() const { return this->accumulator_ / static_cast<AccT>(this->n_); }
    };

    template <typename T, typename WT, typename AccT>
    class ReduceSumSquare : public ReduceBase<T, WT, AccT> {
    public:
        using Base = ReduceBase<T, WT, AccT>;
        ReduceSumSquare(size_t n, const WT&) : Base(n, static_cast<T>(0)) { this->accumulator_ = AccT(0); }
        inline void update(const WT& a) { this->accumulator_ += static_cast<AccT>(a) * static_cast<AccT>(a); }
    };

    template <typename T, typename WT, typename AccT>
    class ReduceL1 : public ReduceBase<T, WT, AccT> {
    public:
        using Base = ReduceBase<T, WT, AccT>;
        ReduceL1(size_t n, const WT&) : Base(n, static_cast<T>(0)) { this->accumulator_ = AccT(0); }
        inline void update(const WT& a) { this->accumulator_ += static_cast<AccT>(a >= WT(0) ? a : -a); }
    };

    template <typename T, typename WT, typename AccT>
    class ReduceL2 : public ReduceBase<T, WT, AccT> {
    public:
        using Base = ReduceBase<T, WT, AccT>;
        ReduceL2(size_t n, const WT&) : Base(n, static_cast<T>(0)) { this->accumulator_ = AccT(0); }
        inline void update(const WT& a) { this->accumulator_ += static_cast<AccT>(a) * static_cast<AccT>(a); }
        inline AccT finalize() const { return static_cast<AccT>(std::sqrt(this->accumulator_)); }
    };

    template <typename T, typename WT, typename AccT>
    class ReduceProd : public ReduceBase<T, WT, AccT> {
    public:
        using Base = ReduceBase<T, WT, AccT>;
        ReduceProd(size_t n, const WT&) : Base(n, static_cast<T>(1)) { this->accumulator_ = static_cast<AccT>(WT(1)); }
        inline void update(const WT& a) { this->accumulator_ = static_cast<AccT>(this->accumulator_) * static_cast<AccT>(a); }
    };

    template <typename T, typename WT, typename AccT>
    class ReduceLogSum : public ReduceBase<T, WT, AccT> {
    public:
        using Base = ReduceBase<T, WT, AccT>;
        ReduceLogSum(size_t n, const WT&) : Base(n, static_cast<T>(0)) { this->accumulator_ = AccT(0); }
        inline void update(const WT& a) { this->accumulator_ += static_cast<AccT>(a); }
        inline AccT finalize() const { return static_cast<AccT>(std::log(this->accumulator_)); }
    };

    template <typename T, typename WT, typename AccT>
    class ReduceLogSumExp : public ReduceBase<T, WT, AccT> {
    public:
        using Base = ReduceBase<T, WT, AccT>;
        ReduceLogSumExp(size_t n, const WT&) : Base(n, static_cast<T>(0)) { this->accumulator_ = AccT(0); }
        inline void update(const WT& a) { this->accumulator_ += static_cast<AccT>(std::exp(static_cast<AccT>(a))); }
        inline AccT finalize() const { return static_cast<AccT>(std::log(this->accumulator_)); }
    };
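The reduction functors above all follow the same construct/update/finalize protocol; a simplified standalone sketch of that pattern (one type parameter instead of three, so deliberately not the exact classes in this file):

```cpp
#include <cstdio>

// Minimal illustration of the init/update/finalize protocol.
struct MeanAcc {
    size_t n;
    double acc = 0.0;
    explicit MeanAcc(size_t n_) : n(n_) {}
    void update(double a) { acc += a; }
    double finalize() const { return acc / (double)n; }
};

int main()
{
    const double data[] = {1.0, 2.0, 3.0, 4.0};
    MeanAcc m(sizeof(data) / sizeof(data[0]));
    for (double v : data)
        m.update(v);
    std::printf("mean = %g\n", m.finalize());  // prints: mean = 2.5
    return 0;
}
```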
    template <typename Op>
    class ReduceAllInvoker : public ParallelLoopBody {
    public:
        using dtype = typename Op::dtype_input;
        using WT = typename Op::work_type;
        const Mat& src;
        Mat& dst;
        int n_reduce;
        int loop_size;
        int total;
        int cost_per_thread;

        ReduceAllInvoker(const Mat& src_, Mat& dst_) : src(src_), dst(dst_) {
            auto shape_src = shape(src);
            n_reduce = std::accumulate(shape_src.begin(), shape_src.end(), 1, std::multiplies<int>());
            loop_size = n_reduce;
            total = 1;
            cost_per_thread = 1;
        }

        void operator()(const Range& r) const CV_OVERRIDE {
            int start = r.start;
            int end = r.end;
            const dtype* p_src = src.ptr<const dtype>();
            dtype* p_dst = dst.ptr<dtype>();
            for (int i = start; i < end; ++i) {
                Op accumulator(n_reduce, static_cast<WT>(*p_src));
                for (int l = 0; l < loop_size; ++l) {
                    accumulator.update(static_cast<WT>(p_src[l]));
                }
                auto val = accumulator.finalize();
                p_dst[i] = saturate_cast<dtype>(static_cast<double>(val));
            }
        }
    };

    template <typename Op>
    class ReduceInvoker : public ParallelLoopBody {
    public:
        using dtype = typename Op::dtype_input;
        using WT = typename Op::work_type;
        const Mat& src;
        Mat& dst;
        std::vector<int> reduced_axes;
        int n_reduce;
        int loop_size;
        int last_reduced_dim;
        int last_reduced_step;
        std::vector<int> projected_steps;
        int last_unreduced_dim;
        int last_unreduced_step;
        std::vector<int> unprojected_steps;
        int total;
        int cost_per_thread;

        ReduceInvoker(const Mat& src_, Mat& dst_, std::vector<int> axes_) : src(src_), dst(dst_), reduced_axes(axes_) {
            auto shape_src = shape(src);

            auto steps_src = shape_src;
            steps_src[steps_src.size() - 1] = 1;
            for (int i = (int)steps_src.size() - 2; i >= 0; --i)
                steps_src[i] = steps_src[i + 1] * shape_src[i + 1];

            size_t projection_size = 1;
            for (auto axis : reduced_axes) projection_size *= shape_src[axis];
            n_reduce = (int)projection_size;

            last_reduced_dim = shape_src[reduced_axes.back()];
            last_reduced_step = steps_src[reduced_axes.back()];
            loop_size = last_reduced_dim * last_reduced_step;
            projection_size /= last_reduced_dim;

            int last_reduced_axis = (int)reduced_axes.size() - 1;
            if (last_reduced_axis == 0) {
                projected_steps.resize(1, 0);
            } else {
                projected_steps.resize(projection_size);
                std::vector<int> projected_indices(last_reduced_axis, 0);
                for (size_t i = 0, current_step = 0; i < projection_size; ++i) {
                    projected_steps[i] = current_step;
                    ++projected_indices[last_reduced_axis - 1];
                    current_step += steps_src[reduced_axes[last_reduced_axis - 1]];
                    for (int j = last_reduced_axis - 1; j > 0; --j) {
                        if (projected_indices[j] < shape_src[reduced_axes[j]])
                            break;
                        projected_indices[j] = 0;
                        ++projected_indices[j - 1];
                        current_step = steps_src[reduced_axes[j - 1]];
                    }
                }
            }

            std::vector<int> unreduced_axes;
            for (int i = 0; i < (int)shape_src.size(); ++i) {
                if (std::find(reduced_axes.begin(), reduced_axes.end(), i) == reduced_axes.end())
                    unreduced_axes.push_back(i);
            }
            size_t unprojection_size = 1;
            for (auto axis : unreduced_axes) unprojection_size *= shape_src[axis];
            last_unreduced_dim = shape_src[unreduced_axes.back()];
            last_unreduced_step = steps_src[unreduced_axes.back()];
            unprojection_size /= last_unreduced_dim;

            std::vector<int> unprojected_indices(unreduced_axes.size(), 0);
            unprojected_steps.reserve(unprojection_size);
            if (unprojected_indices.size() <= 1) {
                unprojected_steps.push_back(0);
            } else {
                for (size_t i = 0, current_step = 0; i < unprojection_size; ++i) {
                    unprojected_steps.push_back(current_step);
                    ++unprojected_indices[unprojected_indices.size() - 2];
                    current_step += steps_src[unreduced_axes[unreduced_axes.size() - 2]];
                    for (int j = (int)unreduced_axes.size() - 2; j > 0; --j) {
                        if (unprojected_indices[j] < shape_src[unreduced_axes[j]])
                            break;
                        unprojected_indices[j] -= shape_src[unreduced_axes[j]];
                        current_step -= shape_src[unreduced_axes[j]] * steps_src[unreduced_axes[j]];
                        ++unprojected_indices[j - 1];
                        current_step += steps_src[unreduced_axes[j - 1]];
                    }
                }
            }

            auto shape_dst = shape(dst);
            total = std::accumulate(shape_dst.begin(), shape_dst.end(), 1, std::multiplies<int>());
            cost_per_thread = (int)(projected_steps.size() * last_reduced_step);
        }
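For anyone tracing the index arithmetic in the constructor above, a worked example with illustrative values (not taken from the tests): for a contiguous src of shape [2, 3, 4], the strides are [12, 4, 1]. Reducing axes {0, 2}: axis 2 is the last reduced axis, so last_reduced_dim = 4 and last_reduced_step = 1, and the inner loop scans 4 contiguous elements; projected_steps = {0, 12} enumerates the remaining reduced axis 0; the 3 output elements walk axis 1 via last_unreduced_step = 4. A tiny sketch of the stride computation:

```cpp
// Row-major strides for shape [2, 3, 4], mirroring steps_src above.
int shp[] = {2, 3, 4};
int steps[3];
steps[2] = 1;
for (int i = 1; i >= 0; --i)
    steps[i] = steps[i + 1] * shp[i + 1];
// steps == {12, 4, 1}; reducing axes {0, 2} then gives
// projected_steps == {0, 12} and last_unreduced_step == 4.
```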
        static void run(const Mat& src, Mat& dst, std::vector<int> axes, bool noop_with_empty_axes) {
            CV_Assert(src.isContinuous());
            CV_Assert(dst.isContinuous());
            if (shape(src).empty() || (shape(src).size() == 1)) {
                ReduceAllInvoker<Op> p(src, dst);
                p(Range(0, p.total));
                return;
            }

            if (axes.empty()) {
                if (noop_with_empty_axes) {
                    const auto p_src = src.ptr<const dtype>();
                    auto p_dst = dst.ptr<dtype>();
                    std::memcpy(p_dst, p_src, sizeof(dtype) * dst.total());
                    return;
                }
                ReduceAllInvoker<Op> p(src, dst);
                double nstripes = (size_t)p.total * (size_t)p.cost_per_thread * (1 / 1024.0);
                parallel_for_(Range(0, p.total), p, nstripes);
                return;
            }

            ReduceInvoker<Op> p(src, dst, axes);
            double nstripes = (size_t)p.total * (size_t)p.cost_per_thread * (1 / 1024.0);
            parallel_for_(Range(0, p.total), p, nstripes);
        }

        void operator()(const Range& r) const CV_OVERRIDE {
            int start = r.start;
            int end = r.end;
            const dtype* p_src = src.ptr<const dtype>();
            dtype* p_dst = dst.ptr<dtype>();
            size_t main_index = start / last_unreduced_dim;
            size_t loop = start % last_unreduced_dim;
            size_t origin = unprojected_steps[main_index] + loop * last_unreduced_step;
            for (int i = start; i < end; ++i) {
                Op accumulator(n_reduce, static_cast<WT>(p_src[origin + projected_steps[0]]));
                for (auto projected_step : projected_steps) {
                    const dtype* loop_p_src = p_src + origin + projected_step;
                    for (auto l = 0; l < loop_size; l += last_reduced_step) {
                        accumulator.update(static_cast<WT>(loop_p_src[l]));
                    }
                }
                auto val = accumulator.finalize();
                p_dst[i] = saturate_cast<dtype>(static_cast<double>(val));

                ++loop;
                if (loop >= last_unreduced_dim) {
                    loop = 0;
                    ++main_index;
                    if (main_index < unprojected_steps.size())
                        origin = unprojected_steps[main_index];
                } else {
                    origin += last_unreduced_step;
                }
            }
        }
    };

    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);

        CV_Assert(!inputs.empty());
        Mat& src = inputs[0];
        std::vector<int> axes;
        if (!this->axes.empty()) {
            axes = this->axes;
        } else if (inputs.size() >= 2) {
            tensorToIntVec(inputs[1], axes);
        }

        MatShape inpShape = shape(src);
        MatShape outShape;
        if (axes.empty()) {
            if (noop_with_empty_axes) {
                outShape = inpShape;
            } else {
                if (keepdims) {
                    outShape = inpShape;
                    for (int i = 0; i < (int)outShape.size(); ++i) outShape[i] = 1;
                } else {
                    outShape.assign(1, 1);
                }
            }
        } else {
            std::vector<int> norm_axes = axes;
            for (size_t i = 0; i < norm_axes.size(); ++i)
                norm_axes[i] = normalize_axis(norm_axes[i], inpShape);
            MatShape tmp = inpShape;
            for (int a : norm_axes) tmp[a] = -1;
            for (size_t i = 0; i < tmp.size(); ++i) {
                if (tmp[i] == -1) {
                    if (keepdims) outShape.push_back(1);
                } else {
                    outShape.push_back(tmp[i]);
                }
            }
            if (outShape.empty()) outShape.push_back(1);
            axes = norm_axes;
        }

        auto kind = outputs_arr.kind();
        if (kind == _InputArray::STD_VECTOR_MAT) {
            outputs_arr.getMatVecRef()[0].fit(outShape, src.type());
        } else {
            CV_Assert(kind == _InputArray::STD_VECTOR_UMAT);
            outputs_arr.getUMatVecRef()[0].fit(outShape, src.type());
        }
        outputs_arr.getMatVector(outputs);
        Mat& dst = outputs[0];

        typeDispatch(dst.type(), src, dst, axes, noop_with_empty_axes);
    }

    virtual std::ostream& dumpAttrs(std::ostream& strm, int indent) const CV_OVERRIDE
    {
        prindent(strm, indent);
        strm << "reduce_type: \"" << reduceTypeToString(reduce_type) << "\",\n";

        prindent(strm, indent);
        strm << "keepdims: " << (keepdims ? "true" : "false") << ",\n";

        prindent(strm, indent);
        strm << "noop_with_empty_axes: " << (noop_with_empty_axes ? "true" : "false") << ",\n";

        prindent(strm, indent);
        strm << "axes: [";
        for (size_t i = 0; i < axes.size(); ++i) {
            if (i > 0) strm << ", ";
            strm << axes[i];
        }
        strm << "]\n";
        return strm;
    }

    template <typename T, typename WT, typename AccT, typename... Args>
    inline void opDispatch(Args&&... args) {
        switch (reduce_type) {
        case ReduceType::MAX: ReduceInvoker<ReduceMax<T, WT, WT>>::run(std::forward<Args>(args)...); break;
        case ReduceType::MIN: ReduceInvoker<ReduceMin<T, WT, WT>>::run(std::forward<Args>(args)...); break;
        case ReduceType::MEAN: ReduceInvoker<ReduceMean<T, WT, AccT>>::run(std::forward<Args>(args)...); break;
        case ReduceType::SUM: ReduceInvoker<ReduceSum<T, WT, AccT>>::run(std::forward<Args>(args)...); break;
        case ReduceType::L1: ReduceInvoker<ReduceL1<T, WT, AccT>>::run(std::forward<Args>(args)...); break;
        case ReduceType::L2: ReduceInvoker<ReduceL2<T, WT, AccT>>::run(std::forward<Args>(args)...); break;
        case ReduceType::PROD: ReduceInvoker<ReduceProd<T, WT, WT>>::run(std::forward<Args>(args)...); break;
        case ReduceType::SUM_SQUARE: ReduceInvoker<ReduceSumSquare<T, WT, AccT>>::run(std::forward<Args>(args)...); break;
        case ReduceType::LOG_SUM: ReduceInvoker<ReduceLogSum<T, WT, AccT>>::run(std::forward<Args>(args)...); break;
        case ReduceType::LOG_SUM_EXP: ReduceInvoker<ReduceLogSumExp<T, WT, AccT>>::run(std::forward<Args>(args)...); break;
        default: CV_Error(Error::StsBadArg, "DNN/Reduce: Unsupported operation.");
        }
    }

    template <typename... Args>
    inline void typeDispatch(const int type, Args&&... args) {
        switch (type) {
        case CV_Bool:
            CV_Assert(reduce_type == ReduceType::MAX || reduce_type == ReduceType::MIN);
            opDispatch<uint8_t, int, float>(std::forward<Args>(args)...);
            break;
        case CV_8U: opDispatch<uint8_t, int, float>(std::forward<Args>(args)...); break;
        case CV_8S: opDispatch<int8_t, int, float>(std::forward<Args>(args)...); break;
        case CV_32S: opDispatch<int32_t, int32_t, double>(std::forward<Args>(args)...); break;
        case CV_64S: opDispatch<int64_t, int64_t, double>(std::forward<Args>(args)...); break;
        case CV_32F: opDispatch<float, float, float>(std::forward<Args>(args)...); break;
        case CV_64F: opDispatch<double, double, double>(std::forward<Args>(args)...); break;
        case CV_16F: opDispatch<hfloat, float, float>(std::forward<Args>(args)...); break;
        case CV_16BF: opDispatch<bfloat, float, float>(std::forward<Args>(args)...); break;
        default: CV_Error(cv::Error::BadDepth, "DNN/Reduce: Unsupported type.");
        }
    }
};

Ptr<Reduce2Layer> Reduce2Layer::create(const LayerParams& params)
{
    return Ptr<Reduce2Layer>(new Reduce2LayerImpl(params));
}

}} // cv::dnn
@@ -540,7 +540,10 @@ void Net::Impl::setGraphInput(Ptr<Graph>& graph, size_t idx, const Mat& m)
    int mtype = m.type();
    MatShape mshape = m.shape();
    const std::vector<Arg>& gr_inputs = graph->inputs();
    CV_Assert(idx < gr_inputs.size());
    if (idx >= gr_inputs.size())
    {
        return;
    }
    Arg inp = gr_inputs[idx];
    const ArgData& adata = args.at(inp.idx);
    /*
@@ -563,7 +566,10 @@ void Net::Impl::setGraphInput(Ptr<Graph>& graph, size_t idx, const Mat& m)
    if (adata_type != mtype &&
        !((adata_type == CV_64F || adata_type == CV_32F || adata_type == CV_16F || adata_type == CV_16BF) &&
          (mtype == CV_64F || mtype == CV_32F || mtype == CV_16F || mtype == CV_16BF)) &&
        !(adata.type == CV_16BF && mtype == CV_16U) && !(adata.type == CV_16F && mtype == CV_16U))
        !((adata_type == CV_8U || adata_type == CV_8S || adata_type == CV_16U || adata_type == CV_16S || adata_type == CV_32S || adata_type == CV_32U || adata_type == CV_64S || adata_type == CV_64U) &&
          (mtype == CV_8U || mtype == CV_8S || mtype == CV_16U || mtype == CV_16S || mtype == CV_32S || mtype == CV_32U || mtype == CV_64S || mtype == CV_64U)) &&
        !(adata.type == CV_16BF && mtype == CV_16U) && !(adata.type == CV_16F && mtype == CV_16U) &&
        !m.empty())
    {
        CV_Error_(Error::StsBadArg, ("incompatible type of input tensor #%zu '%s': %s given, %s expected",
                  idx, adata.name.c_str(), typeToString(mtype).c_str(),
@@ -622,12 +628,7 @@ void Net::Impl::forwardGraph(Ptr<Graph>& graph, InputArrayOfArrays inputs_,

    size_t graph_ofs = (size_t)graphofs_it->second;
    CV_Assert(graph_ofs + nops <= totalLayers);
    if (inputs_.empty()) {
        // inputs are already set; this is only possible for the main graph
        for (i = 0; i < n_gr_inputs; i++)
            CV_CheckFalse(argTensor(gr_inputs[i]).empty(), "Some of the model inputs were not set");
    }
    else {
    if (!inputs_.empty()) {
        if (inputs_.total() != n_gr_inputs) {
            CV_Error_(Error::StsBadArg, ("wrong number of inputs in graph '%s': %zu given, %zu expected",
                      graph->name().data(), inputs_.total(), n_gr_inputs));
@@ -1058,7 +1058,7 @@ void ONNXImporter2::parseGlobalPool(LayerParams &layerParams, const opencv_onnx::NodeProto& node_proto)

void ONNXImporter2::parseReduce(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)
{
    layerParams.type = "Reduce";
    layerParams.type = "Reduce2";
    const auto& op_type = node_proto.op_type();
    String reduce_type;
    if (op_type == "ReduceMax")
@@ -1084,27 +1084,6 @@ void ONNXImporter2::parseReduce(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)
    else
        CV_Error(Error::StsNotImplemented, "DNN/ONNX: " + op_type + " is not supported.");
    layerParams.set("reduce", reduce_type);

    int num_inputs = node_proto.input_size();
    CV_Check(num_inputs, num_inputs >= 1 && num_inputs <= 2, "DNN/ONNX: Reduce layers should have at least one input and at most two inputs");

    bool const_axis_input = false;
    if (num_inputs >= 2) {
        CV_CheckTrue(net.isConstArg(node_inputs[1]), "Reduce layer doesn't support non contant axes");
        const_axis_input = true;
    }

    // "axes" is turned to one of the inputs since opset 18,
    // except for ReduceSum, which has "axes" input since opset 13.
    if (const_axis_input) {
        Mat mat_axes = net.argTensor(node_inputs[1]);
        int num_axes = (int)mat_axes.total();
        std::vector<int> axes(num_axes);
        for (int i = 0; i < num_axes; ++i)
            axes[i] = mat_axes.at<int64_t>(i);
        layerParams.set("axes", DictValue::arrayInt(&axes[0], num_axes));
    }

    addLayer(layerParams, node_proto);
}
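One detail worth noting in the importer change above: ONNX stores reduce axes as an int64 tensor, which is why each entry is read with at<int64_t> and narrowed to int. A standalone sketch of that conversion (assuming the CV_64S Mat depth used elsewhere in this PR):

```cpp
#include <opencv2/core.hpp>
#include <vector>

// Sketch: convert an int64 ONNX "axes" tensor into the int vector
// that LayerParams expects, mirroring the loop in parseReduce above.
std::vector<int> axesFromTensor(const cv::Mat& mat_axes)
{
    CV_Assert(mat_axes.type() == CV_64S);        // ONNX axes are int64
    std::vector<int> axes((size_t)mat_axes.total());
    for (int i = 0; i < (int)mat_axes.total(); ++i)
        axes[i] = (int)mat_axes.at<int64_t>(i);  // ranks are small; narrowing is safe
    return axes;
}
```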
@@ -1702,117 +1702,117 @@ CASE(test_reciprocal)
CASE(test_reciprocal_example)
    // no filter
CASE(test_reduce_l1_default_axes_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_l1_default_axes_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_l1_do_not_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_l1_do_not_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_l1_keep_dims_example)
    // no filter
    SKIP;
CASE(test_reduce_l1_keep_dims_random)
    // no filter
    SKIP;
CASE(test_reduce_l1_negative_axes_keep_dims_example)
    // no filter
    SKIP;
CASE(test_reduce_l1_negative_axes_keep_dims_random)
    // no filter
    SKIP;
CASE(test_reduce_l2_default_axes_keepdims_example)
#if SKIP_SET_1
    if (target == DNN_TARGET_MYRIAD)
        default_l1 = 0.01f; // Expected: (normL1) <= (l1), actual: 0.00490189 vs 0.004)
#endif
CASE(test_reduce_l2_default_axes_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_l2_do_not_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_l2_do_not_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_l2_keep_dims_example)
    // no filter
    SKIP;
CASE(test_reduce_l2_keep_dims_random)
    // no filter
    SKIP;
CASE(test_reduce_l2_negative_axes_keep_dims_example)
    // no filter
    SKIP;
CASE(test_reduce_l2_negative_axes_keep_dims_random)
    // no filter
    SKIP;
CASE(test_reduce_log_sum)
    // no filter
    SKIP;
CASE(test_reduce_log_sum_asc_axes)
    // no filter
CASE(test_reduce_log_sum_default)
    // no filter
    SKIP;
CASE(test_reduce_log_sum_desc_axes)
    // no filter
    SKIP;
CASE(test_reduce_log_sum_exp_default_axes_keepdims_example)
#if SKIP_SET_1
    if (target == DNN_TARGET_MYRIAD)
        default_l1 = 0.01f; // Expected: (normL1) <= (l1), actual: 0.00671387 vs 0.004
#endif
CASE(test_reduce_log_sum_exp_default_axes_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_log_sum_exp_do_not_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_log_sum_exp_do_not_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_log_sum_exp_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_log_sum_exp_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_log_sum_exp_negative_axes_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_log_sum_exp_negative_axes_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_log_sum_negative_axes)
    // no filter
    SKIP;
CASE(test_reduce_max_default_axes_keepdim_example)
    // no filter
    SKIP;
CASE(test_reduce_max_default_axes_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_max_do_not_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_max_do_not_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_max_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_max_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_max_negative_axes_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_max_negative_axes_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_mean_default_axes_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_mean_default_axes_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_mean_do_not_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_mean_do_not_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_mean_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_mean_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_mean_negative_axes_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_mean_negative_axes_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_min_default_axes_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_min_default_axes_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_min_do_not_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_min_do_not_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_min_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_min_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_min_negative_axes_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_min_negative_axes_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_prod_default_axes_keepdims_example)
#if SKIP_SET_1
    SKIP_MYRIAD; // accuracy (Expected: (normL1) <= (l1), actual: inf vs 0.004)
@@ -1826,7 +1826,7 @@ CASE(test_reduce_prod_default_axes_keepdims_random)
    }
#endif
CASE(test_reduce_prod_do_not_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_prod_do_not_keepdims_random)
#if SKIP_SET_1
    if (target == DNN_TARGET_MYRIAD)
@@ -1836,7 +1836,7 @@ CASE(test_reduce_prod_do_not_keepdims_random)
    }
#endif
CASE(test_reduce_prod_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_prod_keepdims_random)
#if SKIP_SET_1
    if (target == DNN_TARGET_MYRIAD)
@@ -1853,7 +1853,7 @@ CASE(test_reduce_prod_keepdims_random)
    }
#endif
CASE(test_reduce_prod_negative_axes_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_prod_negative_axes_keepdims_random)
#if SKIP_SET_1
    if (target == DNN_TARGET_MYRIAD)
@@ -1869,28 +1869,106 @@ CASE(test_reduce_prod_negative_axes_keepdims_random)
        default_lInf = 0.05f; // Expected: (normInf) <= (lInf), actual: 0.0201836 vs 0.02
    }
#endif
CASE(test_reduce_l1_default_axes_keepdims_example_expanded)
    SKIP;
CASE(test_reduce_l1_default_axes_keepdims_random_expanded)
    SKIP;
CASE(test_reduce_l1_do_not_keepdims_example_expanded)
    SKIP;
CASE(test_reduce_l1_do_not_keepdims_random_expanded)
    SKIP;
CASE(test_reduce_l1_keep_dims_example_expanded)
    SKIP;
CASE(test_reduce_l1_keep_dims_random_expanded)
    SKIP;
CASE(test_reduce_l1_negative_axes_keep_dims_example_expanded)
    SKIP;
CASE(test_reduce_l1_negative_axes_keep_dims_random_expanded)
    SKIP;
CASE(test_reduce_log_sum_asc_axes_expanded)
    SKIP;
CASE(test_reduce_log_sum_default_expanded)
    SKIP;
CASE(test_reduce_log_sum_desc_axes_expanded)
    SKIP;
CASE(test_reduce_log_sum_negative_axes_expanded)
    SKIP;
CASE(test_reduce_max_bool_inputs)
    SKIP;
CASE(test_reduce_min_bool_inputs)
    SKIP;
CASE(test_reduce_sum_square_default_axes_keepdims_example_expanded)
    SKIP;
CASE(test_reduce_sum_square_default_axes_keepdims_random_expanded)
    SKIP;
CASE(test_reduce_sum_square_do_not_keepdims_example_expanded)
    SKIP;
CASE(test_reduce_sum_square_do_not_keepdims_random_expanded)
    SKIP;
CASE(test_reduce_sum_square_keepdims_example_expanded)
    SKIP;
CASE(test_reduce_sum_square_keepdims_random_expanded)
    SKIP;
CASE(test_reduce_sum_square_negative_axes_keepdims_example_expanded)
    SKIP;
CASE(test_reduce_sum_square_negative_axes_keepdims_random_expanded)
    SKIP;
CASE(test_reduce_sum_default_axes_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_sum_default_axes_keepdims_random)
    // no filter
CASE(test_reduce_sum_do_not_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_sum_do_not_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_sum_empty_axes_input_noop_example)
    // no filter
    SKIP;
CASE(test_reduce_sum_empty_axes_input_noop_random)
    // no filter
    SKIP;
CASE(test_reduce_sum_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_sum_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_sum_negative_axes_keepdims_example)
    // no filter
    SKIP;
CASE(test_reduce_sum_negative_axes_keepdims_random)
    // no filter
    SKIP;
CASE(test_reduce_sum_square_default_axes_keepdims_example)
    // no filter
CASE(test_reduce_l2_default_axes_keepdims_example_expanded)
    SKIP;
CASE(test_reduce_l2_default_axes_keepdims_random_expanded)
    SKIP;
CASE(test_reduce_l2_do_not_keepdims_example_expanded)
    SKIP;
CASE(test_reduce_l2_do_not_keepdims_random_expanded)
    SKIP;
CASE(test_reduce_l2_keep_dims_example_expanded)
    SKIP;
CASE(test_reduce_l2_keep_dims_random_expanded)
    SKIP;
CASE(test_reduce_l2_negative_axes_keep_dims_example_expanded)
    SKIP;
CASE(test_reduce_l2_negative_axes_keep_dims_random_expanded)
    SKIP;
CASE(test_reduce_log_sum_exp_default_axes_keepdims_example_expanded)
    SKIP;
CASE(test_reduce_log_sum_exp_default_axes_keepdims_random_expanded)
    SKIP;
CASE(test_reduce_log_sum_exp_do_not_keepdims_example_expanded)
    SKIP;
CASE(test_reduce_log_sum_exp_do_not_keepdims_random_expanded)
    SKIP;
CASE(test_reduce_log_sum_exp_keepdims_example_expanded)
    SKIP;
CASE(test_reduce_log_sum_exp_keepdims_random_expanded)
    SKIP;
CASE(test_reduce_log_sum_exp_negative_axes_keepdims_example_expanded)
    SKIP;
CASE(test_reduce_log_sum_exp_negative_axes_keepdims_random_expanded)
    SKIP;
CASE(test_reduce_sum_empty_axes_input_noop)
    SKIP;
CASE(test_reduce_sum_square_default_axes_keepdims_random)
#if SKIP_SET_1
    if (target == DNN_TARGET_MYRIAD)
|
|||
"test_bitwise_xor_i32_2d",
|
||||
"test_bitwise_xor_ui64_bcast_3v1d",
|
||||
"test_bitwise_xor_ui8_bcast_4v3d",
|
||||
"test_reduce_sum_default_axes_keepdims_example",
|
||||
"test_reduce_sum_do_not_keepdims_example",
|
||||
"test_reduce_sum_do_not_keepdims_random",
|
||||
"test_reduce_sum_empty_axes_input_noop_example",
|
||||
"test_reduce_sum_empty_axes_input_noop_random",
|
||||
"test_reduce_sum_keepdims_example",
|
||||
"test_reduce_sum_keepdims_random",
|
||||
"test_reduce_sum_negative_axes_keepdims_example",
|
||||
"test_reduce_sum_negative_axes_keepdims_random",
|
||||
"test_reduce_sum_default_axes_keepdims_random",
|
||||
"test_reduce_l1_default_axes_keepdims_example_expanded",
|
||||
"test_reduce_l1_default_axes_keepdims_random_expanded",
|
||||
"test_reduce_l1_do_not_keepdims_example_expanded",
|
||||
"test_reduce_l1_do_not_keepdims_random_expanded",
|
||||
"test_reduce_l1_keep_dims_example_expanded",
|
||||
"test_reduce_l1_keep_dims_random_expanded",
|
||||
"test_reduce_l1_negative_axes_keep_dims_example_expanded",
|
||||
"test_reduce_l1_negative_axes_keep_dims_random_expanded",
|
||||
"test_reduce_log_sum_asc_axes_expanded",
|
||||
"test_reduce_log_sum_default_expanded",
|
||||
"test_reduce_log_sum_desc_axes_expanded",
|
||||
"test_reduce_log_sum_negative_axes_expanded",
|
||||
"test_reduce_max_bool_inputs",
|
||||
"test_reduce_min_bool_inputs",
|
||||
"test_reduce_sum_square_default_axes_keepdims_example_expanded",
|
||||
"test_reduce_sum_square_default_axes_keepdims_random_expanded",
|
||||
"test_reduce_sum_square_do_not_keepdims_example_expanded",
|
||||
"test_reduce_sum_square_do_not_keepdims_random_expanded",
|
||||
"test_reduce_sum_square_keepdims_example_expanded",
|
||||
"test_reduce_sum_square_keepdims_random_expanded",
|
||||
"test_reduce_sum_square_negative_axes_keepdims_example_expanded",
|
||||
"test_reduce_sum_square_negative_axes_keepdims_random_expanded",
|
||||
"test_reduce_sum_empty_axes_input_noop",
|
||||
"test_reduce_l1_default_axes_keepdims_example",
|
||||
"test_reduce_l1_default_axes_keepdims_random",
|
||||
"test_reduce_l1_do_not_keepdims_example",
|
||||
"test_reduce_l1_do_not_keepdims_random",
|
||||
"test_reduce_l1_keep_dims_example",
|
||||
"test_reduce_l1_keep_dims_random",
|
||||
"test_reduce_l1_negative_axes_keep_dims_example",
|
||||
"test_reduce_l1_negative_axes_keep_dims_random",
|
||||
"test_reduce_l2_default_axes_keepdims_example",
|
||||
"test_reduce_l2_default_axes_keepdims_example_expanded",
|
||||
"test_reduce_l2_default_axes_keepdims_random",
|
||||
"test_reduce_l2_default_axes_keepdims_random_expanded",
|
||||
"test_reduce_l2_do_not_keepdims_example",
|
||||
"test_reduce_l2_do_not_keepdims_example_expanded",
|
||||
"test_reduce_l2_do_not_keepdims_random",
|
||||
"test_reduce_l2_do_not_keepdims_random_expanded",
|
||||
"test_reduce_l2_keep_dims_example",
|
||||
"test_reduce_l2_keep_dims_example_expanded",
|
||||
"test_reduce_l2_keep_dims_random",
|
||||
"test_reduce_l2_keep_dims_random_expanded",
|
||||
"test_reduce_l2_negative_axes_keep_dims_example",
|
||||
"test_reduce_l2_negative_axes_keep_dims_example_expanded",
|
||||
"test_reduce_l2_negative_axes_keep_dims_random",
|
||||
"test_reduce_l2_negative_axes_keep_dims_random_expanded",
|
||||
"test_reduce_log_sum_asc_axes",
|
||||
"test_reduce_log_sum_default",
|
||||
"test_reduce_log_sum_desc_axes",
|
||||
"test_reduce_log_sum_exp_default_axes_keepdims_example",
|
||||
"test_reduce_log_sum_exp_default_axes_keepdims_example_expanded",
|
||||
"test_reduce_log_sum_exp_default_axes_keepdims_random",
|
||||
"test_reduce_log_sum_exp_default_axes_keepdims_random_expanded",
|
||||
"test_reduce_log_sum_exp_do_not_keepdims_example",
|
||||
"test_reduce_log_sum_exp_do_not_keepdims_example_expanded",
|
||||
"test_reduce_log_sum_exp_do_not_keepdims_random",
|
||||
"test_reduce_log_sum_exp_do_not_keepdims_random_expanded",
|
||||
"test_reduce_log_sum_exp_keepdims_example",
|
||||
"test_reduce_log_sum_exp_keepdims_example_expanded",
|
||||
"test_reduce_log_sum_exp_keepdims_random",
|
||||
"test_reduce_log_sum_exp_keepdims_random_expanded",
|
||||
"test_reduce_log_sum_exp_negative_axes_keepdims_example",
|
||||
"test_reduce_log_sum_exp_negative_axes_keepdims_example_expanded",
|
||||
"test_reduce_log_sum_exp_negative_axes_keepdims_random",
|
||||
"test_reduce_log_sum_exp_negative_axes_keepdims_random_expanded",
|
||||
"test_reduce_log_sum_negative_axes",
|
||||
"test_reduce_max_do_not_keepdims_example",
|
||||
"test_reduce_max_do_not_keepdims_random",
|
||||
"test_reduce_max_keepdims_example",
|
||||
"test_reduce_max_keepdims_random",
|
||||
"test_reduce_max_negative_axes_keepdims_example",
|
||||
"test_reduce_max_negative_axes_keepdims_random",
|
||||
"test_reduce_mean_default_axes_keepdims_example",
|
||||
"test_reduce_mean_default_axes_keepdims_random",
|
||||
"test_reduce_mean_do_not_keepdims_example",
|
||||
"test_reduce_mean_do_not_keepdims_random",
|
||||
"test_reduce_mean_keepdims_example",
|
||||
"test_reduce_mean_keepdims_random",
|
||||
"test_reduce_mean_negative_axes_keepdims_example",
|
||||
"test_reduce_mean_negative_axes_keepdims_random",
|
||||
"test_reduce_min_do_not_keepdims_example",
|
||||
"test_reduce_min_do_not_keepdims_random",
|
||||
"test_reduce_min_keepdims_example",
|
||||
"test_reduce_min_keepdims_random",
|
||||
"test_reduce_min_negative_axes_keepdims_example",
|
||||
"test_reduce_min_negative_axes_keepdims_random",
|
||||
"test_reduce_prod_do_not_keepdims_example",
|
||||
"test_reduce_prod_do_not_keepdims_random",
|
||||
"test_reduce_prod_keepdims_example",
|
||||
"test_reduce_prod_keepdims_random",
|
||||
"test_reduce_prod_negative_axes_keepdims_example",
|
||||
"test_reduce_prod_negative_axes_keepdims_random",
|
||||
"test_reduce_sum_square_default_axes_keepdims_example",
|
||||
"test_reduce_sum_square_default_axes_keepdims_random",
|
||||
"test_reduce_sum_square_do_not_keepdims_example",
|
||||
"test_reduce_sum_square_do_not_keepdims_random",
|
||||
"test_reduce_sum_square_keepdims_example",
|
||||
"test_reduce_sum_square_keepdims_random",
|
||||
"test_reduce_sum_square_negative_axes_keepdims_example",
|
||||
"test_reduce_sum_square_negative_axes_keepdims_random",
|
||||
|
|
|
|||
|
|
@@ -567,132 +567,21 @@
"test_quantizelinear_uint4",
"test_range_float_type_positive_delta_expanded", // ---- Unsupported operations: Loop ---
"test_range_int32_type_negative_delta_expanded", // ---- same as above ---
"test_reduce_l1_default_axes_keepdims_example",
"test_reduce_l1_default_axes_keepdims_example_expanded",
"test_reduce_l1_default_axes_keepdims_random",
"test_reduce_l1_default_axes_keepdims_random_expanded",
"test_reduce_l1_do_not_keepdims_example",
"test_reduce_l1_do_not_keepdims_example_expanded",
"test_reduce_l1_do_not_keepdims_random",
"test_reduce_l1_do_not_keepdims_random_expanded",
"test_reduce_l1_empty_set",
"test_reduce_l1_empty_set_expanded",
"test_reduce_l1_keep_dims_example",
"test_reduce_l1_keep_dims_example_expanded",
"test_reduce_l1_keep_dims_random",
"test_reduce_l1_keep_dims_random_expanded",
"test_reduce_l1_negative_axes_keep_dims_example",
"test_reduce_l1_negative_axes_keep_dims_example_expanded",
"test_reduce_l1_negative_axes_keep_dims_random",
"test_reduce_l1_negative_axes_keep_dims_random_expanded",
"test_reduce_l2_default_axes_keepdims_example",
"test_reduce_l2_default_axes_keepdims_example_expanded",
"test_reduce_l2_default_axes_keepdims_random",
"test_reduce_l2_default_axes_keepdims_random_expanded",
"test_reduce_l2_do_not_keepdims_example",
"test_reduce_l2_do_not_keepdims_example_expanded",
"test_reduce_l2_do_not_keepdims_random",
"test_reduce_l2_do_not_keepdims_random_expanded",
"test_reduce_l2_empty_set",
"test_reduce_l2_empty_set_expanded",
"test_reduce_l2_keep_dims_example",
"test_reduce_l2_keep_dims_example_expanded",
"test_reduce_l2_keep_dims_random",
"test_reduce_l2_keep_dims_random_expanded",
"test_reduce_l2_negative_axes_keep_dims_example",
"test_reduce_l2_negative_axes_keep_dims_example_expanded",
"test_reduce_l2_negative_axes_keep_dims_random",
"test_reduce_l2_negative_axes_keep_dims_random_expanded",
"test_reduce_log_sum_asc_axes",
"test_reduce_log_sum_asc_axes_expanded",
"test_reduce_log_sum_default",
"test_reduce_log_sum_default_expanded",
"test_reduce_log_sum_desc_axes",
"test_reduce_log_sum_desc_axes_expanded",
"test_reduce_log_sum_empty_set",
"test_reduce_log_sum_empty_set_expanded",
"test_reduce_log_sum_exp_default_axes_keepdims_example",
"test_reduce_log_sum_exp_default_axes_keepdims_example_expanded",
"test_reduce_log_sum_exp_default_axes_keepdims_random",
"test_reduce_log_sum_exp_default_axes_keepdims_random_expanded",
"test_reduce_log_sum_exp_do_not_keepdims_example",
"test_reduce_log_sum_exp_do_not_keepdims_example_expanded",
"test_reduce_log_sum_exp_do_not_keepdims_random",
"test_reduce_log_sum_exp_do_not_keepdims_random_expanded",
"test_reduce_log_sum_exp_empty_set",
"test_reduce_log_sum_exp_empty_set_expanded",
"test_reduce_log_sum_exp_keepdims_example",
"test_reduce_log_sum_exp_keepdims_example_expanded",
"test_reduce_log_sum_exp_keepdims_random",
"test_reduce_log_sum_exp_keepdims_random_expanded",
"test_reduce_log_sum_exp_negative_axes_keepdims_example",
"test_reduce_log_sum_exp_negative_axes_keepdims_example_expanded",
"test_reduce_log_sum_exp_negative_axes_keepdims_random",
"test_reduce_log_sum_exp_negative_axes_keepdims_random_expanded",
"test_reduce_log_sum_negative_axes",
"test_reduce_log_sum_negative_axes_expanded",
"test_reduce_max_bool_inputs",
"test_reduce_max_do_not_keepdims_example",
"test_reduce_max_do_not_keepdims_random",
"test_reduce_max_empty_set",
"test_reduce_max_keepdims_example",
"test_reduce_max_keepdims_random",
"test_reduce_max_negative_axes_keepdims_example",
"test_reduce_max_negative_axes_keepdims_random",
"test_reduce_mean_default_axes_keepdims_example",
"test_reduce_mean_default_axes_keepdims_random",
"test_reduce_mean_do_not_keepdims_example",
"test_reduce_mean_do_not_keepdims_random",
"test_reduce_mean_keepdims_example",
"test_reduce_mean_keepdims_random",
"test_reduce_mean_negative_axes_keepdims_example",
"test_reduce_mean_negative_axes_keepdims_random",
"test_reduce_min_bool_inputs",
"test_reduce_min_do_not_keepdims_example",
"test_reduce_min_do_not_keepdims_random",
"test_reduce_min_empty_set",
"test_reduce_min_keepdims_example",
"test_reduce_min_keepdims_random",
"test_reduce_min_negative_axes_keepdims_example",
"test_reduce_min_negative_axes_keepdims_random",
"test_reduce_prod_do_not_keepdims_example",
"test_reduce_prod_do_not_keepdims_random",
"test_reduce_prod_empty_set",
"test_reduce_prod_keepdims_example",
"test_reduce_prod_keepdims_random",
"test_reduce_prod_negative_axes_keepdims_example",
"test_reduce_prod_negative_axes_keepdims_random",
"test_reduce_sum_default_axes_keepdims_example", // Issue:: Parser: Reduce layer doesn't support non contant axes: 'constBlobs.find(node_proto.input(1)) != constBlobs.end()' must be 'true' (layer does not support dynamic parameters)
"test_reduce_sum_default_axes_keepdims_random", // ---- same as above ---
"test_reduce_sum_do_not_keepdims_example", // ---- same as above ---
"test_reduce_sum_do_not_keepdims_random", // ---- same as above ---
"test_reduce_sum_empty_axes_input_noop",
"test_reduce_sum_empty_axes_input_noop_example", // ---- same as above ---
"test_reduce_sum_empty_axes_input_noop_random", // ---- same as above ---
"test_reduce_sum_empty_set",
"test_reduce_sum_empty_set_non_reduced_axis_zero",
"test_reduce_sum_keepdims_example", // ---- same as above ---
"test_reduce_sum_keepdims_random", // ---- same as above ---
"test_reduce_sum_negative_axes_keepdims_example",
"test_reduce_sum_negative_axes_keepdims_random", // ---- same as above ---
"test_reduce_sum_square_default_axes_keepdims_example",
"test_reduce_sum_square_default_axes_keepdims_example_expanded",
"test_reduce_sum_square_default_axes_keepdims_random",
"test_reduce_sum_square_default_axes_keepdims_random_expanded",
"test_reduce_sum_square_do_not_keepdims_example",
"test_reduce_sum_square_do_not_keepdims_example_expanded",
"test_reduce_sum_square_do_not_keepdims_random",
"test_reduce_sum_square_do_not_keepdims_random_expanded",
"test_reduce_sum_square_empty_set",
"test_reduce_sum_square_empty_set_expanded",
"test_reduce_sum_square_keepdims_example",
"test_reduce_sum_square_keepdims_example_expanded",
"test_reduce_sum_square_keepdims_random",
"test_reduce_sum_square_keepdims_random_expanded",
"test_reduce_sum_square_negative_axes_keepdims_example",
"test_reduce_sum_square_negative_axes_keepdims_example_expanded",
"test_reduce_sum_square_negative_axes_keepdims_random",
"test_reduce_sum_square_negative_axes_keepdims_random_expanded",
"test_regex_full_match_basic",
"test_regex_full_match_email_domain",
"test_regex_full_match_empty",