feat: support more cann operators

fengyuentau 2023-12-23 17:48:13 +08:00 committed by Yuantao Feng
parent e392b3843e
commit 77d2a5868a
7 changed files with 163 additions and 30 deletions
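
The changes below extend OpenCV's CANN backend (Huawei Ascend Compute
Architecture for Neural Networks) with Gelu, Sqrt, element-wise Pow, a family
of Reduce operators and Unsqueeze, fix the SplitV split count, and improve a
CannConstOp error message. For orientation, a minimal sketch of how a model is
routed through this backend; DNN_BACKEND_CANN and DNN_TARGET_NPU are the
existing OpenCV enums, while the model path and input size are placeholders:

    #include <opencv2/dnn.hpp>

    int main()
    {
        // Placeholder model path, not part of this commit.
        cv::dnn::Net net = cv::dnn::readNetFromONNX("model.onnx");

        // Build and run supported layers on the Ascend NPU through CANN.
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_CANN);
        net.setPreferableTarget(cv::dnn::DNN_TARGET_NPU);

        // Dummy input; real dimensions depend on the model.
        cv::Mat image = cv::Mat::zeros(224, 224, CV_32FC3);
        net.setInput(cv::dnn::blobFromImage(image));
        cv::Mat out = net.forward();
        return 0;
    }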


@@ -845,8 +845,12 @@ struct GeluFunctor : public BaseFunctor {
 #endif
     }

-    bool supportBackend(int backendId, int) {
-        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+    bool supportBackend(int backendId, int)
+    {
+        return backendId == DNN_BACKEND_OPENCV ||
+               backendId == DNN_BACKEND_CUDA ||
+               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
+               backendId == DNN_BACKEND_CANN;
     }

     void apply(const float* srcptr, float* dstptr, int stripeStart, int len, size_t planeSize, int cn0, int cn1) const {
@@ -943,7 +947,19 @@ struct GeluFunctor : public BaseFunctor {
                             const std::vector<Ptr<BackendWrapper> > &inputs,
                             const std::vector<Ptr<BackendNode> >& nodes)
     {
-        CV_Error(Error::StsNotImplemented, "");
+        auto input_wrapper = inputs[0].dynamicCast<CannBackendWrapper>();
+
+        auto op = std::make_shared<ge::op::Gelu>(name);
+
+        auto input_node = nodes[0].dynamicCast<CannBackendNode>()->getOp();
+        op->set_input_x_by_name(*input_node, input_wrapper->name.c_str());
+        auto input_desc = input_wrapper->getTensorDesc();
+        op->update_input_desc_x(*input_desc);
+
+        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
+        op->update_output_desc_y(*output_desc);
+
+        return Ptr<BackendNode>(new CannBackendNode(op));
     }

 #endif // HAVE_CANN
@@ -1781,7 +1797,10 @@ struct SqrtFunctor : public BaseDefaultFunctor<SqrtFunctor>
     bool supportBackend(int backendId, int)
     {
-        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE;
+        return backendId == DNN_BACKEND_OPENCV ||
+               backendId == DNN_BACKEND_CUDA ||
+               backendId == DNN_BACKEND_CANN ||
+               backendId == DNN_BACKEND_HALIDE;
     }

     inline float calculate(float x) const
@@ -1811,6 +1830,27 @@ struct SqrtFunctor : public BaseDefaultFunctor<SqrtFunctor>
     }
 #endif // HAVE_DNN_NGRAPH

+#ifdef HAVE_CANN
+    Ptr<BackendNode> initCannOp(const std::string& name,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
+                                const std::vector<Ptr<BackendNode> >& nodes)
+    {
+        auto input_wrapper = inputs[0].dynamicCast<CannBackendWrapper>();
+
+        auto op = std::make_shared<ge::op::Sqrt>(name);
+
+        auto input_node = nodes[0].dynamicCast<CannBackendNode>()->getOp();
+        op->set_input_x_by_name(*input_node, input_wrapper->name.c_str());
+        auto input_desc = input_wrapper->getTensorDesc();
+        op->update_input_desc_x(*input_desc);
+
+        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
+        op->update_output_desc_y(*output_desc);
+
+        return Ptr<BackendNode>(new CannBackendNode(op));
+    }
+#endif // HAVE_CANN
+
     int64 getFLOPSPerElement() const { return 1; }
 };
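
Gelu and Sqrt are wired identically: bind the single input x to the producer's
named output, copy the input tensor descriptor, and publish an empty output
descriptor so CANN infers the output shape at graph-build time. As a sketch,
the shared recipe could be factored like this; the helper itself is
hypothetical (not in the commit), but every call in it appears verbatim above:

    #ifdef HAVE_CANN
    // Hypothetical helper; GeOp would be e.g. ge::op::Gelu or ge::op::Sqrt.
    template <typename GeOp>
    static Ptr<BackendNode> buildUnaryCannOp(const std::string& name,
                                             const std::vector<Ptr<BackendWrapper> >& inputs,
                                             const std::vector<Ptr<BackendNode> >& nodes)
    {
        auto input_wrapper = inputs[0].dynamicCast<CannBackendWrapper>();
        auto op = std::make_shared<GeOp>(name);

        // Bind input x to the upstream operator's named output.
        auto input_node = nodes[0].dynamicCast<CannBackendNode>()->getOp();
        op->set_input_x_by_name(*input_node, input_wrapper->name.c_str());
        op->update_input_desc_x(*input_wrapper->getTensorDesc());

        // Empty shape: CANN infers the output dimensions during graph build.
        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
        op->update_output_desc_y(*output_desc);

        return Ptr<BackendNode>(new CannBackendNode(op));
    }
    #endif // HAVE_CANN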


@@ -271,7 +271,7 @@ public:
         if (backendId == DNN_BACKEND_CANN)
             return op == OPERATION::ADD || op == OPERATION::PROD || op == OPERATION::SUB ||
                    op == OPERATION::DIV || op == OPERATION::MAX || op == OPERATION::MIN ||
-                   op == OPERATION::MOD || op == OPERATION::FMOD;
+                   op == OPERATION::MOD || op == OPERATION::FMOD || op == OPERATION::POW;
 #endif
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
             return (op == OPERATION::ADD ||
@@ -979,6 +979,7 @@ public:
             BUILD_CANN_ELTWISE_OP(OPERATION::MIN, Minimum, name);
             BUILD_CANN_ELTWISE_OP(OPERATION::MOD, Mod, name);
             BUILD_CANN_ELTWISE_OP(OPERATION::FMOD, Mod, name);
+            BUILD_CANN_ELTWISE_OP(OPERATION::POW, Pow, name);
 #undef BUILD_CANN_ELTWISE_OP
             default: CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation");
         }
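
The body of BUILD_CANN_ELTWISE_OP is defined earlier in this file and is not
shown in the diff. By analogy with the BUILD_CANN_REDUCE_OP macro added below,
the new POW line plausibly expands to a case along these lines; the
two-operand wiring and the variable names here are assumptions, not quotes:

    // Assumed expansion sketch, not quoted from the source:
    case OPERATION::POW: {
        auto op = std::make_shared<ge::op::Pow>(name);
        // Binary op: two inputs x1/x2 instead of the single x of Gelu/Sqrt.
        op->set_input_x1_by_name(*input_node_0, input_wrapper_0->name.c_str());
        op->set_input_x2_by_name(*input_node_1, input_wrapper_1->name.c_str());
        op->update_input_desc_x1(*input_desc_0);
        op->update_input_desc_x2(*input_desc_1);
        op->update_output_desc_y(*output_desc);
        eltwise_op = op;
    } break;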


@@ -5,6 +5,8 @@
 #include "../precomp.hpp"
 #include <opencv2/dnn/shape_utils.hpp>

+#include "../op_cann.hpp"
+
 namespace cv { namespace dnn {
@@ -54,6 +56,13 @@ public:
     }

     virtual bool supportBackend(int backendId) CV_OVERRIDE {
+#ifdef HAVE_CANN
+        if (backendId == DNN_BACKEND_CANN)
+            return reduce_type == ReduceType::MAX || reduce_type == ReduceType::MIN ||
+                   reduce_type == ReduceType::MEAN || reduce_type == ReduceType::SUM ||
+                   reduce_type == ReduceType::PROD || reduce_type == ReduceType::LOG_SUM ||
+                   reduce_type == ReduceType::LOG_SUM_EXP;
+#endif
         return backendId == DNN_BACKEND_OPENCV;
     }
@@ -497,6 +506,53 @@ public:
         }
     }

+#ifdef HAVE_CANN
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
+                                      const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
+    {
+        CV_CheckFalse(axes.empty(), "DNN/CANN: Reduce layers need axes to build CANN operators");
+
+        auto input_node = nodes[0].dynamicCast<CannBackendNode>()->getOp();
+        auto input_wrapper = inputs[0].dynamicCast<CannBackendWrapper>();
+        auto input_desc = input_wrapper->getTensorDesc();
+
+        std::vector<int> axes_shape{(int)axes.size()};
+        Mat axes_mat(axes_shape, CV_32S, &axes[0]);
+        auto axes_node = std::make_shared<CannConstOp>(axes_mat.data, axes_mat.type(), axes_shape, cv::format("%s_axes", name.c_str()));
+        auto axes_desc = axes_node->getTensorDesc();
+
+        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
+
+        std::shared_ptr<ge::Operator> reduce_op = nullptr;
+        switch (reduce_type)
+        {
+#define BUILD_CANN_REDUCE_OP(op_type, class_name, op_name)                         \
+            case op_type: {                                                        \
+                auto op = std::make_shared<ge::op::class_name>(op_name);           \
+                op->set_input_x_by_name(*input_node, input_wrapper->name.c_str()); \
+                op->set_input_axes(*(axes_node)->getOp());                         \
+                op->set_attr_keep_dims(keepdims);                                  \
+                op->update_input_desc_x(*input_desc);                              \
+                op->update_input_desc_axes(*axes_desc);                            \
+                op->update_output_desc_y(*output_desc);                            \
+                reduce_op = op;                                                    \
+            } break;
+            BUILD_CANN_REDUCE_OP(ReduceType::MAX, ReduceMax, name);
+            BUILD_CANN_REDUCE_OP(ReduceType::MIN, ReduceMin, name);
+            BUILD_CANN_REDUCE_OP(ReduceType::MEAN, ReduceMean, name);
+            BUILD_CANN_REDUCE_OP(ReduceType::SUM, ReduceSum, name);
+            BUILD_CANN_REDUCE_OP(ReduceType::PROD, ReduceProd, name);
+            BUILD_CANN_REDUCE_OP(ReduceType::LOG_SUM, ReduceLogSum, name);
+            BUILD_CANN_REDUCE_OP(ReduceType::LOG_SUM_EXP, ReduceLogSumExp, name);
+#undef BUILD_CANN_REDUCE_OP
+            default: CV_Error(Error::StsNotImplemented, "Unsupported reduce operation");
+        }
+
+        return Ptr<BackendNode>(new CannBackendNode(reduce_op));
+    }
+#endif // HAVE_CANN
+
 private:
     enum ReduceType
     {
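
Note the CV_CheckFalse guard: a Reduce node without explicit axes (a full
reduction over all dimensions) is rejected here and has to run on another
backend. For the supported path, a shape walk-through with illustrative
values that are not from the diff:

    // Illustrative only:
    // x: [1, 16, 32, 32] (NCHW), axes = {2, 3}, keepdims = true
    //   ReduceMean -> y: [1, 16, 1, 1]
    // with keepdims = false the reduced axes are dropped:
    //   ReduceMean -> y: [1, 16]
    // The axes travel as a CV_32S constant named "<layer name>_axes",
    // built through CannConstOp as shown above.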


@@ -184,6 +184,16 @@ public:
             for (i = 0; i < dims; i++)
                 newShapeDesc[i] = paramShape.get<int>(i);
         }
+        if (params.has("unsqueeze_axes"))
+        {
+            const DictValue& param_unsqueeze_axes = params.get("unsqueeze_axes");
+            int len_axes = param_unsqueeze_axes.size();
+            unsqueeze_axes.resize(len_axes);
+            for (int i = 0; i < len_axes; ++i)
+            {
+                unsqueeze_axes[i] = (int64_t)param_unsqueeze_axes.get<int>(i);
+            }
+        }
         if (hasDynamicShapes)
         {
             dynamicShapes.clear();
@@ -331,8 +341,30 @@ public:
                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                      const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
+        auto input_wrapper = inputs[0].dynamicCast<CannBackendWrapper>();
+
+        if (!unsqueeze_axes.empty())
+        {
+            auto op = std::make_shared<ge::op::Unsqueeze>(name);
+
+            // set attributes
+            op->set_attr_axes(unsqueeze_axes);
+
+            // set inputs
+            // set inputs : x
+            auto input_node = nodes[0].dynamicCast<CannBackendNode>()->getOp();
+            op->set_input_x_by_name(*input_node, input_wrapper->name.c_str());
+            auto input_desc = input_wrapper->getTensorDesc();
+            op->update_input_desc_x(*input_desc);
+
+            // set outputs
+            auto desc_y = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
+            op->update_output_desc_y(*desc_y);
+
+            return Ptr<BackendNode>(new CannBackendNode(op));
+        }
+        else
+        {
         // create operator
         auto op = std::make_shared<ge::op::Reshape>(name);
@@ -342,10 +374,10 @@ public:

         // set inputs
         // set inputs : x
-        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
-        op->set_input_x_by_name(*op_x, x->name.c_str());
-        auto x_desc = x->getTensorDesc();
-        op->update_input_desc_x(*x_desc);
+        auto input_node = nodes[0].dynamicCast<CannBackendNode>()->getOp();
+        op->set_input_x_by_name(*input_node, input_wrapper->name.c_str());
+        auto input_desc = input_wrapper->getTensorDesc();
+        op->update_input_desc_x(*input_desc);
         // set inputs : shape
         std::vector<int> shape_of_shape{(int)newShapeDesc.size()};
         Mat shape_mat(shape_of_shape, CV_32S, newShapeDesc.data());
@@ -354,11 +386,12 @@ public:
         op->update_input_desc_shape(*(op_const_shape->getTensorDesc()));

         // set outputs
-        auto output_y_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
-        op->update_output_desc_y(*output_y_desc);
+        auto desc_y = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
+        op->update_output_desc_y(*desc_y);

         return Ptr<BackendNode>(new CannBackendNode(op));
+        }
     }
 #endif // HAVE_CANN

 #ifdef HAVE_DNN_NGRAPH
@@ -509,6 +542,7 @@ private:
     bool shapesInitialized;
     float scale;
     int zeropoint;
+    std::vector<int64_t> unsqueeze_axes;
 };

 Ptr<ReshapeLayer> ReshapeLayer::create(const LayerParams& params)
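
initCann now dispatches on unsqueeze_axes: a layer imported from ONNX
Unsqueeze (see the importer change below) is lowered to ge::op::Unsqueeze,
while everything else keeps the existing ge::op::Reshape path driven by the
shape constant. A shape walk-through with illustrative values:

    // Illustrative only, values not from the diff:
    // x: [3, 4], unsqueeze_axes = {0}   -> ge::op::Unsqueeze -> y: [1, 3, 4]
    // x: [3, 4], no unsqueeze_axes,
    //            newShapeDesc = {2, 6}  -> ge::op::Reshape   -> y: [2, 6]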


@@ -651,7 +651,7 @@ public:
         auto op = std::make_shared<ge::op::SplitV>(name);

         // set attr
-        int n_split = static_cast<int>(sliceRanges[0].size());
+        int n_split = static_cast<int>(outputs.size());
         op->set_attr_num_split(n_split);

         // set inputs
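
A plausible reading of this one-line fix (the failing case is not shown in the
diff): sliceRanges[0] only reflects the ranges given explicitly in the layer
params, while outputs.size() is the number of output blobs actually negotiated
for the layer, so the latter stays correct even when ranges are filled in
implicitly:

    // Before: split count derived from the params-provided ranges.
    //   int n_split = static_cast<int>(sliceRanges[0].size());
    // After: one split per output blob, by construction.
    int n_split = static_cast<int>(outputs.size());
    op->set_attr_num_split(n_split);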


@@ -2281,6 +2281,8 @@ void ONNXImporter::parseUnsqueeze(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto)
     if (axes.size() != 1)
         CV_Error(Error::StsNotImplemented, "Multidimensional unsqueeze");

+    layerParams.set("unsqueeze_axes", axes);
+
     int depth = layerParams.get<int>("depth", CV_32F);
     MatShape inpShape = outShapes[node_proto.input(0)];
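
This is the producer side of the unsqueeze_axes parameter consumed by the
reshape layer above. An isolated sketch of the same hand-off; the snippet is
hypothetical, and only the key "unsqueeze_axes" and the one-axis restriction
come from the diff:

    // Hypothetical illustration of the parameter hand-off:
    std::vector<int> axes_vec = {0};  // single axis, as the importer requires
    cv::dnn::LayerParams lp;
    lp.set("unsqueeze_axes",
           cv::dnn::DictValue::arrayInt(axes_vec.data(), (int)axes_vec.size()));
    // ReshapeLayerImpl reads the values back as int64_t for
    // ge::op::Unsqueeze::set_attr_axes.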


@@ -61,14 +61,14 @@ CannConstOp::CannConstOp(const uint8_t* data, const int dtype, const std::vector<int>& shape, const std::string& name)
     {
         case CV_32F: break;
         case CV_32S: ge_dtype = ge::DT_INT32; break;
-        default: CV_Error(Error::StsNotImplemented, "Unsupported data type");
+        default: CV_Error(Error::StsNotImplemented, cv::format("Unsupported data type %d of node %s", dtype, name.c_str()));
     }
     auto size_of_type = sizeof(float);
     switch (dtype)
     {
         case CV_32F: break;
         case CV_32S: size_of_type = sizeof(int); break;
-        default: CV_Error(Error::StsNotImplemented, "Unsupported data type");
+        default: CV_Error(Error::StsNotImplemented, cv::format("Unsupported data type %d of node %s", dtype, name.c_str()));
     }
     desc_ = std::make_shared<ge::TensorDesc>(ge_shape, ge::FORMAT_NCHW, ge_dtype);
     auto ge_tensor = std::make_shared<ge::Tensor>();
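
The node name in the new messages is whatever the caller passes in, e.g. the
"<layer name>_axes" constants built for the Reduce layers above. An
illustrative failure, with values that are not from the diff:

    // Illustrative only: a CV_64F (dtype == 6) axes constant for a layer
    // named "reduce_mean_1" would now abort with
    //   "Unsupported data type 6 of node reduce_mean_1_axes"
    // instead of the old, context-free "Unsupported data type".
    std::vector<int> axes_shape{2};
    double axes_data[2] = {2.0, 3.0};  // CV_64F on purpose: unsupported type
    Mat bad_axes(axes_shape, CV_64F, axes_data);
    auto c = std::make_shared<CannConstOp>(bad_axes.data, bad_axes.type(),
                                           axes_shape, cv::format("%s_axes", "reduce_mean_1"));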