Merge remote-tracking branch 'upstream/3.4' into merge-3.4

commit ca8c3dd9b5
@@ -7,6 +7,7 @@
 #include <opencv2/core/async.hpp>
 #include <opencv2/core/detail/async_promise.hpp>
+#include <opencv2/core/utils/logger.hpp>
 
 #include <stdexcept>
 
@@ -147,7 +148,27 @@ AsyncArray testAsyncException()
 namespace fs {
 CV_EXPORTS_W cv::String getCacheDirectoryForDownloads();
 } // namespace fs
-//! @}
-}} // namespaces cv / utils
+
+//! @}  // core_utils
+} // namespace cv::utils
+
+//! @cond IGNORED
+
+CV_WRAP static inline
+int setLogLevel(int level)
+{
+    // NB: Binding generators doesn't work with enums properly yet, so we define separate overload here
+    return cv::utils::logging::setLogLevel((cv::utils::logging::LogLevel)level);
+}
+
+CV_WRAP static inline
+int getLogLevel()
+{
+    return cv::utils::logging::getLogLevel();
+}
+
+//! @endcond IGNORED
+
+} // namespaces cv / utils
 
 #endif // OPENCV_CORE_BINDINGS_UTILS_HPP
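
Note on the hunk above: the two CV_WRAP wrappers deliberately traffic in plain int rather than cv::utils::logging::LogLevel, because the Python/JS binding generators cannot map C++ enums yet. A minimal C++ sketch of both call forms, assuming the wrappers land in the cv namespace as the closing-brace comment suggests:

    #include <opencv2/core/bindings_utils.hpp>
    #include <opencv2/core/utils/logger.hpp>

    int main()
    {
        // Native enum-based API:
        cv::utils::logging::setLogLevel(cv::utils::logging::LOG_LEVEL_DEBUG);

        // Bindings-facing overload added above: same effect, enum passed as int.
        int prev = cv::setLogLevel((int)cv::utils::logging::LOG_LEVEL_ERROR);
        (void)prev;  // previous level, reported as int
        return 0;
    }

From generated bindings this surfaces as cv.setLogLevel()/cv.getLogLevel(), which is what the JavaScript whitelist change further down enables.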
@@ -49,6 +49,8 @@ CV_EXPORTS_W void resetMyriadDevice();
 #define CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2 "Myriad2"
 /// Intel(R) Neural Compute Stick 2, NCS2 (USB 03e7:2485), MyriadX (https://software.intel.com/ru-ru/neural-compute-stick)
 #define CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X "MyriadX"
+#define CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE "ARM_COMPUTE"
+#define CV_DNN_INFERENCE_ENGINE_CPU_TYPE_X86 "X86"
 
 
 /** @brief Returns Inference Engine VPU type.
@@ -57,6 +59,11 @@ CV_EXPORTS_W void resetMyriadDevice();
  */
 CV_EXPORTS_W cv::String getInferenceEngineVPUType();
 
+/** @brief Returns Inference Engine CPU type.
+ *
+ * Specify OpenVINO plugin: CPU or ARM.
+ */
+CV_EXPORTS_W cv::String getInferenceEngineCPUType();
 
 /** @brief Release a HDDL plugin.
  */
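
With the new macros and getInferenceEngineCPUType() declared above, callers can branch on which OpenVINO CPU plugin is loaded. A minimal sketch (note that in builds without Inference Engine support the function throws, per the stub added near the end of this diff):

    #include <iostream>
    #include <opencv2/dnn/utils/inference_engine.hpp>

    int main()
    {
        // Probes the CPU device once and caches the answer.
        cv::String cpuType = cv::dnn::getInferenceEngineCPUType();
        if (cpuType == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE)
            std::cout << "OpenVINO CPU plugin: ARM Compute Library" << std::endl;
        else
            std::cout << "OpenVINO CPU plugin: " << cpuType << std::endl;  // "X86"
        return 0;
    }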
@@ -1382,11 +1382,12 @@ struct Net::Impl : public detail::NetImplBase
     CV_Assert(preferableBackend != DNN_BACKEND_HALIDE ||
               preferableTarget == DNN_TARGET_CPU ||
               preferableTarget == DNN_TARGET_OPENCL);
 #ifdef HAVE_INF_ENGINE
     if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
         preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
     {
         CV_Assert(
-              preferableTarget == DNN_TARGET_CPU ||
+              (preferableTarget == DNN_TARGET_CPU && (!isArmComputePlugin() || preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)) ||
               preferableTarget == DNN_TARGET_OPENCL ||
               preferableTarget == DNN_TARGET_OPENCL_FP16 ||
               preferableTarget == DNN_TARGET_MYRIAD ||
@@ -1394,6 +1395,7 @@ struct Net::Impl : public detail::NetImplBase
               preferableTarget == DNN_TARGET_FPGA
         );
     }
 #endif
     CV_Assert(preferableBackend != DNN_BACKEND_VKCOM ||
               preferableTarget == DNN_TARGET_VULKAN);
     CV_Assert(preferableBackend != DNN_BACKEND_CUDA ||
@@ -2098,8 +2100,8 @@ struct Net::Impl : public detail::NetImplBase
         return;
     }
 
-    bool supportsCPUFallback = preferableTarget == DNN_TARGET_CPU ||
-                               BackendRegistry::checkIETarget(DNN_TARGET_CPU);
+    bool supportsCPUFallback = !isArmComputePlugin() && (preferableTarget == DNN_TARGET_CPU ||
+                               BackendRegistry::checkIETarget(DNN_TARGET_CPU));
 
     // Build Inference Engine networks from sets of layers that support this
     // backend. Split a whole model on several Inference Engine networks if
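
These asserts validate the backend/target pair selected by user code before the network is scheduled, and the reworked supportsCPUFallback additionally disables per-layer CPU fallback when the ARM_COMPUTE plugin is active, since that fallback path was written against the x86 CPU plugin. For reference, the pair being validated is what application code sets; a sketch (helper name and model path hypothetical):

    #include <opencv2/dnn.hpp>

    // Hypothetical helper: load a model and pin it to the IE/nGraph CPU path.
    cv::dnn::Net loadOnIeCpu(const std::string& modelPath)
    {
        cv::dnn::Net net = cv::dnn::readNet(modelPath);
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
        net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);  // the pair the asserts above check
        return net;
    }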
@@ -324,10 +324,13 @@ public:
 #ifdef HAVE_INF_ENGINE
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
-            if (ksize == 1)
+            bool isArmTarget = preferableTarget == DNN_TARGET_CPU && isArmComputePlugin();
+            if (isArmTarget && blobs.empty())
                 return false;
+            if (ksize == 1)
+                return isArmTarget;
             if (ksize == 3)
-                return preferableTarget == DNN_TARGET_CPU;
+                return preferableTarget != DNN_TARGET_MYRIAD && !isArmTarget;
             bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
             if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || !isMyriad) && blobs.empty())
                 return false;
@@ -805,7 +808,7 @@ public:
         CV_Assert_N(inputs.size() >= 1, nodes.size() >= 1);
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
         std::vector<size_t> dims = ieInpNode->get_shape();
-        CV_Assert(dims.size() == 4 || dims.size() == 5);
+        CV_Check(dims.size(), dims.size() >= 3 && dims.size() <= 5, "");
         std::shared_ptr<ngraph::Node> ieWeights = nodes.size() > 1 ? nodes[1].dynamicCast<InfEngineNgraphNode>()->node : nullptr;
         if (nodes.size() > 1)
             CV_Assert(ieWeights);  // dynamic_cast should not fail
@@ -843,7 +846,7 @@ public:
             else
             {
                 auto shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
-                                 ngraph::Shape{kernel_shape.size()}, kernel_shape.data());
+                                 ngraph::Shape{kernel_shape.size()}, std::vector<int64_t>(kernel_shape.begin(), kernel_shape.end()));
                 ieWeights = std::make_shared<ngraph::op::v1::Reshape>(ieWeights, shape, true);
             }
 
@@ -878,7 +881,7 @@ public:
             if (nodes.size() == 3)
             {
                 auto bias_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
-                                    ngraph::Shape{shape.size()}, shape.data());
+                                    ngraph::Shape{shape.size()}, std::vector<int64_t>(shape.begin(), shape.end()));
                 bias = std::make_shared<ngraph::op::v1::Reshape>(nodes[2].dynamicCast<InfEngineNgraphNode>()->node, bias_shape, true);
             }
             else
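
Both Constant fixes in this file (and the matching ones in the region layer below) replace shape.data() with an element-wise copy into std::vector<int64_t>. ngraph::Shape stores size_t elements, so handing its raw buffer to a Constant declared as element::i64 reinterprets memory and reads garbage wherever size_t is not 64 bits wide, notably on 32-bit ARM builds, which is presumably why it surfaced alongside the ARM plugin work. The idiom in isolation (helper name hypothetical):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Widening per element is portable; reinterpreting the size_t buffer as
    // int64 is only correct when sizeof(size_t) == 8.
    std::vector<int64_t> toInt64(const std::vector<std::size_t>& shape)
    {
        return std::vector<int64_t>(shape.begin(), shape.end());
    }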
@@ -1354,11 +1354,15 @@ struct PowerFunctor : public BaseFunctor
                                                             ngraph::Shape{1}, &scale);
         auto shift_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
                                                             ngraph::Shape{1}, &shift);
-        auto power_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
-                                                            ngraph::Shape{1}, &power);
 
         auto mul = std::make_shared<ngraph::op::v1::Multiply>(scale_node, node, ngraph::op::AutoBroadcastType::NUMPY);
         auto scale_shift = std::make_shared<ngraph::op::v1::Add>(mul, shift_node, ngraph::op::AutoBroadcastType::NUMPY);
+
+        if (power == 1)
+            return scale_shift;
+
+        auto power_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+                                                            ngraph::Shape{1}, &power);
         return std::make_shared<ngraph::op::v1::Power>(scale_shift, power_node, ngraph::op::AutoBroadcastType::NUMPY);
     }
 #endif  // HAVE_DNN_NGRAPH
@@ -334,8 +334,8 @@ public:
         if (!acrossSpatial) {
             axes_data.push_back(1);
         } else {
-            axes_data.resize(ieInpNode->get_shape().size());
-            std::iota(axes_data.begin(), axes_data.end(), 0);
+            axes_data.resize(ieInpNode->get_shape().size() - 1);
+            std::iota(axes_data.begin(), axes_data.end(), 1);
         }
         auto axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{axes_data.size()}, axes_data);
         auto norm = std::make_shared<ngraph::op::NormalizeL2>(ieInpNode, axes, epsilon, ngraph::op::EpsMode::ADD);
@@ -344,23 +344,18 @@ public:
         std::vector<size_t> shape(ieInpNode->get_shape().size(), 1);
-        shape[0] = blobs.empty() ? 1 : batch;
         shape[1] = numChannels;
-        std::shared_ptr<ngraph::op::Constant> weight;
-        if (blobs.empty())
+        if (!blobs.empty())
         {
-            std::vector<float> ones(numChannels, 1);
-            weight = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), ones.data());
-        }
-        else
-        {
-            weight = std::make_shared<ngraph::op::Constant>(
+            auto weight = std::make_shared<ngraph::op::Constant>(
                 ngraph::element::f32, ngraph::Shape(shape), blobs[0].data);
-        }
 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2021_2)
-        auto mul = std::make_shared<ngraph::op::v1::Multiply>(norm, weight, ngraph::op::AutoBroadcastType::NUMPY);
+            auto mul = std::make_shared<ngraph::op::v1::Multiply>(norm, weight, ngraph::op::AutoBroadcastType::NUMPY);
 #else
-        auto mul = std::make_shared<ngraph::op::v0::Multiply>(norm, weight, ngraph::op::AutoBroadcastType::NUMPY);
+            auto mul = std::make_shared<ngraph::op::v0::Multiply>(norm, weight, ngraph::op::AutoBroadcastType::NUMPY);
 #endif
-        return Ptr<BackendNode>(new InfEngineNgraphNode(mul));
+            return Ptr<BackendNode>(new InfEngineNgraphNode(mul));
+        }
+        return Ptr<BackendNode>(new InfEngineNgraphNode(norm));
     }
 #endif  // HAVE_DNN_NGRAPH
@@ -105,9 +105,10 @@ public:
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
             bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
-            return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
-                   (!isMyriad ||
-                    (dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0));
+            if (INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) && isMyriad)
+                return dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0;
+
+            return (dstRanges.size() <= 4 || !isArmComputePlugin());
         }
 #endif
         return backendId == DNN_BACKEND_OPENCV ||
@@ -113,6 +113,10 @@ public:
 
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && preferableTarget == DNN_TARGET_CPU)
+            return _order.size() <= 4 || !isArmComputePlugin();
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
                ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine()) ||
@@ -220,7 +220,9 @@ public:
 #endif
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
-            return !computeMaxIdx && type != STOCHASTIC && kernel_size.size() > 1;
+#ifdef HAVE_DNN_NGRAPH
+            return !computeMaxIdx && type != STOCHASTIC && kernel_size.size() > 1 && (kernel_size.size() != 3 || !isArmComputePlugin());
+#endif
         }
         else if (backendId == DNN_BACKEND_OPENCV)
         {
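
Padding, permute, and pooling acquire the same flavor of guard in the three hunks above: the ARM plugin handles tensors of rank 4 at most, so >4 pad ranges, >4-element permutation orders, and 3-D pooling kernels are declared unsupported and fall back to other backends. A sketch distilling the shared shape of these checks (helper name hypothetical):

    #include <cstddef>

    // Hypothetical distillation of the per-layer checks above: a case is
    // acceptable on the IE CPU target unless the rank exceeds 4 while the
    // ARM_COMPUTE plugin is the active CPU plugin.
    static bool rankSupported(std::size_t rank, bool armComputePlugin)
    {
        return rank <= 4 || !armComputePlugin;
    }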
@@ -460,8 +460,10 @@ public:
             std::vector<int64_t> mask(anchors, 1);
             region = std::make_shared<ngraph::op::RegionYolo>(tr_input, coords, classes, anchors, useSoftmax, mask, 1, 3, anchors_vec);
 
+            auto tr_shape = tr_input->get_shape();
             auto shape_as_inp = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
-                                                                       ngraph::Shape{tr_input->get_shape().size()}, tr_input->get_shape().data());
+                                                                       ngraph::Shape{tr_shape.size()},
+                                                                       std::vector<int64_t>(tr_shape.begin(), tr_shape.end()));
 
             region = std::make_shared<ngraph::op::v1::Reshape>(region, shape_as_inp, true);
             new_axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{4}, std::vector<int64_t>{0, 2, 3, 1});
@@ -607,7 +609,7 @@ public:
             result = std::make_shared<ngraph::op::Transpose>(result, tr_axes);
             if (b > 1)
             {
-                std::vector<size_t> sizes = {(size_t)b, result->get_shape()[0] / b, result->get_shape()[1]};
+                std::vector<int64_t> sizes{b, static_cast<int64_t>(result->get_shape()[0]) / b, static_cast<int64_t>(result->get_shape()[1])};
                 auto shape_node = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{sizes.size()}, sizes.data());
                 result = std::make_shared<ngraph::op::v1::Reshape>(result, shape_node, true);
             }
@@ -655,6 +655,22 @@ InferenceEngine::Core& getCore(const std::string& id)
 }
 #endif
 
+static bool detectArmPlugin_()
+{
+    InferenceEngine::Core& ie = getCore("CPU");
+    const std::vector<std::string> devices = ie.GetAvailableDevices();
+    for (std::vector<std::string>::const_iterator i = devices.begin(); i != devices.end(); ++i)
+    {
+        if (i->find("CPU") != std::string::npos)
+        {
+            const std::string name = ie.GetMetric(*i, METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
+            CV_LOG_INFO(NULL, "CPU plugin: " << name);
+            return name.find("arm_compute::NEON") != std::string::npos;
+        }
+    }
+    return false;
+}
+
 #if !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
 static bool detectMyriadX_(std::string device)
 {
@@ -1185,6 +1201,12 @@ bool isMyriadX()
     return myriadX;
 }
 
+bool isArmComputePlugin()
+{
+    static bool armPlugin = getInferenceEngineCPUType() == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE;
+    return armPlugin;
+}
+
 static std::string getInferenceEngineVPUType_()
 {
     static std::string param_vpu_type = utils::getConfigurationParameterString("OPENCV_DNN_IE_VPU_TYPE", "");
@@ -1223,6 +1245,14 @@ cv::String getInferenceEngineVPUType()
     return vpu_type;
 }
 
+cv::String getInferenceEngineCPUType()
+{
+    static cv::String cpu_type = detectArmPlugin_() ?
+                                 CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE :
+                                 CV_DNN_INFERENCE_ENGINE_CPU_TYPE_X86;
+    return cpu_type;
+}
+
 #else  // HAVE_INF_ENGINE
 
 cv::String getInferenceEngineBackendType()
@@ -1238,6 +1268,11 @@ cv::String getInferenceEngineVPUType()
 {
     CV_Error(Error::StsNotImplemented, "This OpenCV build doesn't include InferenceEngine support");
 }
 
+cv::String getInferenceEngineCPUType()
+{
+    CV_Error(Error::StsNotImplemented, "This OpenCV build doesn't include InferenceEngine support");
+}
+
 #endif  // HAVE_INF_ENGINE
 
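
detectArmPlugin_() keys off the FULL_DEVICE_NAME metric of the CPU device: OpenVINO's ARM plugin reports a name containing "arm_compute::NEON", and the answer is then cached behind a function-local static in isArmComputePlugin(). A hedged standalone probe using the same Inference Engine calls:

    #include <iostream>
    #include <string>
    #include <inference_engine.hpp>

    int main()
    {
        InferenceEngine::Core ie;
        for (const std::string& dev : ie.GetAvailableDevices())
        {
            // Prints e.g. "CPU: arm_compute::NEON" under the ARM plugin.
            std::cout << dev << ": "
                      << ie.GetMetric(dev, METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>()
                      << std::endl;
        }
        return 0;
    }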
@@ -255,8 +255,11 @@ CV__DNN_INLINE_NS_BEGIN
 
 bool isMyriadX();
 
+bool isArmComputePlugin();
+
 CV__DNN_INLINE_NS_END
 
 
 InferenceEngine::Core& getCore(const std::string& id);
 
 template<typename T = size_t>
@@ -35,6 +35,7 @@
 #define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2 "dnn_skip_ie_myriad2"
 #define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X "dnn_skip_ie_myriadx"
 #define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X
+#define CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU "dnn_skip_ie_arm_cpu"
 
 #define CV_TEST_TAG_DNN_SKIP_VULKAN "dnn_skip_vulkan"
 
@@ -156,6 +156,10 @@ TEST_P(Test_ONNX_layers, Convolution_variable_weight_bias)
     if (backend == DNN_BACKEND_VKCOM)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN);  // not supported
 
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU &&
+        getInferenceEngineCPUType() == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+
     String basename = "conv_variable_wb";
     Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
     ASSERT_FALSE(net.empty());
@@ -766,6 +770,8 @@ TEST_P(Test_ONNX_layers, Conv1d_variable_weight_bias)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
     {
         if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+        if (target == DNN_TARGET_CPU && getInferenceEngineCPUType() == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     }
     String basename = "conv1d_variable_wb";
     Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
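
CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU gives the ARM-plugin-specific skips above a single handle, presumably registered alongside the other DNN skip tags in initDNNTests(); if the ts module's usual convention holds, runs can also be filtered from the command line with something like --test_tag_skip=dnn_skip_ie_arm_cpu. The guard generalizes to any nGraph test body; a sketch:

    // Inside a Test_ONNX_layers-style fixture body (sketch):
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU &&
        getInferenceEngineCPUType() == CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE)
    {
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_ARM_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    }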
@@ -481,8 +481,7 @@ article](http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions)).
 than union-find method; it actually get 1.5~2m/s on my centrino L7200 1.2GHz laptop.
 
 - the color image algorithm is taken from: @cite forssen2007maximally ; it should be much slower
-than grey image method ( 3~4 times ); the chi_table.h file is taken directly from paper's source
-code which is distributed under GPL.
+than grey image method ( 3~4 times )
 
 - (Python) A complete example showing the use of the %MSER detector can be found at samples/python/mser.py
 */
@@ -35,7 +35,7 @@
  * it actually get 1.5~2m/s on my centrino L7200 1.2GHz laptop.
  * 3. the color image algorithm is taken from: Maximally Stable Colour Regions for Recognition and Match;
  *    it should be much slower than gray image method ( 3~4 times );
- *    the chi_table.h file is taken directly from paper's source code which is distributed under GPL.
+ *    the chi_table.h file is taken directly from paper's source code which is distributed under permissive BSD-like license: http://users.isy.liu.se/cvl/perfo/software/chi_table.h
  * 4. though the name is *contours*, the result actually is a list of point set.
  */
 
@ -26,7 +26,9 @@ ocv_list_filterout(opencv_hdrs "modules/cuda.*")
|
|||
ocv_list_filterout(opencv_hdrs "modules/cudev")
|
||||
ocv_list_filterout(opencv_hdrs "modules/core/.*/hal/")
|
||||
ocv_list_filterout(opencv_hdrs "modules/.*/detection_based_tracker.hpp") # Conditional compilation
|
||||
ocv_list_filterout(opencv_hdrs "modules/core/include/opencv2/core/utils/.*")
|
||||
ocv_list_filterout(opencv_hdrs "modules/core/include/opencv2/core/utils/*.private.*")
|
||||
ocv_list_filterout(opencv_hdrs "modules/core/include/opencv2/core/utils/instrumentation.hpp")
|
||||
ocv_list_filterout(opencv_hdrs "modules/core/include/opencv2/core/utils/trace*")
|
||||
|
||||
ocv_update_file("${CMAKE_CURRENT_BINARY_DIR}/headers.txt" "${opencv_hdrs}")
|
||||
|
||||
|
|
|
|||
|
|
@@ -1,57 +1,12 @@
-///////////////////////////////////////////////////////////////////////////////////////
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// AUTHOR: Rahul Kavi rahulkavi[at]live[at]com
 
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-
 // This is a implementation of the Logistic Regression algorithm in C++ in OpenCV.
 
-// AUTHOR:
-// Rahul Kavi rahulkavi[at]live[at]com
-
 // # You are free to use, change, or redistribute the code in any way you wish for
 // # non-commercial purposes, but please maintain the name of the original author.
 // # This code comes with no warranty of any kind.
 
-// #
-// # You are free to use, change, or redistribute the code in any way you wish for
-// # non-commercial purposes, but please maintain the name of the original author.
-// # This code comes with no warranty of any kind.
-
 // # Logistic Regression ALGORITHM
 
-
-//                           License Agreement
-//                For Open Source Computer Vision Library
-
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-
-//   * Redistributions of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-
-//   * Redistributions in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-// This is a implementation of the Logistic Regression algorithm
-//
-
 #include "precomp.hpp"
 
@@ -1,11 +1,13 @@
 // This file is part of OpenCV project.
 // It is subject to the license terms in the LICENSE file found in the top-level directory
 // of this distribution and at http://opencv.org/license.html.
+//
+// AUTHOR: Rahul Kavi rahulkavi[at]live[at]com
 
 // This is a implementation of the Logistic Regression algorithm in C++ in OpenCV.
-
-// AUTHOR:
-// Rahul Kavi rahulkavi[at]live[at]com
+//
+// Test data uses subset of data from the popular Iris Dataset (1936):
+// - http://archive.ics.uci.edu/ml/datasets/Iris
+// - https://en.wikipedia.org/wiki/Iris_flower_data_set
+//
 
 #include "test_precomp.hpp"
 
@@ -1,11 +1,17 @@
 # Classes and methods whitelist
-core = {'': ['absdiff', 'add', 'addWeighted', 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'cartToPolar',\
-             'compare', 'convertScaleAbs', 'copyMakeBorder', 'countNonZero', 'determinant', 'dft', 'divide', 'eigen', \
-             'exp', 'flip', 'getOptimalDFTSize','gemm', 'hconcat', 'inRange', 'invert', 'kmeans', 'log', 'magnitude', \
-             'max', 'mean', 'meanStdDev', 'merge', 'min', 'minMaxLoc', 'mixChannels', 'multiply', 'norm', 'normalize', \
-             'perspectiveTransform', 'polarToCart', 'pow', 'randn', 'randu', 'reduce', 'repeat', 'rotate', 'setIdentity', 'setRNGSeed', \
-             'solve', 'solvePoly', 'split', 'sqrt', 'subtract', 'trace', 'transform', 'transpose', 'vconcat'],
-        'Algorithm': []}
+
+core = {
+    '': [
+        'absdiff', 'add', 'addWeighted', 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'cartToPolar',
+        'compare', 'convertScaleAbs', 'copyMakeBorder', 'countNonZero', 'determinant', 'dft', 'divide', 'eigen',
+        'exp', 'flip', 'getOptimalDFTSize','gemm', 'hconcat', 'inRange', 'invert', 'kmeans', 'log', 'magnitude',
+        'max', 'mean', 'meanStdDev', 'merge', 'min', 'minMaxLoc', 'mixChannels', 'multiply', 'norm', 'normalize',
+        'perspectiveTransform', 'polarToCart', 'pow', 'randn', 'randu', 'reduce', 'repeat', 'rotate', 'setIdentity', 'setRNGSeed',
+        'solve', 'solvePoly', 'split', 'sqrt', 'subtract', 'trace', 'transform', 'transpose', 'vconcat',
+        'setLogLevel', 'getLogLevel',
+    ],
+    'Algorithm': [],
+}
 
 imgproc = {'': ['Canny', 'GaussianBlur', 'Laplacian', 'HoughLines', 'HoughLinesP', 'HoughCircles', 'Scharr','Sobel', \
                 'adaptiveThreshold','approxPolyDP','arcLength','bilateralFilter','blur','boundingRect','boxFilter',\
@@ -1,60 +1,5 @@
-/*//////////////////////////////////////////////////////////////////////////////////////
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-
-// This is a implementation of the Logistic Regression algorithm in C++ in OpenCV.
-
-// AUTHOR:
-// Rahul Kavi rahulkavi[at]live[at]com
-//
-
-// contains a subset of data from the popular Iris Dataset (taken from
-// "http://archive.ics.uci.edu/ml/datasets/Iris")
-
-// # You are free to use, change, or redistribute the code in any way you wish for
-// # non-commercial purposes, but please maintain the name of the original author.
-// # This code comes with no warranty of any kind.
-
-// #
-// # You are free to use, change, or redistribute the code in any way you wish for
-// # non-commercial purposes, but please maintain the name of the original author.
-// # This code comes with no warranty of any kind.
-
-// # Logistic Regression ALGORITHM
-
-//                           License Agreement
-//                For Open Source Computer Vision Library
-
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-
-//   * Redistributions of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-
-//   * Redistributions in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.*/
+// Logistic Regression sample
+// AUTHOR: Rahul Kavi rahulkavi[at]live[at]com
 
 #include <iostream>
 