Merge branch 4.x

Alexander Smorkalov 2025-08-06 15:25:19 +03:00
commit 02f0779d0e
16 changed files with 299 additions and 63 deletions

View File

@@ -168,7 +168,7 @@ OCV_OPTION(WITH_CAP_IOS "Enable iOS video capture" ON
OCV_OPTION(WITH_CAROTENE "Use NVidia carotene acceleration library for ARM platform" (NOT CV_DISABLE_OPTIMIZATION)
VISIBLE_IF (ARM OR AARCH64) AND NOT IOS AND NOT XROS)
OCV_OPTION(WITH_KLEIDICV "Use KleidiCV library for ARM platforms" (ANDROID AND AARCH64 AND NOT CV_DISABLE_OPTIMIZATION)
VISIBLE_IF (AARCH64 AND (ANDROID OR UNIX AND NOT IOS AND NOT XROS)))
VISIBLE_IF (AARCH64 AND (ANDROID OR UNIX)))
OCV_OPTION(WITH_NDSRVP "Use Andes RVP extension" (NOT CV_DISABLE_OPTIMIZATION)
VISIBLE_IF RISCV)
OCV_OPTION(WITH_HAL_RVV "Use HAL RVV optimizations" (NOT CV_DISABLE_OPTIMIZATION)

View File

@@ -7,6 +7,7 @@ Copyright (C) 2008-2016, Itseez Inc., all rights reserved.
Copyright (C) 2019-2023, Xperience AI, all rights reserved.
Copyright (C) 2019-2022, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
Copyright (C) 2022-2023, Southern University of Science And Technology, all rights reserved.
Copyright (C) 2023-2025, OpenCV AI, all rights reserved.
Copyright (C) 2024, Bigvision LLC, all rights reserved.
Third party copyrights are property of their respective owners.

View File

@@ -315,7 +315,7 @@ Following formats can be read by OpenCV without help of any third-party library:
| [Sun Raster](https://en.wikipedia.org/wiki/Sun_Raster) | `WITH_IMGCODEC_SUNRASTER` | _ON_ |
| [PPM, PGM, PBM, PAM](https://en.wikipedia.org/wiki/Netpbm#File_formats) | `WITH_IMGCODEC_PXM` | _ON_ |
| [PFM](https://en.wikipedia.org/wiki/Netpbm#File_formats) | `WITH_IMGCODEC_PFM` | _ON_ |
| [GIF](https://en.wikipedia.org/wiki/GIF) | `WITH_IMGCODEC_GIF` | _OFF_ |
| [GIF](https://en.wikipedia.org/wiki/GIF) | `WITH_IMGCODEC_GIF` | _ON_ |
### PNG, JPEG, TIFF, WEBP, JPEG 2000, EXR, JPEG XL support

View File

@@ -97,7 +97,7 @@ enum ImwriteFlags {
IMWRITE_PNG_STRATEGY = 17, //!< For PNG, One of cv::ImwritePNGFlags, default is IMWRITE_PNG_STRATEGY_RLE.
IMWRITE_PNG_BILEVEL = 18, //!< For PNG, Binary level PNG, 0 or 1, default is 0.
IMWRITE_PNG_FILTER = 19, //!< For PNG, One of cv::ImwritePNGFilterFlags, default is IMWRITE_PNG_FILTER_SUB.
IMWRITE_PNG_ZLIBBUFFER_SIZE = 20, //!< For PNG, sets the size of the internal zlib compression buffer in bytes.
IMWRITE_PNG_ZLIBBUFFER_SIZE = 20, //!< For PNG with libpng, sets the size of the internal zlib compression buffer in bytes, from 6 to 1048576 (1 MiB). Default is 8192 (8 KiB). For normal use, 131072 (128 KiB) or 262144 (256 KiB) may be sufficient. Not supported if WITH_SPNG=ON.
IMWRITE_PXM_BINARY = 32, //!< For PPM, PGM, or PBM, it can be a binary format flag, 0 or 1. Default value is 1.
IMWRITE_EXR_TYPE = (3 << 4) + 0 /* 48 */, //!< override EXR storage type (FLOAT (FP32) is default)
IMWRITE_EXR_COMPRESSION = (3 << 4) + 1 /* 49 */, //!< override EXR compression type (ZIP_COMPRESSION = 3 is default)
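For illustration, a minimal usage sketch of the new flag (assumptions: a libpng-based build, i.e. WITH_SPNG=OFF; the output file name and image contents are arbitrary):

#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    // Request a 128 KiB zlib buffer for PNG encoding; values outside
    // [6, 1048576] are clamped by the encoder (see grfmt_png.cpp below).
    cv::Mat img(480, 640, CV_8UC3, cv::Scalar(0, 128, 255));
    std::vector<int> params = { cv::IMWRITE_PNG_ZLIBBUFFER_SIZE, 131072 };
    return cv::imwrite("out.png", img, params) ? 0 : 1;
}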

View File

@@ -1007,7 +1007,11 @@ bool PngEncoder::write( const Mat& img, const std::vector<int>& params )
break;
case IMWRITE_PNG_ZLIBBUFFER_SIZE:
png_set_compression_buffer_size(png_ptr, params[i+1]);
// The default value is 8 KiB.
// The minimum limit is 6, taken from https://github.com/opencv/opencv/blob/4.12.0/3rdparty/libpng/pngset.c#L1600 .
// The maximum limit of 1 MiB is set provisionally; libpng itself allows up to INT32_MAX (2 GiB), which is far larger than needed.
// For normal use, 128 or 256 KiB may be sufficient. See https://zlib.net/zlib_how.html .
png_set_compression_buffer_size(png_ptr, MIN(MAX(params[i+1],6), 1024*1024));
break;
default:
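A standalone sketch of the clamping behaviour above (the helper name is hypothetical; it simply mirrors the MIN(MAX(...)) expression in PngEncoder::write()):

#include <algorithm>
#include <cstdio>

// Mirrors MIN(MAX(params[i+1], 6), 1024*1024) from the encoder code above.
static int clampZlibBufferSize(int value)
{
    return std::min(std::max(value, 6), 1024 * 1024);
}

int main()
{
    std::printf("%d %d %d\n",
                clampZlibBufferSize(5),         // 6       (below the minimum)
                clampZlibBufferSize(8192),      // 8192    (default, unchanged)
                clampZlibBufferSize(1048577));  // 1048576 (above the maximum)
    return 0;
}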

View File

@@ -23,6 +23,7 @@
#include <zlib.h>
#include "grfmt_spng.hpp"
#include <opencv2/core/utils/logger.hpp>
/*
* libspng does not support RGB -> Gray conversion. In order to decode colorful images as grayscale
@@ -554,6 +555,11 @@ bool SPngEncoder::write(const Mat &img, const std::vector<int> &params)
filter = params[i+1];
set_filter = true;
}
if( params[i] == IMWRITE_PNG_ZLIBBUFFER_SIZE )
{
// See https://libspng.org/docs/migrate-libpng/#miscellaneous-functions
CV_LOG_WARNING(nullptr, "libspng does not support png_set_compression_buffer_size() which is required for IMWRITE_PNG_ZLIBBUFFER_SIZE");
}
}
ihdr.bit_depth = depth == CV_8U ? isBilevel ? 1 : 8 : 16;

View File

@@ -569,6 +569,26 @@ INSTANTIATE_TEST_CASE_P(/**/,
make_tuple("../perf/512x512.png", 8, 153708),
make_tuple("../perf/512x512.png", 9, 152181)));
// See https://github.com/opencv/opencv/issues/27614
typedef testing::TestWithParam<int> Imgcodecs_Png_ZLIBBUFFER_SIZE;
TEST_P(Imgcodecs_Png_ZLIBBUFFER_SIZE, encode_regression_27614)
{
Mat img(320,240,CV_8UC3,cv::Scalar(64,76,43));
vector<uint8_t> buff;
bool status = false;
ASSERT_NO_THROW(status = imencode(".png", img, buff, { IMWRITE_PNG_ZLIBBUFFER_SIZE, GetParam() }));
ASSERT_TRUE(status);
}
INSTANTIATE_TEST_CASE_P(/*nothing*/, Imgcodecs_Png_ZLIBBUFFER_SIZE,
testing::Values(5,
6, // Minimum limit
8192, // Default value
131072, // 128 KiB
262144, // 256 KiB
1048576, // Maximum limit
1048577));
#endif // HAVE_PNG
}} // namespace

View File

@@ -2,7 +2,7 @@ __all__ = [
"apply_manual_api_refinement"
]
from typing import cast, Sequence, Callable, Iterable
from typing import cast, Sequence, Callable, Iterable, Optional
from .nodes import (NamespaceNode, FunctionNode, OptionalTypeNode, TypeNode,
ClassProperty, PrimitiveTypeNode, ASTNodeTypeNode,
@@ -93,19 +93,35 @@ def export_matrix_type_constants(root: NamespaceNode) -> None:
)
def make_optional_arg(arg_name: str) -> Callable[[NamespaceNode, SymbolName], None]:
def make_optional_arg(*arg_names: str) -> Callable[[NamespaceNode, SymbolName], None]:
def _make_optional_arg(root_node: NamespaceNode,
function_symbol_name: SymbolName) -> None:
function = find_function_node(root_node, function_symbol_name)
for overload in function.overloads:
arg_idx = _find_argument_index(overload.arguments, arg_name)
# Avoid multiplying optional qualification
if isinstance(overload.arguments[arg_idx].type_node, OptionalTypeNode):
continue
for arg_name in arg_names:
found_overload_with_arg = False
overload.arguments[arg_idx].type_node = OptionalTypeNode(
cast(TypeNode, overload.arguments[arg_idx].type_node)
)
for overload in function.overloads:
arg_idx = _find_argument_index(overload.arguments, arg_name)
# skip overloads without this argument
if arg_idx is None:
continue
# Avoid multiplying optional qualification
if isinstance(overload.arguments[arg_idx].type_node, OptionalTypeNode):
continue
overload.arguments[arg_idx].type_node = OptionalTypeNode(
cast(TypeNode, overload.arguments[arg_idx].type_node)
)
found_overload_with_arg = True
if not found_overload_with_arg:
raise RuntimeError(
f"Failed to find argument with name: '{arg_name}'"
f" in '{function_symbol_name.name}' overloads"
)
return _make_optional_arg
@@ -327,13 +343,11 @@ def _trim_class_name_from_argument_types(
def _find_argument_index(arguments: Sequence[FunctionNode.Arg],
name: str) -> int:
name: str) -> Optional[int]:
for i, arg in enumerate(arguments):
if arg.name == name:
return i
raise RuntimeError(
f"Failed to find argument with name: '{name}' in {arguments}"
)
return None
NODES_TO_REFINE = {
@@ -341,6 +355,23 @@ NODES_TO_REFINE = {
SymbolName(("cv", ), (), "calcHist"): make_optional_arg("mask"),
SymbolName(("cv", ), (), "floodFill"): make_optional_arg("mask"),
SymbolName(("cv", ), ("Feature2D", ), "detectAndCompute"): make_optional_arg("mask"),
SymbolName(("cv", ), (), "findEssentialMat"): make_optional_arg(
"distCoeffs1", "distCoeffs2", "dist_coeff1", "dist_coeff2"
),
SymbolName(("cv", ), (), "drawFrameAxes"): make_optional_arg("distCoeffs"),
SymbolName(("cv", ), (), "getOptimalNewCameraMatrix"): make_optional_arg("distCoeffs"),
SymbolName(("cv", ), (), "initInverseRectificationMap"): make_optional_arg("distCoeffs", "R"),
SymbolName(("cv", ), (), "initUndistortRectifyMap"): make_optional_arg("distCoeffs", "R"),
SymbolName(("cv", ), (), "projectPoints"): make_optional_arg("distCoeffs"),
SymbolName(("cv", ), (), "solveP3P"): make_optional_arg("distCoeffs"),
SymbolName(("cv", ), (), "solvePnP"): make_optional_arg("distCoeffs"),
SymbolName(("cv", ), (), "solvePnPGeneric"): make_optional_arg("distCoeffs"),
SymbolName(("cv", ), (), "solvePnPRansac"): make_optional_arg("distCoeffs"),
SymbolName(("cv", ), (), "solvePnPRefineLM"): make_optional_arg("distCoeffs"),
SymbolName(("cv", ), (), "solvePnPRefineVVS"): make_optional_arg("distCoeffs"),
SymbolName(("cv", ), (), "undistort"): make_optional_arg("distCoeffs"),
SymbolName(("cv", ), (), "undistortPoints"): make_optional_arg("distCoeffs"),
SymbolName(("cv", "fisheye"), (), "initUndistortRectifyMap"): make_optional_arg("D"),
SymbolName(("cv", ), (), "imread"): make_optional_none_return,
SymbolName(("cv", ), (), "imdecode"): make_optional_none_return,
}
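Context for the distCoeffs refinements above: the corresponding C++ arguments are InputArray parameters that accept an empty array (cv::noArray()), which is effectively what passing None produces through the Python bindings; the refinement makes the generated stubs reflect that. A minimal C++ sketch, with illustrative camera matrix and point values:

#include <opencv2/calib3d.hpp>
#include <vector>

int main()
{
    std::vector<cv::Point3f> objectPoints = { {0,0,0}, {1,0,0}, {1,1,0}, {0,1,0} };
    std::vector<cv::Point2f> imagePoints  = { {320,240}, {420,240}, {420,340}, {320,340} };
    cv::Mat K = (cv::Mat_<double>(3,3) << 500, 0, 320,
                                          0, 500, 240,
                                          0,   0,   1);
    cv::Mat rvec, tvec;
    // Passing cv::noArray() for distCoeffs (None on the Python side) is valid:
    // an empty distortion vector means zero distortion is assumed.
    return cv::solvePnP(objectPoints, imagePoints, K, cv::noArray(), rvec, tvec) ? 0 : 1;
}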

View File

@@ -707,6 +707,11 @@ enum VideoCaptureOBSensorProperties{
CAP_PROP_OBSENSOR_INTRINSIC_FY=26002,
CAP_PROP_OBSENSOR_INTRINSIC_CX=26003,
CAP_PROP_OBSENSOR_INTRINSIC_CY=26004,
CAP_PROP_OBSENSOR_RGB_POS_MSEC=26005,
CAP_PROP_OBSENSOR_DEPTH_POS_MSEC=26006,
CAP_PROP_OBSENSOR_DEPTH_WIDTH=26007,
CAP_PROP_OBSENSOR_DEPTH_HEIGHT=26008,
CAP_PROP_OBSENSOR_DEPTH_FPS=26009
};
//! @} OBSENSOR
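For illustration, a minimal sketch that opens an Orbbec sensor with an explicit depth mode and reads the new depth timestamp property introduced above (assumptions: a compatible device at index 0, and that the requested 640x480@30 depth mode is supported by the device):

#include <opencv2/videoio.hpp>
#include <iostream>
#include <vector>

int main()
{
    // New CAP_PROP_OBSENSOR_DEPTH_* properties are honored at open time only.
    std::vector<int> params = {
        cv::CAP_PROP_OBSENSOR_DEPTH_WIDTH,  640,
        cv::CAP_PROP_OBSENSOR_DEPTH_HEIGHT, 480,
        cv::CAP_PROP_OBSENSOR_DEPTH_FPS,    30
    };
    cv::VideoCapture cap(0, cv::CAP_OBSENSOR, params);
    if (!cap.isOpened())
        return 1;

    cv::Mat depth;
    if (cap.grab() && cap.retrieve(depth, cv::CAP_OBSENSOR_DEPTH_MAP))
        std::cout << "depth timestamp: "
                  << cap.get(cv::CAP_PROP_OBSENSOR_DEPTH_POS_MSEC) << std::endl;
    return 0;
}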

View File

@@ -376,7 +376,7 @@ Ptr<IVideoWriter> createAndroidVideoWriter(const std::string& filename, int four
double fps, const Size& frameSize,
const VideoWriterParameters& params);
Ptr<IVideoCapture> create_obsensor_capture(int index);
Ptr<IVideoCapture> create_obsensor_capture(int index, const cv::VideoCaptureParameters& params);
bool VideoCapture_V4L_waitAny(
const std::vector<VideoCapture>& streams,

View File

@@ -23,15 +23,20 @@
#include "cap_obsensor_capture.hpp"
#include "cap_obsensor/obsensor_stream_channel_interface.hpp"
#include <cstdint>
#define OB_WIDTH_ANY 0
#define OB_HEIGHT_ANY 0
#define OB_FPS_ANY 0
#if defined(HAVE_OBSENSOR) && !defined(HAVE_OBSENSOR_ORBBEC_SDK)
namespace cv {
Ptr<IVideoCapture> create_obsensor_capture(int index)
Ptr<IVideoCapture> create_obsensor_capture(int index, const cv::VideoCaptureParameters& params)
{
return makePtr<VideoCapture_obsensor>(index);
return makePtr<VideoCapture_obsensor>(index, params);
}
VideoCapture_obsensor::VideoCapture_obsensor(int index) : isOpened_(false)
VideoCapture_obsensor::VideoCapture_obsensor(int index, const cv::VideoCaptureParameters& params) : isOpened_(false)
{
static const obsensor::StreamProfile colorProfile = { 640, 480, 30, obsensor::FRAME_FORMAT_MJPG };
static const obsensor::StreamProfile depthProfile = { 640, 480, 30, obsensor::FRAME_FORMAT_Y16 };
@@ -55,15 +60,23 @@ VideoCapture_obsensor::VideoCapture_obsensor(int index) : isOpened_(false)
{
case obsensor::OBSENSOR_STREAM_COLOR:
{
auto profile = colorProfile;
if(OBSENSOR_FEMTO_MEGA_PID == channel->getPid()){
profile = megaColorProfile;
}else if(OBSENSOR_GEMINI2L_PID == channel->getPid()){
profile = gemini2lColorProfile;
}else if(OBSENSOR_ASTRA2_PID == channel->getPid()){
profile = astra2ColorProfile;
}else if(OBSENSOR_GEMINI2XL_PID == channel->getPid()){
profile = gemini2XlColorProfile;
uint32_t color_width = params.get<uint32_t>(CAP_PROP_FRAME_WIDTH, OB_WIDTH_ANY);
uint32_t color_height = params.get<uint32_t>(CAP_PROP_FRAME_HEIGHT, OB_HEIGHT_ANY);
uint32_t color_fps = params.get<uint32_t>(CAP_PROP_FPS, OB_FPS_ANY);
obsensor::StreamProfile profile = colorProfile;
if (color_width != OB_WIDTH_ANY || color_height != OB_HEIGHT_ANY || color_fps != OB_FPS_ANY) {
profile = { color_width, color_height, color_fps, obsensor::FRAME_FORMAT_MJPG };
} else {
if(OBSENSOR_FEMTO_MEGA_PID == channel->getPid()){
profile = megaColorProfile;
}else if(OBSENSOR_GEMINI2L_PID == channel->getPid()){
profile = gemini2lColorProfile;
}else if(OBSENSOR_ASTRA2_PID == channel->getPid()){
profile = astra2ColorProfile;
}else if(OBSENSOR_GEMINI2XL_PID == channel->getPid()){
profile = gemini2XlColorProfile;
}
}
channel->start(profile, [&](obsensor::Frame* frame) {
std::unique_lock<std::mutex> lk(frameMutex_);
@@ -77,17 +90,25 @@ VideoCapture_obsensor::VideoCapture_obsensor(int index) : isOpened_(false)
uint8_t data = 1;
channel->setProperty(obsensor::DEPTH_TO_COLOR_ALIGN, &data, 1);
uint32_t depth_width = params.get<uint32_t>(CAP_PROP_OBSENSOR_DEPTH_WIDTH, OB_WIDTH_ANY);
uint32_t depth_height = params.get<uint32_t>(CAP_PROP_OBSENSOR_DEPTH_HEIGHT, OB_HEIGHT_ANY);
uint32_t depth_fps = params.get<uint32_t>(CAP_PROP_OBSENSOR_DEPTH_FPS, OB_FPS_ANY);
obsensor::StreamProfile profile = depthProfile;
if(OBSENSOR_GEMINI2_PID == channel->getPid()){
profile = gemini2DepthProfile;
}else if(OBSENSOR_ASTRA2_PID == channel->getPid()){
profile = astra2DepthProfile;
}else if(OBSENSOR_FEMTO_MEGA_PID == channel->getPid()){
profile = megaDepthProfile;
}else if(OBSENSOR_GEMINI2L_PID == channel->getPid()){
profile = gemini2lDepthProfile;
}else if(OBSENSOR_GEMINI2XL_PID == channel->getPid()){
profile = gemini2XlDepthProfile;
if (depth_width != OB_WIDTH_ANY || depth_height != OB_HEIGHT_ANY || depth_fps != OB_FPS_ANY) {
profile = { depth_width, depth_height, depth_fps, obsensor::FRAME_FORMAT_Y16 };
} else {
if(OBSENSOR_GEMINI2_PID == channel->getPid()){
profile = gemini2DepthProfile;
}else if(OBSENSOR_ASTRA2_PID == channel->getPid()){
profile = astra2DepthProfile;
}else if(OBSENSOR_FEMTO_MEGA_PID == channel->getPid()){
profile = megaDepthProfile;
}else if(OBSENSOR_GEMINI2L_PID == channel->getPid()){
profile = gemini2lDepthProfile;
}else if(OBSENSOR_GEMINI2XL_PID == channel->getPid()){
profile = gemini2XlDepthProfile;
}
}
channel->start(profile, [&](obsensor::Frame* frame) {
std::unique_lock<std::mutex> lk(frameMutex_);
@@ -218,7 +239,22 @@ double VideoCapture_obsensor::getProperty(int propIdx) const {
bool VideoCapture_obsensor::setProperty(int propIdx, double /*propVal*/)
{
CV_LOG_WARNING(NULL, "Unsupported or read only property, id=" << propIdx);
switch(propIdx)
{
case CAP_PROP_OBSENSOR_DEPTH_WIDTH:
case CAP_PROP_OBSENSOR_DEPTH_HEIGHT:
case CAP_PROP_OBSENSOR_DEPTH_FPS:
CV_LOG_WARNING(NULL, "CAP_PROP_OBSENSOR_DEPTH_WIDTH, CAP_PROP_OBSENSOR_DEPTH_HEIGHT, CAP_PROP_OBSENSOR_DEPTH_FPS options are supported during camera initialization only");
break;
case CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FPS:
CV_LOG_WARNING(NULL, "CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT, CAP_PROP_FPS options are supported during camera initialization only");
break;
default:
CV_LOG_WARNING(NULL, "Unsupported or read only property, id=" << propIdx);
}
return false;
}

View File

@@ -34,7 +34,7 @@ namespace cv {
class VideoCapture_obsensor : public IVideoCapture
{
public:
VideoCapture_obsensor(int index);
VideoCapture_obsensor(int index, const cv::VideoCaptureParameters& params);
virtual ~VideoCapture_obsensor();
virtual double getProperty(int propIdx) const CV_OVERRIDE;

View File

@@ -28,23 +28,52 @@
namespace cv
{
Ptr<IVideoCapture> create_obsensor_capture(int index)
Ptr<IVideoCapture> create_obsensor_capture(int index, const cv::VideoCaptureParameters& params)
{
return makePtr<VideoCapture_obsensor>(index);
return makePtr<VideoCapture_obsensor>(index, params);
}
VideoCapture_obsensor::VideoCapture_obsensor(int)
VideoCapture_obsensor::VideoCapture_obsensor(int, const cv::VideoCaptureParameters& params)
{
ob::Context::setLoggerToFile(OB_LOG_SEVERITY_OFF, "");
config = std::make_shared<ob::Config>();
pipe = std::make_shared<ob::Pipeline>();
int color_width = params.get<double>(CAP_PROP_FRAME_WIDTH, OB_WIDTH_ANY);
int color_height = params.get<double>(CAP_PROP_FRAME_HEIGHT, OB_HEIGHT_ANY);
int color_fps = params.get<double>(CAP_PROP_FPS, OB_FPS_ANY);
auto colorProfiles = pipe->getStreamProfileList(OB_SENSOR_COLOR);
auto colorProfile = colorProfiles->getProfile(OB_PROFILE_DEFAULT);
config->enableStream(colorProfile->as<ob::VideoStreamProfile>());
if (color_width == OB_WIDTH_ANY && color_height == OB_HEIGHT_ANY && color_fps == OB_FPS_ANY)
{
CV_LOG_INFO(NULL, "Use default color stream profile");
auto colorProfile = colorProfiles->getProfile(OB_PROFILE_DEFAULT);
config->enableStream(colorProfile->as<ob::VideoStreamProfile>());
}
else
{
CV_LOG_INFO(NULL, "Looking for custom color profile " << color_width << "x" << color_height << "@" << color_fps << " fps");
auto colorProfile = colorProfiles->getVideoStreamProfile(color_width, color_height, OB_FORMAT_MJPG, color_fps);
config->enableStream(colorProfile->as<ob::VideoStreamProfile>());
}
int depth_width = params.get<double>(CAP_PROP_OBSENSOR_DEPTH_WIDTH, OB_WIDTH_ANY);
int depth_height = params.get<double>(CAP_PROP_OBSENSOR_DEPTH_HEIGHT, OB_HEIGHT_ANY);
int depth_fps = params.get<double>(CAP_PROP_OBSENSOR_DEPTH_FPS, OB_FPS_ANY);
auto depthProfiles = pipe->getStreamProfileList(OB_SENSOR_DEPTH);
auto depthProfile = depthProfiles->getProfile(OB_PROFILE_DEFAULT);
config->enableStream(depthProfile->as<ob::VideoStreamProfile>());
if (depth_width == OB_WIDTH_ANY && depth_height == OB_HEIGHT_ANY && depth_fps == OB_FPS_ANY)
{
CV_LOG_INFO(NULL, "Use default depth stream profile");
auto depthProfile = depthProfiles->getProfile(OB_PROFILE_DEFAULT);
config->enableStream(depthProfile->as<ob::VideoStreamProfile>());
}
else
{
CV_LOG_INFO(NULL, "Looking for custom color profile " << depth_width << "x" << depth_height << "@" << depth_fps << " fps");
auto depthProfile = depthProfiles->getVideoStreamProfile(depth_width, depth_height, OB_FORMAT_Y14, depth_fps);
config->enableStream(depthProfile->as<ob::VideoStreamProfile>());
}
config->setAlignMode(ALIGN_D2C_SW_MODE);
@@ -83,12 +112,50 @@ double VideoCapture_obsensor::getProperty(int propIdx) const
case CAP_PROP_OBSENSOR_INTRINSIC_CY:
rst = camParam.p1[3];
break;
case CAP_PROP_POS_MSEC:
case CAP_PROP_OBSENSOR_RGB_POS_MSEC:
if (grabbedColorFrame)
{
rst = grabbedColorFrame->globalTimeStampUs();
if (rst == 0.0)
{
CV_LOG_ONCE_WARNING(NULL, "Camera reports zero global timestamp. System timestamp is used instead.");
rst = grabbedColorFrame->systemTimeStamp();
}
}
break;
case CAP_PROP_OBSENSOR_DEPTH_POS_MSEC:
if (grabbedDepthFrame)
{
rst = grabbedDepthFrame->globalTimeStampUs();
if (rst == 0.0)
{
CV_LOG_ONCE_WARNING(NULL, "Camera reports zero global timestamp. System timestamp is used instead.");
rst = grabbedDepthFrame->systemTimeStamp();
}
}
break;
}
return rst;
}
bool VideoCapture_obsensor::setProperty(int, double)
bool VideoCapture_obsensor::setProperty(int prop, double)
{
switch(prop)
{
case CAP_PROP_OBSENSOR_DEPTH_WIDTH:
case CAP_PROP_OBSENSOR_DEPTH_HEIGHT:
case CAP_PROP_OBSENSOR_DEPTH_FPS:
CV_LOG_WARNING(NULL, "CAP_PROP_OBSENSOR_DEPTH_WIDTH, CAP_PROP_OBSENSOR_DEPTH_HEIGHT, CAP_PROP_OBSENSOR_DEPTH_FPS options are supported during camera initialization only");
break;
case CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FPS:
CV_LOG_WARNING(NULL, "CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT, CAP_PROP_FPS options are supported during camera initialization only");
break;
}
return false;
}

View File

@@ -46,7 +46,7 @@ struct CameraParam
class VideoCapture_obsensor : public IVideoCapture
{
public:
VideoCapture_obsensor(int index);
VideoCapture_obsensor(int index, const cv::VideoCaptureParameters& params);
virtual ~VideoCapture_obsensor();
virtual double getProperty(int propIdx) const CV_OVERRIDE;

View File

@@ -50,7 +50,7 @@ CURRENT_FILE_DIR = os.path.dirname(__file__)
class Builder:
def __init__(self, opencv, contrib, dynamic, bitcodedisabled, exclude, disable, enablenonfree, targets, debug, debug_info, framework_name, run_tests, build_docs, swiftdisabled):
def __init__(self, opencv, contrib, dynamic, embed_bitcode, exclude, disable, enablenonfree, targets, debug, debug_info, framework_name, run_tests, build_docs, swiftdisabled):
self.opencv = os.path.abspath(opencv)
self.contrib = None
if contrib:
@@ -60,7 +60,7 @@ class Builder:
else:
print("Note: contrib repository is bad - modules subfolder not found", file=sys.stderr)
self.dynamic = dynamic
self.bitcodedisabled = bitcodedisabled
self.embed_bitcode = embed_bitcode
self.exclude = exclude
self.build_objc_wrapper = not "objc" in self.exclude
self.disable = disable
@@ -112,7 +112,7 @@ class Builder:
cmake_flags = []
if self.contrib:
cmake_flags.append("-DOPENCV_EXTRA_MODULES_PATH=%s" % self.contrib)
if xcode_ver >= 7 and target[1] == 'iPhoneOS' and self.bitcodedisabled == False:
if xcode_ver >= 7 and target[1] == 'iPhoneOS' and self.embed_bitcode:
cmake_flags.append("-DCMAKE_C_FLAGS=-fembed-bitcode")
cmake_flags.append("-DCMAKE_CXX_FLAGS=-fembed-bitcode")
if xcode_ver >= 7 and target[1] == 'Catalyst':
@@ -123,7 +123,7 @@ class Builder:
"-iframework %s/System/iOSSupport/System/Library/Frameworks" % sdk_path,
"-isystem %s/System/iOSSupport/usr/include" % sdk_path,
]
if self.bitcodedisabled == False:
if self.embed_bitcode:
c_flags.append("-fembed-bitcode")
cmake_flags.append("-DCMAKE_C_FLAGS=" + " ".join(c_flags))
cmake_flags.append("-DCMAKE_CXX_FLAGS=" + " ".join(c_flags))
@@ -139,12 +139,20 @@ class Builder:
cmake_flags.append("-DCMAKE_OSX_SYSROOT=%s" % sdk_path)
cmake_flags.append("-DCMAKE_CXX_COMPILER_WORKS=TRUE")
cmake_flags.append("-DCMAKE_C_COMPILER_WORKS=TRUE")
print("::group::Building target", target[0], target[1], flush=True)
self.buildOne(target[0], target[1], main_build_dir, cmake_flags)
print("::endgroup::", flush=True)
if not self.dynamic:
print("::group::Merge libs", target[0], target[1], flush=True)
self.mergeLibs(main_build_dir)
print("::endgroup::", flush=True)
else:
print("::group::Make dynamic lib", target[0], target[1], flush=True)
self.makeDynamicLib(main_build_dir)
print("::endgroup::", flush=True)
self.makeFramework(outdir, dirs)
if self.build_objc_wrapper:
if self.run_tests:
@@ -232,7 +240,7 @@ class Builder:
"xcodebuild",
]
if (self.dynamic or self.build_objc_wrapper) and not self.bitcodedisabled and target == "iPhoneOS":
if (self.dynamic or self.build_objc_wrapper) and self.embed_bitcode and target == "iPhoneOS":
buildcmd.append("BITCODE_GENERATION_MODE=bitcode")
buildcmd += [
@@ -364,7 +372,7 @@ class Builder:
link_target = "%s-apple-ios14.0-macabi" % target[:target.find("-")]
else:
link_target = "%s-apple-darwin" % target[:target.find("-")]
bitcode_flags = ["-fembed-bitcode", "-Xlinker", "-bitcode_verify"] if is_device and not self.bitcodedisabled else []
bitcode_flags = ["-fembed-bitcode", "-Xlinker", "-bitcode_verify"] if is_device and self.embed_bitcode else []
toolchain_dir = get_xcode_setting("TOOLCHAIN_DIR", builddir)
sdk_dir = get_xcode_setting("SDK_DIR", builddir)
framework_options = []
@@ -532,7 +540,7 @@ if __name__ == "__main__":
parser.add_argument('--without', metavar='MODULE', default=[], action='append', help='OpenCV modules to exclude from the framework. To exclude multiple, specify this flag again, e.g. "--without video --without objc"')
parser.add_argument('--disable', metavar='FEATURE', default=[], action='append', help='OpenCV features to disable (add WITH_*=OFF). To disable multiple, specify this flag again, e.g. "--disable tbb --disable openmp"')
parser.add_argument('--dynamic', default=False, action='store_true', help='build dynamic framework (default is "False" - builds static framework)')
parser.add_argument('--disable-bitcode', default=False, dest='bitcodedisabled', action='store_true', help='disable bitcode (enabled by default)')
parser.add_argument('--embed_bitcode', default=False, dest='embed_bitcode', action='store_true', help='enable bitcode (disabled by default)')
parser.add_argument('--iphoneos_deployment_target', default=os.environ.get('IPHONEOS_DEPLOYMENT_TARGET', IPHONEOS_DEPLOYMENT_TARGET), help='specify IPHONEOS_DEPLOYMENT_TARGET')
parser.add_argument('--build_only_specified_archs', default=False, action='store_true', help='if enabled, only directly specified archs are built and defaults are ignored')
parser.add_argument('--iphoneos_archs', default=None, help='select iPhoneOS target ARCHS. Default is "arm64"')
@@ -598,6 +606,6 @@ if __name__ == "__main__":
if iphonesimulator_archs:
targets.append((iphonesimulator_archs, "iPhoneSimulator"))
b = iOSBuilder(args.opencv, args.contrib, args.dynamic, args.bitcodedisabled, args.without, args.disable, args.enablenonfree, targets, args.debug, args.debug_info, args.framework_name, args.run_tests, args.build_docs, args.swiftdisabled)
b = iOSBuilder(args.opencv, args.contrib, args.dynamic, args.embed_bitcode, args.without, args.disable, args.enablenonfree, targets, args.debug, args.debug_info, args.framework_name, args.run_tests, args.build_docs, args.swiftdisabled)
b.build(args.out)

View File

@@ -8,10 +8,68 @@
#include <iostream>
using namespace cv;
int main()
int main(int argc, char** argv)
{
VideoCapture obsensorCapture(0, CAP_OBSENSOR);
if(!obsensorCapture.isOpened()){
cv::CommandLineParser parser(argc, argv,
"{help h ? | | help message}"
"{dw | | depth width }"
"{dh | | depth height }"
"{df | | depth fps }"
"{cw | | color width }"
"{ch | | color height }"
"{cf | | depth fps }"
);
if (parser.has("help"))
{
parser.printMessage();
return 0;
}
std::vector<int> params;
if (parser.has("dw"))
{
params.push_back(CAP_PROP_OBSENSOR_DEPTH_WIDTH);
params.push_back(parser.get<int>("dw"));
}
if (parser.has("dh"))
{
params.push_back(CAP_PROP_OBSENSOR_DEPTH_HEIGHT);
params.push_back(parser.get<int>("dh"));
}
if (parser.has("df"))
{
params.push_back(CAP_PROP_OBSENSOR_DEPTH_FPS);
params.push_back(parser.get<int>("df"));
}
if (parser.has("cw"))
{
params.push_back(CAP_PROP_FRAME_WIDTH);
params.push_back(parser.get<int>("cw"));
}
if (parser.has("ch"))
{
params.push_back(CAP_PROP_FRAME_HEIGHT);
params.push_back(parser.get<int>("ch"));
}
if (parser.has("cf"))
{
params.push_back(CAP_PROP_FPS);
params.push_back(parser.get<int>("cf"));
}
VideoCapture obsensorCapture;
if (params.empty())
obsensorCapture.open(0, CAP_OBSENSOR);
else
obsensorCapture.open(0, CAP_OBSENSOR, params);
if(!obsensorCapture.isOpened()) {
std::cerr << "Failed to open obsensor capture! Index out of range or no response from device";
return -1;
}