Merge pull request #27524 from DDmytro:multichannel_ecc

Extend findTransformECC and computeECC to support multichannel input (1 or 3 channels)
Alexander Smorkalov 2025-10-24 11:24:15 +03:00 committed by GitHub
commit 52393156c8
4 changed files with 536 additions and 619 deletions


@ -268,17 +268,43 @@ enum
MOTION_HOMOGRAPHY = 3 MOTION_HOMOGRAPHY = 3
}; };
/** @brief Computes the Enhanced Correlation Coefficient value between two images @cite EP08 .
@param templateImage single-channel template image; CV_8U or CV_32F array.
@param inputImage single-channel input image to be warped to provide an image similar to
templateImage, same type as templateImage.
@param inputMask An optional mask to indicate valid values of inputImage.
@sa
findTransformECC
*/
/**
@brief Computes the Enhanced Correlation Coefficient (ECC) value between two images

The Enhanced Correlation Coefficient (ECC) is a normalized measure of similarity between two images.
The result lies in the range [-1, 1], where 1 corresponds to perfect similarity (modulo affine shift and scale),
0 indicates no correlation, and -1 indicates perfect negative correlation.

For single-channel images, the ECC is defined as:

\f[
\mathrm{ECC}(I, T) = \frac{\sum_{x} (I(x) - \mu_I)(T(x) - \mu_T)}
{\sqrt{\sum_{x} (I(x) - \mu_I)^2} \cdot \sqrt{\sum_{x} (T(x) - \mu_T)^2}}
\f]

For multi-channel images (e.g., 3-channel RGB), the formula generalizes to:

\f[
\mathrm{ECC}(I, T) =
\frac{\sum_{x} \sum_{c=1}^{C} (I_c(x) - \mu_{I_c})(T_c(x) - \mu_{T_c})}
{\sqrt{\sum_{x} \sum_{c=1}^{C} (I_c(x) - \mu_{I_c})^2} \cdot
\sqrt{\sum_{x} \sum_{c=1}^{C} (T_c(x) - \mu_{T_c})^2}}
\f]

Where:
- \f$I_c(x), T_c(x)\f$ are the values of channel \f$c\f$ at spatial location \f$x\f$,
- \f$\mu_{I_c}, \mu_{T_c}\f$ are the mean values of channel \f$c\f$ over the masked region (if provided),
- \f$C\f$ is the number of channels (only 1 and 3 are currently supported),
- The sums run over all pixels \f$x\f$ in the image domain (optionally restricted by mask).

@param templateImage Input template image; must have either 1 or 3 channels and be of type CV_8U, CV_16U, CV_32F, or CV_64F.
@param inputImage Input image to be compared with the template; must have the same type and number of channels as templateImage.
@param inputMask Optional single-channel mask to specify the valid region of interest in inputImage and templateImage.
@return The ECC similarity coefficient in the range [-1, 1].
@sa findTransformECC
*/
CV_EXPORTS_W double computeECC(InputArray templateImage, InputArray inputImage, InputArray inputMask = noArray()); CV_EXPORTS_W double computeECC(InputArray templateImage, InputArray inputImage, InputArray inputMask = noArray());
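To make the new contract concrete, a minimal usage sketch of computeECC on a pair of 3-channel images with an optional mask follows. It is not part of the patch, and the image file names are placeholders:

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/video/tracking.hpp>
#include <iostream>

int main()
{
    // With this change, 1- or 3-channel inputs are accepted, so IMREAD_COLOR can be used directly.
    cv::Mat tmpl  = cv::imread("template.png", cv::IMREAD_COLOR);
    cv::Mat input = cv::imread("input.png", cv::IMREAD_COLOR);
    if (tmpl.empty() || input.empty() || tmpl.size() != input.size())
        return 1;

    // Optional mask: restrict the comparison to the left half of the images.
    cv::Mat mask = cv::Mat::zeros(tmpl.size(), CV_8U);
    mask.colRange(0, mask.cols / 2).setTo(255);

    double ecc = cv::computeECC(tmpl, input, mask); // result lies in [-1, 1]
    std::cout << "ECC = " << ecc << std::endl;
    return 0;
}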
/** @example samples/cpp/image_alignment.cpp /** @example samples/cpp/image_alignment.cpp
@ -287,8 +313,8 @@ An example using the image alignment ECC algorithm
/** @brief Finds the geometric transform (warp) between two images in terms of the ECC criterion @cite EP08 . /** @brief Finds the geometric transform (warp) between two images in terms of the ECC criterion @cite EP08 .
@param templateImage single-channel template image; CV_8U or CV_32F array. @param templateImage 1 or 3 channel template image; CV_8U, CV_16U, CV_32F, CV_64F type.
@param inputImage single-channel input image which should be warped with the final warpMatrix in @param inputImage input image which should be warped with the final warpMatrix in
order to provide an image similar to templateImage, same type as templateImage. order to provide an image similar to templateImage, same type as templateImage.
@param warpMatrix floating-point \f$2\times 3\f$ or \f$3\times 3\f$ mapping matrix (warp). @param warpMatrix floating-point \f$2\times 3\f$ or \f$3\times 3\f$ mapping matrix (warp).
@param motionType parameter, specifying the type of motion: @param motionType parameter, specifying the type of motion:
@ -305,7 +331,7 @@ order to provide an image similar to templateImage, same type as templateImage.
criteria.epsilon defines the threshold of the increment in the correlation coefficient between two criteria.epsilon defines the threshold of the increment in the correlation coefficient between two
iterations (a negative criteria.epsilon makes criteria.maxcount the only termination criterion). iterations (a negative criteria.epsilon makes criteria.maxcount the only termination criterion).
Default values are shown in the declaration above. Default values are shown in the declaration above.
@param inputMask An optional mask to indicate valid values of inputImage. @param inputMask An optional single channel mask to indicate valid values of inputImage.
@param gaussFiltSize An optional value indicating size of gaussian blur filter; (DEFAULT: 5) @param gaussFiltSize An optional value indicating size of gaussian blur filter; (DEFAULT: 5)
The function estimates the optimum transformation (warpMatrix) with respect to ECC criterion The function estimates the optimum transformation (warpMatrix) with respect to ECC criterion
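For the new color path specifically, here is a hedged sketch (not part of the patch or of the referenced sample; the file names and termination parameters are illustrative):

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/video/tracking.hpp>

int main()
{
    cv::Mat tmpl  = cv::imread("template.png", cv::IMREAD_COLOR); // 3-channel CV_8U
    cv::Mat frame = cv::imread("frame.png", cv::IMREAD_COLOR);
    if (tmpl.empty() || frame.empty())
        return 1;

    // 2x3 identity initialization for an affine-family motion model.
    cv::Mat warp = cv::Mat::eye(2, 3, CV_32F);
    cv::TermCriteria criteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 50, 1e-6);

    // gaussFiltSize = 5 matches the default smoothing of the overload without that argument.
    double rho = cv::findTransformECC(tmpl, frame, warp, cv::MOTION_EUCLIDEAN,
                                      criteria, cv::noArray(), 5);

    // Warp the input back onto the template grid with the estimated transform.
    cv::Mat aligned;
    cv::warpAffine(frame, aligned, warp, tmpl.size(),
                   cv::INTER_LINEAR + cv::WARP_INVERSE_MAP);
    cv::imwrite("aligned.png", aligned);
    return rho > 0 ? 0 : 1;
}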


@ -1,24 +1,22 @@
#include "perf_precomp.hpp" #include "perf_precomp.hpp"
namespace opencv_test namespace opencv_test {
{
using namespace perf; using namespace perf;
CV_ENUM(MotionType, MOTION_TRANSLATION, MOTION_EUCLIDEAN, MOTION_AFFINE, MOTION_HOMOGRAPHY) CV_ENUM(MotionType, MOTION_TRANSLATION, MOTION_EUCLIDEAN, MOTION_AFFINE, MOTION_HOMOGRAPHY)
CV_ENUM(ReadFlag, IMREAD_GRAYSCALE, IMREAD_COLOR)
typedef tuple<MotionType> MotionType_t; typedef std::tuple<MotionType, ReadFlag> TestParams;
typedef perf::TestBaseWithParam<MotionType_t> TransformationType; typedef perf::TestBaseWithParam<TestParams> ECCPerfTest;
PERF_TEST_P(TransformationType, findTransformECC, /*testing::ValuesIn(MotionType::all())*/
testing::Values((int) MOTION_TRANSLATION, (int) MOTION_EUCLIDEAN,
(int) MOTION_AFFINE, (int) MOTION_HOMOGRAPHY)
)
{
Mat img = imread(getDataPath("cv/shared/fruits_ecc.png"),0);
Mat templateImage;
PERF_TEST_P(ECCPerfTest, findTransformECC,
testing::Combine(testing::Values(MOTION_TRANSLATION, MOTION_EUCLIDEAN, MOTION_AFFINE, MOTION_HOMOGRAPHY),
testing::Values(IMREAD_GRAYSCALE, IMREAD_COLOR))) {
int transform_type = get<0>(GetParam()); int transform_type = get<0>(GetParam());
int readFlag = get<1>(GetParam());
Mat img = imread(getDataPath("cv/shared/fruits_ecc.png"), readFlag);
Mat templateImage;
Mat warpMat; Mat warpMat;
Mat warpGround; Mat warpGround;
@ -26,37 +24,28 @@ PERF_TEST_P(TransformationType, findTransformECC, /*testing::ValuesIn(MotionType
double angle; double angle;
switch (transform_type) { switch (transform_type) {
case MOTION_TRANSLATION: case MOTION_TRANSLATION:
warpGround = (Mat_<float>(2,3) << 1.f, 0.f, 7.234f, warpGround = (Mat_<float>(2, 3) << 1.f, 0.f, 7.234f, 0.f, 1.f, 11.839f);
0.f, 1.f, 11.839f);
warpAffine(img, templateImage, warpGround, warpAffine(img, templateImage, warpGround, Size(200, 200), INTER_LINEAR + WARP_INVERSE_MAP);
Size(200,200), INTER_LINEAR + WARP_INVERSE_MAP);
break; break;
case MOTION_EUCLIDEAN: case MOTION_EUCLIDEAN:
angle = CV_PI / 30; angle = CV_PI / 30;
warpGround = (Mat_<float>(2,3) << (float)cos(angle), (float)-sin(angle), 12.123f, warpGround = (Mat_<float>(2, 3) << (float)cos(angle), (float)-sin(angle), 12.123f, (float)sin(angle),
(float)sin(angle), (float)cos(angle), 14.789f); (float)cos(angle), 14.789f);
warpAffine(img, templateImage, warpGround, warpAffine(img, templateImage, warpGround, Size(200, 200), INTER_LINEAR + WARP_INVERSE_MAP);
Size(200,200), INTER_LINEAR + WARP_INVERSE_MAP);
break; break;
case MOTION_AFFINE: case MOTION_AFFINE:
warpGround = (Mat_<float>(2,3) << 0.98f, 0.03f, 15.523f, warpGround = (Mat_<float>(2, 3) << 0.98f, 0.03f, 15.523f, -0.02f, 0.95f, 10.456f);
-0.02f, 0.95f, 10.456f); warpAffine(img, templateImage, warpGround, Size(200, 200), INTER_LINEAR + WARP_INVERSE_MAP);
warpAffine(img, templateImage, warpGround,
Size(200,200), INTER_LINEAR + WARP_INVERSE_MAP);
break; break;
case MOTION_HOMOGRAPHY: case MOTION_HOMOGRAPHY:
warpGround = (Mat_<float>(3,3) << 0.98f, 0.03f, 15.523f, warpGround = (Mat_<float>(3, 3) << 0.98f, 0.03f, 15.523f, -0.02f, 0.95f, 10.456f, 0.0002f, 0.0003f, 1.f);
-0.02f, 0.95f, 10.456f, warpPerspective(img, templateImage, warpGround, Size(200, 200), INTER_LINEAR + WARP_INVERSE_MAP);
0.0002f, 0.0003f, 1.f);
warpPerspective(img, templateImage, warpGround,
Size(200,200), INTER_LINEAR + WARP_INVERSE_MAP);
break; break;
} }
TEST_CYCLE() TEST_CYCLE() {
{
if (transform_type < 3) if (transform_type < 3)
warpMat = Mat::eye(2, 3, CV_32F); warpMat = Mat::eye(2, 3, CV_32F);
else else
@ -65,7 +54,9 @@ PERF_TEST_P(TransformationType, findTransformECC, /*testing::ValuesIn(MotionType
findTransformECC(templateImage, img, warpMat, transform_type, findTransformECC(templateImage, img, warpMat, transform_type,
TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 5, -1)); TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 5, -1));
} }
SANITY_CHECK(warpMat, 3e-3);
// TODO: Update baseline for new test
SANITY_CHECK_NOTHING();
} }
} // namespace } // namespace opencv_test


@ -41,30 +41,24 @@
#include "precomp.hpp" #include "precomp.hpp"
/****************************************************************************************\ /****************************************************************************************\
* Image Alignment (ECC algorithm) * * Image Alignment (ECC algorithm) *
\****************************************************************************************/ \****************************************************************************************/
using namespace cv; using namespace cv;
static void image_jacobian_homo_ECC(const Mat& src1, const Mat& src2, static void image_jacobian_homo_ECC(const Mat& src1, const Mat& src2, const Mat& src3, const Mat& src4, const Mat& src5,
const Mat& src3, const Mat& src4, Mat& dst) {
const Mat& src5, Mat& dst)
{
CV_Assert(src1.size() == src2.size()); CV_Assert(src1.size() == src2.size());
CV_Assert(src1.size() == src3.size()); CV_Assert(src1.size() == src3.size());
CV_Assert(src1.size() == src4.size()); CV_Assert(src1.size() == src4.size());
CV_Assert(src1.rows == dst.rows); CV_Assert(src1.rows == dst.rows);
CV_Assert(dst.cols == (src1.cols * 8)); CV_Assert(dst.cols == (src1.cols * 8));
CV_Assert(dst.type() == CV_32FC1); CV_Assert(dst.type() == CV_MAKETYPE(CV_32F, src1.channels()));
CV_Assert(src5.isContinuous()); CV_Assert(src5.isContinuous());
const float* hptr = src5.ptr<float>(0); const float* hptr = src5.ptr<float>(0);
const float h0_ = hptr[0]; const float h0_ = hptr[0];
@ -78,16 +72,20 @@ static void image_jacobian_homo_ECC(const Mat& src1, const Mat& src2,
const int w = src1.cols; const int w = src1.cols;
// create denominator for all points as a block // create denominator for all points as a block
Mat den_ = src3*h2_ + src4*h5_ + 1.0;//check the time of this! otherwise use addWeighted Mat den_;
addWeighted(src3, h2_, src4, h5_, 1.0, den_);
// create projected points // create projected points
Mat hatX_ = -src3*h0_ - src4*h3_ - h6_; Mat hatX_, hatY_;
divide(hatX_, den_, hatX_); addWeighted(src3, h0_, src4, h3_, 0.0, hatX_);
Mat hatY_ = -src3*h1_ - src4*h4_ - h7_; hatX_ += h6_;
divide(hatY_, den_, hatY_);
addWeighted(src3, h1_, src4, h4_, 0.0, hatY_);
hatY_ += h7_;
divide(-hatY_, den_, hatY_);
divide(-hatX_, den_, hatX_);
// instead of dividing each block with den, // instead of dividing each block with den,
// just pre-divide the block of gradients (it's more efficient) // just pre-divide the block of gradients (it's more efficient)
@ -98,7 +96,6 @@ static void image_jacobian_homo_ECC(const Mat& src1, const Mat& src2,
divide(src1, den_, src1Divided_); divide(src1, den_, src1Divided_);
divide(src2, den_, src2Divided_); divide(src2, den_, src2Divided_);
// compute Jacobian blocks (8 blocks) // compute Jacobian blocks (8 blocks)
dst.colRange(0, w) = src1Divided_.mul(src3); // 1 dst.colRange(0, w) = src1Divided_.mul(src3); // 1
@ -122,18 +119,15 @@ static void image_jacobian_homo_ECC(const Mat& src1, const Mat& src2,
src2Divided_.copyTo(dst.colRange(7 * w, 8 * w)); // 8 src2Divided_.copyTo(dst.colRange(7 * w, 8 * w)); // 8
} }
static void image_jacobian_euclidean_ECC(const Mat& src1, const Mat& src2, static void image_jacobian_euclidean_ECC(const Mat& src1, const Mat& src2, const Mat& src3, const Mat& src4,
const Mat& src3, const Mat& src4, const Mat& src5, Mat& dst) {
const Mat& src5, Mat& dst)
{
CV_Assert(src1.size() == src2.size()); CV_Assert(src1.size() == src2.size());
CV_Assert(src1.size() == src3.size()); CV_Assert(src1.size() == src3.size());
CV_Assert(src1.size() == src4.size()); CV_Assert(src1.size() == src4.size());
CV_Assert(src1.rows == dst.rows); CV_Assert(src1.rows == dst.rows);
CV_Assert(dst.cols == (src1.cols * 3)); CV_Assert(dst.cols == (src1.cols * 3));
CV_Assert(dst.type() == CV_32FC1); CV_Assert(dst.type() == CV_MAKETYPE(CV_32F, src1.channels()));
CV_Assert(src5.isContinuous()); CV_Assert(src5.isContinuous());
@ -150,7 +144,6 @@ static void image_jacobian_euclidean_ECC(const Mat& src1, const Mat& src2,
// create cos(theta)*X -sin(theta)*Y for all points as a block -> hatY // create cos(theta)*X -sin(theta)*Y for all points as a block -> hatY
Mat hatY = (src3 * h0) - (src4 * h1); Mat hatY = (src3 * h0) - (src4 * h1);
// compute Jacobian blocks (3 blocks) // compute Jacobian blocks (3 blocks)
dst.colRange(0, w) = (src1.mul(hatX)) + (src2.mul(hatY)); // 1 dst.colRange(0, w) = (src1.mul(hatX)) + (src2.mul(hatY)); // 1
@ -158,12 +151,7 @@ static void image_jacobian_euclidean_ECC(const Mat& src1, const Mat& src2,
src2.copyTo(dst.colRange(2 * w, 3 * w)); // 3 src2.copyTo(dst.colRange(2 * w, 3 * w)); // 3
} }
static void image_jacobian_affine_ECC(const Mat& src1, const Mat& src2, const Mat& src3, const Mat& src4, Mat& dst) {
static void image_jacobian_affine_ECC(const Mat& src1, const Mat& src2,
const Mat& src3, const Mat& src4,
Mat& dst)
{
CV_Assert(src1.size() == src2.size()); CV_Assert(src1.size() == src2.size());
CV_Assert(src1.size() == src3.size()); CV_Assert(src1.size() == src3.size());
CV_Assert(src1.size() == src4.size()); CV_Assert(src1.size() == src4.size());
@ -171,8 +159,7 @@ static void image_jacobian_affine_ECC(const Mat& src1, const Mat& src2,
CV_Assert(src1.rows == dst.rows); CV_Assert(src1.rows == dst.rows);
CV_Assert(dst.cols == (6 * src1.cols)); CV_Assert(dst.cols == (6 * src1.cols));
CV_Assert(dst.type() == CV_32FC1); CV_Assert(dst.type() == CV_MAKETYPE(CV_32F, src1.channels()));
const int w = src1.cols; const int w = src1.cols;
@ -186,15 +173,12 @@ static void image_jacobian_affine_ECC(const Mat& src1, const Mat& src2,
src2.copyTo(dst.colRange(5 * w, 6 * w)); // 6 src2.copyTo(dst.colRange(5 * w, 6 * w)); // 6
} }
static void image_jacobian_translation_ECC(const Mat& src1, const Mat& src2, Mat& dst) {
static void image_jacobian_translation_ECC(const Mat& src1, const Mat& src2, Mat& dst)
{
CV_Assert(src1.size() == src2.size()); CV_Assert(src1.size() == src2.size());
CV_Assert(src1.rows == dst.rows); CV_Assert(src1.rows == dst.rows);
CV_Assert(dst.cols == (src1.cols * 2)); CV_Assert(dst.cols == (src1.cols * 2));
CV_Assert(dst.type() == CV_32FC1); CV_Assert(dst.type() == CV_MAKETYPE(CV_32F, src1.channels()));
const int w = src1.cols; const int w = src1.cols;
@ -203,9 +187,7 @@ static void image_jacobian_translation_ECC(const Mat& src1, const Mat& src2, Mat
src2.copyTo(dst.colRange(w, 2 * w)); src2.copyTo(dst.colRange(w, 2 * w));
} }
static void project_onto_jacobian_ECC(const Mat& src1, const Mat& src2, Mat& dst) {
static void project_onto_jacobian_ECC(const Mat& src1, const Mat& src2, Mat& dst)
{
/* this functions is used for two types of projections. If src1.cols ==src.cols /* this functions is used for two types of projections. If src1.cols ==src.cols
it does a blockwise multiplication (like in the outer product of vectors) it does a blockwise multiplication (like in the outer product of vectors)
of the blocks in matrices src1 and src2 and dst of the blocks in matrices src1 and src2 and dst
@ -234,7 +216,6 @@ static void project_onto_jacobian_ECC(const Mat& src1, const Mat& src2, Mat& dst
w = src2.cols / dst.cols; w = src2.cols / dst.cols;
Mat mat; Mat mat;
for (int i = 0; i < dst.rows; i++) { for (int i = 0; i < dst.rows; i++) {
mat = Mat(src1.colRange(i * w, (i + 1) * w)); mat = Mat(src1.colRange(i * w, (i + 1) * w));
dstPtr[i * (dst.rows + 1)] = (float)pow(norm(mat), 2); // diagonal elements dstPtr[i * (dst.rows + 1)] = (float)pow(norm(mat), 2); // diagonal elements
@ -246,14 +227,12 @@ static void project_onto_jacobian_ECC(const Mat& src1, const Mat& src2, Mat& dst
} }
} }
static void update_warping_matrix_ECC(Mat& map_matrix, const Mat& update, const int motionType) {
static void update_warping_matrix_ECC (Mat& map_matrix, const Mat& update, const int motionType)
{
CV_Assert(map_matrix.type() == CV_32FC1); CV_Assert(map_matrix.type() == CV_32FC1);
CV_Assert(update.type() == CV_32FC1); CV_Assert(update.type() == CV_32FC1);
CV_Assert (motionType == MOTION_TRANSLATION || motionType == MOTION_EUCLIDEAN || CV_Assert(motionType == MOTION_TRANSLATION || motionType == MOTION_EUCLIDEAN || motionType == MOTION_AFFINE ||
motionType == MOTION_AFFINE || motionType == MOTION_HOMOGRAPHY); motionType == MOTION_HOMOGRAPHY);
if (motionType == MOTION_HOMOGRAPHY) if (motionType == MOTION_HOMOGRAPHY)
CV_Assert(map_matrix.rows == 3 && update.rows == 8); CV_Assert(map_matrix.rows == 3 && update.rows == 8);
@ -269,11 +248,9 @@ static void update_warping_matrix_ECC (Mat& map_matrix, const Mat& update, const
CV_Assert(map_matrix.isContinuous()); CV_Assert(map_matrix.isContinuous());
CV_Assert(update.isContinuous()); CV_Assert(update.isContinuous());
float* mapPtr = map_matrix.ptr<float>(0); float* mapPtr = map_matrix.ptr<float>(0);
const float* updatePtr = update.ptr<float>(0); const float* updatePtr = update.ptr<float>(0);
if (motionType == MOTION_TRANSLATION) { if (motionType == MOTION_TRANSLATION) {
mapPtr[2] += updatePtr[0]; mapPtr[2] += updatePtr[0];
mapPtr[5] += updatePtr[1]; mapPtr[5] += updatePtr[1];
@ -308,15 +285,15 @@ static void update_warping_matrix_ECC (Mat& map_matrix, const Mat& update, const
} }
} }
/** Function that computes the enhanced correlation coefficient from Georgios et al. 2008 /** Function that computes the enhanced correlation coefficient from Georgios et al. 2008
* See https://github.com/opencv/opencv/issues/12432 * See https://github.com/opencv/opencv/issues/12432
*/ */
double cv::computeECC(InputArray templateImage, InputArray inputImage, InputArray inputMask) double cv::computeECC(InputArray templateImage, InputArray inputImage, InputArray inputMask) {
{
CV_Assert(!templateImage.empty()); CV_Assert(!templateImage.empty());
CV_Assert(!inputImage.empty()); CV_Assert(!inputImage.empty());
CV_Assert(templateImage.channels() == 1 || templateImage.channels() == 3);
if (!(templateImage.type() == inputImage.type())) if (!(templateImage.type() == inputImage.type()))
CV_Error(Error::StsUnmatchedFormats, "Both input images must have the same data type"); CV_Error(Error::StsUnmatchedFormats, "Both input images must have the same data type");
@ -344,29 +321,20 @@ double cv::computeECC(InputArray templateImage, InputArray inputImage, InputArra
cv::swap(inputMat, inputMatConverted); cv::swap(inputMat, inputMatConverted);
} }
subtract(templateMat, meanTemplate, templateImage_zeromean, inputMask); subtract(templateMat, meanTemplate, templateImage_zeromean, inputMask);
double templateImagenorm = std::sqrt(active_pixels*sdTemplate.val[0]*sdTemplate.val[0]); double templateImagenorm = std::sqrt(active_pixels * cv::norm(sdTemplate, NORM_L2SQR));
Scalar meanInput, sdInput; Scalar meanInput, sdInput;
Mat inputImage_zeromean = Mat::zeros(inputImage.size(), inputImage.type()); Mat inputImage_zeromean = Mat::zeros(inputImage.size(), inputImage.type());
meanStdDev(inputImage, meanInput, sdInput, inputMask); meanStdDev(inputImage, meanInput, sdInput, inputMask);
subtract(inputMat, meanInput, inputImage_zeromean, inputMask); subtract(inputMat, meanInput, inputImage_zeromean, inputMask);
double inputImagenorm = std::sqrt(active_pixels*sdInput.val[0]*sdInput.val[0]); double inputImagenorm = std::sqrt(active_pixels * norm(sdInput, NORM_L2SQR));
return templateImage_zeromean.dot(inputImage_zeromean) / (templateImagenorm * inputImagenorm); return templateImage_zeromean.dot(inputImage_zeromean) / (templateImagenorm * inputImagenorm);
} }
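For clarity, a restatement of why the scalar variance turned into an L2-squared norm (a note for this review, not text from the patch): meanStdDev returns one standard deviation per channel, so with \f$N\f$ active pixels the zero-mean template norm in the denominator becomes

\f[
\|T - \mu_T\| = \sqrt{N \sum_{c=1}^{C} \sigma_{T_c}^2},
\f]

which is exactly std::sqrt(active_pixels * cv::norm(sdTemplate, NORM_L2SQR)); for a single channel it reduces to the previous expression based on sdTemplate.val[0].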
double cv::findTransformECC(InputArray templateImage, InputArray inputImage, InputOutputArray warpMatrix,
double cv::findTransformECC(InputArray templateImage, int motionType, TermCriteria criteria, InputArray inputMask, int gaussFiltSize) {
InputArray inputImage,
InputOutputArray warpMatrix,
int motionType,
TermCriteria criteria,
InputArray inputMask,
int gaussFiltSize)
{
Mat src = templateImage.getMat(); // template image Mat src = templateImage.getMat(); // template image
Mat dst = inputImage.getMat(); // input image (to be warped) Mat dst = inputImage.getMat(); // input image (to be warped)
Mat map = warpMatrix.getMat(); // warp (transformation) Mat map = warpMatrix.getMat(); // warp (transformation)
@ -374,6 +342,11 @@ double cv::findTransformECC(InputArray templateImage,
CV_Assert(!src.empty()); CV_Assert(!src.empty());
CV_Assert(!dst.empty()); CV_Assert(!dst.empty());
CV_Assert(src.channels() == 1 || src.channels() == 3);
CV_Assert(src.channels() == dst.channels());
CV_Assert(src.depth() == dst.depth());
CV_Assert(src.depth() == CV_8U || src.depth() == CV_16U || src.depth() == CV_32F || src.depth() == CV_64F);
// If the user passed an un-initialized warpMatrix, initialize to identity // If the user passed an un-initialized warpMatrix, initialize to identity
if (map.empty()) { if (map.empty()) {
int rowCount = 2; int rowCount = 2;
@ -388,18 +361,14 @@ double cv::findTransformECC(InputArray templateImage,
if (!(src.type() == dst.type())) if (!(src.type() == dst.type()))
CV_Error(Error::StsUnmatchedFormats, "Both input images must have the same data type"); CV_Error(Error::StsUnmatchedFormats, "Both input images must have the same data type");
//accept only 1-channel images
if( src.type() != CV_8UC1 && src.type()!= CV_32FC1)
CV_Error( Error::StsUnsupportedFormat, "Images must have 8uC1 or 32fC1 type");
if (map.type() != CV_32FC1) if (map.type() != CV_32FC1)
CV_Error(Error::StsUnsupportedFormat, "warpMatrix must be single-channel floating-point matrix"); CV_Error(Error::StsUnsupportedFormat, "warpMatrix must be single-channel floating-point matrix");
CV_Assert(map.cols == 3); CV_Assert(map.cols == 3);
CV_Assert(map.rows == 2 || map.rows == 3); CV_Assert(map.rows == 2 || map.rows == 3);
CV_Assert (motionType == MOTION_AFFINE || motionType == MOTION_HOMOGRAPHY || CV_Assert(motionType == MOTION_AFFINE || motionType == MOTION_HOMOGRAPHY || motionType == MOTION_EUCLIDEAN ||
motionType == MOTION_EUCLIDEAN || motionType == MOTION_TRANSLATION); motionType == MOTION_TRANSLATION);
if (motionType == MOTION_HOMOGRAPHY) { if (motionType == MOTION_HOMOGRAPHY) {
CV_Assert(map.rows == 3); CV_Assert(map.rows == 3);
@ -422,7 +391,6 @@ double cv::findTransformECC(InputArray templateImage,
break; break;
} }
const int numberOfParameters = paramTemp; const int numberOfParameters = paramTemp;
const int ws = src.cols; const int ws = src.cols;
@ -438,10 +406,8 @@ double cv::findTransformECC(InputArray templateImage,
float* XcoPtr = Xcoord.ptr<float>(0); float* XcoPtr = Xcoord.ptr<float>(0);
float* YcoPtr = Ycoord.ptr<float>(0); float* YcoPtr = Ycoord.ptr<float>(0);
int j; int j;
for (j=0; j<ws; j++) for (j = 0; j < ws; j++) XcoPtr[j] = (float)j;
XcoPtr[j] = (float) j; for (j = 0; j < hs; j++) YcoPtr[j] = (float)j;
for (j=0; j<hs; j++)
YcoPtr[j] = (float) j;
repeat(Xcoord, hs, 1, Xgrid); repeat(Xcoord, hs, 1, Xgrid);
repeat(Ycoord, 1, ws, Ygrid); repeat(Ycoord, 1, ws, Ygrid);
@ -449,10 +415,19 @@ double cv::findTransformECC(InputArray templateImage,
Xcoord.release(); Xcoord.release();
Ycoord.release(); Ycoord.release();
Mat templateZM = Mat(hs, ws, CV_32F);// to store the (smoothed)zero-mean version of template const int channels = src.channels();
Mat templateFloat = Mat(hs, ws, CV_32F);// to store the (smoothed) template int type = CV_MAKETYPE(CV_32F, channels); // kept as a separate variable in case the explicit type is needed
Mat imageFloat = Mat(hd, wd, CV_32F);// to store the (smoothed) input image
Mat imageWarped = Mat(hs, ws, CV_32F);// to store the warped zero-mean input image std::vector<cv::Mat> XgridCh(channels, Xgrid);
cv::merge(XgridCh, Xgrid);
std::vector<cv::Mat> YgridCh(channels, Ygrid);
cv::merge(YgridCh, Ygrid);
Mat templateZM = Mat(hs, ws, type); // to store the (smoothed)zero-mean version of template
Mat templateFloat = Mat(hs, ws, type); // to store the (smoothed) template
Mat imageFloat = Mat(hd, wd, type); // to store the (smoothed) input image
Mat imageWarped = Mat(hs, ws, type); // to store the warped zero-mean input image
Mat imageMask = Mat(hs, ws, CV_8U); // to store the final mask Mat imageMask = Mat(hs, ws, CV_8U); // to store the final mask
Mat inputMaskMat = inputMask.getMat(); Mat inputMaskMat = inputMask.getMat();
@ -463,12 +438,12 @@ double cv::findTransformECC(InputArray templateImage,
else else
threshold(inputMask, preMask, 0, 1, THRESH_BINARY); threshold(inputMask, preMask, 0, 1, THRESH_BINARY);
//gaussian filtering is optional // Gaussian filtering is optional
src.convertTo(templateFloat, templateFloat.type()); src.convertTo(templateFloat, templateFloat.type());
GaussianBlur(templateFloat, templateFloat, Size(gaussFiltSize, gaussFiltSize), 0, 0); GaussianBlur(templateFloat, templateFloat, Size(gaussFiltSize, gaussFiltSize), 0, 0);
Mat preMaskFloat; Mat preMaskFloat;
preMask.convertTo(preMaskFloat, CV_32F); preMask.convertTo(preMaskFloat, type);
GaussianBlur(preMaskFloat, preMaskFloat, Size(gaussFiltSize, gaussFiltSize), 0, 0); GaussianBlur(preMaskFloat, preMaskFloat, Size(gaussFiltSize, gaussFiltSize), 0, 0);
// Change threshold. // Change threshold.
preMaskFloat *= (0.5 / 0.95); preMaskFloat *= (0.5 / 0.95);
@ -480,11 +455,10 @@ double cv::findTransformECC(InputArray templateImage,
GaussianBlur(imageFloat, imageFloat, Size(gaussFiltSize, gaussFiltSize), 0, 0); GaussianBlur(imageFloat, imageFloat, Size(gaussFiltSize, gaussFiltSize), 0, 0);
// needed matrices for gradients and warped gradients // needed matrices for gradients and warped gradients
Mat gradientX = Mat::zeros(hd, wd, CV_32FC1); Mat gradientX = Mat::zeros(hd, wd, type);
Mat gradientY = Mat::zeros(hd, wd, CV_32FC1); Mat gradientY = Mat::zeros(hd, wd, type);
Mat gradientXWarped = Mat(hs, ws, CV_32FC1); Mat gradientXWarped = Mat(hs, ws, type);
Mat gradientYWarped = Mat(hs, ws, CV_32FC1); Mat gradientYWarped = Mat(hs, ws, type);
// calculate first order image derivatives // calculate first order image derivatives
Matx13f dx(-0.5f, 0.0f, 0.5f); Matx13f dx(-0.5f, 0.0f, 0.5f);
@ -492,11 +466,15 @@ double cv::findTransformECC(InputArray templateImage,
filter2D(imageFloat, gradientX, -1, dx); filter2D(imageFloat, gradientX, -1, dx);
filter2D(imageFloat, gradientY, -1, dx.t()); filter2D(imageFloat, gradientY, -1, dx.t());
gradientX = gradientX.mul(preMaskFloat); cv::Mat preMaskFloatNCh;
gradientY = gradientY.mul(preMaskFloat); std::vector<cv::Mat> maskChannels(gradientX.channels(), preMaskFloat);
cv::merge(maskChannels, preMaskFloatNCh);
gradientX = gradientX.mul(preMaskFloatNCh);
gradientY = gradientY.mul(preMaskFloatNCh);
// matrices needed for solving linear equation system for maximizing ECC // matrices needed for solving linear equation system for maximizing ECC
Mat jacobian = Mat(hs, ws*numberOfParameters, CV_32F); Mat jacobian = Mat(hs, ws * numberOfParameters, type);
Mat hessian = Mat(numberOfParameters, numberOfParameters, CV_32F); Mat hessian = Mat(numberOfParameters, numberOfParameters, CV_32F);
Mat hessianInv = Mat(numberOfParameters, numberOfParameters, CV_32F); Mat hessianInv = Mat(numberOfParameters, numberOfParameters, CV_32F);
Mat imageProjection = Mat(numberOfParameters, 1, CV_32F); Mat imageProjection = Mat(numberOfParameters, 1, CV_32F);
@ -510,23 +488,17 @@ double cv::findTransformECC(InputArray templateImage,
const int imageFlags = INTER_LINEAR + WARP_INVERSE_MAP; const int imageFlags = INTER_LINEAR + WARP_INVERSE_MAP;
const int maskFlags = INTER_NEAREST + WARP_INVERSE_MAP; const int maskFlags = INTER_NEAREST + WARP_INVERSE_MAP;
// iteratively update map_matrix // iteratively update map_matrix
double rho = -1; double rho = -1;
double last_rho = -termination_eps; double last_rho = -termination_eps;
for (int i = 1; (i <= numberOfIterations) && (fabs(rho-last_rho)>= termination_eps); i++) for (int i = 1; (i <= numberOfIterations) && (fabs(rho - last_rho) >= termination_eps); i++) {
{
// warp-back portion of the inputImage and gradients to the coordinate space of the templateImage // warp-back portion of the inputImage and gradients to the coordinate space of the templateImage
if (motionType != MOTION_HOMOGRAPHY) if (motionType != MOTION_HOMOGRAPHY) {
{
warpAffine(imageFloat, imageWarped, map, imageWarped.size(), imageFlags); warpAffine(imageFloat, imageWarped, map, imageWarped.size(), imageFlags);
warpAffine(gradientX, gradientXWarped, map, gradientXWarped.size(), imageFlags); warpAffine(gradientX, gradientXWarped, map, gradientXWarped.size(), imageFlags);
warpAffine(gradientY, gradientYWarped, map, gradientYWarped.size(), imageFlags); warpAffine(gradientY, gradientYWarped, map, gradientYWarped.size(), imageFlags);
warpAffine(preMask, imageMask, map, imageMask.size(), maskFlags); warpAffine(preMask, imageMask, map, imageMask.size(), maskFlags);
} } else {
else
{
warpPerspective(imageFloat, imageWarped, map, imageWarped.size(), imageFlags); warpPerspective(imageFloat, imageWarped, map, imageWarped.size(), imageFlags);
warpPerspective(gradientX, gradientXWarped, map, gradientXWarped.size(), imageFlags); warpPerspective(gradientX, gradientXWarped, map, gradientXWarped.size(), imageFlags);
warpPerspective(gradientY, gradientYWarped, map, gradientYWarped.size(), imageFlags); warpPerspective(gradientY, gradientYWarped, map, gradientYWarped.size(), imageFlags);
@ -541,8 +513,9 @@ double cv::findTransformECC(InputArray templateImage,
templateZM = Mat::zeros(templateZM.rows, templateZM.cols, templateZM.type()); templateZM = Mat::zeros(templateZM.rows, templateZM.cols, templateZM.type());
subtract(templateFloat, tmpMean, templateZM, imageMask); // zero-mean template subtract(templateFloat, tmpMean, templateZM, imageMask); // zero-mean template
const double tmpNorm = std::sqrt(countNonZero(imageMask)*(tmpStd.val[0])*(tmpStd.val[0])); int validPixels = countNonZero(imageMask);
const double imgNorm = std::sqrt(countNonZero(imageMask)*(imgStd.val[0])*(imgStd.val[0])); double tmpNorm = std::sqrt(validPixels * cv::norm(tmpStd, cv::NORM_L2SQR));
double imgNorm = std::sqrt(validPixels * cv::norm(imgStd, cv::NORM_L2SQR));
// calculate jacobian of image wrt parameters // calculate jacobian of image wrt parameters
switch (motionType) { switch (motionType) {
@ -578,16 +551,15 @@ double cv::findTransformECC(InputArray templateImage,
project_onto_jacobian_ECC(jacobian, imageWarped, imageProjection); project_onto_jacobian_ECC(jacobian, imageWarped, imageProjection);
project_onto_jacobian_ECC(jacobian, templateZM, templateProjection); project_onto_jacobian_ECC(jacobian, templateZM, templateProjection);
// calculate the parameter lambda to account for illumination variation // calculate the parameter lambda to account for illumination variation
imageProjectionHessian = hessianInv * imageProjection; imageProjectionHessian = hessianInv * imageProjection;
const double lambda_n = (imgNorm * imgNorm) - imageProjection.dot(imageProjectionHessian); const double lambda_n = (imgNorm * imgNorm) - imageProjection.dot(imageProjectionHessian);
const double lambda_d = correlation - templateProjection.dot(imageProjectionHessian); const double lambda_d = correlation - templateProjection.dot(imageProjectionHessian);
if (lambda_d <= 0.0) if (lambda_d <= 0.0) {
{
rho = -1; rho = -1;
CV_Error(Error::StsNoConv, "The algorithm stopped before its convergence. The correlation is going to be minimized. Images may be uncorrelated or non-overlapped"); CV_Error(Error::StsNoConv,
"The algorithm stopped before its convergence. The correlation is going to be minimized. Images "
"may be uncorrelated or non-overlapped");
} }
const double lambda = (lambda_n / lambda_d); const double lambda = (lambda_n / lambda_d);
@ -598,19 +570,14 @@ double cv::findTransformECC(InputArray templateImage,
// update warping matrix // update warping matrix
update_warping_matrix_ECC(map, deltaP, motionType); update_warping_matrix_ECC(map, deltaP, motionType);
} }
// return final correlation coefficient // return final correlation coefficient
return rho; return rho;
} }
double cv::findTransformECC(InputArray templateImage, InputArray inputImage, double cv::findTransformECC(InputArray templateImage, InputArray inputImage, InputOutputArray warpMatrix,
InputOutputArray warpMatrix, int motionType, int motionType, TermCriteria criteria, InputArray inputMask) {
TermCriteria criteria,
InputArray inputMask)
{
// Use default value of 5 for gaussFiltSize to maintain backward compatibility. // Use default value of 5 for gaussFiltSize to maintain backward compatibility.
return findTransformECC(templateImage, inputImage, warpMatrix, motionType, criteria, inputMask, 5); return findTransformECC(templateImage, inputImage, warpMatrix, motionType, criteria, inputMask, 5);
} }


@ -42,37 +42,45 @@
#include "test_precomp.hpp" #include "test_precomp.hpp"
namespace opencv_test { namespace { namespace opencv_test {
namespace {
class CV_ECC_BaseTest : public cvtest::BaseTest class CV_ECC_BaseTest : public cvtest::BaseTest {
{
public: public:
CV_ECC_BaseTest(); CV_ECC_BaseTest();
virtual ~CV_ECC_BaseTest();
protected: protected:
double computeRMS(const Mat& mat1, const Mat& mat2); double computeRMS(const Mat& mat1, const Mat& mat2);
bool isMapCorrect(const Mat& mat); bool isMapCorrect(const Mat& mat);
virtual bool test(const Mat) { return true; }; // single test
bool testAllTypes(const Mat img); // run test for all supported data types (U8, U16, F32, F64)
bool testAllChNum(const Mat img); // run test for all supported channels count (gray, RGB)
void run(int);
bool checkMap(const Mat& map, const Mat& ground);
double MAX_RMS_ECC; // upper bound for RMS error double MAX_RMS_ECC; // upper bound for RMS error
int ntests; // number of tests per motion type int ntests; // number of tests per motion type
int ECC_iterations; // number of iterations for ECC int ECC_iterations; // number of iterations for ECC
double ECC_epsilon; // we choose a negative value, so that double ECC_epsilon; // we choose a negative value, so that
// ECC_iterations are always executed // ECC_iterations are always executed
TermCriteria criteria;
}; };
CV_ECC_BaseTest::CV_ECC_BaseTest() CV_ECC_BaseTest::CV_ECC_BaseTest() {
{
MAX_RMS_ECC = 0.1; MAX_RMS_ECC = 0.1;
ntests = 3; ntests = 3;
ECC_iterations = 50; ECC_iterations = 50;
ECC_epsilon = -1; //-> negative value means that ECC_Iterations will be executed ECC_epsilon = -1; //-> negative value means that ECC_Iterations will be executed
criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, ECC_iterations, ECC_epsilon);
} }
CV_ECC_BaseTest::~CV_ECC_BaseTest() {}
bool CV_ECC_BaseTest::isMapCorrect(const Mat& map) bool CV_ECC_BaseTest::isMapCorrect(const Mat& map) {
{
bool tr = true; bool tr = true;
float mapVal; float mapVal;
for (int i = 0; i < map.rows; i++) for (int i = 0; i < map.rows; i++)
@ -85,414 +93,325 @@ bool CV_ECC_BaseTest::isMapCorrect(const Mat& map)
} }
double CV_ECC_BaseTest::computeRMS(const Mat& mat1, const Mat& mat2) { double CV_ECC_BaseTest::computeRMS(const Mat& mat1, const Mat& mat2) {
CV_Assert(mat1.rows == mat2.rows); CV_Assert(mat1.rows == mat2.rows);
CV_Assert(mat1.cols == mat2.cols); CV_Assert(mat1.cols == mat2.cols);
Mat errorMat; Mat errorMat;
subtract(mat1, mat2, errorMat); subtract(mat1, mat2, errorMat);
return sqrt(errorMat.dot(errorMat)/(mat1.rows*mat1.cols)); return sqrt(errorMat.dot(errorMat) / (mat1.rows * mat1.cols * mat1.channels()));
} }
class CV_ECC_Test_Translation : public CV_ECC_BaseTest bool CV_ECC_BaseTest::checkMap(const Mat& map, const Mat& ground) {
{ if (!isMapCorrect(map)) {
public:
CV_ECC_Test_Translation();
protected:
void run(int);
bool testTranslation(int);
};
CV_ECC_Test_Translation::CV_ECC_Test_Translation(){}
bool CV_ECC_Test_Translation::testTranslation(int from)
{
Mat img = imread( string(ts->get_data_path()) + "shared/fruits.png", 0);
if (img.empty())
{
ts->printf( ts->LOG, "test image can not be read");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return false;
}
Mat testImg;
resize(img, testImg, Size(216, 216), 0, 0, INTER_LINEAR_EXACT);
cv::RNG rng = ts->get_rng();
int progress=0;
for (int k=from; k<ntests; k++){
ts->update_context( this, k, true );
progress = update_progress(progress, k, ntests, 0);
Mat translationGround = (Mat_<float>(2,3) << 1, 0, (rng.uniform(10.f, 20.f)),
0, 1, (rng.uniform(10.f, 20.f)));
Mat warpedImage;
warpAffine(testImg, warpedImage, translationGround,
Size(200,200), INTER_LINEAR + WARP_INVERSE_MAP);
Mat mapTranslation = (Mat_<float>(2,3) << 1, 0, 0, 0, 1, 0);
findTransformECC(warpedImage, testImg, mapTranslation, 0,
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, ECC_iterations, ECC_epsilon));
if (!isMapCorrect(mapTranslation)){
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT); ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return false; return false;
} }
if (computeRMS(mapTranslation, translationGround)>MAX_RMS_ECC){ if (computeRMS(map, ground) > MAX_RMS_ECC) {
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
ts->printf( ts->LOG, "RMS = %f", ts->printf(ts->LOG, "RMS = %f", computeRMS(map, ground));
computeRMS(mapTranslation, translationGround));
return false; return false;
} }
}
return true; return true;
} }
void CV_ECC_Test_Translation::run(int from) bool CV_ECC_BaseTest::testAllTypes(const Mat img) {
{ auto types = {CV_8U, CV_16U, CV_32F, CV_64F};
for (auto type : types) {
Mat timg;
img.convertTo(timg, type);
if (!test(timg))
return false;
}
return true;
}
if (!testTranslation(from)) bool CV_ECC_BaseTest::testAllChNum(const Mat img) {
if (!testAllTypes(img))
return false;
Mat gray;
cvtColor(img, gray, COLOR_RGB2GRAY);
if (!testAllTypes(gray))
return false;
return true;
}
void CV_ECC_BaseTest::run(int) {
Mat img = imread(string(ts->get_data_path()) + "shared/fruits.png");
if (img.empty()) {
ts->printf(ts->LOG, "test image can not be read");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return; return;
}
Mat testImg;
resize(img, testImg, Size(216, 216), 0, 0, INTER_LINEAR_EXACT);
testAllChNum(testImg);
ts->set_failed_test_info(cvtest::TS::OK); ts->set_failed_test_info(cvtest::TS::OK);
} }
class CV_ECC_Test_Translation : public CV_ECC_BaseTest {
public:
CV_ECC_Test_Translation();
protected:
bool test(const Mat);
};
class CV_ECC_Test_Euclidean : public CV_ECC_BaseTest CV_ECC_Test_Translation::CV_ECC_Test_Translation() {}
{
bool CV_ECC_Test_Translation::test(const Mat testImg) {
cv::RNG rng = ts->get_rng();
int progress = 0;
for (int k = 0; k < ntests; k++) {
ts->update_context(this, k, true);
progress = update_progress(progress, k, ntests, 0);
Mat translationGround = (Mat_<float>(2, 3) << 1, 0, (rng.uniform(10.f, 20.f)), 0, 1, (rng.uniform(10.f, 20.f)));
Mat warpedImage;
warpAffine(testImg, warpedImage, translationGround, Size(200, 200), INTER_LINEAR + WARP_INVERSE_MAP);
Mat mapTranslation = (Mat_<float>(2, 3) << 1, 0, 0, 0, 1, 0);
findTransformECC(warpedImage, testImg, mapTranslation, 0, criteria);
if (!checkMap(mapTranslation, translationGround))
return false;
}
return true;
}
class CV_ECC_Test_Euclidean : public CV_ECC_BaseTest {
public: public:
CV_ECC_Test_Euclidean(); CV_ECC_Test_Euclidean();
protected:
void run(int);
bool testEuclidean(int); protected:
bool test(const Mat);
}; };
CV_ECC_Test_Euclidean::CV_ECC_Test_Euclidean() {} CV_ECC_Test_Euclidean::CV_ECC_Test_Euclidean() {}
bool CV_ECC_Test_Euclidean::testEuclidean(int from) bool CV_ECC_Test_Euclidean::test(const Mat testImg) {
{
Mat img = imread( string(ts->get_data_path()) + "shared/fruits.png", 0);
if (img.empty())
{
ts->printf( ts->LOG, "test image can not be read");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return false;
}
Mat testImg;
resize(img, testImg, Size(216, 216), 0, 0, INTER_LINEAR_EXACT);
cv::RNG rng = ts->get_rng(); cv::RNG rng = ts->get_rng();
int progress = 0; int progress = 0;
for (int k=from; k<ntests; k++){ for (int k = 0; k < ntests; k++) {
ts->update_context(this, k, true); ts->update_context(this, k, true);
progress = update_progress(progress, k, ntests, 0); progress = update_progress(progress, k, ntests, 0);
double angle = CV_PI / 30 + CV_PI * rng.uniform((double)-2.f, (double)2.f) / 180; double angle = CV_PI / 30 + CV_PI * rng.uniform((double)-2.f, (double)2.f) / 180;
Mat euclideanGround = (Mat_<float>(2,3) << cos(angle), -sin(angle), (rng.uniform(10.f, 20.f)), Mat euclideanGround = (Mat_<float>(2, 3) << cos(angle), -sin(angle), (rng.uniform(10.f, 20.f)), sin(angle),
sin(angle), cos(angle), (rng.uniform(10.f, 20.f))); cos(angle), (rng.uniform(10.f, 20.f)));
Mat warpedImage; Mat warpedImage;
warpAffine(testImg, warpedImage, euclideanGround, warpAffine(testImg, warpedImage, euclideanGround, Size(200, 200), INTER_LINEAR + WARP_INVERSE_MAP);
Size(200,200), INTER_LINEAR + WARP_INVERSE_MAP);
Mat mapEuclidean = (Mat_<float>(2, 3) << 1, 0, 0, 0, 1, 0); Mat mapEuclidean = (Mat_<float>(2, 3) << 1, 0, 0, 0, 1, 0);
findTransformECC(warpedImage, testImg, mapEuclidean, 1, findTransformECC(warpedImage, testImg, mapEuclidean, 1, criteria);
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, ECC_iterations, ECC_epsilon));
if (!isMapCorrect(mapEuclidean)){ if (!checkMap(mapEuclidean, euclideanGround))
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return false; return false;
} }
if (computeRMS(mapEuclidean, euclideanGround)>MAX_RMS_ECC){
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
ts->printf( ts->LOG, "RMS = %f",
computeRMS(mapEuclidean, euclideanGround));
return false;
}
}
return true; return true;
} }
class CV_ECC_Test_Affine : public CV_ECC_BaseTest {
void CV_ECC_Test_Euclidean::run(int from)
{
if (!testEuclidean(from))
return;
ts->set_failed_test_info(cvtest::TS::OK);
}
class CV_ECC_Test_Affine : public CV_ECC_BaseTest
{
public: public:
CV_ECC_Test_Affine(); CV_ECC_Test_Affine();
protected:
void run(int);
bool testAffine(int); protected:
bool test(const Mat img);
}; };
CV_ECC_Test_Affine::CV_ECC_Test_Affine() {} CV_ECC_Test_Affine::CV_ECC_Test_Affine() {}
bool CV_ECC_Test_Affine::test(const Mat testImg) {
bool CV_ECC_Test_Affine::testAffine(int from)
{
Mat img = imread( string(ts->get_data_path()) + "shared/fruits.png", 0);
if (img.empty())
{
ts->printf( ts->LOG, "test image can not be read");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return false;
}
Mat testImg;
resize(img, testImg, Size(216, 216), 0, 0, INTER_LINEAR_EXACT);
cv::RNG rng = ts->get_rng(); cv::RNG rng = ts->get_rng();
int progress = 0; int progress = 0;
for (int k=from; k<ntests; k++){ for (int k = 0; k < ntests; k++) {
ts->update_context(this, k, true); ts->update_context(this, k, true);
progress = update_progress(progress, k, ntests, 0); progress = update_progress(progress, k, ntests, 0);
Mat affineGround = (Mat_<float>(2, 3) << (1 - rng.uniform(-0.05f, 0.05f)), (rng.uniform(-0.03f, 0.03f)),
Mat affineGround = (Mat_<float>(2,3) << (1-rng.uniform(-0.05f, 0.05f)), (rng.uniform(10.f, 20.f)), (rng.uniform(-0.03f, 0.03f)), (1 - rng.uniform(-0.05f, 0.05f)),
(rng.uniform(-0.03f, 0.03f)), (rng.uniform(10.f, 20.f)),
(rng.uniform(-0.03f, 0.03f)), (1-rng.uniform(-0.05f, 0.05f)),
(rng.uniform(10.f, 20.f))); (rng.uniform(10.f, 20.f)));
Mat warpedImage; Mat warpedImage;
warpAffine(testImg, warpedImage, affineGround, warpAffine(testImg, warpedImage, affineGround, Size(200, 200), INTER_LINEAR + WARP_INVERSE_MAP);
Size(200,200), INTER_LINEAR + WARP_INVERSE_MAP);
Mat mapAffine = (Mat_<float>(2, 3) << 1, 0, 0, 0, 1, 0); Mat mapAffine = (Mat_<float>(2, 3) << 1, 0, 0, 0, 1, 0);
findTransformECC(warpedImage, testImg, mapAffine, 2, findTransformECC(warpedImage, testImg, mapAffine, 2, criteria);
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, ECC_iterations, ECC_epsilon));
if (!isMapCorrect(mapAffine)){ if (!checkMap(mapAffine, affineGround))
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return false; return false;
} }
if (computeRMS(mapAffine, affineGround)>MAX_RMS_ECC){
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
ts->printf( ts->LOG, "RMS = %f",
computeRMS(mapAffine, affineGround));
return false;
}
}
return true; return true;
} }
class CV_ECC_Test_Homography : public CV_ECC_BaseTest {
void CV_ECC_Test_Affine::run(int from)
{
if (!testAffine(from))
return;
ts->set_failed_test_info(cvtest::TS::OK);
}
class CV_ECC_Test_Homography : public CV_ECC_BaseTest
{
public: public:
CV_ECC_Test_Homography(); CV_ECC_Test_Homography();
protected:
void run(int);
bool testHomography(int); protected:
bool test(const Mat testImg);
}; };
CV_ECC_Test_Homography::CV_ECC_Test_Homography() {} CV_ECC_Test_Homography::CV_ECC_Test_Homography() {}
bool CV_ECC_Test_Homography::testHomography(int from) bool CV_ECC_Test_Homography::test(const Mat testImg) {
{
Mat img = imread( string(ts->get_data_path()) + "shared/fruits.png", 0);
if (img.empty())
{
ts->printf( ts->LOG, "test image can not be read");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return false;
}
Mat testImg;
resize(img, testImg, Size(216, 216), 0, 0, INTER_LINEAR_EXACT);
cv::RNG rng = ts->get_rng(); cv::RNG rng = ts->get_rng();
int progress = 0; int progress = 0;
for (int k=from; k<ntests; k++){ for (int k = 0; k < ntests; k++) {
ts->update_context(this, k, true); ts->update_context(this, k, true);
progress = update_progress(progress, k, ntests, 0); progress = update_progress(progress, k, ntests, 0);
Mat homoGround = (Mat_<float>(3,3) << (1-rng.uniform(-0.05f, 0.05f)), Mat homoGround =
(rng.uniform(-0.03f, 0.03f)), (rng.uniform(10.f, 20.f)), (Mat_<float>(3, 3) << (1 - rng.uniform(-0.05f, 0.05f)), (rng.uniform(-0.03f, 0.03f)),
(rng.uniform(-0.03f, 0.03f)), (1-rng.uniform(-0.05f, 0.05f)),(rng.uniform(10.f, 20.f)), (rng.uniform(10.f, 20.f)), (rng.uniform(-0.03f, 0.03f)), (1 - rng.uniform(-0.05f, 0.05f)),
(rng.uniform(0.0001f, 0.0003f)), (rng.uniform(0.0001f, 0.0003f)), 1.f); (rng.uniform(10.f, 20.f)), (rng.uniform(0.0001f, 0.0003f)), (rng.uniform(0.0001f, 0.0003f)), 1.f);
Mat warpedImage; Mat warpedImage;
warpPerspective(testImg, warpedImage, homoGround, warpPerspective(testImg, warpedImage, homoGround, Size(200, 200), INTER_LINEAR + WARP_INVERSE_MAP);
Size(200,200), INTER_LINEAR + WARP_INVERSE_MAP);
Mat mapHomography = Mat::eye(3, 3, CV_32F); Mat mapHomography = Mat::eye(3, 3, CV_32F);
findTransformECC(warpedImage, testImg, mapHomography, 3, findTransformECC(warpedImage, testImg, mapHomography, 3, criteria);
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, ECC_iterations, ECC_epsilon));
if (!isMapCorrect(mapHomography)){ if (!checkMap(mapHomography, homoGround))
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return false; return false;
} }
if (computeRMS(mapHomography, homoGround)>MAX_RMS_ECC){
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
ts->printf( ts->LOG, "RMS = %f",
computeRMS(mapHomography, homoGround));
return false;
}
}
return true; return true;
} }
void CV_ECC_Test_Homography::run(int from) class CV_ECC_Test_Mask : public CV_ECC_BaseTest {
{
if (!testHomography(from))
return;
ts->set_failed_test_info(cvtest::TS::OK);
}
class CV_ECC_Test_Mask : public CV_ECC_BaseTest
{
public: public:
CV_ECC_Test_Mask(); CV_ECC_Test_Mask();
protected:
void run(int);
bool testMask(int); protected:
bool test(const Mat);
}; };
CV_ECC_Test_Mask::CV_ECC_Test_Mask() {} CV_ECC_Test_Mask::CV_ECC_Test_Mask() {}
bool CV_ECC_Test_Mask::testMask(int from) bool CV_ECC_Test_Mask::test(const Mat testImg) {
{
Mat img = imread( string(ts->get_data_path()) + "shared/fruits.png", 0);
if (img.empty())
{
ts->printf( ts->LOG, "test image can not be read");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
return false;
}
Mat scaledImage;
resize(img, scaledImage, Size(216, 216), 0, 0, INTER_LINEAR_EXACT );
Mat_<float> testImg;
scaledImage.convertTo(testImg, testImg.type());
cv::RNG rng = ts->get_rng(); cv::RNG rng = ts->get_rng();
int progress = 0; int progress = 0;
for (int k=from; k<ntests; k++){ for (int k = 0; k < ntests; k++) {
ts->update_context(this, k, true); ts->update_context(this, k, true);
progress = update_progress(progress, k, ntests, 0); progress = update_progress(progress, k, ntests, 0);
Mat translationGround = (Mat_<float>(2,3) << 1, 0, (rng.uniform(10.f, 20.f)), Mat translationGround = (Mat_<float>(2, 3) << 1, 0, (rng.uniform(10.f, 20.f)), 0, 1, (rng.uniform(10.f, 20.f)));
0, 1, (rng.uniform(10.f, 20.f)));
Mat warpedImage; Mat warpedImage;
warpAffine(testImg, warpedImage, translationGround, warpAffine(testImg, warpedImage, translationGround, Size(200, 200), INTER_LINEAR + WARP_INVERSE_MAP);
Size(200,200), INTER_LINEAR + WARP_INVERSE_MAP);
Mat mapTranslation = (Mat_<float>(2, 3) << 1, 0, 0, 0, 1, 0); Mat mapTranslation = (Mat_<float>(2, 3) << 1, 0, 0, 0, 1, 0);
Mat_<unsigned char> mask = Mat_<unsigned char>::ones(testImg.rows, testImg.cols); Mat_<unsigned char> mask = Mat_<unsigned char>::ones(testImg.rows, testImg.cols);
for (int i=testImg.rows*2/3; i<testImg.rows; i++) { Rect region(testImg.cols * 2 / 3, testImg.rows * 2 / 3, testImg.cols / 3, testImg.rows / 3);
for (int j=testImg.cols*2/3; j<testImg.cols; j++) {
testImg(i, j) = 0;
mask(i, j) = 0;
}
}
findTransformECC(warpedImage, testImg, mapTranslation, 0, rectangle(testImg, region, Scalar::all(0), FILLED);
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, ECC_iterations, ECC_epsilon), mask); rectangle(mask, region, Scalar(0), FILLED);
if (!isMapCorrect(mapTranslation)){ findTransformECC(warpedImage, testImg, mapTranslation, 0, criteria, mask);
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
if (!checkMap(mapTranslation, translationGround))
return false; return false;
}
if (computeRMS(mapTranslation, translationGround)>MAX_RMS_ECC){
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
ts->printf( ts->LOG, "RMS = %f",
computeRMS(mapTranslation, translationGround));
return false;
}
// Test with non-default gaussian blur. // Test with non-default gaussian blur.
findTransformECC(warpedImage, testImg, mapTranslation, 0, findTransformECC(warpedImage, testImg, mapTranslation, 0, criteria, mask, 1);
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, ECC_iterations, ECC_epsilon), mask, 1);
if (!isMapCorrect(mapTranslation)){ if (!checkMap(mapTranslation, translationGround))
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
return false; return false;
} }
if (computeRMS(mapTranslation, translationGround)>MAX_RMS_ECC){
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
ts->printf( ts->LOG, "RMS = %f",
computeRMS(mapTranslation, translationGround));
return false;
}
}
return true; return true;
} }
void CV_ECC_Test_Mask::run(int from) void testECCProperties(Mat x, float eps) {
{ // The channels are independent
if (!testMask(from)) Mat y = x.t();
return; Mat Z = Mat::zeros(x.size(), y.type());
Mat O = Mat::ones(x.size(), y.type());
ts->set_failed_test_info(cvtest::TS::OK); EXPECT_NEAR(computeECC(x, y), 0.0, eps);
if (x.type() != CV_8U && x.type() != CV_16U) {
EXPECT_NEAR(computeECC(x + y, x - y), 0.0, eps);
} }
TEST(Video_ECC_Test_Compute, accuracy) EXPECT_NEAR(computeECC(x, x), 1.0, eps);
{
Mat R, G, B, X, Y;
cv::merge(std::vector<cv::Mat>({O, Z, Z}), R);
cv::merge(std::vector<cv::Mat>({Z, O, Z}), G);
cv::merge(std::vector<cv::Mat>({Z, Z, O}), B);
cv::merge(std::vector<cv::Mat>({x, x, x}), X);
cv::merge(std::vector<cv::Mat>({y, y, y}), Y);
// 1. The channels are orthogonal and independent
EXPECT_NEAR(computeECC(X.mul(R), X.mul(G)), 0, eps);
EXPECT_NEAR(computeECC(X.mul(R), X.mul(B)), 0, eps);
EXPECT_NEAR(computeECC(X.mul(B), X.mul(G)), 0, eps);
EXPECT_NEAR(computeECC(X.mul(R) + Y.mul(B), X.mul(B) + Y.mul(R)), 0, eps);
EXPECT_NEAR(computeECC(X.mul(R) + Y.mul(G) + (X + Y).mul(B), Y.mul(R) + X.mul(G) + (X - Y).mul(B)), 0, eps);
// 2. Each channel contributes equally
EXPECT_NEAR(computeECC(X.mul(R) + Y.mul(G + B), X), 1.0 / 3, eps);
EXPECT_NEAR(computeECC(X.mul(G) + Y.mul(R + B), X), 1.0 / 3, eps);
EXPECT_NEAR(computeECC(X.mul(B) + Y.mul(G + R), X), 1.0 / 3, eps);
// 3. The coefficient is invariant with respect to the offset of channels
EXPECT_NEAR(computeECC(X - R + 2 * G + B, X), 1.0, eps);
if (x.type() != CV_8U && x.type() != CV_16U) {
EXPECT_NEAR(computeECC(X + R - 2 * G + B, Y), 0.0, eps);
}
// The channels are independent. Check orthogonal combinations
// squared norm of the sum = sum of the squared norms (orthogonal channels)
EXPECT_NEAR(computeECC(X, Y + X), 1.0 / sqrt(2.0), eps);
EXPECT_NEAR(computeECC(X, 2 * Y + X), 1.0 / sqrt(5.0), eps);
}
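A short derivation for the last two expectations (a sketch for this review, under the test's setup where each channel of X and Y has the same variance \f$\sigma^2\f$ and the two are uncorrelated):

\f[
\mathrm{ECC}(X,\, X + \alpha Y)
 = \frac{N\sigma^2}{\sqrt{N\sigma^2}\,\sqrt{N(\sigma^2 + \alpha^2\sigma^2)}}
 = \frac{1}{\sqrt{1 + \alpha^2}},
\f]

which gives \f$1/\sqrt{2}\f$ for \f$\alpha = 1\f$ and \f$1/\sqrt{5}\f$ for \f$\alpha = 2\f$; replicating the same pattern across the three channels does not change the ratio.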
TEST(Video_ECC_Test_Compute, properties) {
Mat xline(1, 100, CV_32F), x;
for (int i = 0; i < xline.cols; ++i) xline.at<float>(0, i) = (float)i;
repeat(xline, xline.cols, 1, x);
Mat x_f64, x_u8, x_u16;
x.convertTo(x_f64, CV_64F);
x.convertTo(x_u8, CV_8U);
x.convertTo(x_u16, CV_16U);
testECCProperties(x, 1e-5f);
testECCProperties(x_f64, 1e-5f);
testECCProperties(x_u8, 1);
testECCProperties(x_u16, 1);
}
TEST(Video_ECC_Test_Compute, accuracy) {
Mat testImg = (Mat_<float>(3, 3) << 1, 0, 0, 1, 0, 0, 1, 0, 0); Mat testImg = (Mat_<float>(3, 3) << 1, 0, 0, 1, 0, 0, 1, 0, 0);
Mat warpedImage = (Mat_<float>(3, 3) << 0, 1, 0, 0, 1, 0, 0, 1, 0); Mat warpedImage = (Mat_<float>(3, 3) << 0, 1, 0, 0, 1, 0, 0, 1, 0);
Mat_<unsigned char> mask = Mat_<unsigned char>::ones(testImg.rows, testImg.cols); Mat_<unsigned char> mask = Mat_<unsigned char>::ones(testImg.rows, testImg.cols);
@ -501,8 +420,7 @@ TEST(Video_ECC_Test_Compute, accuracy)
EXPECT_NEAR(ecc, -0.5f, 1e-5f); EXPECT_NEAR(ecc, -0.5f, 1e-5f);
} }
TEST(Video_ECC_Test_Compute, bug_14657) TEST(Video_ECC_Test_Compute, bug_14657) {
{
/* /*
* Simple test case - a 2 x 2 matrix with 10, 10, 10, 6. When the mean (36 / 4 = 9) is subtracted, * Simple test case - a 2 x 2 matrix with 10, 10, 10, 6. When the mean (36 / 4 = 9) is subtracted,
* it results in 1, 1, 1, 0 for the unsigned int case - compare to 1, 1, 1, -3 in the signed case. * it results in 1, 1, 1, 0 for the unsigned int case - compare to 1, 1, 1, -3 in the signed case.
@ -512,11 +430,26 @@ TEST(Video_ECC_Test_Compute, bug_14657)
EXPECT_NEAR(computeECC(img, img), 1.0f, 1e-5f); EXPECT_NEAR(computeECC(img, img), 1.0f, 1e-5f);
} }
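Spelling out the arithmetic behind this regression test (worked here from the comment above, not text from the patch): the mean of 10, 10, 10, 6 is 9, the signed zero-mean vector is (1, 1, 1, -3), and

\f[
\mathrm{ECC}(\mathrm{img}, \mathrm{img}) = \frac{1 + 1 + 1 + 9}{\sqrt{12}\,\sqrt{12}} = 1.
\f]

If the subtraction were performed in the unsigned input type, the -3 would saturate to 0 and the self-similarity of 1 would no longer be recovered.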
TEST(Video_ECC_Translation, accuracy) {
CV_ECC_Test_Translation test;
test.safe_run();
}
TEST(Video_ECC_Euclidean, accuracy) {
CV_ECC_Test_Euclidean test;
test.safe_run();
}
TEST(Video_ECC_Affine, accuracy) {
CV_ECC_Test_Affine test;
test.safe_run();
}
TEST(Video_ECC_Homography, accuracy) {
CV_ECC_Test_Homography test;
test.safe_run();
}
TEST(Video_ECC_Mask, accuracy) {
CV_ECC_Test_Mask test;
test.safe_run();
}
TEST(Video_ECC_Translation, accuracy) { CV_ECC_Test_Translation test; test.safe_run();} } // namespace
TEST(Video_ECC_Euclidean, accuracy) { CV_ECC_Test_Euclidean test; test.safe_run(); } } // namespace opencv_test
TEST(Video_ECC_Affine, accuracy) { CV_ECC_Test_Affine test; test.safe_run(); }
TEST(Video_ECC_Homography, accuracy) { CV_ECC_Test_Homography test; test.safe_run(); }
TEST(Video_ECC_Mask, accuracy) { CV_ECC_Test_Mask test; test.safe_run(); }
}} // namespace