分享

OpenCV实现基于Zernike矩的亚像素边缘检测

 mscdj 2019-04-13

    在做物体检测时,由于成本和应用场合的限制,不能够一味地增加相机的分辨率,或者已经用了分辨率很高的相机,但是视野范围很大,仍然无法实现很高的精度,这时就要考虑亚像素技术,亚像素技术就是在两个像素点之间进行进一步的细分,从而得到亚像素级别的边缘点的坐标(也就是float类型的坐标),一般来说,现有的技术可以做到2细分、4细分,甚至很牛的能做到更高,通过亚像素边缘检测技术的使用,可以节约成本,提高识别精度。

    常见的亚像素技术包括灰度矩、Hu矩、空间矩、Zernike矩等,通过查阅多篇论文,综合比较精度、时间和实现难度,选择Zernike矩作为项目中的亚像素边缘检测算法。基于Zernike矩的边缘检测原理,下一篇文章详细再总结一下,这里大体把步骤说一下,并使用OpenCV实现。

    大体步骤就是,首先确定使用的模板大小,然后根据Zernike矩的公式求出各个模板系数,例如我选择的是7*7模板(模板越大精度越高,但是运算时间越长),要计算M00,M11R,M11I,M20,M31R,M31I,M40七个Zernike模板。第二步是对待检测图像进行预处理,包括滤波、二值化等,也可以再进行一次Canny边缘检测。第三步是将预处理的图像跟7个Zernike矩模板分别进行卷积,得到7个Zernike矩。第四步是把上一步的矩乘上角度校正系数(这是因为利用Zernike的旋转不变性,Zernike模型把边缘都简化成了x=n的直线,这里要调整回来)。第五步是计算距离参数l和灰度差参数k,根据k和l判断该点是否为边缘点。以下是基于OpenCV的实现。

  1. #include <math.h>
  2. #include <opencv2/opencv.hpp>
  3. using namespace std;
  4. using namespace cv;
  5. const double PI = 3.14159265358979323846;
  6. const int g_N = 7;
  7. Mat M00 = (Mat_<double>(7, 7) <<
  8. 0, 0.0287, 0.0686, 0.0807, 0.0686, 0.0287, 0,
  9. 0.0287, 0.0815, 0.0816, 0.0816, 0.0816, 0.0815, 0.0287,
  10. 0.0686, 0.0816, 0.0816, 0.0816, 0.0816, 0.0816, 0.0686,
  11. 0.0807, 0.0816, 0.0816, 0.0816, 0.0816, 0.0816, 0.0807,
  12. 0.0686, 0.0816, 0.0816, 0.0816, 0.0816, 0.0816, 0.0686,
  13. 0.0287, 0.0815, 0.0816, 0.0816, 0.0816, 0.0815, 0.0287,
  14. 0, 0.0287, 0.0686, 0.0807, 0.0686, 0.0287, 0);
  15. Mat M11R = (Mat_<double>(7, 7) <<
  16. 0, -0.015, -0.019, 0, 0.019, 0.015, 0,
  17. -0.0224, -0.0466, -0.0233, 0, 0.0233, 0.0466, 0.0224,
  18. -0.0573, -0.0466, -0.0233, 0, 0.0233, 0.0466, 0.0573,
  19. -0.069, -0.0466, -0.0233, 0, 0.0233, 0.0466, 0.069,
  20. -0.0573, -0.0466, -0.0233, 0, 0.0233, 0.0466, 0.0573,
  21. -0.0224, -0.0466, -0.0233, 0, 0.0233, 0.0466, 0.0224,
  22. 0, -0.015, -0.019, 0, 0.019, 0.015, 0);
  23. Mat M11I = (Mat_<double>(7, 7) <<
  24. 0, -0.0224, -0.0573, -0.069, -0.0573, -0.0224, 0,
  25. -0.015, -0.0466, -0.0466, -0.0466, -0.0466, -0.0466, -0.015,
  26. -0.019, -0.0233, -0.0233, -0.0233, -0.0233, -0.0233, -0.019,
  27. 0, 0, 0, 0, 0, 0, 0,
  28. 0.019, 0.0233, 0.0233, 0.0233, 0.0233, 0.0233, 0.019,
  29. 0.015, 0.0466, 0.0466, 0.0466, 0.0466, 0.0466, 0.015,
  30. 0, 0.0224, 0.0573, 0.069, 0.0573, 0.0224, 0);
  31. Mat M20 = (Mat_<double>(7, 7) <<
  32. 0, 0.0225, 0.0394, 0.0396, 0.0394, 0.0225, 0,
  33. 0.0225, 0.0271, -0.0128, -0.0261, -0.0128, 0.0271, 0.0225,
  34. 0.0394, -0.0128, -0.0528, -0.0661, -0.0528, -0.0128, 0.0394,
  35. 0.0396, -0.0261, -0.0661, -0.0794, -0.0661, -0.0261, 0.0396,
  36. 0.0394, -0.0128, -0.0528, -0.0661, -0.0528, -0.0128, 0.0394,
  37. 0.0225, 0.0271, -0.0128, -0.0261, -0.0128, 0.0271, 0.0225,
  38. 0, 0.0225, 0.0394, 0.0396, 0.0394, 0.0225, 0);
  39. Mat M31R = (Mat_<double>(7, 7) <<
  40. 0, -0.0103, -0.0073, 0, 0.0073, 0.0103, 0,
  41. -0.0153, -0.0018, 0.0162, 0, -0.0162, 0.0018, 0.0153,
  42. -0.0223, 0.0324, 0.0333, 0, -0.0333, -0.0324, 0.0223,
  43. -0.0190, 0.0438, 0.0390, 0, -0.0390, -0.0438, 0.0190,
  44. -0.0223, 0.0324, 0.0333, 0, -0.0333, -0.0324, 0.0223,
  45. -0.0153, -0.0018, 0.0162, 0, -0.0162, 0.0018, 0.0153,
  46. 0, -0.0103, -0.0073, 0, 0.0073, 0.0103, 0);
  47. Mat M31I = (Mat_<double>(7, 7) <<
  48. 0, -0.0153, -0.0223, -0.019, -0.0223, -0.0153, 0,
  49. -0.0103, -0.0018, 0.0324, 0.0438, 0.0324, -0.0018, -0.0103,
  50. -0.0073, 0.0162, 0.0333, 0.039, 0.0333, 0.0162, -0.0073,
  51. 0, 0, 0, 0, 0, 0, 0,
  52. 0.0073, -0.0162, -0.0333, -0.039, -0.0333, -0.0162, 0.0073,
  53. 0.0103, 0.0018, -0.0324, -0.0438, -0.0324, 0.0018, 0.0103,
  54. 0, 0.0153, 0.0223, 0.0190, 0.0223, 0.0153, 0);
  55. Mat M40 = (Mat_<double>(7, 7) <<
  56. 0, 0.013, 0.0056, -0.0018, 0.0056, 0.013, 0,
  57. 0.0130, -0.0186, -0.0323, -0.0239, -0.0323, -0.0186, 0.0130,
  58. 0.0056, -0.0323, 0.0125, 0.0406, 0.0125, -0.0323, 0.0056,
  59. -0.0018, -0.0239, 0.0406, 0.0751, 0.0406, -0.0239, -0.0018,
  60. 0.0056, -0.0323, 0.0125, 0.0406, 0.0125, -0.0323, 0.0056,
  61. 0.0130, -0.0186, -0.0323, -0.0239, -0.0323, -0.0186, 0.0130,
  62. 0, 0.013, 0.0056, -0.0018, 0.0056, 0.013, 0);
  63. int main()
  64. {
  65. Mat OriginalImage = imread("original.png", 0);
  66. Mat SubImage = OriginalImage;
  67. Mat NewSmoothImage;
  68. medianBlur(OriginalImage, NewSmoothImage, 13);
  69. Mat NewAdaThresImage;
  70. adaptiveThreshold(NewSmoothImage, NewAdaThresImage, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY_INV, 7, 4);
  71. vector<Point2d> SubEdgePoints;
  72. Mat ZerImageM00;
  73. filter2D(NewAdaThresImage, ZerImageM00, CV_64F, M00, Point(-1, -1), 0, BORDER_DEFAULT);
  74. //////////filter2D( cannyImage, zerImageM00, CV_64F, M00, Point(-1,-1), 0, BORDER_DEFAULT);
  75. Mat ZerImageM11R;
  76. filter2D(NewAdaThresImage, ZerImageM11R, CV_64F, M11R, Point(-1, -1), 0, BORDER_DEFAULT);
  77. //////////filter2D( cannyImage, zerImageM11R, CV_64F, M11R, Point(-1, -1), 0, BORDER_DEFAULT);
  78. Mat ZerImageM11I;
  79. filter2D(NewAdaThresImage, ZerImageM11I, CV_64F, M11I, Point(-1, -1), 0, BORDER_DEFAULT);
  80. //////////filter2D( cannyImage, zerImageM11I, CV_64F, M11I, Point(-1, -1), 0, BORDER_DEFAULT);
  81. Mat ZerImageM20;
  82. filter2D(NewAdaThresImage, ZerImageM20, CV_64F, M20, Point(-1, -1), 0, BORDER_DEFAULT);
  83. //////////filter2D( cannyImage, zerImageM20, CV_64F, M20, Point(-1, -1), 0, BORDER_DEFAULT);
  84. Mat ZerImageM31R;
  85. filter2D(NewAdaThresImage, ZerImageM31R, CV_64F, M31R, Point(-1, -1), 0, BORDER_DEFAULT);
  86. //////////filter2D(cannyImage, zerImageM31R, CV_64F, M31R, Point(-1, -1), 0, BORDER_DEFAULT);
  87. Mat ZerImageM31I;
  88. filter2D(NewAdaThresImage, ZerImageM31I, CV_64F, M31I, Point(-1, -1), 0, BORDER_DEFAULT);
  89. //////////filter2D(cannyImage, zerImageM31I, CV_64F, M31I, Point(-1, -1), 0, BORDER_DEFAULT);
  90. Mat ZerImageM40;
  91. filter2D(NewAdaThresImage, ZerImageM40, CV_64F, M40, Point(-1, -1), 0, BORDER_DEFAULT);
  92. //////////filter2D(cannyImage, zerImageM40, CV_64F, M40, Point(-1, -1), 0, BORDER_DEFAULT);
  93. int row_number = NewAdaThresImage.rows;
  94. int col_number = NewAdaThresImage.cols;
  95. //使用7个的7*7的Zernike模板(其本质是个矩阵)M00、M11R、M11I、M20、M31R、M31I、M40,分别与图像进行卷积,得到每个像素点的七个Zernike矩Z00、Z11R、Z11I、Z20、Z31R、Z31I、Z40
  96. //对于每个点,根据它的七个Zernike矩,求得距离参数l和灰度差参数k,当l和k都满足设定的条件时,则判读该点为边缘点,并进一步利用上述7个Zernike矩求出该点的亚像素级坐标
  97. //如果l或k不满足设定的条件,则该点不是边缘点,转到下一个点求解距离参数l和灰度差参数k
  98. for (int i = 0; i < row_number; i++)
  99. {
  100. for (int j = 0; j <col_number; j++)
  101. {
  102. if (ZerImageM00.at<double>(i, j) == 0)
  103. {
  104. continue;
  105. }
  106. //compute theta
  107. //vector<vector<double> > theta2(0);
  108. double theta_temporary = atan2(ZerImageM31I.at<double>(i, j), ZerImageM31R.at<double>(i, j));
  109. //theta2[i].push_back(thetaTem);
  110. //compute Z11'/Z31'
  111. double rotated_z11 = 0.0;
  112. rotated_z11 = sin(theta_temporary)*(ZerImageM11I.at<double>(i, j)) + cos(theta_temporary)*(ZerImageM11R.at<double>(i, j));
  113. double rotated_z31 = 0.0;
  114. rotated_z31 = sin(theta_temporary)*(ZerImageM31I.at<double>(i, j)) + cos(theta_temporary)*(ZerImageM31R.at<double>(i, j));
  115. //compute l
  116. double l_method1 = sqrt((5 * ZerImageM40.at<double>(i, j) + 3 * ZerImageM20.at<double>(i, j)) / (8 * ZerImageM20.at<double>(i, j)));
  117. double l_method2 = sqrt((5 * rotated_z31 + rotated_z11) / (6 * rotated_z11));
  118. double l = (l_method1 + l_method2) / 2;
  119. //compute k/h
  120. double k, h;
  121. k = 3 * rotated_z11 / 2 / pow((1 - l_method2*l_method2), 1.5);
  122. h = (ZerImageM00.at<double>(i, j) - k*PI / 2 + k*asin(l_method2) + k*l_method2*sqrt(1 - l_method2*l_method2)) / PI;
  123. //judge the edge
  124. double k_value = 20.0;
  125. double l_value = sqrt(2) / g_N;
  126. double absl = abs(l_method2 - l_method1);
  127. if (k >= k_value && absl <= l_value)
  128. {
  129. Point2d point_temporary;
  130. point_temporary.x = j + g_N*l*cos(theta_temporary) / 2;
  131. point_temporary.y = i + g_N*l*sin(theta_temporary) / 2;
  132. SubEdgePoints.push_back(point_temporary);
  133. }
  134. else
  135. {
  136. continue;
  137. }
  138. }
  139. }
  140. //显示所检测到的亚像素边缘
  141. for (size_t i = 0; i < SubEdgePoints.size(); i++)
  142. {
  143. Point center_forshow(cvRound(SubEdgePoints[i].x), cvRound(SubEdgePoints[i].y));
  144. circle(OriginalImage, center_forshow, 1, Scalar(0, 97, 255), 1, 8, 0);
  145. }
  146. imshow("亚像素边缘", OriginalImage);
  147. waitKey(0);
  148. return 0;
  149. }


---------------------------------------7.28更新分割线-------------------------------------------------

1.因为留言要源代码的朋友比较多,所以我把上面的代码补完整了,新的代码能够实现基本的过程,但是针对不同的应用,肯定不是最优的效果,可能需要根据不同的应用场合进行调整;

2.评论里提到的双层轮廓,我能想到的是因为对线进行边缘检测操作时,线的两侧相当于都是前景和背景的边缘,所以两侧各会有一层边缘,不管是Canny(事实上不少Zernike论文都建议先用canny再用Zernike)还是二值化,只要送入Zernike的是线而不是区域,就会有双层轮廓,其他的原因我暂时还没想到,欢迎大家赐教;

3.双层轮廓并不一定是坏事,比如做圆检测,最后是要用这些边缘点来拟合圆,那么两层轮廓拟合出来的圆放大了说应该是处于两层轮廓中间,这样的结果其实更符合实际情况,但是如果不是规则轮廓,可能会对边缘点的表达和拟合有些问题;


    本站是提供个人知识管理的网络存储空间,所有内容均由用户发布,不代表本站观点。请注意甄别内容中的联系方式、诱导购买等信息,谨防诈骗。如发现有害或侵权内容,请点击一键举报。
    转藏 分享 献花(0

    0条评论

    发表

    请遵守用户 评论公约

    类似文章 更多