Feature-Matching Code for Traffic Signs

#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;

void matchPic(Mat img_11, Mat img_22, int *coor1, int *coor2)
 {
  // Show the two input images
  namedWindow("p1", 0);
  namedWindow("p2", 0);
  imshow("p1", img_11);
  imshow("p2", img_22);
  // Matching-region coordinates: {xmin, xmax, ymin, ymax}
  int xmin1 = coor1[0], xmax1 = coor1[1], ymin1 = coor1[2], ymax1 = coor1[3];
  int xmin2 = coor2[0], xmax2 = coor2[1], ymin2 = coor2[2], ymax2 = coor2[3];
  // Crop the matching region from each image
  Mat im_1, im_2;
  im_1 = img_11(Range(ymin1, ymax1), Range(xmin1, xmax1));
  im_2 = img_22(Range(ymin2, ymax2), Range(xmin2, xmax2));
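  // Note: cv::Range is half-open, so Range(a, b) covers rows/columns a .. b-1.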
  // Match in two halves: images 1 and 3 are from the first epoch, 2 and 4 from the second
  Mat image_1, image_2, image_3, image_4;
  image_1 = img_11(Range(ymin1, ymax1), Range(xmin1, (xmax1 + xmin1) / 2));
  image_2 = img_22(Range(ymin2, ymax2), Range(xmin2, (xmax2 + xmin2) / 2));
  image_3 = img_11(Range(ymin1, ymax1), Range((xmax1 + xmin1) / 2, xmax1));
  image_4 = img_22(Range(ymin2, ymax2), Range((xmax2 + xmin2) / 2, xmax2));
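  // (Presumably the region is split so each ORB::detect call spends its
  // 1500-keypoint budget on one half of the sign instead of clustering in
  // one area; this is an assumption, not stated in the original post.)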

  Mat img_01, img_02, img_1, img_2, img_03, img_04, img_3, img_4, im_01, im_02, im_001, im_002;
  // Sharpen: 3x3 kernel of -1s with a large positive centre weight
  Mat kernel(3, 3, CV_32F, Scalar(-1));
  kernel.at<float>(1, 1) = 9.9;
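  // Note: these weights sum to 1.9, so the filter brightens as well as
  // sharpens; a centre weight of 9 would leave overall brightness unchanged.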
  filter2D(image_1, img_01, -1, kernel);
  filter2D(image_2, img_02, -1, kernel);
  filter2D(image_3, img_03, -1, kernel);
  filter2D(image_4, img_04, -1, kernel);
  filter2D(im_1, im_01, -1, kernel);
  filter2D(im_2, im_02, -1, kernel);

  // Convert to grayscale (OpenCV stores images as BGR, so use COLOR_BGR2GRAY;
  // the original CV_RGB2GRAY constant swaps the red/blue weights)
  cvtColor(img_01, img_1, COLOR_BGR2GRAY);
  cvtColor(img_02, img_2, COLOR_BGR2GRAY);
  cvtColor(img_03, img_3, COLOR_BGR2GRAY);
  cvtColor(img_04, img_4, COLOR_BGR2GRAY);
  cvtColor(im_01, im_001, COLOR_BGR2GRAY);
  cvtColor(im_02, im_002, COLOR_BGR2GRAY);
  // Prepare for matching
  vector<KeyPoint> keyPoint1, keyPoint2, keyPoint3, keyPoint4; // keypoints for the four sub-images
  // Create the ORB detector, capped at 1500 keypoints per image
  Ptr<ORB> orb = ORB::create(1500, 1.2f, 8, 10, 0, 2, ORB::HARRIS_SCORE, 21, 20);
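  // ORB::create arguments, in order: nfeatures=1500, scaleFactor=1.2,
  // nlevels=8, edgeThreshold=10, firstLevel=0, WTA_K=2,
  // scoreType=HARRIS_SCORE, patchSize=21, fastThreshold=20.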
  // Detect keypoints in each half
  orb->detect(img_1, keyPoint1);
  orb->detect(img_2, keyPoint2);
  orb->detect(img_3, keyPoint3);
  orb->detect(img_4, keyPoint4);
  // Shift the keypoints of halves 3 and 4 into the coordinates of the full
  // crops: the right half starts at the horizontal midpoint of the region
  for (size_t i = 0; i < keyPoint3.size(); i++)
  {
   keyPoint3[i].pt.x = keyPoint3[i].pt.x + (xmax1 - xmin1) / 2;
  }
  for (size_t i = 0; i < keyPoint4.size(); i++)
  {
   keyPoint4[i].pt.x = keyPoint4[i].pt.x + (xmax2 - xmin2) / 2;
  }
  // Merge the keypoints: 1 and 3 go into Point1, 2 and 4 into Point2,
  // giving the keypoint sets of the two epochs
  vector<KeyPoint> Point1, Point2;
  Point1.insert(Point1.end(), keyPoint1.begin(), keyPoint1.end());
  Point1.insert(Point1.end(), keyPoint3.begin(), keyPoint3.end());
  Point2.insert(Point2.end(), keyPoint2.begin(), keyPoint2.end());
  Point2.insert(Point2.end(), keyPoint4.begin(), keyPoint4.end());
  // Compute BRIEF descriptors at the keypoint locations;
  // im_001 and im_002 are the grayscale crops of the two epochs
  Mat descriptors_1, descriptors_2;
  orb->compute(im_001, Point1, descriptors_1);
  orb->compute(im_002, Point2, descriptors_2);
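  // Note: compute() drops any keypoint for which a descriptor cannot be
  // computed (e.g. too close to the image border), so Point1/Point2 stay
  // aligned row-for-row with descriptors_1/descriptors_2.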
  // Show all detected keypoints
  Mat outimg1;
  drawKeypoints(im_1, Point1, outimg1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
  namedWindow("all keypoints", 0);
  imshow("all keypoints", outimg1);
  // Match the BRIEF descriptors of the two images using the Hamming distance.
  // DMatch stores one match result; its fields are:
  // int queryIdx;   // index of the matching descriptor in the query image
  // int trainIdx;   // index of the matching descriptor in the train (template) image
  // int imgIdx;     // index of the train image (when there are several)
  // float distance; // distance between the two descriptors (Hamming here);
  //                 // smaller means a better match
  vector<DMatch> matches;
  // Create a brute-force matcher using the Hamming distance; it tries every
  // possible pairing, so it always finds the best match for each descriptor
  BFMatcher matcher(NORM_HAMMING);
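  // NORM_HAMMING is the appropriate norm for binary descriptors such as
  // ORB's; the L2 norms are meant for float descriptors like SIFT's.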
  // match() works on the descriptors only; the keypoints themselves are not used here
  matcher.match(descriptors_1, descriptors_2, matches);
  // Scan matches[] for the largest and smallest match distances, for use in
  // the filtering step below (disabled; note min_dist1 must start large,
  // not at 0, or it would never be updated)
  /*double min_dist1 = 10000, max_dist1 = 0;
  for (int i = 0; i < descriptors_1.rows; ++i)
  {
   double dist1 = matches[i].distance;
   if (dist1 < min_dist1) min_dist1 = dist1;
   if (dist1 > max_dist1) max_dist1 = dist1;
  }
  */
  //printf("Max dist: %f\n", max_dist1);
  //printf("Min dist: %f\n", min_dist1);
  // Filter the matches by distance (the threshold test is commented out,
  // so currently every match is kept)
  std::vector<DMatch> good_matches;
  for (int j = 0; j < descriptors_1.rows; ++j)
  {
   //if (matches[j].distance <= max(2 * min_dist1, 10.0))
   good_matches.push_back(matches[j]);
  }

  // Draw the raw matches (disabled)
  /*
  Mat img_match;
  drawMatches(im_1, Point1, im_2, Point2, matches, img_match);
  namedWindow("all raw matches", 0);
  imshow("all raw matches", img_match);
  */
  Mat img_goodmatch; // matches after the distance filter
  drawMatches(im_1, Point1, im_2, Point2, good_matches, img_goodmatch);
  namedWindow("all match pairs", 0);
  imshow("all match pairs", img_goodmatch);
  // Remove mismatches with RANSAC.
  // Align the keypoints according to good_matches: R_keypoint01 collects the
  // keypoints of image 1 that matched into image 2, and good_matches stores
  // each pair's indices into Point1 (queryIdx) and Point2 (trainIdx)
  vector<KeyPoint> R_keypoint01, R_keypoint02;
  for (size_t i = 0; i < good_matches.size(); i++)
  {
   R_keypoint01.push_back(Point1[good_matches[i].queryIdx]); // query index
   R_keypoint02.push_back(Point2[good_matches[i].trainIdx]); // train index
  }
  // Extract the pixel coordinates as Point2f
  vector<Point2f> p01, p02;
  for (size_t i = 0; i < good_matches.size(); i++)
  {
   p01.push_back(R_keypoint01[i].pt);
   p02.push_back(R_keypoint02[i].pt);
  }
  vector<uchar> RansacStatus;
  // Outlier rejection via the fundamental matrix (disabled alternative):
  //Mat Fundamental = findFundamentalMat(p01, p02, RansacStatus, FM_RANSAC);
  // Outlier rejection via a RANSAC-estimated homography
  // (RANSAC is the correct flag here; FM_RANSAC belongs to findFundamentalMat)
  Mat homography = findHomography(p01, p02, RANSAC, 20, RansacStatus);
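  // findHomography with RANSAC takes a reprojection-error threshold (20 px
  // here, quite permissive) and writes 1 into RansacStatus for each inlier pair.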
  // RR_keypoint and RR_matches hold the inlier keypoints and matches
  vector<KeyPoint> RR_keypoint01, RR_keypoint02;
  vector<DMatch> RR_matches;
  int index = 0;
  for (size_t i = 0; i < good_matches.size(); i++)
  {
   if (RansacStatus[i] != 0)
   {
    RR_keypoint01.push_back(R_keypoint01[i]);
    RR_keypoint02.push_back(R_keypoint02[i]);
    // The inliers are re-packed in order, so point the match indices
    // at their new positions
    good_matches[i].queryIdx = index;
    good_matches[i].trainIdx = index;
    RR_matches.push_back(good_matches[i]);
    index++;
   }
  }
  // Show the matches that survive RANSAC
  Mat img_RR_matches;
  drawMatches(im_1, RR_keypoint01, im_2, RR_keypoint02, RR_matches, img_RR_matches);
  namedWindow("after mismatch removal", 0);
  imshow("after mismatch removal", img_RR_matches);

  waitKey(0);
 }
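
As a usage reference, here is a minimal sketch of how matchPic might be driven. The image paths, region coordinates, and main() scaffold below are hypothetical and not part of the original post:

#include <opencv2/opencv.hpp>
using namespace cv;

void matchPic(Mat img_11, Mat img_22, int *coor1, int *coor2); // defined above

int main()
 {
  // Hypothetical inputs: two epochs of the same traffic sign (placeholder paths)
  Mat img1 = imread("sign_epoch1.jpg");
  Mat img2 = imread("sign_epoch2.jpg");
  if (img1.empty() || img2.empty()) return -1;
  // Matching regions as {xmin, xmax, ymin, ymax}; values are placeholders
  int coor1[4] = { 100, 500, 80, 400 };
  int coor2[4] = { 120, 520, 90, 410 };
  matchPic(img1, img2, coor1, coor2);
  return 0;
 }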
Parts of this code are adapted from other authors' code and are used here for personal study only.

Reposted from blog.csdn.net/m0_38004789/article/details/80859844