// 版权声明:本文为博主原创文章,未经博主允许不得转载。
// https://blog.csdn.net/xxzxxzdlut/article/details/72962900
//
// 使用特征匹配寻找已知物体的位置,并用方框标识出来。
// 注意:使用劳氏(Lowe)比率检验排除特征点时,先检测并训练场景图像,再以目标图像作查询;
// 顺序弄错会导致 drawMatches 出错。
//
// 主要函数:
// 寻找透视变换:
// findHomography(InputArray src,                    // 源平面上的对应点
//                InputArray dst,                    // 目标平面上的对应点
//                int method = 0,                    // 计算方法:默认 0、CV_RANSAC、CV_LMEDS 三种
//                double ransacReprojThreshold = 3,  // 允许的重投影误差;src、dst 若以像素为单位,取 1 到 10
//                OutputArray mask = noArray())      // 设置输出掩码
// 进行透视矩阵变换:
// perspectiveTransform(InputArray src,   // 原图像,需为双通道或三通道浮点型图像,每个元素是二维或三维可转换矢量
//                      OutputArray dst,  // 保存运算结果
//                      InputArray m)     // 变换矩阵,3*3 或 4*4 浮点型矩阵
#include<opencv2/core/core.hpp>
#include<opencv2/features2d/features2d.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/calib3d/calib3d.hpp>
#include<opencv2/xfeatures2d/nonfree.hpp>
#include<opencv2/opencv.hpp>
#include<iostream>
using namespace std;
using namespace cv;
using namespace cv::xfeatures2d;
int main()
{
Mat srcImage1 = imread("1.jpg",1);
Mat srcImage2 = imread("2.jpg",1);
if (!srcImage1.data || !srcImage2.data)
{
cout << "读取出错" << endl;
return false;
}
Mat srcImage1_gray, srcImage2_gray;
cvtColor(srcImage1,srcImage1_gray,CV_BGR2GRAY);
cvtColor(srcImage2,srcImage2_gray,CV_BGR2GRAY);
Ptr<SurfFeatureDetector> Detector = SurfFeatureDetector::create(100);
vector<KeyPoint> keypoints_object, keypoints_scene;
Mat descriptor_object, descriptor_scene;
Detector->detectAndCompute(srcImage2_gray,Mat(),keypoints_scene,descriptor_scene);
vector<Mat>sc_collection(1,descriptor_scene);
FlannBasedMatcher matcher;
matcher.add(sc_collection);
matcher.train();
Detector->detectAndCompute(srcImage1_gray,Mat(),keypoints_object,descriptor_object);
vector<vector<DMatch>>matches;
matcher.knnMatch(descriptor_object,matches,2);
vector<DMatch>googmatches;
for (unsigned int i = 0; i < matches.size(); i++)
{
if (matches[i][0].distance < 0.6*matches[i][1].distance)
googmatches.push_back(matches[i][0]);
}
Mat img_matches;
drawMatches(srcImage1_gray,keypoints_object, srcImage2_gray, keypoints_scene, googmatches,img_matches);
vector<Point2f>obj;
vector<Point2f>scene;
for (unsigned int i = 0; i < googmatches.size(); i++)
{
obj.push_back(keypoints_object[googmatches[i].queryIdx].pt);
scene.push_back(keypoints_scene[googmatches[i].trainIdx].pt);
}
Mat H = findHomography(obj,scene,CV_RANSAC);
vector<Point2f>obj_corners(4);
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint(srcImage1_gray.cols,0);
obj_corners[2] = cvPoint(srcImage1_gray.cols, srcImage1_gray.rows);
obj_corners[3] = cvPoint(0,srcImage1_gray.rows);
vector<Point2f>scene_corners(4);
perspectiveTransform(obj_corners,scene_corners,H);
line(img_matches,
scene_corners[0]+Point2f(static_cast<float>(srcImage1_gray.cols),0),
scene_corners[1]+Point2f(static_cast<float>(srcImage1_gray.cols),0),
Scalar(255,0,123),4);
line(img_matches,
scene_corners[1] + Point2f(static_cast<float>(srcImage1_gray.cols), 0),
scene_corners[2] + Point2f(static_cast<float>(srcImage1_gray.cols), 0),
Scalar(255, 0, 123), 4);
line(img_matches,
scene_corners[2] + Point2f(static_cast<float>(srcImage1_gray.cols), 0),
scene_corners[3] + Point2f(static_cast<float>(srcImage1_gray.cols), 0),
Scalar(255, 0, 123), 4);
line(img_matches,
scene_corners[3] + Point2f(static_cast<float>(srcImage1_gray.cols), 0),
scene_corners[0] + Point2f(static_cast<float>(srcImage1_gray.cols), 0),
Scalar(255, 0, 123), 4);
imshow("寻找物体位置",img_matches);
waitKey(0);
return 0;
}