In this chapter, because my g2o version differs from the book's, the 3d2d and 3d3d programs need modification: the lines
Block* solver_ptr = new Block( linearSolver ); // matrix block solver
g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg( solver_ptr );
need to be changed to something like the following (newer g2o versions pass the solvers as std::unique_ptr, so the exact form may differ slightly depending on your g2o version):
Block* solver_ptr = new Block( std::unique_ptr<Block::LinearSolverType>( linearSolver ) ); // matrix block solver
g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg( std::unique_ptr<Block>( solver_ptr ) );
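For context, here is a minimal sketch of how the whole solver setup might look under the newer g2o API; this is only a sketch, assuming the Block typedef used in the book (a g2o::BlockSolver with 6-dimensional poses and 3-dimensional landmarks) and the Eigen linear solver from the include list below, and it also needs <memory> for std::unique_ptr:
typedef g2o::BlockSolver< g2o::BlockSolverTraits<6,3> > Block;   // pose is 6-dim, landmark is 3-dim
std::unique_ptr<Block::LinearSolverType> linearSolver ( new g2o::LinearSolverEigen<Block::PoseMatrixType>() ); // linear equation solver
std::unique_ptr<Block> solver_ptr ( new Block ( std::move ( linearSolver ) ) ); // the block solver now takes ownership via unique_ptr
g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg ( std::move ( solver_ptr ) );
g2o::SparseOptimizer optimizer;
optimizer.setAlgorithm ( solver );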
The header includes also need to be adjusted; specifically:
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <Eigen/Core>
#include <Eigen/Geometry>
#include <Eigen/SVD>
#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/optimization_algorithm_levenberg.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h>
#include <g2o/core/optimization_algorithm_dogleg.h>
#include <g2o/solvers/eigen/linear_solver_eigen.h>
#include <g2o/types/sba/types_six_dof_expmap.h>
#include <chrono>
What follows are my own practice programs.
feature_extraction:
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <algorithm>   // for std::min_element / std::max_element used below
using namespace std;
using namespace cv;
int main ( int argc,char** argv)
{
if(argc!=3)
{
cout<<"usage:feature_extraction img1 img2"<<endl;
return 1;
}
//read the images
///argc should be 3 here; as the code below shows, argv[0] (the program name) is not used, but it still takes up the first slot
Mat img_1 = imread ( argv[1],CV_LOAD_IMAGE_COLOR);
Mat img_2 = imread ( argv[2],CV_LOAD_IMAGE_COLOR);
///CV_LOAD_IMAGE_COLOR means the image is read in color (the newer OpenCV name is IMREAD_COLOR)
//initialization
std::vector<KeyPoint> keypoints_1,keypoints_2;
Mat descriptors_1,descriptors_2;
///the usual trio: keypoints, descriptors, matcher
Ptr<FeatureDetector> detector = ORB::create();
Ptr<DescriptorExtractor>descriptor = ORB::create();
Ptr<DescriptorMatcher>matcher = DescriptorMatcher::create("BruteForce-Hamming");
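///ORB combines Oriented FAST keypoints with rotated BRIEF binary descriptors;
///since BRIEF is a bit string, the Hamming distance (number of differing bits) is the natural
///similarity measure, which is why the matcher is created as "BruteForce-Hamming"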
//1. detect corner (keypoint) positions
detector->detect( img_1,keypoints_1 );
detector->detect( img_2,keypoints_2 );
//2. compute BRIEF descriptors from the corner positions
descriptor->compute( img_1,keypoints_1,descriptors_1 );
descriptor->compute( img_2,keypoints_2,descriptors_2 );
Mat outimg1;
drawKeypoints(img_1,keypoints_1,outimg1,Scalar::all(-1),DrawMatchesFlags::DEFAULT);
imshow( "ORB特征点",outimg1 );
///画点很有用感觉
//3.BRIEF描述子匹配,使用Hamming距离
vector<DMatch> matches;
matcher->match( descriptors_1,descriptors_2,matches );
//4. filter the matches
double min_dist=10000,max_dist=0;
//find the minimum and maximum distance over all matches, i.e. the distance between the most similar and the least similar pairs
for (int i=0; i<descriptors_1.rows;i++)
{
double dist = matches[i].distance;
if (dist<min_dist) min_dist = dist;
if (dist>max_dist) max_dist = dist;
}
min_dist = min_element(matches.begin(),matches.end(),[]( const DMatch& m1,const DMatch& m2){return m1.distance<m2.distance;})->distance;
max_dist = max_element(matches.begin(),matches.end(),[]( const DMatch& m1,const DMatch& m2){return m1.distance<m2.distance;})->distance;
///I did not fully understand these two lines at first; they use std::min_element/std::max_element with a comparison lambda to find the same min/max distances as the loop above, and since the later programs just use the plain loop, these two lines can be skipped
printf("--max dist : %f\n",max_dist);
printf("--min dist : %f\n",min_dist);
//when the distance between two descriptors is more than twice the minimum distance, the match is considered wrong; but the minimum distance can sometimes be very small, so an empirical floor of 30 is used
std::vector<DMatch>good_matches;
for (int i = 0;i<descriptors_1.rows;i++)
{
if(matches[i].distance<=max(2*min_dist,30.0))
{
good_matches.push_back(matches[i]);
}
}
//5. draw the matching results
Mat img_match;
Mat img_goodmatch;
drawMatches(img_1,keypoints_1,img_2,keypoints_2,matches,img_match);
drawMatches(img_1,keypoints_1,img_2,keypoints_2,good_matches,img_goodmatch);
///drawMatches has its own dedicated drawing code; opening features2d.cpp is a good way to learn how matched points are connected with lines
imshow ( "all matched point pairs", img_match );
imshow ( "matched point pairs after filtering", img_goodmatch );
waitKey(0);
return 0;
}
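A quick note on running it: the program takes two image paths on the command line, e.g. ./feature_extraction 1.png 2.png (the file names here are just an example; any two overlapping images will do).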
pose_estimation_2d2d:
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
using namespace std;
using namespace cv;
void find_feature_matches(
const Mat& img_1, const Mat& img_2,
std::vector<KeyPoint>& keypoints_1,
std::vector<KeyPoint>& keypoints_2,
std::vector< DMatch >& matches
);
///this function will serve as a template from now on
void pose_estimation_2d2d(
std::vector<KeyPoint> keypoints_1,
std::vector<KeyPoint> keypoints_2,
std::vector< DMatch > matches,
Mat& R,Mat& t
);
///outputs the rotation matrix and the translation vector
//convert pixel coordinates to normalized camera coordinates
Point2d pixel2cam( const Point2d& p,const Mat& K);
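///under the pinhole model a pixel (u,v) maps to normalized camera coordinates
///x = (u - cx)/fx, y = (v - cy)/fy, i.e. X/Z and Y/Z of the corresponding 3D point (see pixel2cam below)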
int main (int argc,char** argv)
{
if(argc !=3)
{
cout<<"usage:pose_estimation_2d2d img1 img2"<<endl;
return 1;
}
//read the images
Mat img_1 = imread (argv[1],CV_LOAD_IMAGE_COLOR);
Mat img_2 = imread (argv[2],CV_LOAD_IMAGE_COLOR);
vector<KeyPoint> keypoints_1,keypoints_2;
vector<DMatch> matches;
find_feature_matches( img_1,img_2,keypoints_1,keypoints_2,matches );
cout<<matches.size()<<endl;
//estimate the motion between the two images
Mat R,t;
pose_estimation_2d2d( keypoints_1,keypoints_2,matches,R,t );
//verify that E = t^ R up to a scale factor
Mat t_x = ( Mat_<double>(3,3)<<
0, -t.at<double>(2,0), t.at<double>(1,0),
t.at<double>(2,0), 0, -t.at<double>(0,0),
-t.at<double>(1,0),t.at<double>(0,0), 0
);
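///t_x is the skew-symmetric (hat) matrix of t, so t_x * v = t x v for any vector v;
///the essential matrix equals t^R only up to an unknown scale factor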
cout<<"t^R="<<endl<<t_x*R<<endl;
//verify the epipolar constraint
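///for a correct match the normalized coordinates should satisfy y2^T * t^ * R * y1 = 0,
///so every value printed below should be close to (but, because of noise, not exactly) zero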
Mat K = (Mat_<double>(3,3)<<
520.9,0,325.1,
0,512.0,249.7,
0, 0, 1);
for (DMatch m:matches)
{
Point2d pt1 = pixel2cam (keypoints_1[m.queryIdx].pt,K);
Mat y1 = ( Mat_<double> (3,1)<<pt1.x,pt1.y,1);
Point2d pt2 = pixel2cam (keypoints_2[m.trainIdx].pt,K);
Mat y2 = ( Mat_<double> (3,1)<<pt2.x,pt2.y,1);
Mat d = y2.t() *t_x *R *y1;
cout << "epipolar constraint = "<< d <<endl;
}
return 0;
}
void find_feature_matches ( const Mat& img_1,const Mat& img_2,
std::vector<KeyPoint>& keypoints_1,
std::vector<KeyPoint>& keypoints_2,
std::vector< DMatch >& matches )
{
//initialization
Mat descriptors_1,descriptors_2;
Ptr<FeatureDetector> detector = ORB::create();
Ptr<DescriptorExtractor> descriptor = ORB::create();
Ptr<DescriptorMatcher>matcher = DescriptorMatcher::create("BruteForce-Hamming");
//1. detect corner (keypoint) positions
detector->detect (img_1,keypoints_1);
detector->detect (img_2,keypoints_2);
//2. compute BRIEF descriptors from the corner positions
descriptor->compute( img_1,keypoints_1,descriptors_1 );
descriptor->compute( img_2,keypoints_2,descriptors_2 );
//3. match the BRIEF descriptors using Hamming distance
vector<DMatch> match;
matcher->match (descriptors_1,descriptors_2,match);
//4. filter the matches
double min_dist=10000,max_dist=0;
//find the minimum and maximum distance over all matches, i.e. the distance between the most similar and the least similar pairs
for (int i=0; i<descriptors_1.rows;i++)
{
double dist = match[i].distance;
if (dist<min_dist) min_dist = dist;
if (dist>max_dist) max_dist = dist;
}
printf("--max dist : %f\n",max_dist);
printf("--min dist : %f\n",min_dist);
//when the distance between two descriptors is more than twice the minimum distance, the match is considered wrong; but the minimum distance can sometimes be very small, so an empirical floor of 30 is used
for (int i = 0;i<descriptors_1.rows;i++)
{
if(match[i].distance<=max(2*min_dist,30.0))
{
matches.push_back(match[i]);
}
}
}
Point2d pixel2cam( const Point2d& p,const Mat& K )
{
return Point2d
(
(p.x - K.at<double>(0,2))/K.at<double>(0,0),
(p.y - K.at<double>(1,2))/K.at<double>(1,1)
);
}
void pose_estimation_2d2d ( std::vector<KeyPoint> keypoints_1,
std::vector<KeyPoint> keypoints_2,
std::vector< DMatch > matches,
Mat& R,Mat& t)
{
//camera intrinsics
Mat K = (Mat_<double>(3,3)<<
520.9,0,325.1,
0,512.0,249.7,
0, 0, 1);
//convert the matched points to vector<Point2f>
vector<Point2f> points1;
vector<Point2f> points2;
for( int i = 0;i<( int ) matches.size();i++ )
{
points1.push_back (keypoints_1[matches[i].queryIdx].pt);
points2.push_back (keypoints_2[matches[i].trainIdx].pt);
}
//compute the fundamental matrix
Mat fundamental_matrix;
fundamental_matrix = findFundamentalMat( points1,points2,CV_FM_8POINT );
cout<<"fundamental_matrix"<<endl<<fundamental_matrix<<endl;
//compute the essential matrix
Point2d principal_point(325.1,249.7); //camera principal point, calibrated value for the TUM dataset
double focal_length = 521; //camera focal length, calibrated value for the TUM dataset
Mat essential_matrix;
essential_matrix = findEssentialMat( points1,points2,focal_length,principal_point );
cout<<"essential_matrix is"<<endl<<essential_matrix<<endl;
//compute the homography matrix
Mat homography_matrix;
homography_matrix = findHomography ( points1,points2,RANSAC,3 );
cout<<"homography_matrix is"<<endl<<homography_matrix<<endl;
//recover the rotation and translation from the essential matrix
recoverPose ( essential_matrix,points1,points2,R,t,focal_length,principal_point );
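///recoverPose decomposes E into the four possible (R,t) combinations and keeps the one that
///places the triangulated points in front of both cameras (cheirality check);
///t is returned as a unit vector, so the translation scale cannot be determined from two views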
cout<<"R is"<<endl<<R<<endl;
cout<<"t is"<<endl<<t<<endl;
}