《视觉SLAM十四讲精品总结》8:VO—— 3D-3D:ICP+SVD+BA

一、ICP

ICP(Iterative Closest Point)迭代最近点,指代匹配好的两组点云间运动估计问题。

ICP算法是对于X中的每一个点用当前的R和 t在Y中找最近的点(比如用欧式距离),然后这两个点就成了一对了,每个点都有了对应的映射点,用每一对的坐标列一个方程,就得到一系列的方程。

重复迭代运行上述过程,直到均方误差小于某个阈值。 

二、RANSAC

 (RANdom SAmple Consensus随机抽样一致),它可以从一组包含“局外点”的观测数据集中,通过迭代方式估计数学模型的参数。这个算法要求解的,就是能够被尽可能多的局内点支持的那个模型的参数。

RANSAC算法的输入是一组观测数据,一个可以解释或者适应于观测数据的参数化模型,一些可信的参数。

模型对应的是空间中一个点云数据到另外一个点云数据的旋转以及平移。

第一步随机选取一个点云中的一对点,利用其不变特征(两点距离、两点法向量夹角)作为哈希表的索引值,搜索另一个点云中对应的一对点,然后计算得到旋转及平移的参数值。

然后应用该变换,找到其他局内点,并在找到局内点之后重新计算旋转及平移,作为下一个状态。

然后迭代上述过程,直至收敛,得到最终的位姿。

三、代码解析

1、主框架 

int main(int argc, char** argv)
{
	// Usage: <prog> img1 img2 depth1 depth2
	// Without this guard the argv[1..4] dereferences below crash on missing args.
	if (argc != 5)
	{
		cout << "usage: pose_estimation_3d3d img1 img2 depth1 depth2" << endl;
		return 1;
	}
	Mat img_1 = imread(argv[1], CV_LOAD_IMAGE_COLOR);
	Mat img_2 = imread(argv[2], CV_LOAD_IMAGE_COLOR);
	Mat depth1 = imread(argv[3], CV_LOAD_IMAGE_UNCHANGED);       // depth map: 16-bit unsigned, single channel
	Mat depth2 = imread(argv[4], CV_LOAD_IMAGE_UNCHANGED);       // depth map: 16-bit unsigned, single channel
	//1. detect keypoints in both images and match their descriptors
	vector<KeyPoint> keypoints_1, keypoints_2;
	vector<DMatch> matches;
	find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
	cout << "一共找到了" << matches.size() << "组匹配点" << endl;

	//2. back-project each matched pixel to a 3D point using its depth
	// K: pinhole intrinsics (fx, fy, cx, cy) of the TUM dataset camera
	Mat K = (Mat_<double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
	vector<Point3f> pts1, pts2;
	for (DMatch m : matches)
	{
		// read the raw depth at the keypoint's pixel (row = y, col = x)
		ushort d1 = depth1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
		ushort d2 = depth2.ptr<unsigned short>(int(keypoints_2[m.trainIdx].pt.y))[int(keypoints_2[m.trainIdx].pt.x)];
		if (d1 == 0 || d2 == 0)   // zero depth means "no measurement": skip the pair
			continue;
		Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
		Point2d p2 = pixel2cam(keypoints_2[m.trainIdx].pt, K);
		// TUM depth images store depth * 5000 (i.e. 5000 units per meter)
		float dd1 = float(d1) / 5000.0;
		float dd2 = float(d2) / 5000.0;
		pts1.push_back(Point3f(p1.x*dd1, p1.y*dd1, dd1));
		pts2.push_back(Point3f(p2.x*dd2, p2.y*dd2, dd2));
	}
	cout << "3d-3d pairs: " << pts1.size() << endl;
	// 3. closed-form pose estimation via SVD (ICP with known correspondences)
	Mat R, t;
	pose_estimation_3d3d(pts1, pts2, R, t);
	cout << "ICP via SVD results: " << endl;
	cout << "R = " << R << endl;
	cout << "t = " << t << endl;
	// the inverse transform maps frame-1 points back into frame 2
	cout << "R_inv = " << R.t() << endl;
	cout << "t_inv = " << -R.t() *t << endl;
	// 4. refine the pose with nonlinear bundle adjustment (g2o)
	cout << "calling bundle adjustment" << endl;
	bundleAdjustment(pts1, pts2, R, t);

	// 5. sanity check: p1 should equal R*p2 + t for each pair.
	// Bound the loop by pts1.size() — fewer than 5 pairs may survive the depth filter.
	for (int i = 0; i < 5 && i < (int)pts1.size(); i++)
	{
		cout << "p1 = " << pts1[i] << endl;
		cout << "p2 = " << pts2[i] << endl;
		cout << "(R*p2+t) = " <<
			R * (Mat_<double>(3, 1) << pts2[i].x, pts2[i].y, pts2[i].z) + t
			<< endl;
		cout << endl;
	}
	return 0;
}

2、SVD解决ICP问题

1、调用函数svd

// Illustration of Eigen's SVD API (float variant of what pose_estimation_3d3d uses):
using namespace Eigen;
// Decompose W = U * S * V^T, requesting the full (square) U and V factors.
JacobiSVD<Eigen::Matrix3f> svd(W, ComputeFullU | ComputeFullV );
// Retrieve the orthogonal factors; R is later recovered as U * V^T.
Matrix3f V = svd.matrixV(), U = svd.matrixU();
void pose_estimation_3d3d(
	const vector<Point3f>& pts1,
	const vector<Point3f>& pts2,
	Mat& R, Mat& t
	)
{
	//1. 去质心坐标q1 q2
	Point3f p1, p2;//Point3f是一个3D点的坐标  
	int N = pts1.size();
	for (int i = 0; i<N; i++)
	{
		p1 += pts1[i];
		p2 += pts2[i];
	}
	p1 = Point3f(Vec3f(p1) / N);//Vec3f指的是图像3通道
	p2 = Point3f(Vec3f(p2) / N);

	vector<Point3f> q1(N), q2(N); // vector<Point3f>向量,向量的每个分量都是3D点
	for (int i = 0; i<N; i++)
	{
		q1[i] = pts1[i] - p1;
		q2[i] = pts2[i] - p2;
	}

	// 2. W=sum(q1*q2^T)
	Eigen::Matrix3d W = Eigen::Matrix3d::Zero();//Matrix3d是3*3,Vector3d是3*1
	for (int i = 0; i<N; i++)
	{
		W += Eigen::Vector3d(q1[i].x, q1[i].y, q1[i].z) * Eigen::Vector3d(q2[i].x, q2[i].y, q2[i].z).transpose();
	}
	cout << "W=" << W << endl;

	// 3. SVD on W
	Eigen::JacobiSVD<Eigen::Matrix3d> svd(W, Eigen::ComputeFullU | Eigen::ComputeFullV);
	Eigen::Matrix3d U = svd.matrixU();
	Eigen::Matrix3d V = svd.matrixV();
	cout << "U=" << U << endl;
	cout << "V=" << V << endl;
	// 4. R=U*V^T
	Eigen::Matrix3d R_ = U* (V.transpose());
	Eigen::Vector3d t_ = Eigen::Vector3d(p1.x, p1.y, p1.z) - R_ * Eigen::Vector3d(p2.x, p2.y, p2.z);

	// convert to cv::Mat
	R = (Mat_<double>(3, 3) <<
		R_(0, 0), R_(0, 1), R_(0, 2),
		R_(1, 0), R_(1, 1), R_(1, 2),
		R_(2, 0), R_(2, 1), R_(2, 2)
		);
	t = (Mat_<double>(3, 1) << t_(0, 0), t_(1, 0), t_(2, 0));
}

1. 去质心坐标q1 q2

2. W=sum(q1*q2^T)

3. SVD on W

4. R=U*V^T,t=p1-R*p2

  • Point3f p1 指的是 3D 点坐标,vector&lt;Point3f&gt; q1(N) 指的是向量的每个分量都是 3D 点
  • p1 = Point3f ( Vec3f (p1) / N);  //Vec3f 是 3 维 float 向量,借助它对坐标做标量除法求质心
     

3、BA非线性优化

// Refine the ICP pose with g2o: a single SE3 vertex, one unary 3D-3D edge per
// point pair, minimized by Gauss-Newton. R, t hold the SVD initial guess;
// the optimized transform is only printed (the in/out params are not rewritten).
void bundleAdjustment(
	const vector< Point3f >& pts1,
	const vector< Point3f >& pts2,
	Mat& R, Mat& t)
{
	// 1. set up g2o: block solver (pose dim 6, landmark dim 3) + linear solver.
	// g2o takes ownership of these raw pointers through setAlgorithm().
	typedef g2o::BlockSolver< g2o::BlockSolverTraits<6, 3> > Block;
	Block::LinearSolverType* linearSolver = new g2o::LinearSolverEigen<Block::PoseMatrixType>(); // linear solver
	Block* solver_ptr = new Block(linearSolver);      // block solver
	g2o::OptimizationAlgorithmGaussNewton* solver = new g2o::OptimizationAlgorithmGaussNewton(solver_ptr);
	g2o::SparseOptimizer optimizer;
	optimizer.setAlgorithm(solver);

	// 2. add the single pose vertex (SE3Quat: quaternion + translation),
	// initialized at the identity.
	g2o::VertexSE3Expmap* pose = new g2o::VertexSE3Expmap(); // camera pose
	pose->setId(0);
	pose->setEstimate(g2o::SE3Quat(
		Eigen::Matrix3d::Identity(),
		Eigen::Vector3d(0, 0, 0)
		));
	optimizer.addVertex(pose);

	// 3. add one unary edge per point pair: the edge stores pts2[i] and
	// measures pts1[i], so the error is pts1[i] - T(pts2[i]).
	int index = 1;
	for (size_t i = 0; i<pts1.size(); i++)
	{
		EdgeProjectXYZRGBDPoseOnly* edge = new EdgeProjectXYZRGBDPoseOnly(
			Eigen::Vector3d(pts2[i].x, pts2[i].y, pts2[i].z));
		edge->setId(index);
		edge->setVertex(0, dynamic_cast<g2o::VertexSE3Expmap*> (pose));
		edge->setMeasurement(Eigen::Vector3d(
			pts1[i].x, pts1[i].y, pts1[i].z));
		edge->setInformation(Eigen::Matrix3d::Identity()*1e4); // uniform isotropic weight

		optimizer.addEdge(edge);
		index++;
	}
	// 4. run the optimization (10 Gauss-Newton iterations)
	optimizer.setVerbose(true);
	optimizer.initializeOptimization();
	optimizer.optimize(10);

	cout << endl << "after optimization:" << endl;
	cout << "T=" << endl << Eigen::Isometry3d(pose->estimate()).matrix() << endl;

}

边EdgeProjectXYZRGBDPoseOnly类(3D-3D的边)

// Unary edge for a 3D-3D constraint: error dim 3, measurement Eigen::Vector3d,
// the only vertex is the SE3 camera pose. The edge stores the point observed
// in the second frame (_point) and measures its mate in the first frame.
class EdgeProjectXYZRGBDPoseOnly : public g2o::BaseUnaryEdge<3, Eigen::Vector3d, g2o::VertexSE3Expmap>
{
public:
	EIGEN_MAKE_ALIGNED_OPERATOR_NEW;
	//1. construct with the frame-2 point this edge constrains
	EdgeProjectXYZRGBDPoseOnly(const Eigen::Vector3d& point) : _point(point) {}
	//2. error = p - T(p'), i.e. measurement minus the transformed stored point
	virtual void computeError()
	{
		const g2o::VertexSE3Expmap* pose = static_cast<const g2o::VertexSE3Expmap*> (_vertices[0]);
		// measurement is p, point is p'
		_error = _measurement - pose->estimate().map(_point);
	}
	//3. analytic Jacobian of the error w.r.t. the se(3) increment
	// (columns 0-2: rotation part, columns 3-5: translation part);
	// rows use [I, -skew(T*p')] with the overall minus sign from the error.
	virtual void linearizeOplus()
	{
		g2o::VertexSE3Expmap* pose = static_cast<g2o::VertexSE3Expmap *>(_vertices[0]);
		g2o::SE3Quat T(pose->estimate());
		Eigen::Vector3d xyz_trans = T.map(_point); // point transformed by the current pose estimate
		double x = xyz_trans[0];
		double y = xyz_trans[1];
		double z = xyz_trans[2];

		_jacobianOplusXi(0, 0) = 0;
		_jacobianOplusXi(0, 1) = -z;
		_jacobianOplusXi(0, 2) = y;
		_jacobianOplusXi(0, 3) = -1;
		_jacobianOplusXi(0, 4) = 0;
		_jacobianOplusXi(0, 5) = 0;

		_jacobianOplusXi(1, 0) = z;
		_jacobianOplusXi(1, 1) = 0;
		_jacobianOplusXi(1, 2) = -x;
		_jacobianOplusXi(1, 3) = 0;
		_jacobianOplusXi(1, 4) = -1;
		_jacobianOplusXi(1, 5) = 0;

		_jacobianOplusXi(2, 0) = -y;
		_jacobianOplusXi(2, 1) = x;
		_jacobianOplusXi(2, 2) = 0;
		_jacobianOplusXi(2, 3) = 0;
		_jacobianOplusXi(2, 4) = 0;
		_jacobianOplusXi(2, 5) = -1;
	}

	// Serialization is unused in this example; return false explicitly —
	// the original fell off the end of a non-void function (undefined behavior).
	bool read(istream& in) { return false; }
	bool write(ostream& out) const { return false; }
protected:
	Eigen::Vector3d _point; // point in the second frame, set at construction
};

猜你喜欢

转载自blog.csdn.net/try_again_later/article/details/81940965