ORB-SLAM2 Dense Point Cloud Reconstruction: Stereo, Outdoor [1]


Previous post.

Step 4: Read each frame's pose and initialize the point clouds

As with the earlier RGBD pipeline, first add an XCKITTIKey class to XCTool.h to hold each frame's pose:

class XCKITTIKey {
public:
	double r00, r01, r02, r10, r11, r12, r20, r21, r22; // rows of the 3x3 world rotation matrix
	double tx, ty, tz;                                  // translation components
};

A note on the file format, which differs from the RGBD output: each line holds 12 numbers, in the order r00, r01, r02, tx, r10, r11, r12, ty, r20, r21, r22, tz. The rxx values form the world rotation matrix (the rotation from the origin orientation to the current pose), and tx, ty, tz, as in the RGBD case, are the negative of the world position.
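
In other words, each line is the 3×4 pose matrix [R | t] flattened row by row. The first frame, for instance, sits at the origin with identity rotation, so its line would read:

1 0 0 0 0 1 0 0 0 0 1 0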

In JoinPCStereo, first read this file to get each frame's pose, then build the image path arrays:

ifstream fcamTrans(camTransFile);
	vector<XCKITTIKey> keyVec;
	XCKITTIKey tkey;
	// Test the stream state rather than eof(), so a trailing newline
	// does not push one extra garbage entry into keyVec.
	while (fcamTrans >> tkey.r00 >> tkey.r01 >> tkey.r02 >> tkey.tx
	                 >> tkey.r10 >> tkey.r11 >> tkey.r12 >> tkey.ty
	                 >> tkey.r20 >> tkey.r21 >> tkey.r22 >> tkey.tz) {
		keyVec.push_back(tkey);
	}

	vector<string> leftPathVec, rightPathVec, depthLeftPathVec, depthRightPathVec;
	for (int i = 0; i < keyVec.size(); i++) {
		char ts[7];
		sprintf(ts, "%06d", i); // KITTI frames are numbered 000000, 000001, ...
		leftPathVec.push_back(dataMainPath + "\\image_0\\" + ts + ".png");
		rightPathVec.push_back(dataMainPath + "\\image_1\\" + ts + ".png");
		depthLeftPathVec.push_back(dataMainPath + "\\L\\" + ts + "l_disp.pgm");
		depthRightPathVec.push_back(dataMainPath + "\\R\\" + ts + "r_disp.pgm");
	}
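
For example, frame 12 produces these four paths (assuming dataMainPath points at the KITTI sequence folder):

<dataMainPath>\image_0\000012.png
<dataMainPath>\image_1\000012.png
<dataMainPath>\L\000012l_disp.pgm
<dataMainPath>\R\000012r_disp.pgm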

To initialize each point cloud, we need to rewrite Gao Bo's image2PointCloud; I added an image2PointCloudInverse function to his slamBase.h/.cpp:

PointCloud::Ptr image2PointCloudInverse(cv::Mat& rgb, string depthPath, CAMERA_INTRINSIC_PARAMETERS& camera)
{
	PointCloud::Ptr cloud(new PointCloud);
	// Load the disparity map as a single-channel 8-bit image
	IplImage* img = cvLoadImage(depthPath.c_str(), 0);
	CvScalar pix;

	for (int m = 0; m < img->height; m++)
		for (int n = 0; n < img->width; n++)
		{
			pix = cvGet2D(img, m, n);
			if (pix.val[0] == 0) continue; // zero disparity: depth undefined, skip

			PointT p;
			// Compute this point's spatial coordinates: Z = f * B / d
			p.z = camera.fx * 0.3861448 / pix.val[0];
			p.x = (n - camera.cx) * p.z / camera.fx;
			p.y = (m - camera.cy) * p.z / camera.fy;

			// Fetch its color from the rgb image,
			// a three-channel BGR image, hence this channel order
			p.b = rgb.ptr<uchar>(m)[n * 3];
			p.g = rgb.ptr<uchar>(m)[n * 3 + 1];
			p.r = rgb.ptr<uchar>(m)[n * 3 + 2];

			// Add p to the cloud
			cloud->points.push_back(p);
		}
	cvReleaseImage(&img); // the old C API does not free this automatically

	// Set cloud metadata and return
	cloud->height = 1;
	cloud->width = cloud->points.size();
	cloud->is_dense = false;

	return cloud;
}


The key line is:

p.z = camera.fx * 0.3861448 / pix.val[0];


This uses the stereo formula Z = f*B/d, where 0.3861448 is the camera baseline length for KITTI sequence 00 and d is the disparity taken from the disparity map. Strictly speaking this Z is not the Z of the point as seen from the left image's center, but I think the baseline is short enough relative to Z that, like a right triangle with one very long leg, where the long leg and the hypotenuse are nearly equal in length, the approximation holds.
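
As a quick sanity check, take fx ≈ 718.856 px (the published KITTI sequence 00 focal length; an assumption here, since the post does not list its intrinsics) and a pixel with disparity d = 50:

Z = fx * B / d = 718.856 * 0.3861448 / 50 ≈ 5.55

That is roughly five and a half meters, a plausible depth for a nearby object.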

With this function, the main function can now initialize the point clouds:

// Initialize one point cloud per frame
	vector<PointCloud::Ptr> pcVec;
	CAMERA_INTRINSIC_PARAMETERS tcam;
	tcam.fx = camera.fx;
	tcam.fy = camera.fy;
	tcam.cx = camera.cx;
	tcam.cy = camera.cy;
	tcam.scale = 1000;
	for (int i = 0; i < keyVec.size(); i++) {
		FRAME tframe;
		tframe.rgb = cv::imread(leftPathVec[i]);
		tframe.depth = cv::imread(depthLeftPathVec[i], -1); // unused here: image2PointCloudInverse loads the disparity itself
		pcVec.push_back(image2PointCloudInverse(tframe.rgb, depthLeftPathVec[i], tcam));
	}


Because of the stereo setup, if you view a single cloud directly you will notice:

  1. The depth at the left and right image edges is very inaccurate, because with the side-by-side camera layout those edge regions are visible to only one camera.
  2. There are long smears in the Z direction: the whole cloud stretches into a long streak that grows sparser with distance, although the nearby points are still quite accurate. The streak arises because many distant points, even sky pixels, are also converted into cloud points, at rather imprecise positions.

So I crop the cloud in the x and z directions to different extents. Then comes optional filtering:

//Crop in z and x
	for (auto& pciter : pcVec) {
		PointCloud::Ptr cloud_filtered3(new PointCloud());
		// z crop
		pcl::PassThrough<pcl::PointXYZRGBA> zpass;
		zpass.setInputCloud(pciter);
		zpass.setFilterFieldName("z");
		zpass.setFilterLimits(-100000, 10000.0);
		zpass.filter(*cloud_filtered3);
		pciter = cloud_filtered3;

		PointCloud::Ptr cloud_filtered4(new PointCloud());
		// x crop
		pcl::PassThrough<pcl::PointXYZRGBA> xpass;
		xpass.setInputCloud(cloud_filtered3);
		xpass.setFilterFieldName("x");
		xpass.setFilterLimits(-2000.0, 2000.0);
		xpass.filter(*cloud_filtered4);

		pciter = cloud_filtered4;
	}

	// Filter: optional voxel-grid downsampling, then statistical outlier removal
	bool bGridFilter = false;
	for (auto& pciter : pcVec) {
		if (bGridFilter) {
			PointCloud::Ptr cloud_filtered(new PointCloud());
			pcl::VoxelGrid<pcl::PointXYZRGBA> sor;
			sor.setInputCloud(pciter);
			sor.setLeafSize(0.01f, 0.01f, 0.01f);
			sor.filter(*cloud_filtered);
			pciter = cloud_filtered;
		}

		PointCloud::Ptr cloud_filtered2(new PointCloud());
		pcl::StatisticalOutlierRemoval<pcl::PointXYZRGBA> sor2;
		sor2.setInputCloud(pciter);
		sor2.setMeanK(50);
		sor2.setStddevMulThresh(1.0);
		sor2.filter(*cloud_filtered2);
		pciter = cloud_filtered2;
	}

Step 5: As with RGBD, align to the origin, stitch, save, and display.

// Transform: align each cloud to the world origin
	for (int i = 0; i < keyVec.size(); i++) {

		cv::Mat R;

		R = (cv::Mat_<double>(3, 3) <<
			keyVec[i].r00, keyVec[i].r01, keyVec[i].r02,
			keyVec[i].r10, keyVec[i].r11, keyVec[i].r12,
			keyVec[i].r20, keyVec[i].r21, keyVec[i].r22
			);

		// The file stores the origin-to-current rotation, so invert it
		R = R.inv();
		Eigen::Matrix3d r;
		cv::cv2eigen(R, r);

		// Combine the translation vector and rotation matrix into one transform
		Eigen::Isometry3d T = Eigen::Isometry3d::Identity();

		Eigen::AngleAxisd angle(r);
		cout << "translation" << endl;

		double tscale = 1;

		T = angle;
		T(0, 3) = keyVec[i].tx / tscale;
		T(1, 3) = keyVec[i].ty / tscale;
		T(2, 3) = keyVec[i].tz / tscale;

		PointCloud::Ptr toutput(new PointCloud());
		pcl::transformPointCloud(*pcVec[i], *toutput, T.matrix());
		pcVec[i] = toutput;
	}
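
Written out, each cloud point p (in homogeneous coordinates) is mapped by the matrix assembled above, with R and (tx, ty, tz) read from the pose file and tscale = 1:

	p' = T * p,    T = [ R^-1    t/tscale ]
	                   [ 0 0 0      1     ]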

	PointCloud::Ptr allOutput(new PointCloud()),allAfterFilter(new PointCloud());

	bool bExtractPlane = false;
	for (auto& pciter : pcVec) {
		// Optional: extract the dominant plane with RANSAC (disabled by default)
		if (bExtractPlane) {
			PointCloud::Ptr cloud_filtered3(new PointCloud());
			PointCloud::Ptr cloud_f(new PointCloud());

			pcl::ModelCoefficients::Ptr coefficients(new pcl::ModelCoefficients());
			pcl::PointIndices::Ptr inliers(new pcl::PointIndices());
			// Create the segmentation object
			pcl::SACSegmentation<pcl::PointXYZRGBA> seg;
			// Optional
			seg.setOptimizeCoefficients(true);
			// Mandatory
			seg.setModelType(pcl::SACMODEL_PLANE);
			seg.setMethodType(pcl::SAC_RANSAC);
			seg.setMaxIterations(1000);
			seg.setDistanceThreshold(0.01);

			// Create the filtering object
			pcl::ExtractIndices<pcl::PointXYZRGBA> extract;

			int i = 0, nr_points = (int)pciter->points.size();
			// While 30% of the original cloud is still there
			while (pciter->points.size() > 0.3 * nr_points)
			{
				// Segment the largest planar component from the remaining cloud
				seg.setInputCloud(pciter);
				seg.segment(*inliers, *coefficients);
				if (inliers->indices.size() == 0)
				{
					std::cerr << "Could not estimate a planar model for the given dataset." << std::endl;
					break;
				}

				// Extract the inliers
				extract.setInputCloud(pciter);
				extract.setIndices(inliers);
				extract.setNegative(false);
				extract.filter(*cloud_filtered3);
				std::cerr << "PointCloud representing the planar component: " << cloud_filtered3->width * cloud_filtered3->height << " data points." << std::endl;


				// Create the filtering object
				extract.setNegative(true);
				extract.filter(*cloud_f);
				pciter.swap(cloud_f);
				i++;
				cout << "%extract\n";
				break;
			}

			pciter = cloud_filtered3; // note: this keeps only the extracted planar component
		}
	}
	cout << "Pairing...\n";
	// algoFlag: 0 = plain concatenation; 1 = incremental pairwise ICP;
	// 2 = pairwise ICP/NDT between adjacent clouds, then accumulate the transforms
	int algoFlag = 0;
	bool bNDT = false;
	int ndtindex = 0;
	bool bPCH = false;
	if (algoFlag == 0) {
		for (int i = 0; i < pcVec.size(); i++) {
			*allOutput += *pcVec[i];
		}
	}
	else if (algoFlag == 1) {
		allOutput = pcVec[0];
		for (int i = 0; i < pcVec.size() - 1; i++) {
			PairwiseICP(pcVec[i + 1], allOutput, allOutput);
		}
	}
	else if (algoFlag == 2) {
		// Record the ICP transform between each adjacent pair of clouds
		vector<Eigen::Matrix4f> icpMatVec;
		vector<function<void(void)>> funcVec;
		vector<thread> taskVec;
		for (int i = 0; i < pcVec.size() - 1; i++) {
			if (bPCH) {
				funcVec.push_back([&]() {icpMatVec.push_back(XCPairwisePCH(pcVec[i], pcVec[i + 1])); });
			}
			else if (bNDT&&i == ndtindex) {
				funcVec.push_back([&]() {icpMatVec.push_back(XCPairwiseNDT(pcVec[i], pcVec[i + 1])); });
			}
			else {
				funcVec.push_back([&]() {
					icpMatVec.push_back(XCPairwiseICP(pcVec[i], pcVec[i + 1]));
				});
			}
			taskVec.push_back(thread(funcVec[i]));
			taskVec[i].join(); // joining immediately makes this loop effectively sequential
		}
		cout << "PairOver\n";
		// Apply each pairwise transform to all clouds that precede it
		for (int i = 1; i < pcVec.size(); i++) {
			for (int i2 = 0; i2 < i; i2++) {
				PointCloud::Ptr toutput(new PointCloud());
				pcl::transformPointCloud(*pcVec[i2], *toutput, icpMatVec[i - 1]);
				pcVec[i2] = toutput;
				cout << "@fusion:" << i2 + 1 << endl;
			}
		}

		for (int i = 0; i < pcVec.size(); i++) {
			*allOutput += *pcVec[i];
		}
	}
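
The helpers PairwiseICP, XCPairwiseICP, XCPairwiseNDT, and XCPairwisePCH come from earlier posts and are not listed here. For orientation, here is a minimal sketch of what the pairwise ICP step could look like with PCL's IterativeClosestPoint; the function name and parameter values are placeholders, not the actual implementation:

#include <pcl/registration/icp.h>

// Minimal pairwise ICP sketch: returns the 4x4 transform aligning `source` onto `target`.
Eigen::Matrix4f XCPairwiseICP_sketch(const PointCloud::Ptr& source, const PointCloud::Ptr& target)
{
	pcl::IterativeClosestPoint<pcl::PointXYZRGBA, pcl::PointXYZRGBA> icp;
	icp.setInputSource(source);
	icp.setInputTarget(target);
	icp.setMaximumIterations(50);          // iteration budget (placeholder value)
	icp.setMaxCorrespondenceDistance(1.0); // ignore matches farther than this (placeholder)
	PointCloud aligned;
	icp.align(aligned);                    // aligned = source moved onto target
	if (!icp.hasConverged())
		cout << "ICP did not converge\n";
	return icp.getFinalTransformation();
}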
	

	pcl::io::savePCDFile("StereoReConstructionresult.pcd", *allOutput);
	cout << "Final result saved." << endl;

	pcl::visualization::CloudViewer viewer("viewer");
	struct callback_args cb_args;
	cb_args.clicked_points_3d = allOutput;
	cb_args.viewerPtr = &viewer;
	viewer.registerPointPickingCallback(pp_callback, (void*)&cb_args);
	viewer.showCloud(allOutput);
	while (!viewer.wasStopped())
	{

	}
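
callback_args and pp_callback are likewise carried over from an earlier post and not shown. A minimal sketch, modeled on PCL's point-picking demo (the struct members match how cb_args is filled above; the body is my assumption):

struct callback_args {
	PointCloud::Ptr clicked_points_3d;
	pcl::visualization::CloudViewer* viewerPtr;
};

void pp_callback(const pcl::visualization::PointPickingEvent& event, void* args)
{
	struct callback_args* data = (struct callback_args*)args;
	if (event.getPointIndex() == -1)
		return; // the click did not hit a point
	PointT p;
	event.getPoint(p.x, p.y, p.z); // coordinates of the picked point
	data->clicked_points_3d->points.push_back(p);
	cout << "picked: " << p.x << " " << p.y << " " << p.z << endl;
}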

This completes the ORB-SLAM2-based stereo dense point cloud reconstruction.
A few problems remain; the next post will tackle them.
