Reputation: 97
First of all, thanks for reading.
I have an issue generating a point cloud with PCL from the information provided by OpenCV functions.
Below are the functions and the matrices that I'm using.
Calling the triangulation function:
TriangulatePoints(keypoints1, keypoints2, K.t(), P, P1, pointCloud);
Populating the cloud:
PopulatePCLPointCloud(pointCloud);
Populate function:
void PopulatePCLPointCloud(const vector<Point3d>& pointcloud) // Populate the point cloud
{
    cout << "Creating point cloud...";
    cloud.reset(new pcl::PointCloud<pcl::PointXYZRGB>);
    for (unsigned int i = 0; i < pointcloud.size(); i++)
    {
        // get the RGB color value for the point
        Vec3b rgbv(255, 255, 0);
        // check for erroneous coordinates (NaN, Inf, etc.)
        if (pointcloud[i].x != pointcloud[i].x || _isnan(pointcloud[i].x) ||
            pointcloud[i].y != pointcloud[i].y || _isnan(pointcloud[i].y) ||
            pointcloud[i].z != pointcloud[i].z || _isnan(pointcloud[i].z) ||
            fabsf(pointcloud[i].x) > 10.0 || fabsf(pointcloud[i].y) > 10.0 || fabsf(pointcloud[i].z) > 10.0)
        {
            continue;
        }
        pcl::PointXYZRGB pclp;
        // 3D coordinates
        pclp.x = pointcloud[i].x;
        pclp.y = pointcloud[i].y;
        pclp.z = pointcloud[i].z;
        // RGB color, needs to be represented as an integer (uint32_t) packed into a float
        float rgb = ((uint32_t)rgbv[2] << 16 | (uint32_t)rgbv[1] << 8 | (uint32_t)rgbv[0]);
        pclp.rgb = *reinterpret_cast<float*>(&rgb);
        cloud->push_back(pclp);
    }
    cloud->width = (uint32_t)cloud->points.size(); // number of points
    cloud->height = 1;                             // a list of points, one row of data
}
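To check the result, the filled cloud can then be saved or displayed with PCL. This is just a minimal sketch (not part of my original code), assuming cloud is the same global pcl::PointCloud<pcl::PointXYZRGB>::Ptr used above:
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/cloud_viewer.h>

// Minimal sketch: write the cloud to disk and show it in a viewer window,
// assuming "cloud" is the global Ptr filled by PopulatePCLPointCloud above.
void ShowAndSaveCloud()
{
    pcl::io::savePCDFileASCII("cloud.pcd", *cloud);         // write an ASCII .pcd file
    pcl::visualization::CloudViewer viewer("Point cloud");
    viewer.showCloud(cloud);                                // non-blocking display
    while (!viewer.wasStopped()) {}                         // keep the window open
}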
The function that fills the cloud with the 3D points (I commented out the reprojection-error part because I copied this code from Mastering OpenCV and it did not work):
double TriangulatePoints(const vector<KeyPoint>& pt_set1, const vector<KeyPoint>& pt_set2, const Mat& Kinv, const Matx34d& P, const Matx34d& P1, vector<Point3d>& pointcloud)
{
    vector<double> reproj_error;
    for (unsigned int i = 0; i < min(pt_set1.size(), pt_set2.size()); i++)
    {
        // convert to normalized homogeneous coordinates
        Point2f kp = pt_set1[i].pt;
        Point3d u(kp.x, kp.y, 1.0);
        Mat_<double> um = Kinv * Mat_<double>(u);
        u = (Point3d)um(0, 0);
        Point2f kp1 = pt_set2[i].pt;
        Point3d u1(kp1.x, kp1.y, 1.0);
        Mat_<double> um1 = Kinv * Mat_<double>(u1);
        u1 = (Point3d)um1(0, 0);
        // triangulate
        Mat_<double> X = LinearLSTriangulation(u, P, u1, P1);
        /*Mat_<double> xPt_img = Kinv.t() * Mat(P1) * X;
        Point2f xPt_img_(xPt_img(0)/xPt_img(2), xPt_img(1)/xPt_img(2));
        // calculate reprojection error
        reproj_error.push_back(norm(xPt_img_ - kp1)); // store 3D point */
        // fill the point cloud
        pointcloud.push_back(Point3d(X(0), X(1), X(2)));
    }
    // return mean reprojection error
    /*Scalar me = mean(reproj_error);
    return me[0]; */
    return 0;
}
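For completeness, the commented-out reprojection-error check could be restored along these lines. This is only my sketch, not the book's exact code: ReprojectionError is a hypothetical helper, and K here is the calibration matrix itself (not its inverse). It projects the triangulated point back into the second image with K * P1 and measures the pixel distance to the observed keypoint.
// Hypothetical helper (sketch): reprojection error of one triangulated point
// X (3x1, from LinearLSTriangulation) against the measured keypoint kp1 in image 2.
// Assumes K is the 3x3 calibration matrix stored as CV_64F.
double ReprojectionError(const Mat& K, const Matx34d& P1,
                         const Mat_<double>& X, const Point2f& kp1)
{
    Mat_<double> Xh(4, 1);                      // homogeneous 3D point (X, Y, Z, 1)
    Xh(0) = X(0); Xh(1) = X(1); Xh(2) = X(2); Xh(3) = 1.0;
    Mat_<double> xPt_img = K * Mat(P1) * Xh;    // project into image 2 (homogeneous pixel coords)
    Point2f projected(xPt_img(0) / xPt_img(2), xPt_img(1) / xPt_img(2));
    return norm(projected - kp1);               // distance in pixels
}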
Linear triangulation:
Mat_<double> LinearLSTriangulation(Point3d u,   // homogeneous image point (u,v,1)
                                   Matx34d P,   // camera 1 matrix
                                   Point3d u1,  // homogeneous image point in 2nd camera
                                   Matx34d P1   // camera 2 matrix
                                   )
{
    // build A matrix
    Matx43d A(u.x*P(2, 0) - P(0, 0),    u.x*P(2, 1) - P(0, 1),    u.x*P(2, 2) - P(0, 2),
              u.y*P(2, 0) - P(1, 0),    u.y*P(2, 1) - P(1, 1),    u.y*P(2, 2) - P(1, 2),
              u1.x*P1(2, 0) - P1(0, 0), u1.x*P1(2, 1) - P1(0, 1), u1.x*P1(2, 2) - P1(0, 2),
              u1.y*P1(2, 0) - P1(1, 0), u1.y*P1(2, 1) - P1(1, 1), u1.y*P1(2, 2) - P1(1, 2));
    // build B vector
    Matx41d B(-(u.x*P(2, 3) - P(0, 3)),
              -(u.y*P(2, 3) - P(1, 3)),
              -(u1.x*P1(2, 3) - P1(0, 3)),
              -(u1.y*P1(2, 3) - P1(1, 3)));
    // solve for X
    Mat_<double> X;
    solve(A, B, X, DECOMP_SVD);
    return X;
}
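For reference, this is the standard linear least-squares (DLT) triangulation. Writing the rows of P as $p^1, p^2, p^3$ (and $p'^1, p'^2, p'^3$ for P1), the normalized points $u = (u_x, u_y, 1)$ and $u' = (u'_x, u'_y, 1)$ give two linear constraints per view on the 3D point $(X, Y, Z)$:
$$
A \begin{pmatrix} X \\ Y \\ Z \end{pmatrix} = B, \qquad
A = \begin{pmatrix}
u_x\,p^3_{1:3} - p^1_{1:3} \\
u_y\,p^3_{1:3} - p^2_{1:3} \\
u'_x\,{p'}^3_{1:3} - {p'}^1_{1:3} \\
u'_y\,{p'}^3_{1:3} - {p'}^2_{1:3}
\end{pmatrix}, \qquad
B = -\begin{pmatrix}
u_x\,p^3_4 - p^1_4 \\
u_y\,p^3_4 - p^2_4 \\
u'_x\,{p'}^3_4 - {p'}^1_4 \\
u'_y\,{p'}^3_4 - {p'}^2_4
\end{pmatrix},
$$
where $p^i_{1:3}$ is the first three entries of row $i$ and $p^i_4$ its last entry. This is exactly the Matx43d A and Matx41d B built above; solve(A, B, X, DECOMP_SVD) then returns the least-squares solution.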
Matrices:
Fundamental =
[-5.365548729323536e-007, 0.0003108718787914248, -0.0457266834161677;
-0.0003258809500026533, 4.695400741230473e-006, 1.295466303565132;
0.05008017646011816, -1.300323239531621, 1]
Calibration Matrix =
[744.2366711500123, 0, 304.166818982576;
0, 751.1308610972965, 225.3750058508892;
0, 0, 1]
Essential =
[-0.2971914249411831, 173.7833277398352, 17.99033324690517;
-182.1736856953757, 2.649133690692166, 899.405863948026;
-17.51073288084396, -904.8934348365967, 0.3895173270497594]
Rotation matrix =
[-0.9243506387712034, 0.03758098759490174, -0.3796887751496749;
0.03815782996164848, 0.9992536546828119, 0.006009460513344713;
-0.379631237671357, 0.008933251056327281, 0.9250947629349537]
Translation matrix =
[-0.9818733349058273;
0.01972152607878091;
-0.1885094576142884]
P0 matrix =
[1, 0, 0, 0;
0, 1, 0, 0;
0, 0, 1, 0]
P1 matrix =
[-0.9243506387712034, 0.03758098759490174, -0.3796887751496749, -0.9818733349058273;
0.03815782996164848, 0.9992536546828119, 0.006009460513344713, 0.01972152607878091;
-0.379631237671357, 0.008933251056327281, 0.9250947629349537, -0.1885094576142884]
Upvotes: 2
Views: 1767
Reputation: 97
I solved the problem; I had two big problems.
First of all, I was passing the unfiltered keypoints to the triangulation function, so I was triangulating the matched points together with all the useless ones, and there are usually far more useless points than useful ones.
So, as you will see in the triangulation function below, I now pass the matches I obtained after the RANSAC test and symmetry test filtering, and I index into the keypoints with the match indices. So everything is good =) only the good matches are triangulated.
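For reference, one common way to do this kind of filtering (my own sketch here, not the exact ransacTest/SymTest code I used) is to keep only the matches marked as inliers by findFundamentalMat with RANSAC:
// Sketch: keep only RANSAC-consistent matches before triangulating.
// Assumes keypoints1/keypoints2 and the raw "matches" from the descriptor matcher.
vector<Point2f> pts1, pts2;
for (unsigned int i = 0; i < matches.size(); i++)
{
    pts1.push_back(keypoints1[matches[i].queryIdx].pt);
    pts2.push_back(keypoints2[matches[i].trainIdx].pt);
}
vector<uchar> inlierMask;
findFundamentalMat(pts1, pts2, FM_RANSAC, 3.0, 0.99, inlierMask);
vector<DMatch> goodMatches;
for (unsigned int i = 0; i < matches.size(); i++)
    if (inlierMask[i])
        goodMatches.push_back(matches[i]);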
Second, the TriangulatePoints function itself was wrong. Here it is corrected:
double TriangulatePoints(const vector<KeyPoint>& pt_set1, const vector<KeyPoint>& pt_set2, const Mat& Kinv, const Matx34d& P, const Matx34d& P1, vector<Point3d>& pointcloud, vector<DMatch>& matches)
{
    //Mat_<double> KP1 = Kinv.inv() * Mat(P1);
    vector<double> reproj_error;
    for (unsigned int i = 0; i < matches.size(); i++)
    {
        // convert to normalized homogeneous coordinates
        Point2f kp = pt_set1[matches[i].queryIdx].pt;
        Point3d u(kp.x, kp.y, 1.0);
        Mat_<double> um = Kinv * Mat_<double>(u);
        u.x = um(0);
        u.y = um(1);
        u.z = um(2);
        Point2f kp1 = pt_set2[matches[i].trainIdx].pt;
        Point3d u1(kp1.x, kp1.y, 1.0);
        Mat_<double> um1 = Kinv * Mat_<double>(u1);
        u1.x = um1(0);
        u1.y = um1(1);
        u1.z = um1(2);
        // triangulate
        Mat_<double> X = LinearLSTriangulation(u, P, u1, P1);
        pointcloud.push_back(Point3d(X(0), X(1), X(2)));
    }
    cout << "Number of points: " << pointcloud.size() << endl;
    return 1;
}
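With that, the call from the question would become something like this (goodMatches is the hypothetical name for the filtered match list; I pass K.inv() rather than K.t(), since the parameter is the inverse calibration matrix Kinv):
// Sketch of the corrected call, using the filtered matches and the inverted
// calibration matrix.
Mat Kinv = K.inv();
vector<Point3d> pointCloud;
TriangulatePoints(keypoints1, keypoints2, Kinv, P, P1, pointCloud, goodMatches);
PopulatePCLPointCloud(pointCloud);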
Upvotes: 1