Reputation: 3125
I am converting this depth image to a pcl::PointCloud using the following:
PointCloud::Ptr PointcloudUtils::RGBDtoPCL(cv::Mat depth_image, Eigen::Matrix3f& _intrinsics)
{
    PointCloud::Ptr pointcloud(new PointCloud);

    float fx = _intrinsics(0, 0);
    float fy = _intrinsics(1, 1);
    float cx = _intrinsics(0, 2);
    float cy = _intrinsics(1, 2);

    float factor = 1;

    depth_image.convertTo(depth_image, CV_32F); // convert the image data to float type

    if (!depth_image.data) {
        std::cerr << "No depth data!!!" << std::endl;
        exit(EXIT_FAILURE);
    }

    pointcloud->width = depth_image.cols;  // dimensions must be initialized to use 2-D indexing
    pointcloud->height = depth_image.rows;
    pointcloud->resize(pointcloud->width * pointcloud->height);

#pragma omp parallel for
    for (int v = 0; v < depth_image.rows; v += 4)      // subsample every 4th row/column
    {
        for (int u = 0; u < depth_image.cols; u += 4)
        {
            float Z = depth_image.at<float>(v, u) / factor;

            PointT p;
            p.z = Z / 1000;                 // millimetres to metres
            p.x = (u - cx) * Z / fx / 1000;
            p.y = (v - cy) * Z / fy / 1000;

            // 2-D indexing keeps the cloud organized; push_back after resize()
            // would append extra points and is not thread-safe under OpenMP
            pointcloud->at(u, v) = p;
        }
    }

    return pointcloud;
}
This works great. I have run some processing on the cloud, and now I need to convert the point cloud back to a cv::Mat depth image. I cannot find an example for this, and am having trouble getting my head around it. What is the opposite of the above function?
How can I convert a pcl::PointCloud back to a cv::Mat?
Thank you.
Upvotes: 3
Views: 8481
Reputation: 316
I don't know about OpenCV methods, but in case you have done something that makes your point cloud unstructured, your process could be something like this:
// rescale the points by 1000
p.x = p.x * 1000; p.y = p.y * 1000; p.z = p.z * 1000;

// project the points onto the image plane and correct for the principal point and factor
image_p.x = (p.x * fx / p.z - cx) * factor;
image_p.y = (p.y * fy / p.z - cy) * factor;
Now, depending on what you have done with the point cloud, the points might not map exactly to image-matrix pixel centers (or top-left corners, for some applications), or you might be missing points, leaving NaN/0-value pixels. How you handle that is up to you, but the simplest way would be to cast image_p.x and image_p.y to integers, make sure they are within the image boundaries, and set
depth_image.at<float>(image_p.y, image_p.x) = p.z;
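Putting that together, here is a minimal, untested sketch of the idea, assuming a pcl::PointXYZ cloud, the same fx/fy/cx/cy intrinsics as in the question, and depth written back in millimetres; the function name cloudToDepth and its parameters are just placeholders:

    #include <cmath>
    #include <opencv2/core.hpp>
    #include <pcl/point_cloud.h>
    #include <pcl/point_types.h>

    cv::Mat cloudToDepth(const pcl::PointCloud<pcl::PointXYZ>::Ptr& cloud,
                         int width, int height,
                         float fx, float fy, float cx, float cy)
    {
        // unmapped pixels stay 0; initialize with NaN instead if that suits your pipeline
        cv::Mat depth_image(height, width, CV_32F, cv::Scalar(0.0f));

        for (const auto& p : cloud->points)
        {
            if (!std::isfinite(p.z) || p.z <= 0.0f)
                continue;                          // skip invalid points

            // undo the division by 1000 from the forward conversion
            float x = p.x * 1000.0f;
            float y = p.y * 1000.0f;
            float z = p.z * 1000.0f;

            // pinhole projection back onto the image plane
            int u = static_cast<int>(x * fx / z + cx);
            int v = static_cast<int>(y * fy / z + cy);

            if (u >= 0 && u < width && v >= 0 && v < height)
                depth_image.at<float>(v, u) = z;   // depth in millimetres
        }

        return depth_image;
    }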
Upvotes: 0
Reputation: 6468
This is untested code, since I don't have the point cloud library on my machine. From your own conversion code I am assuming your image is a single-channel image.
void PCL2Mat(PointCloud::Ptr pointcloud, cv::Mat& depth_image, int original_width, int original_height)
{
    if (!depth_image.empty())
        depth_image.release();
    depth_image.create(original_height, original_width, CV_32F);

#pragma omp parallel for
    for (int v = 0; v < depth_image.rows; ++v)
    {
        for (int u = 0; u < depth_image.cols; ++u)
        {
            // index into the organized cloud row by row; computing the index from v and u
            // avoids the data race a shared running counter would cause under OpenMP
            depth_image.at<float>(v, u) = pointcloud->points.at(v * depth_image.cols + u).z * 1000;
        }
    }

    depth_image.convertTo(depth_image, CV_8U); // note: truncates depths above 255; drop this to keep the full range
}
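For example, assuming the cloud still holds original_width * original_height points in row-major order (as produced by the organized conversion in the question), a hypothetical call could look like this; the 640x480 resolution is only an example:

    cv::Mat depth_image;
    PCL2Mat(cloud, depth_image, 640, 480); // cloud: organized PointCloud::Ptr from RGBDtoPCL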
Upvotes: 1