As part of a project, I need to run SLAM to determine points of interest from my Gazebo Classic simulation.
I've made a URDF that is as representative as possible of the real model: the two cameras are spaced the right distance apart, I used the same HFOV as the real cameras, and the intrinsic and projection-matrix parameters are equal to those in the SLAM EuRoC.yaml file. I used the Gazebo camera plugins to do this. I have also simulated this model in a well-textured environment (Baylands type).
I use the ORB-SLAM3 Stereo Fixed library: ORBSLAM3-STEREO-FIXED
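For reference, the way I understand the link between the HFOV and the intrinsics is the usual pinhole relation fx = (width / 2) / tan(hfov / 2). This is the little cross-check sketch I mean (my own standalone snippet, not part of the node; values copied from the SDF below):

#include <cmath>
#include <cstdio>

// Pinhole cross-check (sketch): the focal length implied by an HFOV and an image width.
int main() {
    const double hfov = 2.70526;  // rad, as in my SDF below
    const double width = 500.0;   // px
    const double fx = (width / 2.0) / std::tan(hfov / 2.0);
    std::printf("fx implied by HFOV: %.2f px\n", fx);  // ~55.4 px for these values
    return 0;
}

Here is the corresponding sensor definition from my model: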
<sensor name='cameraLeft' type='camera'>
  <pose relative_to='camera_chassis'>0 0.03649 0 0 -0 0</pose>
  <always_on>1</always_on>
  <update_rate>4</update_rate>
  <visualize>1</visualize>
  <topic>camera/left/</topic>
  <camera>
    <horizontal_fov>2.70526</horizontal_fov>
    <image>
      <width>500</width>
      <height>300</height>
      <format>L8</format>
    </image>
    <clip>
      <near>0.1</near>
      <far>100</far>
    </clip>
    <lens>
      <intrinsics>
        <fx>307.99879313442784</fx>
        <fy>328.57751082369407</fy>
        <cx>280.24657373225784</cx>
        <cy>134.9335316514946</cy>
        <s>0</s>
      </intrinsics>
      <projection>
        <p_fx>312.8884288345382</p_fx>
        <p_fy>312.8884288345382</p_fy>
        <p_cx>312.28302001953125</p_cx>
        <p_cy>135.72957611083984</p_cy>
        <tx>0.0</tx>
        <ty>0.0</ty>
      </projection>
    </lens>
  </camera>
  <plugin name='camera_left_controller' filename='libgazebo_ros_camera.so'>
    <camera_name>cameraLeft</camera_name>
    <frame_name>camera_left</frame_name>
    <hack_baseline>0.07298</hack_baseline>
    <min_depth>0.001</min_depth>
    <max_depth>5.0</max_depth>
  </plugin>
</sensor>
<sensor name='cameraRight' type='camera'>
  <pose relative_to='camera_chassis'>0.0665 -0.03649 0.0395 0 -0 0</pose>
  <always_on>1</always_on>
  <update_rate>4</update_rate>
  <visualize>1</visualize>
  <topic>camera/right/</topic>
  <camera>
    <horizontal_fov>2.70526</horizontal_fov>
    <image>
      <width>500</width>
      <height>300</height>
      <format>L8</format>
    </image>
    <clip>
      <near>0.1</near>
      <far>100</far>
    </clip>
    <lens>
      <intrinsics>
        <fx>307.1754049460059</fx>
        <fy>328.803046449949</fy>
        <cx>265.74658118131305</cx>
        <cy>141.73334383034592</cy>
        <s>0</s>
      </intrinsics>
      <projection>
        <p_fx>312.8884288345382</p_fx>
        <p_fy>312.8884288345382</p_fy>
        <p_cx>312.28302001953125</p_cx>
        <p_cy>135.72957611083984</p_cy>
        <tx>-22.834775948524072</tx>
        <ty>0.0</ty>
      </projection>
    </lens>
  </camera>
  <plugin name='camera_right_controller' filename='libgazebo_ros_camera.so'>
    <camera_name>cameraRight</camera_name>
    <frame_name>camera_right</frame_name>
    <hack_baseline>0.07298</hack_baseline>
    <min_depth>0.001</min_depth>
    <max_depth>5.0</max_depth>
  </plugin>
</sensor>
</link>
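To double-check what the plugins actually publish (encoding, resolution, timestamps), I used a throwaway probe node along these lines. This is only a sketch; the topic name is my assumption from the plugin remapping and may need adjusting:

#include <rclcpp/rclcpp.hpp>
#include <sensor_msgs/msg/image.hpp>

// Throwaway checker: print encoding, size, and stamp of the left image stream.
int main(int argc, char** argv) {
    rclcpp::init(argc, argv);
    auto node = rclcpp::Node::make_shared("image_probe");
    auto sub = node->create_subscription<sensor_msgs::msg::Image>(
        "/camera/left/image_raw", 10,  // topic name is an assumption
        [&node](sensor_msgs::msg::Image::ConstSharedPtr msg) {
            RCLCPP_INFO(node->get_logger(), "%ux%u %s stamp=%d.%09u",
                        msg->width, msg->height, msg->encoding.c_str(),
                        msg->header.stamp.sec, msg->header.stamp.nanosec);
        });
    rclcpp::spin(node);
    rclcpp::shutdown();
    return 0;
}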
However, I'm stuck: the SLAM code doesn't detect any points, even though it runs without errors. The same code has also been tried on stereo cameras with real images, and it worked well. I don't understand why this same code doesn't work on my simulation.
Here is the SLAM code for stereo cameras:
#include "stereo-slam-node.hpp"
#include <opencv2/core/core.hpp>
using std::placeholders::_1;
using std::placeholders::_2;
void StereoSlamNode::init() {
    // Initialize the node and create subscribers and synchronizers for stereo images
    auto node_shared_ptr = shared_from_this();
    RCLCPP_INFO(this->get_logger(), "Subscriber !!\n");
    left_sub = std::make_shared<message_filters::Subscriber<ImageMsg>>(node_shared_ptr, m_topic_cam_left);
    right_sub = std::make_shared<message_filters::Subscriber<ImageMsg>>(node_shared_ptr, m_topic_cam_right);
    RCLCPP_INFO(this->get_logger(), "Synchronizer !!\n");
    syncApproximate = std::make_shared<message_filters::Synchronizer<approximate_sync_policy>>(approximate_sync_policy(10), *left_sub, *right_sub);
    syncApproximate->registerCallback(&StereoSlamNode::GrabStereo, this);
}
geometry_msgs::msg::TransformStamped StereoSlamNode::CvMat_to_TransformedStamped(cv::Mat T_, const std::string &frame_id, const std::string &child_frame_id) {
    // Convert cv::Mat transformation matrix to a ROS TransformStamped message
    std::vector<float> q = ORB_SLAM3::Converter::toQuaternion(T_);
    geometry_msgs::msg::TransformStamped t;
    t.header.stamp = this->get_clock()->now();
    t.header.frame_id = frame_id;
    t.child_frame_id = child_frame_id;
    tf2::Quaternion tf_quaternion(q[0], q[1], q[2], q[3]);
    tf2::Vector3 tf_translation(-T_.at<float>(0, 3), -T_.at<float>(1, 3), -T_.at<float>(2, 3)); // Inverting the translation
    t.transform.rotation.x = tf_quaternion.x();
    t.transform.rotation.y = tf_quaternion.y();
    t.transform.rotation.z = tf_quaternion.z();
    t.transform.rotation.w = tf_quaternion.w();
    t.transform.translation.x = tf_translation.x();
    t.transform.translation.y = tf_translation.y();
    t.transform.translation.z = tf_translation.z();
    return t;
}
cv::Mat StereoSlamNode::SE3ToCvMat(const Sophus::SE3<float>& se3) {
    // Convert Sophus::SE3 to cv::Mat
    const Eigen::Matrix<float, 4, 4> m = se3.matrix();
    cv::Mat cvMat(m.rows(), m.cols(), CV_32F);
    for (int i = 0; i < m.rows(); ++i) {
        for (int j = 0; j < m.cols(); ++j) {
            cvMat.at<float>(i, j) = m(i, j);
        }
    }
    return cvMat;
}
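(As a side note, I believe OpenCV's Eigen bridge could replace the element-wise copy; a sketch of the equivalent, assuming #include <opencv2/core/eigen.hpp> is available:)

cv::Mat StereoSlamNode::SE3ToCvMat(const Sophus::SE3<float>& se3) {
    cv::Mat cvMat;
    cv::eigen2cv(se3.matrix(), cvMat);  // copies the 4x4 float pose into a CV_32F cv::Mat
    return cvMat;
}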
void StereoSlamNode::tf_static_init(tf2::Transform &tf, const std::string &to_frame, const std::string &from_frame) {
    // Look up and store a static transform between two frames (here: base_link -> camera_left)
    geometry_msgs::msg::TransformStamped transform;
    try {
        transform = this->tf_buffer_->lookupTransform(to_frame, from_frame, tf2::TimePoint(), tf2::durationFromSec(10.0));
    } catch (tf2::TransformException& ex) {
        RCLCPP_ERROR(this->get_logger(), "[tf_static_init] - Error looking up transform from %s to %s: %s", from_frame.c_str(), to_frame.c_str(), ex.what());
        throw;
    }
    tf2::fromMsg(transform.transform, tf);
}
void StereoSlamNode::image_with_keypoints_publish() {
    // Publish images with keypoints
    cv::Mat image_left = m_SLAM->mpFrameDrawer->DrawFrame(this->trackedImageScale);
    cv::Mat image_right = m_SLAM->mpFrameDrawer->DrawRightFrame(this->trackedImageScale);
    auto img_msg = cv_bridge::CvImage(std_msgs::msg::Header(), "bgr8", image_left).toImageMsg();
    auto img2_msg = cv_bridge::CvImage(std_msgs::msg::Header(), "bgr8", image_right).toImageMsg();
    KeysPoints_Image_pub_->publish(*img_msg);
    KeysPoints_Image_Right_pub_->publish(*img2_msg);
    RCLCPP_INFO(this->get_logger(), "Images published !!\n");
}
std::vector<uint8_t> StereoSlamNode::get_point_cloud() {
    // Get the point cloud data from the current map in ORB_SLAM3
    ORB_SLAM3::Map* pActiveMap = m_SLAM->mpMapDrawer->mpAtlas->GetCurrentMap();
    RCLCPP_INFO(this->get_logger(), "GetCurrentMap!!\n");
    if (!pActiveMap) {
        throw std::runtime_error("[ERROR] - [Orbslam3_ros2] : Can't get Current Map");
    }
    const std::vector<ORB_SLAM3::MapPoint*>& vpMPs = pActiveMap->GetAllMapPoints();
    RCLCPP_INFO(this->get_logger(), "GetAllMP!!\n");
    const std::vector<ORB_SLAM3::MapPoint*>& vpRefMPs = pActiveMap->GetReferenceMapPoints();
    RCLCPP_INFO(this->get_logger(), "GetRefMP !!\n");
    std::set<ORB_SLAM3::MapPoint*> spRefMPs(vpRefMPs.begin(), vpRefMPs.end());
    std::vector<uint8_t> map_data;
    map_data.reserve(10000);
    if (vpMPs.empty()) {
        throw std::runtime_error("[ERROR] - [Orbslam3_ros2] : Can't get All Maps Points");
    }
    for (size_t i = 0, iend = vpMPs.size(); i < iend; i++) {
        if (vpMPs[i]->isBad() || spRefMPs.count(vpMPs[i])) {
            continue;
        }
        RCLCPP_INFO(this->get_logger(), "Get images !!\n");
        this->pos_cam0 = vpMPs[i]->GetWorldPos();
        this->pos_map = this->tf_base_link_to_camera_left * tf2::Vector3(pos_cam0(0), pos_cam0(1), pos_cam0(2));
        this->x = pos_map.x();
        this->y = pos_map.y();
        this->z = pos_map.z();
        this->array = reinterpret_cast<uint8_t*>(&this->x); // Store pos_map(0) (float32) in an array of 4 uint8_t
        for (this->j = 0; this->j < 4; this->j++) {
            map_data.push_back(array[j]);
        }
        this->array = reinterpret_cast<uint8_t*>(&this->y); // Store pos_map(1) (float32) in an array of 4 uint8_t
        for (this->j = 0; this->j < 4; this->j++) {
            map_data.push_back(array[j]);
        }
        this->array = reinterpret_cast<uint8_t*>(&this->z); // Store pos_map(2) (float32) in an array of 4 uint8_t
        for (this->j = 0; this->j < 4; this->j++) {
            map_data.push_back(array[j]);
        }
    }
    for (std::set<ORB_SLAM3::MapPoint*>::iterator sit = spRefMPs.begin(), send = spRefMPs.end(); sit != send; sit++) {
        if ((*sit)->isBad())
            continue;
        this->pos_cam0 = (*sit)->GetWorldPos();
        this->pos_map = this->tf_base_link_to_camera_left * tf2::Vector3(pos_cam0(0), pos_cam0(1), pos_cam0(2));
        this->x = pos_map.x();
        this->y = pos_map.y();
        this->z = pos_map.z();
        this->array = reinterpret_cast<uint8_t*>(&this->x); // Store pos_map(0) (float32) in an array of 4 uint8_t
        for (this->j = 0; this->j < 4; this->j++) {
            map_data.push_back(array[j]);
        }
        this->array = reinterpret_cast<uint8_t*>(&this->y); // Store pos_map(1) (float32) in an array of 4 uint8_t
        for (this->j = 0; this->j < 4; this->j++) {
            map_data.push_back(array[j]);
        }
        this->array = reinterpret_cast<uint8_t*>(&this->z); // Store pos_map(2) (float32) in an array of 4 uint8_t
        for (this->j = 0; this->j < 4; this->j++) {
            map_data.push_back(array[j]);
        }
    }
    RCLCPP_INFO(this->get_logger(), "Returning map data !!\n");
    return map_data;
}
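(For what it's worth, this is how I read the packed buffer back while debugging, matching the x/y/z float32 layout declared in the constructor below: offsets 0/4/8, point_step 12. A standalone sketch, not part of the node:)

#include <cstdint>
#include <cstring>
#include <vector>

// Decode the i-th point from the packed PointCloud2 byte buffer (sketch).
void read_point(const std::vector<uint8_t>& data, size_t i, float& x, float& y, float& z) {
    const size_t off = i * 12;                  // point_step = 12 bytes per point
    std::memcpy(&x, data.data() + off + 0, 4);  // field "x", offset 0
    std::memcpy(&y, data.data() + off + 4, 4);  // field "y", offset 4
    std::memcpy(&z, data.data() + off + 8, 4);  // field "z", offset 8
}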
StereoSlamNode::StereoSlamNode(ORB_SLAM3::System* pSLAM, const std::string &strSettingsFile, const std::string &strDoRectify, const std::string &strPub_images, const std::string &topic_cam_left, const std::string &topic_cam_right)
    : Node("ORB_SLAM3_ROS2"),
      m_SLAM(pSLAM),
      m_topic_cam_left(topic_cam_left),
      m_topic_cam_right(topic_cam_right)
{
    RCLCPP_INFO(this->get_logger(), "Starting constructor!!\n");
    std::cout << "\ntopic_cam_left = " << m_topic_cam_left << std::endl;
    std::cout << "\ntopic_cam_right = " << m_topic_cam_right << std::endl;
    // Convert string arguments to booleans
    std::stringstream ssRectify(strDoRectify);
    ssRectify >> std::boolalpha >> doRectify;
    std::stringstream ssPubImages(strPub_images);
    ssPubImages >> std::boolalpha >> pub_images;
    std::cout << "\nPub_images = " << pub_images << std::endl;
    // Initialize static components of pointcloud_msg
    this->pointcloud.height = 1;
    this->count1 = 0;
    sensor_msgs::msg::PointField field;
    field.name = "x";
    field.offset = 0;
    field.datatype = sensor_msgs::msg::PointField::FLOAT32;
    field.count = 1;
    this->pointcloud.fields.push_back(field);
    field.name = "y";
    field.offset = 4;
    field.datatype = sensor_msgs::msg::PointField::FLOAT32;
    field.count = 1;
    this->pointcloud.fields.push_back(field);
    field.name = "z";
    field.offset = 8;
    field.datatype = sensor_msgs::msg::PointField::FLOAT32;
    field.count = 1;
    this->pointcloud.fields.push_back(field);
    this->pointcloud.is_bigendian = false;
    this->pointcloud.point_step = 12; // point_step = nb_fields * sizeof(float) = 3 * 4 bytes
    this->pointcloud.is_dense = true;
    if (doRectify) { // Compute undistortion/rectification maps
        cv::FileStorage fsSettings(strSettingsFile, cv::FileStorage::READ);
        if (!fsSettings.isOpened()) {
            std::cerr << "ERROR: Wrong path to settings" << std::endl;
            assert(0);
        }
        cv::Mat K_l, K_r, P_l, P_r, R_l, R_r, D_l, D_r;
        fsSettings["LEFT.K"] >> K_l;
        fsSettings["RIGHT.K"] >> K_r;
        fsSettings["LEFT.P"] >> P_l;
        fsSettings["RIGHT.P"] >> P_r;
        fsSettings["LEFT.R"] >> R_l;
        fsSettings["RIGHT.R"] >> R_r;
        fsSettings["LEFT.D"] >> D_l;
        fsSettings["RIGHT.D"] >> D_r;
        int rows_l = fsSettings["LEFT.height"];
        int cols_l = fsSettings["LEFT.width"];
        int rows_r = fsSettings["RIGHT.height"];
        int cols_r = fsSettings["RIGHT.width"];
        if (K_l.empty() || K_r.empty() || P_l.empty() || P_r.empty() || R_l.empty() || R_r.empty() || D_l.empty() || D_r.empty() ||
            rows_l == 0 || rows_r == 0 || cols_l == 0 || cols_r == 0) {
            std::cerr << "ERROR: Calibration parameters to rectify stereo are missing!" << std::endl;
            assert(0);
        }
        cv::initUndistortRectifyMap(K_l, D_l, R_l, P_l.rowRange(0,3).colRange(0,3), cv::Size(cols_l, rows_l), CV_32F, M1l, M2l);
        cv::initUndistortRectifyMap(K_r, D_r, R_r, P_r.rowRange(0,3).colRange(0,3), cv::Size(cols_r, rows_r), CV_32F, M1r, M2r);
    }
    // Instantiate broadcasters
    tf_broadcaster_ = std::make_shared<tf2_ros::TransformBroadcaster>(this);
    // Instantiate listeners
    tf_buffer_ = std::make_unique<tf2_ros::Buffer>(this->get_clock());
    tf_listener_ = std::make_shared<tf2_ros::TransformListener>(*tf_buffer_);
    // Create publishers
    pose_pub_ = this->create_publisher<geometry_msgs::msg::PoseStamped>("orbslam3/pose", 10);
    pcl_pub_ = this->create_publisher<PointCloud>("orbslam3/pcl", 5);
    if (pub_images) {
        KeysPoints_Image_pub_ = this->create_publisher<ImageMsg>("orbslam3/image_with_keypoints", 5);
        KeysPoints_Image_Right_pub_ = this->create_publisher<ImageMsg>("orbslam3/image_with_keypoints_right", 5);
    }
    // Listen once and store the static transform base_link -> camera_left
    tf_static_init(this->tf_base_link_to_camera_left, "base_link", "camera_left");
}
StereoSlamNode::~StereoSlamNode()
{
    // Stop all threads
    m_SLAM->Shutdown();
    // Save camera trajectory
    m_SLAM->SaveKeyFrameTrajectoryTUM("KeyFrameTrajectory.txt");
}
void StereoSlamNode::GrabStereo(const ImageMsg::SharedPtr msgLeft, const ImageMsg::SharedPtr msgRight)
{
    // Extract images from the topics /camera/left and /camera/right, perform tracking, and calculate tf and robot pose
    try {
        cv_ptrLeft = cv_bridge::toCvShare(msgLeft);
    } catch (cv_bridge::Exception& e) {
        RCLCPP_ERROR(this->get_logger(), "cv_bridge exception: %s", e.what());
        return;
    }
    try {
        cv_ptrRight = cv_bridge::toCvShare(msgRight);
    } catch (cv_bridge::Exception& e) {
        RCLCPP_ERROR(this->get_logger(), "cv_bridge exception: %s", e.what());
        return;
    }
    Sophus::SE3<float> T1_;
    if (doRectify) {
        cv::Mat imLeft, imRight;
        cv::remap(cv_ptrLeft->image, imLeft, M1l, M2l, cv::INTER_LINEAR);
        cv::remap(cv_ptrRight->image, imRight, M1r, M2r, cv::INTER_LINEAR);
        T1_ = m_SLAM->TrackStereo(imLeft, imRight, Utility::StampToSec(msgLeft->header.stamp));
    } else {
        T1_ = m_SLAM->TrackStereo(cv_ptrLeft->image, cv_ptrRight->image, Utility::StampToSec(msgLeft->header.stamp));
    }
    this->T_ = this->SE3ToCvMat(T1_);
    // Once we have the tracking result, compute the associated tf to determine the pose
    RCLCPP_INFO(this->get_logger(), "Calculating TF!!\n");
    if (!this->T_.empty()) {
        this->s = this->T_.size();
        if (this->s.height >= 3 && this->s.width >= 3) {
            if (this->count1 % 4 == 0) {
                // Create a TransformStamped: tf between the camera frame and cam0 (camera at time 0)
                RCLCPP_INFO(this->get_logger(), "TF between cam and cam0\n");
                this->transform_map_to_camera_left = CvMat_to_TransformedStamped(this->T_, "map", "base_link");
                // Convert the tf (cam0 -> camera_left) to tf (map -> camera_left)
                tf2::Vector3 camera_position(this->transform_map_to_camera_left.transform.translation.x,
                                             this->transform_map_to_camera_left.transform.translation.y,
                                             this->transform_map_to_camera_left.transform.translation.z);
                tf2::Vector3 camera_position_map = this->tf_base_link_to_camera_left * camera_position;
                this->transform_map_to_camera_left.transform.translation.x = camera_position_map.getX();
                this->transform_map_to_camera_left.transform.translation.y = camera_position_map.getY();
                this->transform_map_to_camera_left.transform.translation.z = camera_position_map.getZ();
                this->tf_broadcaster_->sendTransform(transform_map_to_camera_left);
                RCLCPP_INFO(this->get_logger(), "Sending TF!!\n");
            }
            // Generate the PointCloud2 msg which will contain all the map points
            try {
                RCLCPP_INFO(this->get_logger(), "TRY\n");
                this->point_cloud_data = this->get_point_cloud();
            } catch (const std::runtime_error& e) {
                RCLCPP_INFO(this->get_logger(), "CATCH!!\n");
                std::cerr << e.what() << std::endl;
                return;
            }
            RCLCPP_INFO(this->get_logger(), "Publishing point cloud!!\n");
            this->pointcloud.header.stamp = this->get_clock()->now();
            this->pointcloud.header.frame_id = "map";
            this->pointcloud.width = this->point_cloud_data.size() / this->pointcloud.point_step;
            this->pointcloud.row_step = this->pointcloud.width * this->pointcloud.point_step; // Row length in bytes = nb_points * point_step
            this->pointcloud.data = this->point_cloud_data;
            pcl_pub_->publish(pointcloud);
            if (pub_images) {
                RCLCPP_INFO(this->get_logger(), "Publishing images with keypoints!!\n");
                this->image_with_keypoints_publish();
            }
        }
    }
    this->count1++;
}
And here are the EuRoC.yaml settings:
%YAML:1.0
#--------------------------------------------------------------------------------------------
# System config
#--------------------------------------------------------------------------------------------
# When these variables are commented out, the system neither loads a previous session nor stores the current one
# If the LoadFile doesn't exist, the system prints a message and creates a new Atlas from scratch
#System.LoadAtlasFromFile: "Session_MH01_MH02_MH03_Stereo60_Pseudo"
# The save file is created from the current session; if a file with the same name exists, it is deleted
System.SaveAtlasToFile: "Session_MH01_MH00"
#--------------------------------------------------------------------------------------------
# Camera Parameters. Adjust them!
#--------------------------------------------------------------------------------------------
Camera.type: "PinHole"
# Camera calibration and distortion parameters (OpenCV)
Camera.fx: 312.8884288345382
Camera.fy: 312.8884288345382
Camera.cx: 312.28302001953125
Camera.cy: 135.72957611083984
Camera.k1: 0.0
Camera.k2: 0.0
Camera.p1: 0.0
Camera.p2: 0.0
Camera.bFishEye: 0
Camera.width: 500
Camera.height: 300
# Camera frames per second
Camera.fps: 4.0
# stereo baseline times fx
#Camera.bf: 204.4
Camera.bf: 204.345596589
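# (my cross-check note: bf = fx * baseline, so fx = 312.8884288345382 with the
#  0.07298 m baseline from my model gives ~22.83, while a bf of 204.345596589
#  corresponds to a baseline of about 0.653 m)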
# Color order of the images (0: BGR, 1: RGB. It is ignored if images are grayscale)
Camera.RGB: 0
# Close/Far threshold. Baseline times.
ThDepth: 35.0 #35.0 #60.0
#--------------------------------------------------------------------------------------------
# Stereo Rectification. Only if you need to pre-rectify the images.
# Camera.fx, .fy, etc must be the same as in LEFT.P
#--------------------------------------------------------------------------------------------
LEFT.height: 300
LEFT.width: 500
LEFT.D: !!opencv-matrix
rows: 1
cols: 5
dt: d
data: [-0.3380559277581608, 0.11626935429289885, 0.00010784764983882595, 0.00013873928731441542, 0.0]
LEFT.K: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [307.99879313442784, 0.0, 280.24657373225784, 0.0, 328.57751082369407, 134.9335316514946, 0.0, 0.0, 1.0]
LEFT.R: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [0.9993110756869167, -0.021194650361427977, -0.03046573165893826, 0.021097824599082184, 0.9997713031405834, -0.00349617130104049, 0.03053286437011724, 0.0028510020408055066, 0.9995296973980909]
LEFT.P: !!opencv-matrix
rows: 3
cols: 4
dt: d
data: [312.8884288345382, 0.0, 312.28302001953125, 0.0, 0.0, 312.8884288345382, 135.72957611083984, 0.0, 0.0, 0.0, 1.0, 0.0]
RIGHT.height: 300
RIGHT.width: 500
RIGHT.D: !!opencv-matrix
rows: 1
cols: 5
dt: d
data: [-0.3030993837488012, 0.07921723141437267, 0.0002907230546131888, 0.0024144389554838883, 0.0]
RIGHT.K: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [307.1754049460059, 0.0, 265.74658118131305, 0.0, 328.803046449949, 141.73334383034592, 0.0, 0.0, 1.0]
RIGHT.R: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [0.9989622475147044, -0.027174718796768325, -0.0365508235021734, 0.027290629131779057, 0.9996239597212905, 0.002675950806643575, 0.03646436070963205, -0.003670668800702387, 0.9993282126451716]
RIGHT.P: !!opencv-matrix
rows: 3
cols: 4
dt: d
data: [312.8884288345382, 0.0, 312.28302001953125, -22.834775948524072, 0.0, 312.8884288345382, 135.72957611083984, 0.0, 0.0, 0.0, 1.0, 0.0]
#--------------------------------------------------------------------------------------------
# ORB Parameters
#--------------------------------------------------------------------------------------------
# ORB Extractor: Number of features per image
ORBextractor.nFeatures: 1200
# ORB Extractor: Scale factor between levels in the scale pyramid
ORBextractor.scaleFactor: 1.2
# ORB Extractor: Number of levels in the scale pyramid
ORBextractor.nLevels: 8
# ORB Extractor: Fast threshold
# Image is divided in a grid. At each cell FAST are extracted imposing a minimum response.
# Firstly we impose iniThFAST. If no corners are detected we impose a lower value minThFAST
# You can lower these values if your images have low contrast
ORBextractor.iniThFAST: 20
ORBextractor.minThFAST: 7
#--------------------------------------------------------------------------------------------
# Viewer Parameters
#--------------------------------------------------------------------------------------------
Viewer.KeyFrameSize: 0.05
Viewer.KeyFrameLineWidth: 1
Viewer.GraphLineWidth: 0.9
Viewer.PointSize: 2
Viewer.CameraSize: 0.08
Viewer.CameraLineWidth: 3
Viewer.ViewpointX: 0
Viewer.ViewpointY: -0.7
Viewer.ViewpointZ: -1.8
Viewer.ViewpointF: 500
Viewer.imageViewScale: 2
I hope I've given you enough information. This is a first experience for me, so please excuse any errors or inconsistencies. Thank you in advance for your help.
Don't hesitate to get back to me if anything is missing.
Have a nice day.