Reputation: 87
I am mapping a lidar point cloud to a 2D image. The mapping itself works, but when I overlay the image on the point cloud there is a shift of 0.5 to 1 meter. This overlap is important for me because I will be working with a polygon created from the lidar data, and the polygon and the image do not overlap properly either. The extent of the 2D image is taken from the lidar data. I cannot figure out why there is this small misalignment even though both are in the same coordinate system and the image has the same extent as the lidar data.
Here is the data I am working on.
https://drive.google.com/drive/folders/1cBNDJmoh8i3Sb4AoU5FLDqPXjy7OYfaW?usp=sharing
Here is the code that I have tried.
import laspy
import rasterio
import numpy as np
from PIL import Image
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
import geopandas as gpd
class SingleTree:
    def __init__(self, las_data):
        self.points = np.vstack((las_data.x, las_data.y, las_data.z, las_data.red, las_data.green,
                                 las_data.blue)).T

    def point_cloud_to_image(self, output_size):
        # Extract x, y, and color values
        x = self.points[:, 0]
        y = self.points[:, 1]
        red = self.points[:, 3]
        green = self.points[:, 4]
        blue = self.points[:, 5]
        # Normalize coordinates to range [0, 1]
        x_normalized = (x - x.min()) / (x.max() - x.min())
        y_normalized = (y - y.min()) / (y.max() - y.min())
        # Scale normalized coordinates to image size, ensuring they are within the bounds
        x_img = np.clip((x_normalized * (output_size[1] - 1)).astype(int), 0, output_size[1] - 1)
        y_img = np.clip((y_normalized * (output_size[0] - 1)).astype(int), 0, output_size[0] - 1)
        # Convert color values to 8-bit
        r = np.interp(red, (red.min(), red.max()), (0, 255)).astype(np.uint8)
        g = np.interp(green, (green.min(), green.max()), (0, 255)).astype(np.uint8)
        b = np.interp(blue, (blue.min(), blue.max()), (0, 255)).astype(np.uint8)
        # Initialize an empty image
        img = np.zeros(output_size + (3,), dtype=np.uint8)
        # Assign colors to image pixels safely
        for xi, yi, ri, gi, bi in zip(x_img, y_img, r, g, b):
            img[yi, xi] = [ri, gi, bi]
        return img

    def polygon_bound(self, poly_path):
        # Load the polygon data
        gdf = gpd.read_file(poly_path)
        # Assuming you're interested in the first polygon if there are multiple
        polygon = gdf.geometry[0]
        # Get the bounding box of the polygon
        minx, miny, maxx, maxy = polygon.bounds
        polygon_width = maxx - minx
        polygon_height = maxy - miny
        # The top-left corner coordinates
        top_left_x = minx
        top_left_y = miny
        return top_left_x, top_left_y, polygon_width, polygon_height
import os
from rasterio.transform import from_origin
# Assuming 'path' is the directory containing your '.las' files
path = r"lasdata path"
output_path = r"path"
number_of_files = len([name for name in os.listdir("directory of lasdata")])
discarded_lasdata = []
# Loop through your LAS files
for i in range(1, number_of_files):
    file = f"tree_i{i}.las"
    poly_file = "polygons\\poly{}.shp".format(i)
    image_path = os.path.join(path, file)
    # Read the LAS file
    las_data = laspy.read(image_path)
    # Process the point cloud to generate an image
    tree = SingleTree(las_data)
    top_left_x, top_left_y, polygon_width, polygon_height = tree.polygon_bound(poly_file)
    # Get metadata from LAS file for georeferencing
    scale = las_data.header.scale
    offset = las_data.header.offset
    # Assuming las_data is a LasData object from laspy
    min_x, min_y, min_z = las_data.header.min
    max_x, max_y, max_z = las_data.header.max
    lidar_width = round((max_x - min_x), 0)
    lidar_height = round((max_y - min_y), 0)
    if (lidar_width <= 0 or lidar_height <= 0):
        discarded_lasdata.append(i)
        continue
    ltop_left_x = min_x
    ltop_left_y = min_y
    resolution = 0.5
    image_width_pixels = int((lidar_width * 2 + 5) / resolution)
    image_height_pixels = int((lidar_height * 2 + 5) / resolution)
    img = tree.point_cloud_to_image((int(lidar_height*2), int(lidar_width*2)))
    # Create the geotransform
    transform = from_origin(ltop_left_x, ltop_left_y, resolution, -resolution)
    # Define the CRS (coordinate reference system)
    crs = "EPSG:25832"  # Replace with the correct EPSG code for your data
    # Define the rasterio metadata dictionary
    meta = {
        'driver': 'GTiff',
        'dtype': 'uint8',
        'nodata': None,
        'width': img.shape[1],
        'height': img.shape[0],
        'count': 3,
        'crs': crs,
        'transform': transform,
        'compress': 'lzw'
    }
    output_filename = os.path.join(output_path, f"lidar_test{i}.tif")
    # Write the image data and metadata to a GeoTIFF
    with rasterio.open(output_filename, 'w', **meta) as dst:
        for k in range(img.shape[2]):
            dst.write(img[:, :, k], k+1)
print("GeoTIFFs have been saved.")
You can see that the polygon does not properly overlap with the image, even though all of the lidar points fall inside the polygon.
Edit: On a second look I found that the extents of the lidar data and the image are not the same; they are off by 0 to 1 meter. I think this is because the lidar width and height are floating point values while I pass integers as the image width and height, but I still cannot figure out the solution.
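To illustrate what I mean (with made-up bounds, not my actual data), the rounding of the width and height alone can already move the raster edge by a fraction of a meter:

resolution = 0.5
min_x, max_x = 512345.30, 512361.72    # made-up lidar bounds
min_y, max_y = 5431020.10, 5431043.85

lidar_width = round(max_x - min_x, 0)    # 16.0, but the true width is ~16.42 m
lidar_height = round(max_y - min_y, 0)   # 24.0, but the true height is ~23.75 m

cols = int(lidar_width * 2)    # number of pixels actually written
rows = int(lidar_height * 2)

# extent implied by the raster grid vs. the real lidar extent
print(min_x + cols * resolution - max_x)   # about -0.42 m
print(min_y + rows * resolution - max_y)   # about +0.25 m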
Upvotes: 4
Views: 732
Reputation:
Use the bounding box information from the lidar data instead of the per-point minimum and maximum values, so that the coordinates are normalized relative to the georeferenced lidar extent:
x_normalized = (x - ltop_left_x) / (resolution * output_size[1])
y_normalized = 1 - (y - ltop_left_y) / (resolution * output_size[0])
The calculation of the image width and height has to be adjusted to use the bounding box dimensions of the polygon instead of the lidar data:
image_width_pixels = int((polygon_width + 5) / resolution)
image_height_pixels = int((polygon_height + 5) / resolution)
Create the geotransform using the top_left_x and top_left_y values obtained from the polygon bounding box, so the GeoTIFF is georeferenced correctly:
transform = from_origin(top_left_x, top_left_y, resolution, -resolution)
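Putting the three changes together, here is a rough, untested sketch of how point_cloud_to_image could take the grid origin and resolution as arguments so that the pixel indices and the geotransform are guaranteed to agree. The names origin_x/origin_y are just placeholders for whichever top-left corner you choose (lidar minimum or polygon corner); it must be the same one you pass to from_origin. Everything else reuses the imports and variables from the question.

def point_cloud_to_image(self, output_size, origin_x, origin_y, resolution):
    x, y = self.points[:, 0], self.points[:, 1]
    red, green, blue = self.points[:, 3], self.points[:, 4], self.points[:, 5]

    # Normalize against the georeferenced grid, not the point extremes
    x_normalized = (x - origin_x) / (resolution * output_size[1])
    y_normalized = 1 - (y - origin_y) / (resolution * output_size[0])

    x_img = np.clip((x_normalized * output_size[1]).astype(int), 0, output_size[1] - 1)
    y_img = np.clip((y_normalized * output_size[0]).astype(int), 0, output_size[0] - 1)

    # 8-bit colors, as in the original code
    r = np.interp(red, (red.min(), red.max()), (0, 255)).astype(np.uint8)
    g = np.interp(green, (green.min(), green.max()), (0, 255)).astype(np.uint8)
    b = np.interp(blue, (blue.min(), blue.max()), (0, 255)).astype(np.uint8)

    img = np.zeros(output_size + (3,), dtype=np.uint8)
    img[y_img, x_img] = np.column_stack((r, g, b))
    return img

Then compute output_size from the polygon bounds as shown above and build the transform from the same origin, e.g. from_origin(origin_x, origin_y, resolution, -resolution), mirroring the call in the question.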
Upvotes: 0
Reputation: 678
I suspect that your problem could come from the lines
x_img = np.clip((x_normalized * (output_size[1] - 1)).astype(int), 0, output_size[1] - 1)
y_img = np.clip((y_normalized * (output_size[0] - 1)).astype(int), 0, output_size[0] - 1)
as astype just removes the decimals instead of rounding to the nearest integer. This is consistent with the fact that your image is shifted to the left.
You can try to round before typecasting, e.g. with
x_img = np.clip(np.round((x_normalized * (output_size[1]))), 0, output_size[1]-1).astype(int)
y_img = np.clip(np.round((y_normalized * (output_size[0]))), 0, output_size[0]-1).astype(int)
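For illustration (made-up values), this is the difference between truncating and rounding; at 0.5 m per pixel, systematically truncating the indices downward can already account for a shift of up to half a meter:

import numpy as np

idx = np.array([3.1, 3.5, 3.9])
print(idx.astype(int))             # [3 3 3] -> always floors positive values
print(np.round(idx).astype(int))   # [3 4 4] -> nearest integer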
Upvotes: 2